author:    dartraiden <wowemuh@gmail.com>  2024-03-27 16:04:05 +0300
committer: dartraiden <wowemuh@gmail.com>  2024-03-27 16:04:05 +0300
commit:    26b7f4e979561aa1f17d6719fc1dad91e1a5d1b4
tree:      98aafdc421d93e855ddc886fe56e9e084f992ac8 /libs/libcurl
parent:    9be45ab3f9c03107dfb717798e41dda72576122d
libcurl: update to 8.7.1
Diffstat (limited to 'libs/libcurl')
125 files changed, 9989 insertions, 6870 deletions
diff --git a/libs/libcurl/docs/CHANGES b/libs/libcurl/docs/CHANGES
index 8e70215477..80d1cc84b5 100644
--- a/libs/libcurl/docs/CHANGES
+++ b/libs/libcurl/docs/CHANGES
@@ -6,6 +6,2543 @@ Changelog
+Version 8.7.1 (27 Mar 2024)
+
+Daniel Stenberg (27 Mar 2024)
+
+- RELEASE-PROCEDURE: remove old release dates, add new pending ones
+
+Version 8.7.0 (27 Mar 2024)
+
+Daniel Stenberg (27 Mar 2024)
+
+- RELEASE-NOTES: synced
+
+ curl 8.7.0 release
+
+- THANKS: new contributors from the 8.7.0 release
+
+- CURLOPT_POSTFIELDS.md: used for MQTT as well
+
+ Closes #13189
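+
+ As a usage illustration, a minimal sketch of an MQTT publish driven by
+ CURLOPT_POSTFIELDS (host and topic below are placeholders):
+
+ ```
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     /* the topic comes from the URL path; the POSTFIELDS data is the
+        payload that gets published */
+     curl_easy_setopt(curl, CURLOPT_URL, "mqtt://example.com/home/temperature");
+     curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "21.5");
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```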
+
+- http: remove stale comment about rewindbeforesend
+
+ ... because that struct field exists no more.
+
+ Follow-up to 14bcea074a782272.
+
+ Closes #13187
+
+- DISTROS: add document with distro pointers
+
+ Lots of organizations distribute curl packages to end users. This is a
+ collection of pointers to where to learn more about curl on and with
+ each distro.
+
+ Assisted-by: Alan Coopersmith
+ Assisted-by: Andrew Kaster
+ Assisted-by: Andy Fiddaman
+ Assisted-by: Arjan van de Ven
+ Assisted-by: Brian Clemens
+ Assisted-by: chrysos349 on github
+ Assisted-by: Dan Fandrich
+ Assisted-by: Dan McDonald
+ Assisted-by: Gaelan Steele
+ Assisted-by: graywolf on github
+ Assisted-by: Jan Macku
+ Assisted-by: John Marshall
+ Assisted-by: Jonathan Perkin
+ Assisted-by: Kevin Daudt
+ Assisted-by: Marcus Müller
+ Assisted-by: Michał Górny
+ Assisted-by: Outvi V
+ Assisted-by: Ross Burton
+ Assisted-by: Sean Molenaar
+ Assisted-by: Till Wegmüller
+ Assisted-by: Viktor Szakats
+ Assisted-by: Winni Neessen
+
+ Closes #13178
+
+Fabian Keil (25 Mar 2024)
+
+- wolfSSL: do not call the stub function wolfSSL_BIO_set_init()
+
+ Calling the function isn't necessary and causes the build
+ to fail when wolfSSL has been compiled with NO_WOLFSSL_STUB:
+
+ Making all in opts
+ CCLD curl
+ ld: error: undefined symbol: wolfSSL_BIO_set_init
+ >>> referenced by wolfssl.c:235 (vtls/wolfssl.c:235)
+ >>> libcurl_la-wolfssl.o:(wolfssl_bio_cf_create) in archive ../lib/.libs/libcurl.a
+ cc: error: linker command failed with exit code 1 (use -v to see invocation)
+ *** Error code 1
+
+ Closes #13164
+
+Daniel Stenberg (25 Mar 2024)
+
+- cmdline-opts: shorter help texts
+
+ In an effort to increase the readability of the "--help all" output on
+ narrow (80 column) terminals.
+
+ Co-authored-by: Jay Satiro
+
+ Closes #13169
+
+Matt Jolly (25 Mar 2024)
+
+- curl-rustls.m4: add pkg-config support to rustls detection
+
+ Based on the existing openssl pkg-config detection, this commit tries to
+ use pkg-config to find `rustls` then falls back to the current approach
+ if that fails.
+
+ We use the following logic:
+
+ - if no path is provided, just use pkg-config, if it's not there we have
+ a problem!
+ - if a path is provided, try pkg-config
+ + if pkg-config fails, try and find rustls directly
+
+ Closes #13179
+
+Mohammadreza Hendiani (25 Mar 2024)
+
+- TODO: update 13.11 with more information
+
+ Closes #13173
+
+Daniel Stenberg (23 Mar 2024)
+
+- docs/libcurl: generate PROTOCOLS from meta-data
+
+ Remove the PROTOCOLS section from the source files completely and
+ instead generate them based on the header data in the curldown files.
+
+ It also generates TLS backend information for options marked for TLS as
+ protocol.
+
+ Closes #13175
+
+- CURLMOPT_MAX*: mention what happens if changed mid-transfer
+
+ For CURLMOPT_MAXCONNECTS and CURLMOPT_MAX_HOST_CONNECTIONS
+
+ Ref: #13158
+ Closes #13176
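+
+ For reference, a minimal sketch of setting both options (the values are
+ arbitrary examples):
+
+ ```
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURLM *multi = curl_multi_init();
+   /* both options take a long and may also be changed on a live multi
+      handle, which is what the documentation update covers */
+   curl_multi_setopt(multi, CURLMOPT_MAXCONNECTS, 10L);
+   curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 4L);
+   curl_multi_cleanup(multi);
+   return 0;
+ }
+ ```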
+
+- docs/libcurl: add TLS backend info for all TLS options
+
+ All man pages that are listed as being for TLS must now also specify
+ exactly which TLS backends the option works with, or use All if they all
+ work.
+
+ cd2nroff makes sure this is done and that the listed backends exist.
+
+ Closes #13168
+
+- docs/libcurl: cleanups
+
+ - CURLINFO_TLS_SESSION.md: remove mention of NSS
+ - CURLINFO_TLS_SSL_PTR.md: remove NSS leftover
+ - CURLOPT_CAINFO.md: drop mention of backends not supporting this
+ - CURLOPT_CAPATH.md: wolfSSL also supports this
+
+ Closes #13166
+
+- docs: make each libcurl man specify protocol(s)
+
+ The mandatory header now has a mandatory list of protocols for which the
+ manpage is relevant.
+
+ Most man pages already have a "PROTOCOLS" section, but this introduces a
+ stricter way to specify the relevant protocols.
+
+ cd2nroff verifies that at least one protocol is mentioned (which can be
+ `*`).
+
+ This information is not used just yet, but A) the PROTOCOLS section can
+ now instead get generated and get a unified wording across all manpages
+ and B) this allows us to more reliably filter/search for protocol
+ specific manpages/options.
+
+ Closes #13166
+
+Stefan Eissing (21 Mar 2024)
+
+- http2, http3: only return CURLE_PARTIAL_FILE when bytes were received
+
+ - should resolve spurious pytest failures when streams were reset
+ right after response headers were received
+
+ Closes #13151
+
+- http: separate response parsing from response action
+
+ - move code that triggers on end-of-response into separate function from
+ parsing
+ - simplify some headp/headerlen usage
+ - add `httpversion` to SingleRequest to indicate the version of the
+ current response
+
+ Closes #13134
+
+Daniel Stenberg (21 Mar 2024)
+
+- http2: remove the third (unused) argument from http2_data_done()
+
+ Closes #13154
+
+- RELEASE-NOTES: synced
+
+Evgeny Grin (Karlson2k) (21 Mar 2024)
+
+- RELEASE-NOTES: corrected
+
+ Corrected link for item 118
+
+ Closes #13157
+
+Daniel Stenberg (19 Mar 2024)
+
+- CURLOPT_INTERFACE.md: remove spurious amp, add see-also
+
+ Closes #13149
+
+Stefan Eissing (19 Mar 2024)
+
+- http: improve response header handling, save cpu cycles
+
+ Saving some cpu cycles in http response header processing:
+ - pass the length of the header line along
+ - use string constant sizeof() instead of strlen()
+ - check line length if prefix is possible
+ - switch on first header char to limit checks
+
+ Closes #13143
+
+Daniel Stenberg (19 Mar 2024)
+
+- tool_getparam: accept a blank -w ""
+
+ Added test 468 to verify.
+
+ Regression from 07bcae89d5d00 (shipped in 8.6.0)
+ Reported-by: Thomas Pyle
+ Fixes #13144
+ Closes #13145
+
+Evgeny Grin (Karlson2k) (18 Mar 2024)
+
+- curl_sha512_256: work around a NetBSD bug
+
+ Based on Michael Kaufmann analysis and suggestion
+
+ Closes #13133
+
+Stefan Eissing (18 Mar 2024)
+
+- http: expect 100 rework
+
+ Move all handling of HTTP's `Expect: 100-continue` feature into a client
+ reader. Add sending flag `KEEP_SEND_TIMED` that triggers transfer
+ sending on general events like a timer.
+
+ HTTP installs a `CURL_CR_PROTOCOL` reader when announcing `Expect:
+ 100-continue`. That reader works as follows:
+
+ - on first invocation, records time, starts the `EXPIRE_100_TIMEOUT`
+ timer, disables `KEEP_SEND`, enables `KEEP_SEND_TIMER` and returns 0,
+ eos=FALSE like a paused upload.
+
+ - on subsequent invocation it checks if the timer has expired. If so, it
+ enables `KEEP_SEND` and switches to passing through reads to the
+ underlying readers.
+
+ Transfer handling's `readwrite()` will be invoked when a timer expires
+ (like `EXPIRE_100_TIMEOUT`) or when data from the server arrives. Seeing
+ `KEEP_SEND_TIMER`, it will try to upload more data, which triggers
+ reading from the client readers again. Which then may lead to a new
+ pausing or cause the upload to start.
+
+ Flags and timestamps connected to this have been moved from
+ `SingleRequest` into the reader's context.
+
+ Closes #13110
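+
+ A rough sketch of the timer logic described above, using hypothetical
+ names that are not curl's internal reader API:
+
+ ```
+ /* hypothetical illustration only, not curl's actual code */
+ struct expect100_ctx {
+   int started;        /* first invocation seen, timer armed */
+ };
+
+ static int expect100_ready_to_send(struct expect100_ctx *ctx,
+                                    int timer_expired, int got_100)
+ {
+   if(!ctx->started) {
+     ctx->started = 1; /* start EXPIRE_100_TIMEOUT, pause like an upload */
+     return 0;
+   }
+   /* later invocations: start sending once the timer expired or the
+      server answered with 100-continue */
+   return timer_expired || got_100;
+ }
+ ```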
+
+- mbedtls: fix pytest for newer versions
+
+ Fix the expectations in pytest for newer versions of mbedtls
+
+ Closes #13132
+
+Daniel Stenberg (15 Mar 2024)
+
+- ipv6.md: mention IPv4 mapped addresses
+
+ Reported-by: Josh Soref
+ Assisted-by: Jay Satiro
+ Fixes #13112
+ Closes #13131
+
+Stefan Eissing (15 Mar 2024)
+
+- http: revisit http_perhapsrewind()
+
+ - use facilities provided by client readers better
+ - work also for non-uploading requests like GET/HEAD
+ - update documentation
+
+ Closes #13117
+
+- test 1541: verify getinfo values on first header callback
+
+ Reported-by: chensong1211 on github
+ Ref: #13125
+ Closes #13128
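+
+ An illustrative (not the test's own) way to query info from the first
+ header callback, with the easy handle passed via CURLOPT_HEADERDATA:
+
+ ```
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ static size_t header_cb(char *buf, size_t size, size_t nitems, void *userp)
+ {
+   CURL *curl = (CURL *)userp;
+   long code = 0;
+   (void)buf;
+   /* info such as the response code should be usable already here */
+   curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
+   fprintf(stderr, "status so far: %ld\n", code);
+   return size * nitems;
+ }
+ ```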
+
+- TLS: start shutdown only when peer did not already close
+
+ - When curl sees a TCP close from the peer, do not start a TLS shutdown.
+ TLS shutdown is a handshake and if the peer already closed the
+ connection, it is not interested in participating.
+
+ Reported-by: dfdity on github
+ Assisted-by: Jiří Bok
+ Assisted-by: Pēteris Caune
+ Fixes #10290
+ Closes #13087
+
+Daniel Stenberg (14 Mar 2024)
+
+- RELEASE-NOTES: synced
+
+- curl: make --libcurl output better CURLOPT_*SSLVERSION
+
+ The option is really two enums ORed together, so it needs special
+ attention to make the code output nice.
+
+ Added test 1481 to verify. Both the server and the proxy versions.
+
+ Reported-by: Boris Verkhovskiy
+ Fixes #13127
+ Closes #13129
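+
+ A minimal sketch of why --libcurl needs to decode the value: the option
+ is a minimum version ORed with an optional CURL_SSLVERSION_MAX_* bound:
+
+ ```
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     /* minimum TLS 1.2 and maximum TLS 1.3, two enums ORed into one long */
+     curl_easy_setopt(curl, CURLOPT_SSLVERSION,
+                      (long)(CURL_SSLVERSION_TLSv1_2 |
+                             CURL_SSLVERSION_MAX_TLSv1_3));
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```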
+
+- GHA/linux: add sysctl trick to work-around GitHub runner issue
+
+ The GitHub image runner update from 20240304.1.0 to 20240310.1
+ introduces a problem for clang-14. The issue is caused by
+ incompatibility between llvm 14 provided in ubuntu-22.04 image and the
+ much newer kernel configured with high-entropy ASLR.
+
+ As a work-around, we issue a sysctl command to lower the entropy and get
+ clang-14 to work again.
+
+ URL: https://github.com/actions/runner-images/issues/9491
+
+ Closes #13124
+
+- SPONSORS: describe the basics
+
+ Closes #13119
+
+- GOVERNANCE: document the core team
+
+ Closes #13118
+
+Jay Satiro (13 Mar 2024)
+
+- vquic-tls: fix the error code returned for bad CA file
+
+ - Return CURLE_SSL_CACERT_BADFILE if wolfSSL encounters a problem
+ reading the cert file or path.
+
+ This is a follow-up to the parent commit aedbbdf1.
+
+ Reported-by: Karthikdasari0423@users.noreply.github.com
+
+ Fixes https://github.com/curl/curl/issues/13115
+
+Daniel Stenberg (12 Mar 2024)
+
+ - vquic-tls: return appropriate errors on wolfSSL errors
+
+ Reported-by: Dexter Gerig
+ Closes #13107
+
+Viktor Szakats (12 Mar 2024)
+
+- tidy-up: one comment and EOF newlines
+
+ Reviewed-by: Daniel Stenberg
+ Closes #13108
+
+Daniel Stenberg (12 Mar 2024)
+
+- cmdline-opts: language cleanups
+
+ Use imperative mood consistently for the first sentence describing an
+ option.
+
+ "Set this" instead "tell curl to set" or "this sets..."
+
+ Plus some extra cleanups and rephrasing.
+
+ Closes #13106
+
+- managen: remove space before protocols
+
+ For options that are listed for specific protocols, the protocols (shown
+ first within parentheses) are now output without the leading space in the
+ manpage output.
+
+ Closes #13105
+
+Jay Satiro (12 Mar 2024)
+
+- mbedtls: properly cleanup the thread-shared entropy
+
+ - Store the state of the thread-shared entropy for global init/cleanup.
+
+ - Use curl's thread support of mbedtls for all Windows builds instead of
+ just when the threaded resolver is used via USE_THREADS_WIN32.
+
+ Prior to this change on global cleanup curl builds that have curl thread
+ support for mbedtls freed the entropy (8b1d2298) but failed to mark that
+ it had been freed, which caused problems on subsequent init + transfer.
+
+ Bug: https://github.com/curl/curl/discussions/11919#discussioncomment-8687105
+ Reported-by: awesomekosm@users.noreply.github.com
+
+ Closes https://github.com/curl/curl/pull/13071
+
+Daniel Stenberg (12 Mar 2024)
+
+- tool_getparam: handle non-existing (out of range) short-options
+
+ ... correctly, even when they follow an existing one without a space in
+ between.
+
+ Verify with test 467
+
+ Follow-up to 07dd60c05b
+ Reported-by: Geeknik Labs
+ Fixes #13101
+ Closes #13102
+
+Stefan Eissing (11 Mar 2024)
+
+- lib: move 'done' parameter to SingleRequests
+
+ A transfer may do several `SingleRequest`s for its success. This happens
+ regularly for authentication, follows and retries on failed connections.
+ The "readwrite()" calls and functions connected to those carried a `bool
+ *done` parameter to indicate that the current `SingleRequest` is over.
+ This may happen before `upload_done` or `download_done` bits of
+ `SingleRequest` are set.
+
+ The problem with that is now `write_resp()` protocol handlers are
+ invoked in places where the `bool *done` cannot be passed up to the
+ caller. Instead of being a bool in the call chain, it needs to become a
+ member of `SingleRequest`, reflecting its state.
+
+ This removes the `bool *done` parameter and adds the `done` bit to
+ `SingleRequest` instead. It adds `Curl_req_soft_reset()` for using a
+ `SingleRequest` in a follow up, clearing `done` and other
+ flags/counters.
+
+ Closes #13096
+
+- request: clarify message when request has been sent off
+
+ Change the "uploaded and fine" message for requests without a body
+
+ Reported-by: Karthikdasari0423 on github
+ Fixes #13093
+ Closes #13095
+
+Daniel Stenberg (11 Mar 2024)
+
+- RELEASE-NOTES: synced
+
+Stefan Eissing (9 Mar 2024)
+
+- lib: keep conn IP information together
+
+ new struct ip_quadruple for holding local/remote addr+port
+
+ - used in data->info and conn and cf-socket.c
+ - copy back and forth complete struct
+ - add 'secondary' to conn
+ - use secondary in reporting success for ftp 2nd connection
+
+ Reported-by: DasKutti on github
+ Fixes #13084
+ Closes #13090
+
+Daniel Stenberg (8 Mar 2024)
+
+- scripts/managen: the new name and home for the manpage generator
+
+ It was previously docs/cmdline-opts/gen.pl
+
+ Closes #13089
+
+- VULN-DISCLOSURE-POLICY.md: update detail about CVE requests
+
+ curl is a CNA now
+
+ Closes #13088
+
+Stefan Eissing (8 Mar 2024)
+
+- lib: client reader polish
+
+ - seek_func/seek_client, use transfer values only
+ - remove copies held in `struct connectdata`, use only
+ ever `data->set.seek_func`
+ - resolves possible issues in multiuse connections
+ - new mime post reader eliminates need to ever overwriting this
+
+ - websockets, remove empty Curl_ws_done() function
+
+ Closes #13079
+
+Marcel Raad (8 Mar 2024)
+
+- lib1598: fix `CURLOPT_POSTFIELDSIZE` usage
+
+ It requires a `long` argument.
+
+ Closes https://github.com/curl/curl/pull/13085
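+
+ A minimal sketch of the corrected usage; a size_t needs an explicit cast
+ to long (or use CURLOPT_POSTFIELDSIZE_LARGE with a curl_off_t):
+
+ ```
+ #include <string.h>
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     const char *data = "name=daniel";
+     curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data);
+     /* CURLOPT_POSTFIELDSIZE expects a long argument */
+     curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, (long)strlen(data));
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```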
+
+Daniel Stenberg (8 Mar 2024)
+
+- docs/cmdline-opts: drop the curl.1 from the dist tarball
+
+ Since it is no longer needed for building tool_hugehelp.c and all the
+ docs is available in readable markdown format in the tarball, the peeps
+ that don't want to build the manpage still do good.
+
+ Removing it also fixes the complexity of out-of-tree builds when the
+ curl.1 exists in the source tree.
+
+- test1140/1173: extend wildcards to find curl.1
+
+ ... in its new build path.
+
+ Also update the test scripts to be more precise in error messages to
+ help us understand CI errors better.
+
+ Follow-up to f03c85635f35269f1
+ Ref: #13029
+ Closes #13083
+
+- http2: minor tweaks to optimize two struct sizes
+
+ - use BIT() instead of bool
+ - place the struct fields in (roughly) size order
+
+ Closes #13082
+
+- buildconf.bat: remove outdated groff/nroff use
+
+ - don't try to generate the real hugehelp file, because it requires
+ curl.txt which needs a build
+ - don't attempt to do anything in a c-ares subdirectory
+
+ Follow-up to f03c85635f35269
+ Closes #13078
+
+- http2: memory errors in the push callbacks are fatal
+
+ Use the correct nghttp2 error code accordingly.
+
+ Closes #13081
+
+Viktor Szakats (7 Mar 2024)
+
+- mkhelp: rename variable to fix compiler warnings
+
+ ```
+ src\tool_operate.c(541,33): warning C4459: declaration of 'm' hides global declaration [_bld\src\curl.vcxproj]
+ _bld\src\tool_hugehelp.c(8,27):
+ see declaration of 'm'
+ src\tool_paramhlp.c(307,14): warning C4459: declaration of 'm' hides global declaration [_bld\src\curl.vcxproj]
+ src\tool_progress.c(118,16): warning C4459: declaration of 'm' hides global declaration [_bld\src\curl.vcxproj]
+ src\tool_writeout.c(288,31): warning C4459: declaration of 'm' hides global declaration [_bld\src\curl.vcxproj]
+ ```
+ Ref: https://ci.appveyor.com/project/curlorg/curl/builds/49348159/job/51ee75cd2n0wj6lc#L614
+
+ Reviewed-by: Daniel Stenberg
+ Closes #13077
+
+Daniel Stenberg (7 Mar 2024)
+
+- KNOWN_BUGS: POP3 issue when reading small chunks
+
+ Closes #12063
+
+- RELEASE-NOTES: synced
+
+Robert Moreton (7 Mar 2024)
+
+- asyn-ares: fix data race warning
+
+ - Store the c-ares version during global init.
+
+ Prior to this change several threads could write the same data to a
+ static int variable at the same time. Though in practice it's not a
+ problem ThreadSanitizer may warn.
+
+ Reported-by: Nikita Taranov
+ Assisted-by: Jay Satiro
+
+ Fixes #13065
+ Closes #13000
+
+Stefan Eissing (7 Mar 2024)
+
+- hyper: implement unpausing via client reader
+
+ Just a tidy up to contain 'ifdef' pollution of common
+ code parts with implementation specifics.
+
+ - remove the ifdef hyper unpausing in easy.c
+ - add hyper client reader for CURL_CR_PROTOCOL phase
+ that implements the unpause method for calling
+ the hyper waker if it is set
+
+ Closes #13075
+
+- ngtcp2: no recvbuf for stream
+
+ - write response data directly to the transfer via
+ `Curl_xfer_write_resp()` like we do in HTTP/2.
+
+ Closes #13073
+
+- docs/cmdline-opts/.gitignore: ignore curl.txt
+
+ Closes #13076
+
+Evgeny Grin (Karlson2k) (7 Mar 2024)
+
+- sha512_256: add support for GnuTLS and OpenSSL
+
+ This is a follow-up for PR #12897.
+
+ Add support for SHA-512/256 digest calculation by TLS backends.
+ Currently only OpenSSL and GnuTLS (actually, nettle) support
+ SHA-512/256.
+
+ Closes #13070
+
+- digest: add check for hashing error
+
+ Closes #13072
+
+Viktor Szakats (7 Mar 2024)
+
+- cmake: enable `ENABLE_CURL_MANUAL` by default
+
+ Meaning `curl.1` and `src/tool_hugehelp.c` are built by default,
+ and `--manual` in curl tool is also enabled by default.
+
+ This syncs behaviour with autotools.
+
+ For a reproducible `curl.1`, `SOURCE_DATE_EPOCH` needs to be set
+ to a consistent date, e.g. the timestamp of `CHANGES`.
+
+ A pre-built manual (e.g. the one distributed in the official source
+ tarball) will be ignored and rebuilt after this patch, unless
+ explicitly disabling this option.
+
+ Fixes #13028
+ Closes #13069
+
+Stefan Eissing (7 Mar 2024)
+
+- http2: push headers better cleanup
+
+ - provide common cleanup method for push headers
+
+ Closes #13054
+
+Daniel Stenberg (7 Mar 2024)
+
+- GIT-INFO: convert to markdown
+
+ Closes #13074
+
+Richard Levitte (7 Mar 2024)
+
+- cmake: fix libcurl.pc and curl-config library specifications
+
+ Letting CMake figure out where libraries are located gives you full
+ paths. When generating libcurl.pc and curl-config, getting libraries as
+ full paths is unusual when one expects to get a list of -l<libname>.
+
+ To meet expectations, an effort is made to convert the full paths into
+ -l<libname>, possibly with -L<libdir> before it.
+
+ Fixes #6169
+ Fixes #12748
+ Closes #12930
+
+Daniel Stenberg (7 Mar 2024)
+
+- test463: HTTP with -d @file with file containing CR, LF and null byte
+
+- paramhlp: fix CRLF-stripping files with "-d @file"
+
+ All CR and LF bytes should be stripped, as documented, and all other
+ bytes are included in the data. Starting now, it also excludes null bytes
+ as they would otherwise also cut the data short.
+
+ Reported-by: Simon K
+ Fixes #13063
+ Closes #13064
+
+Viktor Szakats (7 Mar 2024)
+
+- cmake: fix `CURL_WINDOWS_SSPI=ON` with Schannel disabled
+
+ Prior to this change `CURL_WINDOWS_SSPI` was accidentally forced `OFF`
+ when building without the Schannel TLS backend.
+
+ This in turn may have caused Kerberos, SPNEGO and SSPI features
+ disappearing even with `CURL_WINDOWS_SSPI=ON` set.
+
+ This patch fixes it by using the `CURL_USE_SCHANNEL` setting as a
+ default for `CURL_WINDOWS_SSPI`, but allowing a manual override.
+
+ Also update the option text to better tell its purpose.
+
+ Thanks-to: Andreas Loew
+ Reviewed-by: Daniel Stenberg
+ Ref: #13056
+ Closes #13061
+
+Jay Satiro (6 Mar 2024)
+
+- KNOWN_BUGS: FTPS server compatibility on Windows with Schannel
+
+ - Remove "2.12 FTPS with Schannel times out file list operation"
+
+ - Remove "7.12 FTPS directory listing hangs on Windows with Schannel"
+
+ - Add "7.12 FTPS server compatibility on Windows with Schannel"
+
+ This change adds a more generic bug description that explains FTPS with
+ the latest curl and Schannel is not widely used and may have more bugs
+ than other TLS backends.
+
+ The two removed FTPS Schannel bugs can't be reproduced any longer and
+ were likely fixed by 24d6c288.
+
+ Ref: https://github.com/curl/curl/issues/5284
+ Ref: https://github.com/curl/curl/issues/9161
+ Ref: https://github.com/curl/curl/issues/12894
+
+ Closes https://github.com/curl/curl/pull/13032
+
+- trace-config.md: remove the mutexed options list
+
+ - Remove the rendered manpage message that says:
+ "[--trace-config] is mutually exclusive to --trace and -v, --verbose".
+
+ Actually it can be used with either of those options, which are mutually
+ exclusive to each other but not to --trace-config.
+
+ Ref: https://curl.se/docs/manpage.html#--trace-config
+
+ Closes https://github.com/curl/curl/pull/13031
+
+Daniel Stenberg (6 Mar 2024)
+
+- mkhelp: simplify the generated hugehelp program
+
+ Use a plain array and puts() every line, also allows us to provide the
+ strings without ending newlines.
+
+ - merge blank lines into the next one as a prefixed newline.
+ - turn eight consecutive spaces into a tab (since they can only be on the
+ left side of text)
+ - the newly generated tool_hugehelp is 3K lines shorter and 50K smaller
+ - modifies the top logo layout a little by reducing the indent
+
+ Closes #13047
+
+- docs: ascii version of manpage without nroff
+
+ Create ASCII version of manpage without nroff
+
+ - build src/tool_hugegelp.c from the ascii manpage
+ - move the manpage and the ascii version build to docs/cmdline-opts
+ - remove all use of nroff from the build process
+ - should make the build entirely reproducible (by avoiding nroff)
+
+ - partly reverts 2620aa9 to build libcurl option man pages one by one
+ in cmake because the appveyor builds got all crazy until I did
+
+ The ASCII version of the manpage
+
+ - is built with gen.pl, just like the manpage is
+ - has a right-justified column making the appearance similar to the previous
+ version
+ - uses a 4-space indent per level (instead of the old version's 7)
+ - does not do hyphenation of words (which nroff does)
+
+ History
+
+ We first made the curl build use nroff for building the hugehelp file in
+ December 1998, for curl 5.2.
+
+ Closes #13047
+
+Stefan Eissing (6 Mar 2024)
+
+- lib: add `void *ctx` to reader/writer instances
+
+ - `struct Curl_cwriter` and `struct Curl_creader` now carry a
+ `void *ctx` member that points to the instance as allocated.
+ - using `r->ctx` and `w->ctx` as pointer to the instance specific
+ struct that has been allocated
+
+ Reported-by: Rudi Heitbaum
+ Fixes #13035
+ Closes #13059
+
+- http: fix dead code in setting post client reader
+
+ - postsize was always 0, thus the check's else never happened
+ after the mime client reader was introduced
+
+ Follow-up to 0ba47146f7ff3d
+ Closes #13060
+
+- http2: fix push discard
+
+ - fix logic in discarding a failed pushed stream so that
+ stream context is properly cleaned up
+
+ Closes #13055
+
+- transfer.c: break receive loop in speed limited transfers
+
+ - the change breaks out of the receive loop in transfer.c for transfers
+ that are speed limited, once *some* bytes have been received.
+ - the overall speed limit timing is done in multi.c
+
+ Reported-by: Dmitry Karpov
+ Bug: https://curl.se/mail/lib-2024-03/0001.html
+ Closes #13050
+
+- mime: add client reader
+
+ Add `mime` client reader. Encapsulates reading from mime parts, getting
+ their length, rewinding and unpausing.
+
+ - remove special mime handling from sendf.c and easy.c
+ - add general "unpause" method to client readers
+ - use new reader in http/imap/smtp
+ - make some mime functions static that are now only used internally
+
+ In addition:
+ - remove flag 'forbidchunk' as no longer needed
+
+ Closes #13039
+
+Daniel Stenberg (5 Mar 2024)
+
+- RELEASE-NOTES: synced
+
+- TODO: remove "build HTTP/3 with OpenSSL and nghttp3 using cmake"
+
+ Follow-up to 8e741644a229c37
+
+Tal Regev (5 Mar 2024)
+
+- cmake: add USE_OPENSSL_QUIC support
+
+ Closes #13034
+
+Stefan Eissing (5 Mar 2024)
+
+- TIMER_STARTTRANSFER: set the same for everyone
+
+ - set TIMER_STARTTRANSFER on seeing the first response bytes
+ in the download client writer, not coming from a CONNECT
+ - initialized the timer the same way for all protocols
+ - remove explicit setting of TIMER_STARTTRANSFER in file.c
+ and c-hyper.c
+
+ Closes #13052
+
+Michael Kaufmann (5 Mar 2024)
+
+- http: better error message for HTTP/1.x response without status line
+
+ If a response without a status line is received, and the connection is
+ known to use HTTP/1.x (not HTTP/0.9), report the error "Invalid status
+ line" instead of "Received HTTP/0.9 when not allowed".
+
+ Closes #13045
+
+Viktor Szakats (5 Mar 2024)
+
+- KNOWN_BUGS: fix typo
+
+ Reviewed-by: Daniel Stenberg
+ Closes #13051
+
+Sebastian Neubauer (5 Mar 2024)
+
+ - smtp: fix starttls
+
+ In cases where the connection was fast, curl sometimes failed to open a
+ connection. This fixes a regression of c2d973627bab12abc5486a3f3.
+
+ The regression triggered in these steps:
+
+ 1. Create an smtp connection
+ 2. Use STARTTLS
+ 3. Receive the response
+ 4. We are inside the loop in `smtp_statemachine`, calling
+ `smtp_state_starttls_resp`
+ 5. In the good flow, we exit the loop, re-enter `smtp_statemachine` and
+ run `smtp_perform_upgrade_tls` at the start of the function.
+
+ In the bad flow, we stay in the while loop, calling
+ `Curl_pp_readresp`, which reads part of the TLS handshake and things
+ go wrong.
+
+ The reason is that `Curl_pp_moredata` changed behavior and always
+ returns `true`, so we stay in the loop in `smtp_statemachine`. With a
+ slow connection `Curl_pp_readresp` cannot read new data and returns
+ `CURL_AGAIN`, so we leave the loop and re-enter `smtp_statemachine`.
+
+ With a fast connection, `Curl_pp_readresp` reads new data from the tcp
+ connection, which is part of the TLS handshake.
+
+ The fix is in `Curl_pp_moredata`, which needs to take the final line
+ into account and return `false` if only the final line is stored.
+
+ Closes #13048
+
+Stefan Eissing (5 Mar 2024)
+
+- lib: enhance client reader resume + rewind
+
+ - update client reader documentation
+ - client reader, add rewind capabilities
+ - tell creader to rewind on next start
+ - Curl_client_reset() will keep reader for future rewind if requested
+ - add Curl_client_cleanup() for freeing all resources independent of
+ rewinds
+ - add Curl_client_start() to trigger rewinds
+ - move rewind code from multi.c to sendf.c and make part of
+ "cr-in"'s implementation
+ - http, move the "resume_from" handling into the client readers
+ - the setup of a HTTP request is reshuffled to follow:
+ * determine method, target, auth negotiation
+ * install the client reader(s) for the request, including crlf
+ conversions and "chunked" encoding
+ * apply ranges to client reader
+ * concat request headers, upgrades, cookies, etc.
+ * complete request by determining Content-Length of installed
+ readers in combination with method
+ * send
+ - add methods for client readers to
+ * return the overall length they will generate (or -1 when unknown)
+ * return the amount of data on the CLIENT level, so that
+ expect-100 can decide if it want to apply itself
+ * set a "resume_from" offset or fail if unsupported
+ - struct HTTP has become largely empty now
+ - rename `Client_reader_*` to `Curl_creader_*`
+
+ Closes #13026
+
+Viktor Szakats (5 Mar 2024)
+
+- openssl-quic: fix BIO leak and Windows warning
+
+ Caused by an accidentally duplicated line in
+ d6825df334def106f735ce7e0c1a2ea87bddffb0.
+
+ ```
+ .../lib/vquic/curl_osslq.c:1095:30: warning: implicit conversion loses integer precision: 'curl_socket_t' (aka 'unsigned long long') to 'int' [-Wshorten-64-to-32]
+ 1095 | bio = BIO_new_dgram(ctx->q.sockfd, BIO_NOCLOSE);
+ | ~~~~~~~~~~~~~ ~~~~~~~^~~~~~
+ 1 warning and 2 errors generated.
+ ```
+
+ Reviewed-by: Stefan Eissing
+ Closes #13043
+
+- openssl-quic: fix unity build, casing, indentation
+
+ - rename static functions to avoid duplicate symbols in unity mode.
+ - windows -> Windows/window in error message and comment.
+ - fix indentation.
+
+ Reviewed-by: Stefan Eissing
+ Closes #13044
+
+Daniel Stenberg (5 Mar 2024)
+
+- gen.pl: make the "manpageification" faster
+
+ The function that replaces occurrences of "--longoption" with "-Z,
+ --longoption" etc with the proper highlight applied, no longer loops
+ over the options.
+
+ Closes #13041
+
+- CONTRIBUTE: update the section on documentation format
+
+ ... since most of it is markdown now.
+
+ Closes #13046
+
+- smtp: free a temp resource
+
+ The returned address needs to be freed.
+
+ Follow-up to e3905de8196d67b89df1602feb84c1f993211b20
+ Spotted by Coverity
+
+ Closes #13038
+
+- _VARIABLES.md: improve the description
+
+ Closes #13040
+
+dependabot[bot] (4 Mar 2024)
+
+- build(deps): bump fsfe/reuse-action from 2 to 3
+
+ Bumps [fsfe/reuse-action](https://github.com/fsfe/reuse-action) from 2 to 3.
+ - [Release notes](https://github.com/fsfe/reuse-action/releases)
+ - [Commits](https://github.com/fsfe/reuse-action/compare/v2...v3)
+
+ ---
+ updated-dependencies:
+ - dependency-name: fsfe/reuse-action
+ dependency-type: direct:production
+ update-type: version-update:semver-major
+ ...
+
+ Signed-off-by: dependabot[bot] <support@github.com>
+
+Stefan Eissing (4 Mar 2024)
+
+- pytest: adapt to API change
+
+ - pytest has changed the signature of the hook pytest_report_header()
+ for some obscure reason and that change landed in our CI now
+
+ - remove the changed param that we never used anyway
+
+ Closes #13037
+
+Daniel Stenberg (4 Mar 2024)
+
+- cookie: if psl fails, reject the cookie
+
+ A libpsl install without data and no built-in database is now considered
+ bad enough to reject all cookies since they cannot be checked. It is
+ somewhat of a user error, but still.
+
+ Reported-by: Dan Fandrich
+ Closes #13033
+
+Stefan Eissing (4 Mar 2024)
+
+- lib: further send/upload handling polish
+
+ - Move all the "upload_done" handling to request.c
+
+ - add possibility to abort sending of a request
+ - add `Curl_req_done_sending()` for checks
+ - transfer.c: readwrite_upload() now clean
+
+ - removing data->state.ulbuf and data->req.upload_fromhere
+
+ - as well as data->req.upload_present
+ - set data->req.upload_done on having read all from
+ the client and completely flushed the send buffer
+
+ - tftp, remove setting of data->req.upload_fromhere
+
+ - serves no purpose as `upload_present` is not set
+ and the data itself is directly `sendto()` anyway
+
+ - smtp, make upload EOB conversion a client reader
+ - xfer_ulbuf addition
+
+ - add xfer_ulbuf for borrowing, similar to xfer_buf
+ - use in file upload
+ - use in c-hyper body sending
+
+ - h1-proxy, remove init of data->state.ulbuf that is never used
+ - smb, add own send_buf instead of using data->state.ulbuf
+
+ Closes #13010
+
+Daniel Stenberg (4 Mar 2024)
+
+- RELEASE-NOTES: synced
+
+kpcyrd (3 Mar 2024)
+
+- rustls: fix two warnings related to number types
+
+ Reported-by: Gisle Vanem
+ Follow-up to #12989
+ Closes #13017
+
+Stefan Eissing (3 Mar 2024)
+
+- bufq: writing into a softlimit queue cannot be partial
+
+ - when unable to obtain a new chunk on a softlimit bufq,
+ this is an allocation error and needs to be reported as
+ such.
+ - writes into a softlimit bufq must never be a partial success
+
+ Reported-by: Dan Fandrich
+ Fixes #13020
+ Closes #13023
+
+Dan Fandrich (2 Mar 2024)
+
+- configure: Don't build shell completions when disabled
+
+ With the recent changes to completion file building, the files were
+ built always and only installation was selectively disabled. Now, when
+ they are disabled they aren't even built, avoiding a build-time error in
+ environments where it's not possible to run the curl binary that was
+ just created (e.g. if library paths were not set up correctly).
+
+ Follow-up to 0f7aba83c
+
+ Reported-by: av223119 on github
+ Fixes #13027
+ Closes #13030
+
+Jay Satiro (2 Mar 2024)
+
+- cmdline-opts/_EXITCODES: sync with libcurl-errors
+
+ - Add error code 100 (CURLE_TOO_LARGE) to the list of error codes that
+ can be returned by the curl tool.
+
+ Closes https://github.com/curl/curl/pull/13015
+
+Stefan Eissing (1 Mar 2024)
+
+- hyper: disable test1598 due to lack of trailer support
+
+ Follow-up to 50838095
+
+ Closes #13016
+
+Dan Fandrich (1 Mar 2024)
+
+- ftp: Mark a const buffer as const
+
+- appveyor: Properly skip if only CircleCI is changed
+
+- docs: Update minimal binary size in INSTALL.md
+
+ Include more options to reduce binary size.
+
+- configure: Don't make shell completions without perl
+
+ The code that attempted to skip building the shell completions didn't
+ work properly and tried to build them even if perl wasn't available.
+ This step, as well as the install step, is now properly skipped without
+ perl.
+
+ Follow-up to 89733e2dd
+
+ Closes #13022
+
+RainRat (1 Mar 2024)
+
+- misc: Fix typos in docs and lib
+
+ This fixes miscellaneous typos and duplicated words in the docs, lib
+ and test comments, and a few user-facing error strings.
+
+ Author: RainRat on Github
+ Reviewed-by: Daniel Gustafsson <daniel@yesql.se>
+ Reviewed-by: Dan Fandrich <dan@coneharvesters.com>
+ Closes: #13019
+
+Dan Fandrich (29 Feb 2024)
+
+- configure: build & install shell completions when enabled
+
+ The --with-fish-functions-dir and --with-zsh-functions-dir options
+ currently have no effect on a normal build because the scripts/ directory
+ where they're used is not built. Add scripts/ to a normal build and
+ change the completion options to default to off to preserve the existing
+ behaviour.
+
+ Closes: #12906
+
+- github/labeler: improve the match patterns
+
+Stefan Eissing (28 Feb 2024)
+
+- tests: add test1598 for POST with trailers
+
+ - test POST fields with trailers and chunked encoding
+
+ Ref: #12938
+ Closes #13009
+
+Daniel Stenberg (28 Feb 2024)
+
+- cmdline-opts/_VERSION: provide %VERSION correctly
+
+ ... so that it does not get included verbatim in the output. Fixes a
+ regression shipped in 8.6.0.
+
+ Also fix a format mistake in form.md
+
+ Closes #13008
+
+Stefan Eissing (28 Feb 2024)
+
+- lib: Curl_read/Curl_write clarifications
+
+ - replace `Curl_read()`, `Curl_write()` and `Curl_nwrite()` to
+ clarify when and at what level they operate
+ - send/recv of transfer related data is now done via
+ `Curl_xfer_send()/Curl_xfer_recv()` which no longer has
+ socket/socketindex as parameter. It decides on the transfer
+ setup of `conn->sockfd` and `conn->writesockfd` on which
+ connection filter chain to operate.
+ - send/recv on a specific connection filter chain is done via
+ `Curl_conn_send()/Curl_conn_recv()` which get the socket index
+ as parameter.
+ - rename `Curl_setup_transfer()` to `Curl_xfer_setup()` for
+ naming consistency
+ - clarify that the special CURLE_AGAIN handling to return
+ `CURLE_OK` with length 0 only applies to `Curl_xfer_send()`
+ and CURLE_AGAIN is returned by all other send() variants.
+ - fix a bug in websocket `curl_ws_recv()` that mixed up data
+ when it arrived in more than a single chunk (to be made
+ into a separate PR, also)
+
+ Added as documented [in CLIENT-READERS.md](https://github.com/curl/curl/blob/5b1f31dfbab8aef467c419c68aa06dc738cb75d4/docs/CLIENT-READERS.md).
+
+ - old `Curl_buffer_send()` completely replaced by new `Curl_req_send()`
+ - old `Curl_fillreadbuffer()` replaced with `Curl_client_read()`
+ - HTTP chunked uploads are now formatted in a client reader added when
+ needed.
+ - FTP line-end conversions are done in a client reader added when
+ needed.
+ - when sending requests headers, remaining buffer space is filled with
+ body data for sending in "one go". This is independent of the request
+ body size. Resolves #12938 as now small and large requests have the
+ same code path.
+
+ Changes done to test cases:
+
+ - test513: now fails before sending request headers as this initial
+ "client read" triggers the setup fault. Behaves now the same as in
+ hyper build
+ - test547, test555, test1620: fix the length check in the lib code to
+ only fail for reads *smaller* than expected. This was a bug in the
+ test code that never triggered in the old implementation.
+
+ Closes #12969
+
+Daniel Gustafsson (28 Feb 2024)
+
+- curldown: Fix email address in Copyright
+
+ The curldown conversion accidentally replaced daniel@haxx.se with
+ just daniel.se. This reverts back to the proper email address in
+ the curldown docs as well as in a few other stray places where it
+ was incorrect (while unrelated to curldown).
+
+ Reviewed-by: Daniel Stenberg <daniel@haxx.se>
+ Closes: #12997
+
+Daniel Stenberg (28 Feb 2024)
+
+- getparam: make --ftp-ssl work again
+
+ Follow-up to 9e4e527 which accidentally broke it
+
+ Reported-by: Jordan Brown
+ Fixes #13006
+ Closes #13007
+
+- KNOWN_BUGS: IMAPS connection fails with rustls error
+
+ Closes #10457
+
+- KNOWN_BUGS: FTPS upload, FileZilla, GnuTLS and close_notify
+
+ Closes #11383
+
+- KNOWN_BUGS: Implicit FTPS upload timeout
+
+ Closes #11720
+
+- KNOWN_BUGS: HTTP/2 prior knowledge over proxy
+
+ Closes #12641
+
+- TODO: build HTTP/3 with OpenSSL and nghttp3 using cmake
+
+ Closes #12988
+
+- TODO: Select signature algorithms
+
+ Closes #12982
+
+- examples: use present tense in comments
+
+ remove "will" and some other word fixes
+
+ Closes #13003
+
+- docs: more language cleanups
+
+ - present tense
+ - avoid bad words
+
+ Closes #13003
+
+Daniel Gustafsson (27 Feb 2024)
+
+- setopt: Fix disabling all protocols
+
+ When disabling all protocols without enabling any, the resulting
+ set of allowed protocols remained the default set. Clearing the
+ allowed set before inspecting the passed value from --proto make
+ the set empty even in the errorpath of no protocols enabled.
+
+ Co-authored-by: Dan Fandrich <dan@telarity.com>
+ Reported-by: Dan Fandrich <dan@telarity.com>
+ Reviewed-by: Daniel Stenberg <daniel@haxx.se>
+ Closes: #13004
+
+Andreas Kiefer (27 Feb 2024)
+
+- fopen: fix narrowing conversion warning on 32-bit Android
+
+ This was fixed in commit 06dc599405f, but came back in commit
+ 03cb1ff4d62.
+
+ When building for 32-bit ARM or x86 Android, `st_mode` is defined as
+ `unsigned int` instead of `mode_t`, resulting in a
+ `-Wimplicit-int-conversion` clang warning because `mode_t` is
+ `unsigned short`. Add a cast to silence the warning, but only for
+ 32-bit Android builds, because other architectures and platforms are
+ not affected.
+
+ Ref: https://android.googlesource.com/platform/bionic/+/refs/tags/ndk-r25c/libc/include/sys/stat.h#86
+ Closes https://github.com/curl/curl/pull/12998
+
+Stefan Eissing (27 Feb 2024)
+
+- lib: Curl_read/Curl_write clarifications
+
+ - replace `Curl_read()`, `Curl_write()` and `Curl_nwrite()` to
+ clarify when and at what level they operate
+ - send/recv of transfer related data is now done via
+ `Curl_xfer_send()/Curl_xfer_recv()` which no longer has
+ socket/socketindex as parameter. It decides on the transfer
+ setup of `conn->sockfd` and `conn->writesockfd` on which
+ connection filter chain to operate.
+ - send/recv on a specific connection filter chain is done via
+ `Curl_conn_send()/Curl_conn_recv()` which get the socket index
+ as parameter.
+ - rename `Curl_setup_transfer()` to `Curl_xfer_setup()` for
+ naming consistency
+ - clarify that the special CURLE_AGAIN hangling to return
+ `CURLE_OK` with length 0 only applies to `Curl_xfer_send()`
+ and CURLE_AGAIN is returned by all other send() variants.
+ - fix a bug in websocket `curl_ws_recv()` that mixed up data
+ when it arrived in more than a single chunk
+
+ The method for sending not just raw bytes, but bytes that are either
+ "headers" or "body". The send abstraction stack, to to bottom, now is:
+
+ * `Curl_req_send()`: has parameter to indicate amount of header bytes,
+ buffers all data.
+ * `Curl_xfer_send()`: knows on which socket index to send, returns
+ amount of bytes sent.
+ * `Curl_conn_send()`: called with socket index, returns amount of bytes
+ sent.
+
+ In addition there is `Curl_req_flush()` for writing out all buffered
+ bytes.
+
+ `Curl_req_send()` is active for requests without body,
+ `Curl_buffer_send()` still being used for others. This is because the
+ special quirks need to be addressed in future parts:
+
+ * `expect-100` handling
+ * `Curl_fillreadbuffer()` needs to add directly to the new
+ `data->req.sendbuf`
+ * special body handlings, like `chunked` encodings and line end
+ conversions will be moved into something like a Client Reader.
+
+ In functions of the pattern `CURLcode xxx_send(..., ssize_t *written)`,
+ replace the `ssize_t` with a `size_t`. It makes no sense to allow for negative
+ values as the returned `CURLcode` already specifies error conditions. This
+ allows easier handling of lengths without casting.
+
+ Closes #12964
+
+Daniel Stenberg (27 Feb 2024)
+
+- multi: make add_handle free any multi_easy
+
+ If the easy handle that is being added to a multi handle has previously
+ been used for curl_easy_perform(), there is a private multi handle here
+ that we can kill off. While it flushes some caches etc for the easy
+ handle would it be used for an easy interface transfer again after being
+ used in the multi stack, this cleanup simplifies behavior and uses less
+ memory.
+
+ Closes #12992
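+
+ A sketch of the call sequence this change affects (URL is a placeholder):
+
+ ```
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   CURLM *multi;
+   curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+   curl_easy_perform(curl);            /* creates a private multi handle */
+
+   multi = curl_multi_init();
+   curl_multi_add_handle(multi, curl); /* the private multi is now freed */
+
+   curl_multi_remove_handle(multi, curl);
+   curl_multi_cleanup(multi);
+   curl_easy_cleanup(curl);
+   return 0;
+ }
+ ```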
+
+- docs: use present tense
+
+ avoid "will", detect "will" as a bad word in the CI
+
+ Also line wrapped a bunch of paragraphs
+
+ Closes #13001
+
+- CURLOPT_SSL_CTX_FUNCTION.md: no promises of lifetime after return
+
+ ... and cleanup other language.
+
+ Closes #12999
+
+Stefan Eissing (27 Feb 2024)
+
+- lib: send rework
+
+ Curl_read/Curl_write clarifications
+
+ - replace `Curl_read()`, `Curl_write()` and `Curl_nwrite()` to clarify
+ when and at what level they operate
+
+ - send/recv of transfer related data is now done via
+ `Curl_xfer_send()/Curl_xfer_recv()` which no longer has
+ socket/socketindex as parameter. It decides on the transfer setup of
+ `conn->sockfd` and `conn->writesockfd` on which connection filter
+ chain to operate.
+
+ - send/recv on a specific connection filter chain is done via
+ `Curl_conn_send()/Curl_conn_recv()` which get the socket index as
+ parameter.
+
+ - rename `Curl_setup_transfer()` to `Curl_xfer_setup()` for naming
+ consistency
+
+ - clarify that the special CURLE_AGAIN handling to return `CURLE_OK`
+ with length 0 only applies to `Curl_xfer_send()` and CURLE_AGAIN is
+ returned by all other send() variants.
+
+ SingleRequest reshuffling
+
+ - move functions into request.[ch]
+ - differentiate between reset and free
+ - add Curl_req_done() to perform last actions
+ - add a send `bufq` to SingleRequest for future use in keeping upload data
+
+ Closes #12963
+
+Daniel Stenberg (26 Feb 2024)
+
+- RELEASE-NOTES: synced
+
+- http_chunks: remove unused 'endptr' variable
+
+ Closes #12996
+
+Louis Solofrizzo (26 Feb 2024)
+
+- lib: initialize output pointers to NULL before calling strto[ff,l,ul]
+
+ In order to make MSAN happy:
+
+ ==2200945==WARNING: MemorySanitizer: use-of-uninitialized-value
+ #0 0x596f3b3ed246 in curlx_strtoofft [...]/libcurl/src/lib/strtoofft.c:239:11
+ #1 0x596f3b402156 in Curl_httpchunk_read [...]/libcurl/src/lib/http_chunks.c:149:12
+ #2 0x596f3b348550 in readwrite_data [...]/libcurl/src/lib/transfer.c:607:11
+ [...]
+
+ ==2202041==WARNING: MemorySanitizer: use-of-uninitialized-value
+ #0 0x5a3fab66a72a in Curl_parse_port [...]/libcurl/src/lib/urlapi.c:547:8
+ #1 0x5a3fab650645 in parse_authority [...]/libcurl/src/lib/urlapi.c:796:12
+ #2 0x5a3fab6740f6 in parseurl [...]/libcurl/src/lib/urlapi.c:1176:16
+ #3 0x5a3fab664fc5 in parseurl_and_replace [...]/libcurl/src/lib/urlapi.c:1342:12
+ [...]
+
+ ==2202320==WARNING: MemorySanitizer: use-of-uninitialized-value
+ #0 0x569076a0d6b0 in ipv4_normalize [...]/libcurl/src/lib/urlapi.c:683:12
+ #1 0x5690769f2820 in parse_authority [...]/libcurl/src/lib/urlapi.c:803:10
+ #2 0x569076a160f6 in parseurl [...]/libcurl/src/lib/urlapi.c:1176:16
+ #3 0x569076a06fc5 in parseurl_and_replace [...]/libcurl/src/lib/urlapi.c:1342:12
+ [...]
+
+ Signed-off-by: Louis Solofrizzo <lsolofrizzo@scaleway.com>
+ Closes #12995
+
+Stefan Eissing (26 Feb 2024)
+
+- lib: move client writer into own source
+
+ Refactoring of the client writer that passes the data to the
+ client/application's callback functions.
+
+ - split out into own source cw-out.[ch] from sendf.c
+
+ - move tempwrite and tempcount from data->state into the context of the
+ client writer
+
+ - redesign the 3 tempwrite dynbufs as a linked list of dynbufs. On
+ paused transfers, this allows to "record" interleaved HEADER/BODY
+ chunks to be "played back" in the same order on unpausing.
+
+ - keep the overall size limit of all buffered data to DYN_PAUSE_BUFFER.
+ On exceeding that, return CURLE_TOO_LARGE instead of
+ CURLE_OUT_OF_MEMORY as before.
+
+ - add method to be called when a transfer is DONE to allow writing of
+ any data still buffered
+
+ - when paused, record HEADER writes exactly as they come for later
+ playback. HEADERs are documented to be written one-by-one.
+
+ Closes #12898
+
+- urldata: move authneg bit from conn to Curl_easy
+
+ - from `conn->bits.authneg` to `data->req.authneg`
+ - this is a property of the request about to be made
+ and not a property of the connection
+ - in multiuse connections, transfer could step on each others
+ toes here potentially.
+
+ Closes #12949
+
+- c-hyper: add header collection writer in hyper builds
+
+ Closes #12880
+
+- http: move headers collecting to writer
+
+ - add a client writer that does "push" response
+ headers written to the client if the headers api
+ is enabled
+ - remove special handling in sendf.c
+ - needs to be installed very early on connection
+ setup to catch CONNECT response headers
+
+ Closes #12880
+
+- sendf: Curl_client_write(), make passed in buf const
+
+Michał Antoniak (26 Feb 2024)
+
+- lib: remove curl_mimepart object when CURL_DISABLE_MIME
+
+ Remove curl_mimepart object from UserDefined structure when
+ CURL_DISABLE_MIME flag is active. Reduce size of UserDefined structure.
+
+ Also remove unreachable code: when CURL_DISABLE_MIME is set, httpreq can
+ never have HTTPREQ_POST_MIME value and the same goes for the
+ CURL_DISABLE_FORM_API flag and the HTTPREQ_POST_FORM value
+
+ Closes #12948
+
+kpcyrd (26 Feb 2024)
+
+- rustls: make curl compile with 0.12.0
+
+ Closes #12989
+
+Daniel Stenberg (26 Feb 2024)
+
+- strtoofft: fix the overflow check
+
+ ... to not rely on wrapping, since it is an undefined behavior that is
+ not what always might happen. This is in our private strtoff() parser
+ function, used only on platforms without a native version.
+
+ Reported-by: vulnerabilityspotter on hackerone
+ Closes #12990
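+
+ A generic illustration (not curl's code) of detecting overflow before it
+ can happen instead of relying on signed wrap-around:
+
+ ```
+ #include <limits.h>
+
+ /* append one decimal digit to a non-negative accumulator, refusing to
+    overflow; hypothetical helper for illustration only */
+ static int append_digit(long long *num, int digit)
+ {
+   if(*num > (LLONG_MAX - digit) / 10)
+     return -1;              /* would overflow */
+   *num = *num * 10 + digit;
+   return 0;
+ }
+ ```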
+
+- libssh/libssh2: return error on too big range
+
+ This triggers when trying to get the range 0 - 2^63 and the remote file
+ is 2^63 bytes or larger.
+
+ Fixes #12983
+ Closes #12984
+
+Scott Talbert (24 Feb 2024)
+
+- setopt: fix check for CURLOPT_PROXY_TLSAUTH_TYPE value
+
+ Prior to this change CURLOPT_PROXY_TLSAUTH_TYPE would return
+ CURLE_BAD_FUNCTION_ARGUMENT on any type other than NULL. Since there is
+ only one type of TLS auth and it is also the default (SRP) the TLS auth
+ would work anyway.
+
+ Closes https://github.com/curl/curl/pull/12981
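+
+ A minimal sketch of TLS-SRP authentication towards a proxy (credentials
+ are placeholders):
+
+ ```
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     /* "SRP" is the only supported type and also the default */
+     curl_easy_setopt(curl, CURLOPT_PROXY_TLSAUTH_TYPE, "SRP");
+     curl_easy_setopt(curl, CURLOPT_PROXY_TLSAUTH_USERNAME, "user");
+     curl_easy_setopt(curl, CURLOPT_PROXY_TLSAUTH_PASSWORD, "secret");
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```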
+
+Jay Satiro (24 Feb 2024)
+
+- mprintf: fix format prefix I32/I64 for windows compilers
+
+ - Support I32 & I64 (eg: %I64d) for all Win32 builds.
+
+ Prior to this change mprintf support for the I format prefix, which is a
+ Microsoft extension, was dependent on the compiler used.
+
+ When Borland compiler support was removed in fd7ef00f the prefix was
+ then no longer supported for that compiler; however since it's still
+ possible to build with Borland I'm restoring support for the prefix in
+ this way.
+
+ Reported-by: Paweł Witas
+
+ Fixes https://github.com/curl/curl/issues/12944
+ Closes https://github.com/curl/curl/pull/12950
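+
+ A sketch of the prefix in use (works in Win32 builds):
+
+ ```
+ #include <curl/mprintf.h>
+
+ int main(void)
+ {
+   /* Microsoft-style I64/I32 width prefixes in the curl_*printf family */
+   curl_mprintf("large: %I64d small: %I32d\n", 9123456789LL, 42);
+   return 0;
+ }
+ ```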
+
+Daniel Stenberg (23 Feb 2024)
+
+- cd2nroff: gen: make `\>` in input to render as plain '>' in output
+
+ The same (copy and pasted) fix/mistake as in gen.pl
+
+- gen: make `\>` in input to render as plain '>' in output
+
+ Reported-by: Gisle Vanem
+ Fixes #12977
+ Closes #12978
+
+Fabrice Fontaine (23 Feb 2024)
+
+- configure.ac: find libpsl with pkg-config
+
+ Find libpsl with pkg-config to avoid static build failures.
+
+ Ref: http://autobuild.buildroot.org/results/1fb15e1a99472c403d0d3b1a688902f32e78d002
+
+ Signed-off-by: Fabrice Fontaine <fontaine.fabrice@gmail.com>
+ Closes #12947
+
+Daniel Stenberg (23 Feb 2024)
+
+- BUG-BOUNTY.md: clarify that the curl security team decides
+
+ Closes #12975
+
+- THANKS: add bug reporter from #740
+
+ Ref: https://github.com/curl/curl/issues/740
+
+Stefan Eissing (22 Feb 2024)
+
+- multi: fix multi_sock handling of select_bits
+
+ - OR the event bitmask to data->state.select_bits instead of overwriting
+ them. They are cleared again on use.
+
+ Reported-by: 5533asdg on github
+ Fixes #12971
+ Closes #12972
+
+Daniel Stenberg (22 Feb 2024)
+
+- curlver: bump to 8.7.0 for next release
+
+- RELEASE-NOTES: synced
+
+- write-out: add '%{proxy_used}'
+
+ Returns 1 if the previous transfer used a proxy, otherwise 0. Useful,
+ for example, to determine whether a `NOPROXY` pattern matched the
+ hostname or not.
+
+ Extended test 970 and 972
+
+- CURLINFO_USED_PROXY: return bool whether the proxy was used
+
+ Adds test536 to verify
+
+ Closes #12719
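+
+ A minimal sketch of reading the new info after a transfer (URL is a
+ placeholder):
+
+ ```
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     long used = 0;
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     curl_easy_perform(curl);
+     /* non-zero if the previous transfer went through a proxy */
+     curl_easy_getinfo(curl, CURLINFO_USED_PROXY, &used);
+     printf("proxy used: %ld\n", used);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```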
+
+- sha512_256: remove the cast macro, minor language/format edits
+
+ Follow-up to cbe41d151d6a100c
+
+ Closes #12966
+
+Stefan Eissing (20 Feb 2024)
+
+- DoH: add trace configuration
+
+ - refs #12397 where it is discussed how to en-/disable verbose output
+ of DoH operations
+ - introducing `struct curl_trc_feat` to track a curl feature for
+ tracing
+ - adding `data->state.feat` optionally pointing to the feature a
+ transfer belongs to
+ - adding trace functions and verbosity checks on features
+ - using trace feature in DoH code
+ - documenting `doh` as feature for `--trace-config`
+
+ Closes #12411
+
+- websocket: fix curl_ws_recv()
+
+ - when data arrived in several chunks, the collection into
+ the passed buffer always started at offset 0, overwriting
+ the data already there.
+
+ adding test_20_07 to verify fix
+
+ - debug environment var CURL_WS_CHUNK_SIZE can be used to
+ influence the buffer chunk size used for en-/decoding.
+
+ Closes #12945
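+
+ A sketch of collecting a message that arrives in several chunks; it
+ assumes an already connected websocket easy handle and a buffer large
+ enough for the whole message:
+
+ ```
+ #include <curl/curl.h>
+
+ static CURLcode recv_whole_frame(CURL *curl, char *buf, size_t len,
+                                  size_t *total)
+ {
+   const struct curl_ws_frame *meta;
+   size_t nread;
+   CURLcode res;
+   *total = 0;
+   do {
+     res = curl_ws_recv(curl, buf + *total, len - *total, &nread, &meta);
+     if(res)
+       return res;         /* CURLE_AGAIN means call again later */
+     *total += nread;      /* append, do not overwrite earlier chunks */
+   } while(meta->bytesleft && *total < len);
+   return CURLE_OK;
+ }
+ ```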
+
+Evgeny Grin (Karlson2k) (20 Feb 2024)
+
+- digest: support SHA-512/256
+
+ Also fix the tests. New implementation tested with GNU libmicrohttpd.
+ The new numbers in tests are real SHA-512/256 numbers (not just some
+ random ;) numbers).
+
+- tests: add SHA-512/256 unit test
+
+- SHA-512/256: implement hash algorithm
+
+ Closes #12897
+
+- curl_setup.h: add curl_uint64_t internal type
+
+ The unsigned version of curl_off_t basically
+
+Daniel Stenberg (20 Feb 2024)
+
+- docs: dist curl*.1 and install without perl
+
+ Drop docs/mk-ca-bundle.1 from the tarball. It can be generated at will.
+
+ Closes #12959
+ Fixes #12921
+ Reported-by: Michael Forney
+
+Stefan Eissing (20 Feb 2024)
+
+- OpenSSL QUIC: adapt to v3.3.x
+
+ - set our idle timeout as transport parameter
+ - query negotiated idle timeout for connection alive checks
+ - query number of available bidi streams on a connection
+ - use write_ex2 with SSL_WRITE_FLAG_CONCLUDE to signal
+ EOF on last chunk write, so stream close does not
+ require an additional QUIC packet
+
+ Closes #12933
+
+Ramiro Garcia (19 Feb 2024)
+
+- MANUAL.md: fix typo
+
+ Closes #12965
+
+Daniel Stenberg (19 Feb 2024)
+
+- BINDINGS: add mcurl, the python binding
+
+ Ref: #12956
+ Closes #12962
+
+- mk-ca-bundle.md: cleanups and polish
+
+ Closes #12958
+
+- spellcheck.yml: remove .1/.3 handling, clean all man page .md files
+
+ Since we generate all .1 and .3 files from markdown now, we can limit
+ the spellcheck to the markdown versions only.
+
+ Closes #12960
+
+- libcurl-docs: cleanups
+
+ CURLMOPT_SOCKETDATA.md: fix typo
+ CURLMOPT_TIMERDATA.md: fix typo
+ CURLOPT_COOKIELIST.m: quote strings
+ CURLOPT_PREREQFUNCTION.md: quote variable names
+ CURLOPT_TCP_NODELAY.md: rephrased to please spell checker
+ CURLOPT_WILDCARDMATCH.md: rephrased
+ libcurl-tutorial.md: use correct option name
+ curl_global_init_mem.md: quote headers
+ curl_easy_getinfo.md: use correct symbol names in headers
+ curl_global_trace.md: quote some headers
+ curl_ws_meta.md: quote struct field names
+ libcurl-env.md: quote headers
+
+- cd2nroff: remove backticks from titles
+
+- RELEASE-NOTES: synced
+
+Stefan Eissing (18 Feb 2024)
+
+- http_chunks: fix the accounting of consumed bytes
+
+ Prior to this change chunks were handled correctly although in verbose
+ mode libcurl could incorrectly warn of "Leftovers after chunking" even
+ if there were none.
+
+ Reported-by: Michael Kaufmann
+
+ Fixes https://github.com/curl/curl/issues/12937
+ Closes https://github.com/curl/curl/pull/12939
+
+- file: use xfer buf for file:// transfers
+
+ - For file:// transfers use the multi handle's transfer buffer for
+ up- and downloads.
+
+ Prior to this change a6c9a33 (precedes 8.6.0) changed the file://
+ transfers to use a smaller stack based buffer, and that caused a
+ significant performance decrease in Windows.
+
+ Bug: https://github.com/curl/curl/issues/12750#issuecomment-1920103086
+ Reported-by: edmcln@users.noreply.github.com
+
+ Closes https://github.com/curl/curl/pull/12932
+
+Karthikdasari0423 (18 Feb 2024)
+
+- HTTP3.md: always run nghttp3 submodule init
+
+ - For consistency change all 'build nghttp3' commands to run submodule
+ init after cloning, even if the branch does not have submodules.
+
+ Follow-up to 5a4b2f93 and 4f794558.
+
+ Closes https://github.com/curl/curl/pull/12928
+
+LeeRiva (18 Feb 2024)
+
+- CURLOPT_POSTQUOTE.md: fix typo
+
+ Closes https://github.com/curl/curl/pull/12926
+
+Evgeny Grin (Karlson2k) (18 Feb 2024)
+
+- checksrc.pl: fix handling .checksrc with CRLF
+
+ - When parsing .checksrc chomp the (CR)LF line ending.
+
+ Prior to this change on Windows checksrc.pl would not process the
+ symbols in .checksrc properly, since many git repos in Windows use auto
+ crlf to check out files with CRLF line endings.
+
+ Closes https://github.com/curl/curl/pull/12924
+
+Richard Levitte (18 Feb 2024)
+
+- cmake: fix install for older CMake versions
+
+ - Generate the docs install list by using a foreach loop instead of
+ LIST:TRANSFORM since older CMake can't handle the latter.
+
+ Reported-by: Dan Fandrich
+
+ Fixes https://github.com/curl/curl/issues/12920
+ Closes https://github.com/curl/curl/pull/12922
+
+Stefan Eissing (16 Feb 2024)
+
+- vtls: fix tls proxy peer verification
+
+ - When verifying a proxy certificate for an ip address, use the correct
+ ip family.
+
+ Prior to this change the "connection" ip family was used, which was not
+ necessarily the same.
+
+ Reported-by: HsiehYuho@users.noreply.github.com
+
+ Fixes https://github.com/curl/curl/issues/12831
+ Closes https://github.com/curl/curl/pull/12931
+
+Dan Fandrich (15 Feb 2024)
+
+- CI: Bump the Circle CI base Ubuntu image to the latest 20.04
+
+ The previous ones are going to be removed soon, plus the new ones
+ include all the fixes since then.
+
+Jay Satiro (13 Feb 2024)
+
+- transfer: improve Windows SO_SNDBUF update limit
+
+ - Change the 1 second SO_SNDBUF update limit from per transfer to per
+ connection.
+
+ Prior to this change many transfers over the same connection could cause
+ many SO_SNDBUF updates made to that connection per second, which was
+ unnecessary.
+
+ Closes https://github.com/curl/curl/pull/12911
+
+- schannel: fix hang on unexpected server close
+
+ - Treat TLS connection close (either due to a close_notify from the
+ server or just closed due to receiving 0) as pending data.
+
+ This is because in some cases schannel_recv knows the connection is
+ closed but has to return actual pending data so it can't return 0 or an
+ error to indicate no more data. In this case schannel_recv must be
+ called again, which only happens if readwrite_data sees that there is
+ still pending data.
+
+ Prior to this change if the total size of the body that libcurl expected
+ to receive from the server was unknown then it was possible under some
+ network conditions that libcurl would hang waiting to receive more data,
+ when in fact a close_notify alert indicating no more data would be sent
+ was already processed.
+
+ Fixes https://github.com/curl/curl/issues/12894
+ Closes https://github.com/curl/curl/pull/12910
+
+Daniel Stenberg (10 Feb 2024)
+
+- KNOWN_BUGS: FTP upload fails if remembered dir is deleted
+
+ Closes #12181
+ Closes #12923
+
+Michał Antoniak (10 Feb 2024)
+
+- mbedtls: use mbedtls_ssl_conf_{min|max}_tls_version
+
+ ... instead of the deprecated mbedtls_ssl_conf_{min|max}_version
+
+ Closes #12905
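+
+ A minimal sketch of the replacement calls, assuming mbedTLS 3.x and an
+ already initialized mbedtls_ssl_config (an illustration, not the code
+ from this commit):
+
+ ```c
+ #include <mbedtls/ssl.h>
+
+ /* pin the allowed TLS version range using the non-deprecated setters */
+ static void pin_tls_versions(mbedtls_ssl_config *conf)
+ {
+   mbedtls_ssl_conf_min_tls_version(conf, MBEDTLS_SSL_VERSION_TLS1_2);
+   mbedtls_ssl_conf_max_tls_version(conf, MBEDTLS_SSL_VERSION_TLS1_3);
+ }
+ ```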
+
+Dan Fandrich (9 Feb 2024)
+
+- CI: bump to actions/cache@v4 to avoid warning
+
+Evgeny Grin (Karlson2k) (9 Feb 2024)
+
+- test1165: improve pattern matching
+
+ * Fix excluded digits at the end of the symbols ('CURL_DISABLE_POP3'
+ was checked as 'CURL_DISABLE_POP')
+
+ Closes #12903
+
+Dan Fandrich (9 Feb 2024)
+
+- scripts: Fix cijobs.pl for Azure and GHA
+
+ The spacing in the yaml files changed.
+
+Daniel Stenberg (9 Feb 2024)
+
+- RELEASE-NOTES: synced
+
+- TODO: use pkg-config to find libpsl
+
+ Closes #12919
+
+- TODO: avoid nroff
+
+ Instead of adjusting roffit, skip the nroff step.
+
+ Closes #12919
+
+Dan Fandrich (9 Feb 2024)
+
+- Revert "CI: run Circle macOS builds on x86 for now"
+
+ This reverts commit 2683de3078eadc86d9b182e7417f4ee75a247e2c.
+ ARM resources are now available in Circle CI, so run these builds on ARM
+ again. This platform needs explicit paths set to libpsl and its
+ dependency icu4c.
+
+ Follow-up to 2683de30
+
+ Closes #12635
+
+Viktor Szakats (9 Feb 2024)
+
+- cmake: add warning for using TLS libraries without 1.3 support
+
+ Closes #12900
+
+Daniel Stenberg (9 Feb 2024)
+
+- configure: add warning for using TLS libraries without 1.3 support
+
+ Closes #12900
+
+Michał Antoniak (9 Feb 2024)
+
+- mbedtls: fix building when MBEDTLS_X509_REMOVE_INFO flag is defined
+
+ Closes #12904
+
+Stefan Eissing (9 Feb 2024)
+
+- ftp: fix socket wait activity in ftp_domore_getsock
+
+ - when waiting on the data connection, always add the control socket to
+ the pollset on state STOP or let the pingpong add the socket according
+ to its needs.
+
+ Reported-by: Fabian Vogt
+ Fixes #12901
+ Closes #12913
+
+Daniel Stenberg (9 Feb 2024)
+
+- dist: make sure the http tests are in the tarball
+
+ Fixes #12914
+ Reported-by: Fabian Vogt
+ Closes #12917
+
+Stefan Eissing (9 Feb 2024)
+
+- multi: add xfer_buf to multi handle
+
+ - can be borrowed by transfer during recv-write operation
+ - needs to be released before borrowing again
+ - adjusts its size to `data->set.buffer_size`
+ - used in transfer.c readwrite_data()
+
+ Closes #12805
+
+Daniel Stenberg (9 Feb 2024)
+
+- write-out.md: clarify error handling details
+
+ - it gets used even if the transfer fails
+
+ - it does not cause error to be returned even if it fails
+
+ Closes #12909
+
+Stefan Eissing (8 Feb 2024)
+
+- ftp: do lineend conversions in client writer
+
+ - remove the ftp special handling from sendf.c
+ - let ftp_do() add a client writer that does
+ the lineend conversions
+ - change the lineend conversion to no longer
+ modify the passed buffer, but write smaller
+ chunks to the next cwriter instead. The
+ inefficiency of this will be mitigated once
+ we add output buffering for all client writes.
+
+ Closes #12878
+
+- ftp: tracing improvements
+
+ - trace socketindex for connection filters when not the first
+ - trace socket fd in tcp
+ - trace pollset adjusts in vtls
+
+ Closes #12902
+
+Karthikdasari0423 (8 Feb 2024)
+
+- HTTP3.md: adjust the OpenSSL QUIC install instructions
+
+ Tried installing with the old steps but failed;
+ tried with the newly added steps and was able to build:
+ ```
+ root@ubuntu:~/curl# ./src/curl -V
+ /root/curl/src/.libs/curl: /lib/x86_64-linux-gnu/libssl.so.3: version `OPENSSL_3.2.0' not found (required by /root/curl/lib/.libs/libcurl.so.4)
+ root@ubuntu:~/curl#
+ ```
+ ```
+ root@ubuntu:~/curl# ./src/curl -V
+ curl 8.6.1-DEV (x86_64-pc-linux-gnu) libcurl/8.6.1-DEV OpenSSL/3.2.0 zlib/1.2.11 brotli/1.0.9 libpsl/0.21.0 nghttp3/1.1.0 OpenLDAP/2.5.16
+ Release-Date: [unreleased]
+ Protocols: dict file ftp ftps gopher gophers http https imap imaps ipfs ipns ldap ldaps mqtt pop3 pop3s rtsp smb smbs smtp smtps telnet tftp
+ Features: alt-svc AsynchDNS brotli HSTS HTTP3 HTTPS-proxy IPv6 Largefile libz NTLM PSL SSL threadsafe TLS-SRP UnixSockets
+ root@ubuntu:~/curl#
+ ```
+
+ Closes #12896
+
+Daniel Stenberg (8 Feb 2024)
+
+- TODO: align the TOC with the header
+
+- docs: make sure curl.1 is included in dist tarballs
+
+ Ref: https://github.com/curl/curl/issues/12832#issuecomment-1933271873
+
+ Closes #12892
+
+Karthikdasari0423 (8 Feb 2024)
+
+- HTTP3.md: remove quiche word in Openssl 3.2
+
+ Closes #12893
+
+Daniel Stenberg (7 Feb 2024)
+
+- curl: when allocating variables, add the name into the struct
+
+ This saves the name from being an extra separate allocation.
+
+ Closes #12891
+
+- lib582: remove code causing warning that is never run
+
+ The previous realloc code in this code could trigger a compiler warning,
+ but since that code path cannot happen in normal circumstances it now
+ instead exits with an error message there.
+
+ Ref: #12887
+ Closes #12890
+
+Stefan Eissing (7 Feb 2024)
+
+- vtls: revert "receive max buffer" + add test case
+
+ - add test_05_04 for requests using http/1.0, http/1.1 and h2 against an
+ Apache resource that does an unclean TLS shutdown.
+ - revert special workaround in openssl.c for suppressing shutdown errors
+ on multiplexed connections
+ - vtls.c restored to its state before 9a90c9dd64d2f03601833a70786d485851bd1b53
+
+ Fixes #12885
+ Fixes #12844
+
+ Closes #12848
+
+Daniel Stenberg (7 Feb 2024)
+
+- tests: support setting/using blank content env variables
+
+ - test450: remove --config from the keywords
+ - test2080: change return code
+ - test428: add --config as a keyword
+ - test428: disable on Windows due to CI problems
+
+- curl: exit on config file parser errors
+
+ Like when trying to import an environment variable that does not exist.
+
+ Also fix a bug for reading env variables when there is a default value
+ set.
+
+ Bug: https://curl.se/mail/archive-2024-02/0008.html
+ Reported-by: Brett Buddin
+
+ Add test 462 to verify.
+
+ Closes #12862
+
+Daniel Szmulewicz (7 Feb 2024)
+
+- CURLOPT_WRITEFUNCTION.md: typo fix
+
+ The maximum amount of body data that is <be> passed to the write
+ callback is defined in the curl.h header file
+
+ Closes #12889
+
+Daniel Stenberg (7 Feb 2024)
+
+- lib: convert Curl_get_line to use dynbuf
+
+ Create the line in a dynbuf. Aborts the reading of the file on
+ errors. Avoids having to always allocate maximum amount from the
+ start. Avoids direct malloc.
+
+ Closes #12846
+
+- KNOWN_BUGS: unicode on Windows
+
+ Closes #11461
+ Closes #12231
+ Closes #12883
+
+- tool_operate: change precedence of server Retry-After time
+
+ - When calculating the retry time, no longer allow a server's requested
+ Retry-After time to take precedence over a longer retry time (either
+ default algorithmic or user-specified).
+
+ Prior to this change the server's Retry-After time took precedence over
+ curl's retry time in all cases, but that's not always practical for
+ short Retry-After times depending on how busy the server is.
+
+ Bug: https://curl.se/mail/archive-2024-01/0022.html
+ Reported-by: Dirk Hünniger
+
+ Closes https://github.com/curl/curl/pull/12871
+
+- cmdline-docs: quote and angle bracket cleanup
+
+ - make sure angle brackets are escaped
+ - remove a lot of superfluous double quotes
+ - replace several double quotes with backticks
+
+ To make nicer-looking markdown.
+
+ Closes #12884
+
+- badwords: use hostname, not host name
+
+ and username, filename - consistently. Fixed the patterns in
+ badwords.txt to catch these.
+
+ Closes #12888
+
+Viktor Szakats (6 Feb 2024)
+
+- cmake: fix function description in comment [ci skip]
+
+ Closes #12879
+
+Daniel Stenberg (6 Feb 2024)
+
+- header.md: remove backslash, make nicer markdown
+
+ - remove a leftover backslash before a dash
+ - use backticks for "code" strings
+
+ Closes #12877
+
+- docs: add mk-ca-bundle.1 to dist
+
+ ... which also makes it get built. But don't build this or curl-config.1
+ if build docs is disabled.
+
+ Closes #12875
+
+Stefan Eissing (6 Feb 2024)
+
+- https-proxy: use IP address and cert with ip in alt names
+
+ - improve info logging when peer verification fails, to indicate
+ whether a DNS name or an IP address was attempted in the match
+ - add test case for contacting https proxy with ip address
+ - add pytest env check on loaded credentials and re-issue
+ when they are no longer valid
+ - disable proxy ip address test for bearssl, since not supported there
+
+ Ref: #12831
+ Closes #12838
+
+Jiawen Geng (6 Feb 2024)
+
+- docs: add necessary setup for nghttp3
+
+ Now nghttp3 has submodules
+ https://github.com/ngtcp2/nghttp3/blob/main/.gitmodules
+
+ Closes #12859
+
+Peter Krefting (6 Feb 2024)
+
+- version: allow building with ancient libpsl
+
+ The psl_check_version_number() API was added in libpsl 0.11.0. CentOS 7
+ ships with version 0.7.0 which lacks this API. Revert to using the older
+ versioning API if we detect an old libpsl version.
+
+ Follow-up to 72bd88adde0e8cf6e63644a7d6df1da01a399db4
+ Bug: https://curl.se/mail/archive-2024-02/0004.html
+ Reported-by: Scott Mutter
+ Closes #12872
+
+Daniel Stenberg (6 Feb 2024)
+
+- TODO: Support latest rustls
+
+ Closes #12737
+ Closes #12874
+
+- docs: make curldown do angle brackets like markdown
+
+ Make sure we use \< and \> in markdown all over so that it renders
+ correctly, on GitHub and elsewhere. cd2nroff now outputs a warning if it
+ finds an unescaped angle bracket.
+
+ Ref: #12854
+ Closes #12869
+
+- docs: fix the --disable-docs for autotools
+
+ Follow-up to 541321507e386
+
+ Closes #12870
+
+- RELEASE-NOTES: synced
+
+- libcurl-security.md: Active FTP passes on the local IP address
+
+ Reported-by: Harry Sintonen
+ Closes #12867
+
+Stefan Eissing (5 Feb 2024)
+
+- configure: do not link with nghttp3 unless necessary
+
+ Fixes #12833
+ Closes #12864
+ Reported-by: Ryan Carsten Schmidt
+
+Daniel Stenberg (5 Feb 2024)
+
+- THANKS: add Dmitry Tretyakov
+
+ ... since I failed to give credit for the report in the fix of #12861
+
+Stefan Eissing (5 Feb 2024)
+
+- openssl-quic: check on Windows that socket conv to int is possible
+
+ Fixes #12861
+ Closes #12865
+
+Daniel Stenberg (5 Feb 2024)
+
+- tool_cb_hdr: only parse etag + content-disposition for 2xx
+
+ ... and ignore them for other response codes.
+
+ Reported-by: Harry Sintonen
+ Closes #12866
+
+- md4: include strdup.h for the memdup proto
+
+ Reported-by: Erik Schnetter
+ Fixes #12849
+ Closes #12863
+
+Joel Depooter (5 Feb 2024)
+
+- docs: add missing slashes to SChannel client certificate documentation
+
+ When setting the CURLOPT_SSLCERT option to a certificate thumbprint, it
+ is required to have a backslash between the "store location", "store
+ name" and "thumbprint" tokens. These slashes were present in the
+ previous documentation, but were missed in the transition to markdown
+ documentation.
+
+ Closes #12854
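+
+ A minimal sketch of such a setting via libcurl, assuming the Schannel
+ backend and the typical "CurrentUser" store location and "MY" store
+ name; the thumbprint below is a made-up placeholder:
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     /* store location \ store name \ thumbprint, backslashes escaped */
+     curl_easy_setopt(curl, CURLOPT_SSLCERT,
+                      "CurrentUser\\MY\\"
+                      "9d2e103dd0c6c45ff0f18f5f1e3f57dd0b9a3f23");
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```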
+
+Stefan Eissing (5 Feb 2024)
+
+- HTTP/2: write response directly
+
+ - use the new `Curl_xfer_write_resp()` to write incoming responses
+ directly to the client
+ - eliminates `stream->recvbuf`
+ - memory consumption on parallel transfers minimized
+
+ Closes #12828
+
+Daniel Stenberg (5 Feb 2024)
+
+- cookie.md: provide an example sending a fixed cookie
+
+ Closes #12868
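+
+ A minimal libcurl-level sketch of the same idea (the cookie name and
+ value are made up for illustration):
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     /* send a fixed cookie header with the request */
+     curl_easy_setopt(curl, CURLOPT_COOKIE, "name=value; name2=value2");
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```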
+
+Lars Kellogg-Stedman (5 Feb 2024)
+
+- ALTSVC.md: correct a typo
+
+ The ALPN documentation erroneously referred to a "host number" instead
+ of a "port number".
+
+ Closes #12852
+
+Boris Verkhovskiy (5 Feb 2024)
+
+- proxy1.0.md: fix example
+
+ Closes #12856
+
+Chris Webb (5 Feb 2024)
+
+- configure: add --disable-docs flag
+
+ Building man pages from curldown sources now requires perl. Add a
+ --disable-docs flag to configure to enable building and installing
+ without documentation where perl is not available or man pages are not
+ required. This is selected automatically (with a warning) when perl is
+ not found by configure.
+
+ Fixes #12832
+ Closes #12857
+
+Faraz Fallahi (5 Feb 2024)
+
+- connect.c: fix typo
+
+ Closes #12858
+
+Daniel Stenberg (1 Feb 2024)
+
+- sendf: ignore response body to HEAD
+
+ and mark the stream for close, but return OK since the response so far
+ was ok - if headers were received. Partly because this is what curl has
+ done traditionally.
+
+ Test 499 verifies. Updates test 689.
+
+ Reported-by: Sergey Bronnikov
+ Bug: https://curl.se/mail/lib-2024-02/0000.html
+ Closes #12842
+
+- ftp: treat a 226 arriving before data as a signal to read data
+
+ For active mode transfers.
+
+ Due to some interesting timing, curl can sometimes get the 226 (transfer
+ complete) over the control channel first, before the data connection
+ signals readability. If this happens, use that as a signal to check the
+ data connection.
+
+ Additionally, set the socket filter in listen mode *before* the
+ PORT/EPRT command is issued, to reduce the risk that the little time gap
+ could interfere.
+
+ This issue never reproduced for me on Debian and took several hundred
+ rounds to trigger on my mac.
+
+ Reported-by: Stefan Eissing
+ Fixes #12823
+ Closes #12841
+
+Patrick Monnerat (1 Feb 2024)
+
+- OS400: avoid using awk in the build scripts
+
+ Awk is a PASE program and its use may cause a failure depending on the
+ CCSID of the calling script (IBM bug?).
+
+ For this reason, revert to a sed-only solution to extract the exported
+ symbols from the header files.
+
+ Closes #12826
+
+Jan Macku (1 Feb 2024)
+
+- docs: remove `mk-ca-bundle.1` from `man_MANS`
+
+ It was accidentally added in https://github.com/curl/curl/pull/12730
+
+ Co-authored-by: Lukáš Zaoral <lzaoral@redhat.com>
+ Signed-off-by: Jan Macku <jamacku@redhat.com>
+
+ Follow-up to eefcc1bda4bccd800f5a56a0fe17a2f44a96e88b
+ Closes #12843
+
+Daniel Stenberg (1 Feb 2024)
+
+- RELEASE-NOTES: synced
+
+ and bump to 8.6.1 for now
+
+- cmdline-docs/Makefile: avoid using a fixed temp file name
+
+ By appending the pid number, two different runs at the same time will not
+ trample over the same file.
+
+ Reported-by: Jon Rumsey
+ Fixes #12829
+ Closes #12839
+
+- asyn-thread: use wakeup_close to close the read descriptor
+
+ Reported-by: Dan Fandrich
+ Ref: #12834
+ Closes #12836
+
+Stefan Eissing (1 Feb 2024)
+
+- ntlm_wb: fix buffer type typo
+
+ Closes #12825
+
+Daniel Stenberg (1 Feb 2024)
+
+- tool_operate: do not set CURLOPT_QUICK_EXIT in debug builds
+
+ Since it allows (small) memory leaks that interfere with torture tests
+ and regular memory-leak checks.
+
+ Reported-by: Dan Fandrich
+ Fixes #12834
+ Closes #12835
+
+Boris Verkhovskiy (31 Jan 2024)
+
+- form-string.md: correct the example
+
+ Closes #12822
+
Version 8.6.0 (31 Jan 2024)
Daniel Stenberg (31 Jan 2024)
@@ -8485,2212 +11022,3 @@ vvb2060 (11 Sep 2023)
Daniel Stenberg (10 Sep 2023)
- RELEASE-NOTES: synced
-
-Benoit Pierre (10 Sep 2023)
-
-- configure: fix `HAVE_TIME_T_UNSIGNED` check
-
- The syntax was incorrect (need a proper main body), and the test
- condition was wrong (resulting in a signed `time_t` detected as
- unsigned).
-
- Closes #11825
-
-Daniel Stenberg (9 Sep 2023)
-
-- THANKS-filter: pszlazak on github
-
-pszlazak (9 Sep 2023)
-
-- include.d: explain headers not printed with --fail before 7.75.0
-
- Prior to 7.75.0 response headers were not printed if -f/--fail was used
- and an error was reported by the server. This was fixed in ab525c0
- (precedes 7.75.0).
-
- Closes #11822
-
-Daniel Stenberg (8 Sep 2023)
-
-- http_aws_sigv4: skip the op if the query pair is zero bytes
-
- Follow-up to fc76a24c53b08cdf
-
- Spotted by OSS-Fuzz
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=62175
- Closes #11823
-
-- cmdline-docs: use present tense, not future
-
- + some smaller cleanups
-
- Closes #11821
-
-- cmdline-docs: make sure to phrase it as "added in ...."
-
- References to things that were added or changed in a specific version
- should be specified as "(added in [version])" for two reasons:
-
- 1 - consistency
-
- 2 - to allow gen.pl to strip them out if deemed referring to too old
- versions
-
- Closes #11821
-
-Jay Satiro (8 Sep 2023)
-
-- docs: mark --ssl-revoke-best-effort as Schannel specific
-
- Closes https://github.com/curl/curl/pull/11760
-
-Nathan Moinvaziri (8 Sep 2023)
-
-- schannel: fix ordering of cert chain info
-
- - Use CERT_CONTEXT's pbCertEncoded to determine chain order.
-
- CERT_CONTEXT from SECPKG_ATTR_REMOTE_CERT_CONTEXT contains
- end-entity/server certificate in pbCertEncoded. We can use this pointer
- to determine the order of certificates when enumerating hCertStore using
- CertEnumCertificatesInStore.
-
- This change is to help ensure that the ordering of the certificate chain
- requested by the user via CURLINFO_CERTINFO has the same ordering on all
- versions of Windows.
-
- Prior to this change Schannel certificate order was reversed in 8986df80
- but that was later reverted in f540a39b when it was discovered that
- Windows 11 22H2 does the reversal on its own.
-
- Ref: https://github.com/curl/curl/issues/9706
-
- Closes https://github.com/curl/curl/pull/11632
-
-Chris Talbot (8 Sep 2023)
-
-- digest: Use hostname to generate spn instead of realm
-
- In https://www.rfc-editor.org/rfc/rfc2831#section-2.1.2
-
- digest-uri-value should be serv-type "/" host , where host is:
-
- The DNS host name or IP address for the service requested. The
- DNS host name must be the fully-qualified canonical name of the
- host. The DNS host name is the preferred form; see notes on server
- processing of the digest-uri.
-
- Realm may not be the host, so we must specify the host explicitly.
-
- Note this change only affects the non-SSPI digest code. The digest code
- used by SSPI builds already uses the hostname to generate the spn.
-
- Ref: https://github.com/curl/curl/issues/11369
-
- Closes https://github.com/curl/curl/pull/11395
-
-Daniel Stenberg (7 Sep 2023)
-
-- docs: remove use of the word 'very'
-
- It is mostly superfluous. proselint would complain.
-
- Closes #11818
-
-- curl_multi_remove_handle.3: clarify what happens with connection
-
- Closes #11817
-
-- RELEASE-NOTES: synced
-
-- test439: verify query canonization for aws-sigv4
-
-- tool_operate: make aws-sigv4 not require TLS to be used
-
- Maybe not used too often, but we want it for testing and it should work.
-
-- http_aws_sigv4: canonicalize the query
-
- Percent encoding needs to be done using uppercase, and most
- non-alphanumerical must be percent-encoded.
-
- Fixes #11794
- Reported-by: John Walker
- Closes #11806
-
-Wyatt O'Day (7 Sep 2023)
-
-- lib: add ability to disable auths individually
-
- Both with configure and cmake
-
- Closes #11490
-
-Stefan Eissing (7 Sep 2023)
-
-- ngtcp2: fix handling of large requests
-
- - requests >64K are send in parts to the filter
- - fix parsing of the request to assemble it correctly
- from several sends
- - open a QUIC stream only when the complete request has
- been collected
-
- Closes #11815
-
-- openssl: when CURLOPT_SSL_CTX_FUNCTION is registered, init x509 store before
-
- - we delay loading the x509 store to shorten the handshake time.
- However an application callback installed via CURLOPT_SSL_CTX_FUNCTION
- may need to have the store loaded and try to manipulate it.
- - load the x509 store before invoking the app callback
-
- Fixes #11800
- Reported-by: guoxinvmware on github
- Closes #11805
-
-Daniel Stenberg (7 Sep 2023)
-
-- krb5: fix "implicit conversion loses integer precision" warnings
-
- conversions to/from enum and unsigned chars
-
- Closes #11814
-
-Stefan Eissing (7 Sep 2023)
-
-- pytest: improvements
-
- - set CURL_CI for pytest runs in CI environments
- - exclude timing sensitive tests from CI runs
- - for failed results, list only the log and stat of
- the failed transfer
-
- - fix typo in http.c comment
-
- Closes #11812
-
-- CI: move on to ngtcp2 v0.19.1
-
- Closes #11809
-
-Dan Fandrich (5 Sep 2023)
-
-- CI: run Circle macOS builds on x86 for now
-
- The ARM machines aren't ready for us and requesting them now causes
- warning e-mails to be sent to some PR pushers.
-
- Ref: #11771
-
-Viktor Szakats (5 Sep 2023)
-
-- http3: adjust cast for ngtcp2 v0.19.0
-
- ngtcp2 v0.19.0 made size of `ecn` member of `ngtcp2_pkt_info`
- an `uint8_t` (was: `uint32_t`). Adjust our local cast accordingly.
-
- Fixes:
- ```
- ./curl/lib/vquic/curl_ngtcp2.c:1912:12: warning: implicit conversion loses integer precision: 'uint32_t' (aka 'unsigned int') to 'uint8_t' (aka 'unsigned char') [-Wimplicit-int-conversion]
- pi.ecn = (uint32_t)ecn;
- ~ ^~~~~~~~~~~~~
- ```
-
- Also bump ngtcp2, nghttp3 and nghttp2 to their latest versions in our
- docs and CI.
-
- Ref: https://github.com/ngtcp2/ngtcp2/commit/80447281bbc94af53f8aa7a4cfc19175782894a3
- Ref: https://github.com/ngtcp2/ngtcp2/pull/877
- Closes #11798
-
-Stefan Eissing (5 Sep 2023)
-
-- http: fix sending of large requests
-
- - refs #11342 where errors with git https interactions
- were observed
- - problem was caused by 1st sends of size larger than 64KB
- which resulted in later retries of 64KB only
- - limit sending of 1st block to 64KB
- - adjust h2/h3 filters to cope with parsing the HTTP/1.1
- formatted request in chunks
-
- - introducing Curl_nwrite() as companion to Curl_write()
- for the many cases where the sockindex is already known
-
- Fixes #11342 (again)
- Closes #11803
-
-- pytest: fix check for slow_network skips to only apply when intended
-
- Closes #11801
-
-Daniel Stenberg (5 Sep 2023)
-
-- curl_url_get/set.3: add missing semicolon in SYNOPSIS
-
-- CURLOPT_URL.3: explain curl_url_set() uses the same parser
-
-- CURLOPT_URL.3: add two URL API calls in the see-also section
-
-Dan Fandrich (4 Sep 2023)
-
-- CI: add a 32-bit i686 Linux build
-
- This is done by cross-compiling under regular x86_64 Linux. Since the
- kernel offers backwards compatibility, the binaries can be tested as
- normal.
-
- Closes #11799
-
-- tests: fix a type warning on 32-bit x86
-
-Viktor Szakats (4 Sep 2023)
-
-- tests: delete stray `.orig` file
-
- Follow-up to 331b89a319d0067fa1e6441719307cfef9c7960f
- Closes #11797
-
-Daniel Stenberg (4 Sep 2023)
-
-- RELEASE-NOTES: synced
-
-Viktor Szakats (4 Sep 2023)
-
-- lib: silence compiler warning in inet_ntop6
-
- ```
- ./curl/lib/inet_ntop.c:121:21: warning: possible misuse of comma operator here [-Wcomma]
- cur.base = i, cur.len = 1;
- ^
- ./curl/lib/inet_ntop.c:121:9: note: cast expression to void to silence warning
- cur.base = i, cur.len = 1;
- ^~~~~~~~~~~~
- (void)( )
- ```
-
- Closes #11790
-
-Daniel Stenberg (4 Sep 2023)
-
-- transfer: also stop the sending on closed connection
-
- Previously this cleared the receiving bit only but in some cases it is
- also still sending (like a request-body) when disconnected and neither
- direction can continue then.
-
- Fixes #11769
- Reported-by: Oleg Jukovec
- Closes #11795
-
-John Bampton (4 Sep 2023)
-
-- docs: change `sub-domain` to `subdomain`
-
- https://en.wikipedia.org/wiki/Subdomain
-
- Closes #11793
-
-Stefan Eissing (4 Sep 2023)
-
-- multi: more efficient pollfd count for poll
-
- - do not use separate pollfds for sockets that have POLLIN+POLLOUT
-
- Closes #11792
-
-- http2: polish things around POST
-
- - added test cases for various code paths
- - fixed handling of blocked write when stream had
- been closed inbetween attempts
- - re-enabled DEBUGASSERT on send with smaller data size
-
- - in debug builds, environment variables can be set to simulate a slow
- network when sending data. cf-socket.c and vquic.c support
- * CURL_DBG_SOCK_WBLOCK: percentage of send() calls that should be
- answered with a EAGAIN. TCP/UNIX sockets.
- This is chosen randomly.
- * CURL_DBG_SOCK_WPARTIAL: percentage of data that shall be written
- to the network. TCP/UNIX sockets.
- Example: 80 means a send with 1000 bytes would only send 800
- This is applied to every send.
- * CURL_DBG_QUIC_WBLOCK: percentage of send() calls that should be
- answered with EAGAIN. QUIC only.
- This is chosen randomly.
-
- Closes #11756
-
-Daniel Stenberg (4 Sep 2023)
-
-- docs: add curl_global_trace to some SEE ALSO sections
-
- Closes #11791
-
-- os400: fix checksrc nits
-
- Closes #11789
-
-Nicholas Nethercote (3 Sep 2023)
-
-- hyper: remove `hyptransfer->endtask`
-
- `Curl_hyper_stream` needs to distinguish between two kinds of
- `HYPER_TASK_EMPTY` tasks: (a) the `foreach` tasks it creates itself, and
- (b) background tasks that hyper produces. It does this by recording the
- address of any `foreach` task in `hyptransfer->endtask` before pushing
- it into the executor, and then comparing that against the address of
- tasks later polled out of the executor.
-
- This works right now, but there is no guarantee from hyper that the
- addresses are stable. `hyper_executor_push` says "The executor takes
- ownership of the task, which should not be accessed again unless
- returned back to the user with `hyper_executor_poll`". That wording is a
- bit ambiguous but with my Rust programmer's hat on I read it as meaning
- the task returned with `hyper_executor_poll` may be conceptually the
- same as a task that was pushed, but that there are no other guarantees
- and comparing addresses is a bad idea.
-
- This commit instead uses `hyper_task_set_userdata` to mark the `foreach`
- task with a `USERDATA_RESP_BODY` value which can then be checked for,
- removing the need for `hyptransfer->endtask`. This makes the code look
- more like the hyper C API examples, which use userdata for every task
- and never look at task addresses.
-
- Closes #11779
-
-Dave Cottlehuber (3 Sep 2023)
-
-- ws: fix spelling mistakes in examples and tests
-
- Closes #11784
-
-Daniel Stenberg (3 Sep 2023)
-
-- tool_filetime: make -z work with file dates before 1970
-
- Fixes #11785
- Reported-by: Harry Sintonen
- Closes #11786
-
-Dan Fandrich (1 Sep 2023)
-
-- build: fix portability of mancheck and checksrc targets
-
- At least FreeBSD preserves cwd across makefile lines, so rules
- consisting of more than one "cd X; do_something" must be explicitly run
- in a subshell to avoid this. This problem caused the Cirrus FreeBSD
- build to fail when parallel make jobs were enabled.
-
-- CI: adjust labeler match patterns for new & obsolete files
-
-- configure: trust pkg-config when it's used for zlib
-
- The library flags retrieved from pkg-config were later thrown out and
- hard-coded, which negates the whole reason to use pkg-config.
- Also, previously, the assumption was made that --libs-only-l and
- --libs-only-L are the full decomposition of --libs, which is untrue and
- would not allow linking against a static zlib. The new approach is
- better in that it uses --libs, although only if --libs-only-l returns
- nothing.
-
- Bug: https://curl.se/mail/lib-2023-08/0081.html
- Reported-by: Randall
- Closes #11778
-
-Stefan Eissing (1 Sep 2023)
-
-- CI/ngtcp2: clear wolfssl for when cache is ignored
-
- Closes #11783
-
-Daniel Stenberg (1 Sep 2023)
-
-- RELEASE-NOTES: synced
-
-Nicholas Nethercote (1 Sep 2023)
-
-- hyper: fix a progress upload counter bug
-
- `Curl_pgrsSetUploadCounter` should be passed a total count, not an
- increment.
-
- This changes the failing diff for test 579 with hyper from this:
- ```
- Progress callback called with UL 0 out of 0[LF]
- -Progress callback called with UL 8 out of 0[LF]
- -Progress callback called with UL 16 out of 0[LF]
- -Progress callback called with UL 26 out of 0[LF]
- -Progress callback called with UL 61 out of 0[LF]
- -Progress callback called with UL 66 out of 0[LF]
- +Progress callback called with UL 29 out of 0[LF]
- ```
- to this:
- ```
- Progress callback called with UL 0 out of 0[LF]
- -Progress callback called with UL 8 out of 0[LF]
- -Progress callback called with UL 16 out of 0[LF]
- -Progress callback called with UL 26 out of 0[LF]
- -Progress callback called with UL 61 out of 0[LF]
- -Progress callback called with UL 66 out of 0[LF]
- +Progress callback called with UL 40 out of 0[LF]
- ```
- Presumably a step in the right direction.
-
- Closes #11780
-
-Daniel Stenberg (1 Sep 2023)
-
-- aws-sigv4: avoid freeing the date pointer on error
-
- Since it was not allocated, don't free it even if the syntax was wrong
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=61908
-
- Follow-up to b137634ba3adb
-
- Closes #11782
-
-Stefan Eissing (1 Sep 2023)
-
-- CI: ngtcp2-linux: use separate caches for tls libraries
-
- allow ever changing master for wolfssl
-
- Closes #11766
-
-- replace `master` as wolfssl-version with recent commit
-
-- wolfssl, use master again in CI
-
- - with the shared session update fix landed in master, it
- is time to use that in our CI again
-
-Nicholas Nethercote (31 Aug 2023)
-
-- tests: fix formatting errors in `FILEFORMAT.md`.
-
- Without the surrounding backticks, these tags get swallowed when the
- markdown is rendered.
-
- Closes #11777
-
-Viktor Szakats (31 Aug 2023)
-
-- cmake: add support for `CURL_DEFAULT_SSL_BACKEND`
-
- Allow overriding the default TLS backend via a CMake setting.
-
- E.g.:
- `cmake [...] -DCURL_DEFAULT_SSL_BACKEND=mbedtls`
-
- Accepted values: bearssl, gnutls, mbedtls, openssl, rustls,
- schannel, secure-transport, wolfssl
-
- The passed string is baked into the curl/libcurl binaries.
- The value is case-insensitive.
-
- We added a similar option to autotools in 2017 via
- c7170e20d0a18ec8a514b4daa53bcdbb4dcb3a05.
-
- TODO: Convert to lowercase to improve reproducibility.
-
- Closes #11774
-
-- sectransp: fix compiler warnings
-
- https://github.com/curl/curl-for-win/actions/runs/6037489221/job/16381860220#step:3:11046
- ```
- /Users/runner/work/curl-for-win/curl-for-win/curl/lib/vtls/sectransp.c:2435:14: warning: unused variable 'success' [-Wunused-variable]
- OSStatus success;
- ^
- /Users/runner/work/curl-for-win/curl-for-win/curl/lib/vtls/sectransp.c:3300:44: warning: unused parameter 'sha256len' [-Wunused-parameter]
- size_t sha256len)
- ^
- ```
-
- Closes #11773
-
-- tidy-up: mostly whitespace nits
-
- - delete completed TODO from `./CMakeLists.txt`.
- - convert a C++ comment to C89 in `./CMake/CurlTests.c`.
- - delete duplicate EOLs from EOF.
- - add missing EOL at EOF.
- - delete whitespace at EOL (except from expected test results).
- - convert tabs to spaces.
- - convert CRLF EOLs to LF in GHA yaml.
- - text casing fixes in `./CMakeLists.txt`.
- - fix a codespell typo in `packages/OS400/initscript.sh`.
-
- Closes #11772
-
-Dan Fandrich (31 Aug 2023)
-
-- CI: remove Windows builds from Cirrus, without replacement
-
- If we don't do this, all coverage on Cirrus will cease in a few days. By
- removing the Windows builds, the FreeBSD one should still continue
- as before. The Windows builds will need to be moved to another service to
- maintain test coverage.
-
- Closes #11771
-
-- CI: switch macOS ARM build from Cirrus to Circle CI
-
- Cirrus is drastically reducing their free tier on Sept. 1, so they will
- no longer perform all these builds for us. All but one build has been
- moved, with the LibreSSL one being dropped because of linking problems
- on Circle.
-
- One important note about this change is that Circle CI is currently
- directing all these builds to x86_64 hardware, despite them requesting
- ARM. This is because ARM nodes are scheduled to be available on the
- free tier only in December. This reduces our architectural diversity
- until then but it should automatically come back once those machines are
- enabled.
-
-- CI: use the right variable for BSD make
-
- BSD uses MAKEFLAGS instead of MAKE_FLAGS so it wasn't doing parallel
- builds before.
-
-- CI: drop the FreeBSD 12.X build
-
- Cirrus' new free tier won't let us have many builds, so drop the
- nonessential ones. The FreeBSD 13.X build will still give us the most
- relevant FreeBSD coverage.
-
-- CI: move the Alpine build from Cirrus to GHA
-
- Cirrus is reducing their free tier to next to nothing, so we must move
- builds elsewhere.
-
-Stefan Eissing (30 Aug 2023)
-
-- test_07_upload.py: fix test_07_34 curl args
-
- - Pass correct filename to --data-binary.
-
- Prior to this change --data-binary was passed an incorrect filename due
- to a missing separator in the arguments list. Since aacbeae7 curl will
- error on incorrect filenames for POST.
-
- Fixes https://github.com/curl/curl/issues/11761
- Closes https://github.com/curl/curl/pull/11763
-
-Nicholas Nethercote (30 Aug 2023)
-
-- tests: document which tests fail due to hyper's lack of trailer support.
-
- Closes #11762
-
-- docs: removing "pausing transfers" from HYPER.md.
-
- It's a reference to #8600, which was fixed by #9070.
-
- Closes #11764
-
-Patrick Monnerat (30 Aug 2023)
-
-- os400: handle CURL_TEMP_PRINTF() while building bind source
-
- Closes #11547
-
-- os400: build test servers
-
- Also fix a non-compliant main prototype in disabled.c.
-
- Closes #11547
-
-- tests: fix compilation error for os400
-
- OS400 uses BSD 4.3 setsockopt() prototype by default: this does not
- define the parameter as const, resulting in an error if the actual parameter is
- const. Remove the const keyword from the actual parameter cast: this
- works in all conditions, even if the formal parameter uses it.
-
- Closes #11547
-
-- os400: make programs and command name configurable
-
- Closes #11547
-
-- os400: move build configuration parameters to a separate script
-
- They can then easily be overridden in a script named "config400.override"
- that is not part of the distribution.
-
- Closes #11547
-
-- os400: implement CLI tool
-
- This is provided as a QADRT (ascii) program, a link to it in the IFS and
- a minimal CL command.
-
- Closes #11547
-
-Matthias Gatto (30 Aug 2023)
-
-- lib: fix aws-sigv4 having date header twice in some cases
-
- When the user was providing the header X-XXX-Date, the header was
- re-added during signature computation, and we had it twice in the
- request.
-
- Reported-by: apparentorder@users.noreply.github.com
-
- Signed-off-by: Matthias Gatto <matthias.gatto@outscale.com>
-
- Fixes: https://github.com/curl/curl/issues/11738
- Closes: https://github.com/curl/curl/pull/11754
-
-Jay Satiro (30 Aug 2023)
-
-- multi: remove 'processing: <url>' debug message
-
- - Remove debug message added by e024d566.
-
- Closes https://github.com/curl/curl/pull/11759
-
-- ftp: fix temp write of ipv6 address
-
- - During the check to differentiate between a port and IPv6 address
- without brackets, write the binary IPv6 address to an in6_addr.
-
- Prior to this change the binary IPv6 address was erroneously written to
- a sockaddr_in6 'sa6' when it should have been written to its in6_addr
- member 'sin6_addr'. There's no fallout because no members of 'sa6' are
- accessed before it is later overwritten.
-
- Closes https://github.com/curl/curl/pull/11747
-
-- tool: change some fopen failures from warnings to errors
-
- - Error on missing input file for --data, --data-binary,
- --data-urlencode, --header, --variable, --write-out.
-
- Prior to this change if a user of the curl tool specified an input file
- for one of the above options and that file could not be opened then it
- would be treated as zero length data instead of an error. For example, a
- POST using `--data @filenametypo` would cause a zero length POST which
- is probably not what the user intended.
-
- Closes https://github.com/curl/curl/pull/11677
-
-- hostip: fix typo
-
-Davide Masserut (29 Aug 2023)
-
-- tool: avoid including leading spaces in the Location hyperlink
-
- Co-authored-by: Dan Fandrich <dan@coneharvesters.com>
-
- Closes #11735
-
-Daniel Stenberg (29 Aug 2023)
-
-- SECURITY-PROCESS.md: not a sec issue: Tricking user to run a cmdline
-
- Closes #11757
-
-- connect: stop halving the remaining timeout when less than 600 ms left
-
- When curl wants to connect to a host, it always has a TIMEOUT. The
- maximum time it is allowed to spend until a connect is confirmed.
-
- curl will try to connect to each of the IP addresses returned for the
- host. Two loops, one for each IP family.
-
- During the connect loop, while curl has more than one IP address left to
- try within a single address family, curl has traditionally allowed (time
- left/2) for *this* connect attempt. This, to not get stuck on the
- initial addresses in case they time out, but still allow later addresses to
- get attempted.
-
- This has the downside that when users set a very short timeout and the
- host has a large number of IP addresses, the effective result might be
- that every attempt gets too little time.
-
- This change stops doing the divide-by-two if the total time left is
- below a threshold. This threshold is 600 milliseconds.
-
- Closes #11693
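-
- A simplified sketch of the allotment logic described above (not the
- actual libcurl code): each attempt gets half of the time left while
- more addresses remain to try, but the halving stops once less than
- 600 milliseconds remain.
-
- ```c
- /* how much of the remaining connect timeout to give the next attempt */
- static long allot_connect_ms(long time_left_ms, int addrs_left)
- {
-   if(addrs_left > 1 && time_left_ms >= 600)
-     return time_left_ms / 2;
-   return time_left_ms;
- }
- ```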
-
-- asyn-ares: reduce timeout to 2000ms
-
- When UDP packets get lost this makes for slightly faster retries. This
- lower timeout is used by c-ares itself by default starting with the next
- release.
-
- Closes #11753
-
-John Bampton (29 Aug 2023)
-
-- misc: remove duplicate words
-
- Closes #11740
-
-Daniel Stenberg (29 Aug 2023)
-
-- RELEASE-NOTES: synced
-
-- wolfSSL: avoid the OpenSSL compat API when not needed
-
- ... and instead call wolfSSL functions directly.
-
- Closes #11752
-
-Viktor Szakats (28 Aug 2023)
-
-- lib: fix null ptr derefs and uninitialized vars (h2/h3)
-
- Fixing compiler warnings with gcc 13.2.0 in unity builds.
-
- Assisted-by: Jay Satiro
- Assisted-by: Stefan Eissing
- Closes #11739
-
-Jay Satiro (28 Aug 2023)
-
-- secureserver.pl: fix stunnel version parsing
-
- - Allow the stunnel minor-version version part to be zero.
-
- Prior to this change with the stunnel version scheme of <major>.<minor>
- if either part was 0 then version parsing would fail, causing
- secureserver.pl to fail with error "No stunnel", causing tests that use
- the SSL protocol to be skipped. As a practical matter this bug can only
- be caused by a minor-version part of 0, since the major-version part is
- always greater than 0.
-
- Closes https://github.com/curl/curl/pull/11722
-
-- secureserver.pl: fix stunnel path quoting
-
- - Store the stunnel path in the private variable $stunnel unquoted and
- instead quote it in the command strings.
-
- Prior to this change the quoted stunnel path was passed to perl's file
- operators which cannot handle quoted paths. For example:
-
- $stunnel = "\"/C/Program Files (x86)/stunnel/bin/tstunnel\"";
- if(-x $stunnel or -x "$stunnel")
- # false even if path exists and is executable
-
- Our other test scripts written in perl, unlike this one, use servers.pm
- which has a global $stunnel variable with the path stored unquoted and
- therefore those scripts don't have this problem.
-
- Closes https://github.com/curl/curl/pull/11721
-
-Daniel Stenberg (28 Aug 2023)
-
-- altsvc: accept and parse IPv6 addresses in response headers
-
- Store numerical IPv6 addresses in the alt-svc file with the brackets
- present.
-
- Verify with test 437 and 438
-
- Fixes #11737
- Reported-by: oliverpool on github
- Closes #11743
-
-- libtest: use curl_free() to free libcurl allocated data
-
- In several test programs. These mistakes are not detected or a problem
- as long as memdebug.h is included, as that provides the debug wrappers
- for all memory functions in the same style libcurl internals do it,
- which makes curl_free and free effectively the same call.
-
- Reported-by: Nicholas Nethercote
- Closes #11746
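-
- A minimal sketch of the rule the fix enforces: memory handed out by
- libcurl (here from curl_easy_escape) goes back via curl_free(), not
- free():
-
- ```c
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     char *enc = curl_easy_escape(curl, "one two", 0);
-     if(enc)
-       curl_free(enc); /* not free() */
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```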
-
-Jay Satiro (28 Aug 2023)
-
-- disable.d: explain --disable not implemented prior to 7.50.0
-
- Option -q/--disable was added in 5.0 but only -q was actually
- implemented. Later --disable was implemented in e200034 (precedes
- 7.49.0), but incorrectly, and fixed in 6dbc23c (precedes 7.50.0).
-
- Reported-by: pszlazak@users.noreply.github.com
-
- Fixes https://github.com/curl/curl/issues/11710
- Closes #11712
-
-Nicholas Nethercote (28 Aug 2023)
-
-- hyper: fix ownership problems
-
- Some of these changes come from comparing `Curl_http` and
- `start_CONNECT`, which are similar, and adding things to them that are
- present in one and missing in another.
-
- The most important changes:
- - In `start_CONNECT`, add a missing `hyper_clientconn_free` call on the
- happy path.
- - In `start_CONNECT`, add a missing `hyper_request_free` on the error
- path.
- - In `bodysend`, add a missing `hyper_body_free` on an early-exit path.
- - In `bodysend`, remove an unnecessary `hyper_body_free` on a different
- error path that would cause a double-free.
- https://docs.rs/hyper/latest/hyper/ffi/fn.hyper_request_set_body.html
- says of `hyper_request_set_body`: "This takes ownership of the
- hyper_body *, you must not use it or free it after setting it on the
- request." This is true even if `hyper_request_set_body` returns an
- error; I confirmed this by looking at the hyper source code.
-
- Other changes are minor but make things slightly nicer.
-
- Closes #11745
-
-Daniel Stenberg (28 Aug 2023)
-
-- multi.h: the 'revents' field of curl_waitfd is supported
-
- Since 6d30f8ebed34e7276
-
- Reported-by: Nicolás Ojeda Bär
- Ref: #11748
- Closes #11749
-
-Gerome Fournier (27 Aug 2023)
-
-- tool_paramhlp: improve str2num(): avoid unnecessary call to strlen()
-
- Closes #11742
-
-Daniel Stenberg (27 Aug 2023)
-
-- docs: mention critical files in same directories as curl saves
-
- ... cannot be fully protected. Don't do it.
-
- Co-authored-by: Jay Satiro
- Reported-by: Harry Sintonen
- Fixes #11530
- Closes #11701
-
-John Hawthorn (26 Aug 2023)
-
-- OpenSSL: clear error queue after SSL_shutdown
-
- We've seen errors left in the OpenSSL error queue (specifically,
- "shutdown while in init") by adding some logging it revealed that the
- source was this file.
-
- Since we call SSL_read and SSL_shutdown here, but don't check the return
- code for an error, we should clear the OpenSSL error queue in case one
- was raised.
-
- This didn't affect curl because we call ERR_clear_error before every
- write operation (a0dd9df9ab35528eb9eb669e741a5df4b1fb833c), but when
- libcurl is used in a process with other OpenSSL users, they may detect
- an OpenSSL error pushed by libcurl's SSL_shutdown as if it was their
- own.
-
- Co-authored-by: Satana de Sant'Ana <satana@skylittlesystem.org>
-
- Closes #11736
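-
- A simplified sketch of the idea (not curl's exact code): after an
- SSL_shutdown whose result is ignored, drop whatever it may have put on
- the process-global OpenSSL error queue:
-
- ```c
- #include <openssl/err.h>
- #include <openssl/ssl.h>
-
- static void quiet_tls_shutdown(SSL *ssl)
- {
-   (void)SSL_shutdown(ssl);   /* result deliberately ignored */
-   ERR_clear_error();         /* do not leak errors to other users */
- }
- ```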
-
-Alexander Kanavin (25 Aug 2023)
-
-- tests: update cookie expiry dates to far in the future
-
- This allows testing Y2038 with system time set to after that, so that
- actual Y2038 issues can be exposed, and not masked by expiry errors.
-
- Fixes #11576
- Closes #11610
-
-John Bampton (25 Aug 2023)
-
-- misc: fix spelling
-
- Closes #11733
-
-Daniel Stenberg (25 Aug 2023)
-
-- cmdline-opts/page-header: clarify stronger that !opt == URL
-
- Everything provided on the command line that is not an option (or an
- argument to an option) is treated as a URL.
-
- Closes #11734
-
-- tests/runner: fix %else handling
-
- Getting the show state right for %else and %endif did not work properly
- in nested cases.
-
- Follow-up to 3d089c41ea9
-
- Closes #11731
-
-Nicholas Nethercote (25 Aug 2023)
-
-- docs: Remove mention of #10803 from `KNOWN_BUGS`.
-
- Because the leaks have been fixed.
-
-- c-hyper: fix another memory leak in `Curl_http`.
-
- There is a `hyper_clientconn_free` call on the happy path, but not one
- on the error path. This commit adds one.
-
- Fixes the second memory leak reported by Valgrind in #10803.
-
- Fixes #10803
- Closes #11729
-
-- c-hyper: fix a memory leak in `Curl_http`.
-
- A request created with `hyper_request_new` must be consumed by either
- `hyper_clientconn_send` or `hyper_request_free`.
-
- This is not terrifically clear from the hyper docs --
- `hyper_request_free` is documented only with "Free an HTTP request if
- not going to send it on a client" -- but a perusal of the hyper code
- confirms it.
-
- This commit adds a `hyper_request_free` to the `error:` path in
- `Curl_http` so that the request is consumed when an error occurs after
- the request is created but before it is sent.
-
- Fixes the first memory leak reported by Valgrind in #10803.
-
- Closes #11729
-
-Daniel Stenberg (25 Aug 2023)
-
-- RELEASE-NOTES: synced
-
-John Bampton (25 Aug 2023)
-
-- misc: spellfixes
-
- Closes #11730
-
-Daniel Stenberg (25 Aug 2023)
-
-- tests: add support for nested %if conditions
-
- Provides more flexibility to test cases.
-
- Also warn and bail out if there is a '%else' or '%endif' without a
- preceding '%if'.
-
- Ref: #11610
- Closes #11728
-
-- time-cond.d: mention what happens on a missing file
-
- Closes #11727
-
-Christian Hesse (24 Aug 2023)
-
-- docs/cmdline-opts: match the current output
-
- The release date has been added in output, reflect that in documentation.
-
- Closes #11723
-
-Daniel Stenberg (24 Aug 2023)
-
-- lib: minor comment corrections
-
-- docs: rewrite to present tense
-
- ... instead of using future tense.
-
- + numerous cleanups and improvements
- + stick to "reuse" not "re-use"
- + fewer contractions
-
- Closes #11713
-
-- urlapi: setting a blank URL ("") is not an ok URL
-
- Test it in 1560
- Fixes #11714
- Reported-by: ad0p on github
- Closes #11715
-
-- spelling: use 'reuse' not 're-use' in code and elsewhere
-
- Unify the spelling as both versions were previously used intermittently
-
- Closes #11717
-
-Michael Osipov (23 Aug 2023)
-
-- system.h: add CURL_OFF_T definitions on HP-UX with HP aCC
-
- HP-UX on IA64 provides two modes: 32 and 64 bit while 32 bit being the
- default one. Use "long long" in 32 bit mode and just "long" in 64 bit
- mode.
-
- Closes #11718
-
-Dan Fandrich (22 Aug 2023)
-
-- tests: don't call HTTP errors OK in test cases
-
- Some HTTP errors codes were accompanied by the text OK, which causes
- some cognitive dissonance when reading them.
-
-- http: close the connection after a late 417 is received
-
- In this situation, only part of the data has been sent before aborting
- so the connection is no longer usable.
-
- Assisted-by: Jay Satiro
- Fixes #11678
- Closes #11679
-
-- runtests: slightly increase the longest log file displayed
-
- The new limit provides enough space for a 64 KiB data block to be logged
- in a trace file, plus a few lines at the start and end for context. This
- happens to be the amount of data sent at a time in a PUT request.
-
-- tests: add delay command to the HTTP server
-
- This adds a delay after client connect.
-
-Daniel Stenberg (22 Aug 2023)
-
-- cirrus: install everything with pkg, avoid pip
-
- Assisted-by: Sevan Janiyan
-
- Closes #11711
-
-- curl_url*.3: update function descriptions
-
- - expand and clarify several descriptions
- - avoid using future tense all over
-
- Closes #11708
-
-- RELEASE-NOTES: synced
-
-Stefan Eissing (21 Aug 2023)
-
-- CI/cirrus: disable python install on FreeBSD
-
- - python cryptography package does not build on FreeBSD
- - install just mentions "error"
- - this gets the build and the main test suite going again
-
- Closes #11705
-
-- test2600: fix flakiness on low cpu
-
- - refs #11355 where failures due to low cpu resources in CI
- are reported
- - vastly extend CURLOPT_CONNECTTIMEOUT_MS and max durations
- to test cases
- - trigger Curl_expire() in test filter to allow re-checks before
- the usual 1-second interval
-
- Closes #11690
-
-Maksim Sciepanienka (20 Aug 2023)
-
-- tool_urlglob: use the correct format specifier for curl_off_t in msnprintf
-
- Closes #11698
-
-Daniel Stenberg (20 Aug 2023)
-
-- test687/688: two more basic --xattr tests
-
- Closes #11697
-
-- cmdline-opts/docs: mentioned the negative option part
-
- ... for --no-alpn and --no-buffer in the same style done for other --no-
- options:
-
- "Note that this is the negated option name documented."
-
- Closes #11695
-
-Emanuele Torre (19 Aug 2023)
-
-- tool/var: also error when expansion result starts with NUL
-
- Expansions whose output starts with NUL were being expanded to the empty
- string, and not being recognised as values that contain a NUL byte, which
- should be an error.
-
- Closes #11694
-
-Daniel Stenberg (19 Aug 2023)
-
-- tests: add 'large-time' as a testable feature
-
- This allows test cases to require this feature to run and to be used in
- %if conditions.
-
- Large here means larger than 32 bits, i.e. it does not suffer from y2038.
-
- Closes #11696
-
-- tests/Makefile: add check-translatable-options.pl to tarball
-
- Used in test 1544
-
- Follow-up to ae806395abc8c
-
-- gen.pl: fix a long version generation mistake
-
- Too excessive escaping made the parsing not find the correct long names
- later and instead add "wrong" links.
-
- Follow-up to 439ff2052e219
-
- Reported-by: Lukas Tribus
- Fixes #11688
- Closes #11689
-
-- lib: move mimepost data from ->req.p.http to ->state
-
- When the legacy CURLOPT_HTTPPOST option is used, it gets converted into
- the modern mimepost struct at first use. This data is (now) kept for the
- entire transfer and not only per single HTTP request. This re-enables
- rewind in the beginning of the second request instead of in end of the
- first, as brought by 1b39731.
-
- The request struct is per-request data only.
-
- Extend test 650 to verify.
-
- Fixes #11680
- Reported-by: yushicheng7788 on github
- Closes #11682
-
-Patrick Monnerat (17 Aug 2023)
-
-- os400: do not check translatable options at build time
-
- Now that there is a test for this, the build time check is not needed
- anymore.
-
- Closes #11650
-
-- test1554: check translatable string options in OS400 wrapper
-
- This test runs a perl script that checks all string options are properly
- translated by the OS400 character code conversion wrapper. It also
- verifies these options are listed in alphanumeric order in the wrapper
- switch statement.
-
- Closes #11650
-
-Daniel Stenberg (17 Aug 2023)
-
-- unit3200: skip testing if function is not present
-
- Fake a successful run since we have no easy mechanism to skip this test
- for this advanced condition.
-
-- unit2600: fix build warning if built without verbose messages
-
-- test1608: make it build and get skipped without shuffle DNS support
-
-- lib: --disable-bindlocal builds curl without local binding support
-
-- test1304: build and skip without netrc support
-
-- lib: build fixups when built with most things disabled
-
- Closes #11687
-
-- workflows/macos.yml: disable zstd and alt-svc in the http-only build
-
- Closes #11683
-
-Stefan Eissing (17 Aug 2023)
-
-- bearssl: handshake fix, provide proper get_select_socks() implementation
-
- - bring bearssl handshake times down from +200ms to be in line with other TLS backends
- - vtls: improve generic get_select_socks() implementation
- - tests: provide Apache with a suitable ssl session cache
-
- Closes #11675
-
-- tests: TLS session sharing test
-
- - test TLS session sharing with special test client
- - expect failure with wolfSSL
- - disable flaky wolfSSL test_02_07b
-
- Closes #11675
-
-Daniel Stenberg (17 Aug 2023)
-
-- CURLOPT_*TIMEOUT*: extend and clarify
-
- Closes #11686
-
-- urlapi: return CURLUE_BAD_HOSTNAME if puny2idn encoding fails
-
- And document it. Only return out of memory when it actually is a memory
- problem.
-
- Pointed-out-by: Jacob Mealey
- Closes #11674
-
-Mathew Benson (17 Aug 2023)
-
-- cmake: add GnuTLS option
-
- - Option to use GNUTLS was missing. Hence it was not possible to use GNUTLS
- with ngtcp2 for http3.
-
- Closes #11685
-
-Daniel Stenberg (16 Aug 2023)
-
-- RELEASE-NOTES: synced
-
-- http: remove the p_pragma struct field
-
- unused since 40e8b4e52 (2008)
-
- Closes #11681
-
-Jay Satiro (16 Aug 2023)
-
-- CURLINFO_CERTINFO.3: better explain curl_certinfo struct
-
- Closes https://github.com/curl/curl/pull/11666
-
-- CURLINFO_TLS_SSL_PTR.3: clarify a recommendation
-
- - Remove the out-of-date SSL backend list supported by
- CURLOPT_SSL_CTX_FUNCTION.
-
- It makes more sense to just refer to that document instead of having
- a separate list that has to be kept in sync.
-
- Closes https://github.com/curl/curl/pull/11665
-
-- write-out.d: clarify %{time_starttransfer}
-
- sync it up with CURLINFO_STARTTRANSFER_TIME_T
-
-Daniel Stenberg (15 Aug 2023)
-
-- transfer: don't set TIMER_STARTTRANSFER on first send
-
- The time stamp is for measuring the first *received* byte
-
- Fixes #11669
- Reported-by: JazJas on github
- Closes #11670
-
-trrui-huawei (15 Aug 2023)
-
-- quiche: enable quiche to handle timeout events
-
- In parallel with ngtcp2, quiche also offers the `quiche_conn_on_timeout`
- interface for the application to invoke upon timer
- expiration. Therefore, invoking the `on_timeout` function of the
- Connection is crucial to ensure seamless functionality of quiche with
- timeout events.
-
- Closes #11654
-
-- quiche: adjust quiche `QUIC_IDLE_TIMEOUT` to 60s
-
- Set the `QUIC_IDLE_TIMEOUT` parameter to match ngtcp2 for consistency.
-
-Daniel Stenberg (15 Aug 2023)
-
-- KNOWN_BUGS: LDAPS requests to ActiveDirectory server hang
-
- Closes #9580
-
-- imap: add a check for failing strdup()
-
-- imap: remove the only sscanf() call in the IMAP code
-
- Avoids the use of a stack buffer.
-
- Closes #11673
-
-- imap: use a dynbuf in imap_atom
-
- Avoid a calculation + malloc. Build the output in a dynbuf.
-
- Closes #11672
-
-Marin Hannache (14 Aug 2023)
-
-- http: do not require a user name when using CURLAUTH_NEGOTIATE
-
- In order to get Negotiate (SPNEGO) authentication to work in HTTP you
- used to be required to provide a (fake) user name (this concerned both
- curl and the lib) because the code wrongly only considered
- authentication if there was a user name provided, as in:
-
- curl -u : --negotiate https://example.com/
-
- This commit leverages the `struct auth` want member to figure out if the
- user enabled CURLAUTH_NEGOTIATE, effectively removing the requirement of
- setting a user name both in curl and the lib.
-
- Signed-off-by: Marin Hannache <git@mareo.fr>
- Reported-by: Enrico Scholz
- Fixes https://sourceforge.net/p/curl/bugs/440/
- Fixes #1161
- Closes #9047
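-
- A minimal sketch of using Negotiate via libcurl after this change,
- assuming a build with SPNEGO support; no fake user name is needed any
- longer:
-
- ```c
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
-     curl_easy_setopt(curl, CURLOPT_HTTPAUTH, (long)CURLAUTH_NEGOTIATE);
-     /* previously also required: CURLOPT_USERPWD set to ":" */
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```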
-
-Viktor Szakats (13 Aug 2023)
-
-- build: streamline non-UWP wincrypt detections
-
- - with CMake, use the variable `WINDOWS_STORE` to detect an UWP build
- and disable our non-UWP-compatible use the Windows crypto API. This
- allows to drop two dynamic feature checks.
-
- `WINDOWS_STORE` is true when invoking CMake with
- `CMAKE_SYSTEM_NAME` == `WindowsStore`. Introduced in CMake v3.1.
-
- Ref: https://cmake.org/cmake/help/latest/variable/WINDOWS_STORE.html
-
- - with autotools, drop the separate feature check for `wincrypt.h`. On
- one hand this header has been present for long (even Borland C 5.5 had
- it from year 2000), on the other we used the check result solely to
- enable another check for certain crypto functions. This fails anyway
- with the header not present. We save one dynamic feature check at the
- configure stage.
-
- Reviewed-by: Marcel Raad
- Closes #11657
-
-Nicholas Nethercote (13 Aug 2023)
-
-- docs/HYPER.md: update hyper build instructions
-
- Nightly Rust and `-Z unstable-options` are not needed.
-
- The instructions here now match the hyper docs exactly:
- https://github.com/hyperium/hyper/commit/bd7928f3dd6a8461f0f0fdf7ee0fd95c2f156f88
-
- Closes #11662
-
-Daniel Stenberg (13 Aug 2023)
-
-- RELEASE-NOTES: synced
-
-- urlapi: CURLU_PUNY2IDN - convert from punycode to IDN name
-
- Assisted-by: Jay Satiro
- Closes #11655
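-
- A minimal sketch of the new flag, assuming a libcurl built with IDN
- support (the punycode host is just an example):
-
- ```c
- #include <stdio.h>
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURLU *u = curl_url();
-   char *host = NULL;
-   curl_url_set(u, CURLUPART_URL, "https://xn--rksmrgs-5wao1o.se/", 0);
-   if(!curl_url_get(u, CURLUPART_HOST, &host, CURLU_PUNY2IDN)) {
-     printf("%s\n", host); /* the IDN form of the host name */
-     curl_free(host);
-   }
-   curl_url_cleanup(u);
-   return 0;
- }
- ```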
-
-- spellcheck: adapt to backslashed minuses
-
- As the curl.1 has more backslashed minuses, the cleanup sed lines need to
- adapt.
-
- Adjusted some docs slightly.
-
- Follow-up to 439ff2052e
-
- Closes #11663
-
-- gen: escape more minus
-
- Detected since it was still hard to search for option names using dashes
- in the middle in the man page.
-
- Closes #11660
-
-- cookie-jar.d: emphasize that this option is ONLY writing cookies
-
- Reported-by: Dan Jacobson
- Tweaked-by: Jay Satiro
- Ref: #11642
- Closes #11661
-
-Nicholas Nethercote (11 Aug 2023)
-
-- docs/HYPER.md: document a workaround for a link error
-
- Closes #11653
-
-Jay Satiro (11 Aug 2023)
-
-- schannel: verify hostname independent of verify cert
-
- Prior to this change when CURLOPT_SSL_VERIFYPEER (verifypeer) was off
- and CURLOPT_SSL_VERIFYHOST (verifyhost) was on we did not verify the
- hostname in schannel code.
-
- This fixes KNOWN_BUG 2.8 "Schannel disable CURLOPT_SSL_VERIFYPEER and
- verify hostname". We discussed a fix several years ago in #3285 but it
- went stale.
-
- Assisted-by: Daniel Stenberg
-
- Bug: https://curl.haxx.se/mail/lib-2018-10/0113.html
- Reported-by: Martin Galvan
-
- Ref: https://github.com/curl/curl/pull/3285
-
- Fixes https://github.com/curl/curl/issues/3284
- Closes https://github.com/curl/curl/pull/10056
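-
- A minimal sketch of the option combination this change addresses,
- assuming the Schannel backend: chain verification off, but hostname
- verification still on:
-
- ```c
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
-     curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
-     curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2L);
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```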
-
-Daniel Stenberg (11 Aug 2023)
-
-- curl_quiche: remove superfluous NULL check
-
- 'stream' is always non-NULL at this point
-
- Pointed out by Coverity
-
- Closes #11656
-
-- curl/urlapi.h: tiny typo
-
-- github/labeler: make HYPER.md set Hyper and not TLS
-
-- docs/cmdline-opts/gen.pl: hide "added in" before 7.50.0
-
- 7.50.0 shipped on Jul 21 2016, over seven years ago. We no longer need
- to specify version changes for earlier releases in the generated output.
-
- This ups the limit from the previous 7.30.0 (Apr 12 2013)
-
- This hides roughly 35 "added in" mentions.
-
- Closes #11651
-
-Jay Satiro (10 Aug 2023)
-
-- bug_report: require reporters to specify curl and os versions
-
- - Change curl version and os sections from single-line input to
- multi-line textarea.
-
- - Require curl version and os sections to be filled out before report
- can be submitted.
-
- Closes https://github.com/curl/curl/pull/11636
-
-Daniel Stenberg (9 Aug 2023)
-
-- gen.pl: replace all single quotes with aq
-
- - this prevents man from using a unicode sequence for them
- - which then allows search to work properly
-
- Closes #11645
-
-Viktor Szakats (9 Aug 2023)
-
-- cmake: fix to use variable for the curl namespace
-
- Replace a (wrong) literal with a variable to specify the curl
- namespace.
-
- Follow-up to 1199308dbc902c52be67fc805c72dd2582520d30 #11505
-
- Reported-by: balikalina on Github
- Fixes https://github.com/curl/curl/commit/1199308dbc902c52be67fc805c72dd25825
- 20d30#r123923098
- Closes #11629
-
-- cmake: allow `SHARE_LIB_OBJECT=ON` on all platforms
-
- 2ebc74c36a19a1700af394c16855ce144d9878e3 #11546 introduced sharing
- libcurl objects for shared and static targets.
-
- The above was automatically enabled for Windows builds, with an option to
- disable it with `SHARE_LIB_OBJECT=OFF`.
-
- This patch extends this feature to all platforms as a manual option.
- You can enable it by setting `SHARE_LIB_OBJECT=ON`. Then shared objects
- are built in PIC mode, meaning the static lib will also have PIC code.
-
- [EXPERIMENTAL]
-
- Closes #11627
-
-- cmake: assume `wldap32` availability on Windows
-
- This system library first shipped with Windows ME, available as an extra
- install for some older releases (according to [1]). The import library
- was present already in old MinGW 3.4.2 (year 2007).
-
- Drop the feature check and its associated `HAVE_WLDAP32` variable.
-
- To manually disable `wldap32`, you can use the `USE_WIN32_LDAP=OFF`
- CMake option, like before.
-
- [1]: https://dlcdn.apache.org/httpd/binaries/win32/LEGACY.html
-
- Reviewed-by: Jay Satiro
- Closes #11624
-
-Daniel Stenberg (9 Aug 2023)
-
-- page-header: move up a URL paragraph from GLOBBING to URL
-
-- variable.d: output the function names table style
-
- Also correct the url function name in the header
-
- Closes #11641
-
-- haproxy-clientip.d: remove backticks
-
- This is not markdown
-
- Follow-up to 0a75964d0d94a4
-
- Closes #11639
-
-- RELEASE-NOTES: synced
-
-- gen.pl: escape all dashes (ascii minus) to avoid unicode hyphens
-
- Reported-by: FC Stegerman
- Fixes #11635
- Closes #11637
-
-- cmdline-opts/page-header: reorder, clean up
-
- - removed some unnecessary blurb to focus
- - moved up the more important URL details
- - put "globbing" into its own subtitle and moved down a little
- - mention the online man page in the version section
-
- Closes #11638
-
-- c-hyper: adjust the hyper to curlcode conversion
-
- Closes #11621
-
-- test2306: make it use a persistent connection
-
- + enable verbose already from the start
-
- Closes #11621
-
-eppesuig (8 Aug 2023)
-
-- list-only.d: mention SFTP as supported protocol
-
- Closes #11628
-
-Daniel Stenberg (8 Aug 2023)
-
-- request.d: use .TP for protocol "labels"
-
- To render the section nicer in man page.
-
- Closes #11630
-
-- cf-haproxy: make CURLOPT_HAPROXY_CLIENT_IP set the *source* IP
-
- ... as documented.
-
- Update test 3201 and 3202 accordingly.
-
- Reported-by: Markus Sommer
- Fixes #11619
- Closes #11626
-
-- page-footer: QLOGDIR works with ngtcp2 and quiche
-
- It previously said "both" backends which is confusing as we currently
- have three...
-
- Closes #11631
-
-Stefan Eissing (8 Aug 2023)
-
-- http3: quiche, handshake optimization, trace cleanup
-
- - load x509 store after clienthello
- - cleanup of tracing
-
- Closes #11618
-
-Daniel Stenberg (8 Aug 2023)
-
-- ngtcp2: remove dead code
-
- 'result' is always zero (CURLE_OK) at this point
-
- Detected by Coverity
-
- Closes #11622
-
-Viktor Szakats (8 Aug 2023)
-
-- openssl: auto-detect `SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED`
-
- OpenSSL 1.1.1 defines this macro, but no earlier version, or any of the
- popular forks (yet). Use the macro itself to detect its presence,
- replacing the hard-wired fork-specific conditions.
-
- This way the feature will enable automatically when forks implement it,
- while also shorter and possibly requiring less future maintenance.
-
- Follow-up to 94241a9e78397a2aaf89a213e6ada61e7de7ee02 #6721
-
- Reviewed-by: Jay Satiro
- Closes #11617
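A simplified sketch of the detection pattern described above, assuming OpenSSL headers are available; the helper function name is made up for illustration.

```c
#include <openssl/ssl.h>

/* The macro's own presence decides whether the TLS 1.3 "certificate
   required" handling compiles in, regardless of which fork provides it. */
#ifdef SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED
static int is_cert_required_alert(unsigned long reason)
{
  return reason == SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED;
}
#endif
```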
-
-- openssl: use `SSL_CTX_set_ciphersuites` with LibreSSL 3.4.1
-
- LibreSSL 3.4.1 (2021-10-14) added support for
- `SSL_CTX_set_ciphersuites`.
-
- Ref: https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.4.1-relnotes.txt
-
- Reviewed-by: Jay Satiro
- Closes #11616
-
-- openssl: use `SSL_CTX_set_keylog_callback` with LibreSSL 3.5.0
-
- LibreSSL 3.5.0 (2022-02-24) added support for
- `SSL_CTX_set_keylog_callback`.
-
- Ref: https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt
-
- Reviewed-by: Jay Satiro
- Closes #11615
-
-- cmake: drop `HAVE_LIBWINMM` and `HAVE_LIBWS2_32` feature checks
-
- - `HAVE_LIBWINMM` was detected but unused. The `winmm` system library is
- also not used by curl, but it is by its optional dependency `librtmp`.
- Change the logic to always add `winmm` when `USE_LIBRTMP` is set. This
- library has been available since the early days of Windows.
-
- - `HAVE_LIBWS2_32` detected `ws2_32` lib on Windows. This lib is present
- since Windows 95 OSR2 (AFAIR). Winsock1 already wasn't supported and
- other existing logic already assumed this lib being present, so delete
- the check and replace the detection variable with `WIN32` and always
- add `ws2_32` on Windows.
-
- Closes #11612
-
-Daniel Gustafsson (8 Aug 2023)
-
-- crypto: ensure crypto initialization works
-
- Make sure that context initialization during hash setup works to avoid
- going forward with the risk of a null pointer dereference.
-
- Reported-by: Philippe Antoine on HackerOne
- Assisted-by: Jay Satiro
- Assisted-by: Daniel Stenberg
-
- Closes #11614
-
-Viktor Szakats (7 Aug 2023)
-
-- openssl: switch to modern init for LibreSSL 2.7.0+
-
- LibreSSL 2.7.0 (2018-03-21) introduced automatic initialization,
- `OPENSSL_init_ssl()` function and deprecated the old, manual init
- method, as seen in OpenSSL 1.1.0. Switch to the modern method when
- available.
-
- Ref: https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.7.0-relnotes.txt
-
- Reviewed-by: Daniel Stenberg
- Closes #11611
-
-Daniel Stenberg (7 Aug 2023)
-
-- gskit: remove
-
- We remove support for building curl with gskit.
-
- - This is a niche TLS library, only running on some IBM systems
- - no regular curl contributors use this backend
- - no CI builds use or verify this backend
- - gskit, or the curl adaption for it, lacks many modern TLS features
- making it an inferior solution
- - build breakages in this code take weeks or more to get detected
- - fixing gskit code is mostly done "flying blind"
-
- This removal has been advertised in DEPRECATED since Jan 2, 2023 and it has
- been mentioned on the curl-library mailing list.
-
- It could be brought back; this is not a ban. Given proper effort and
- will, gskit support is welcome back into the curl TLS backend family.
-
- Closes #11460
-
-- RELEASE-NOTES: synced
-
-Dan Fandrich (7 Aug 2023)
-
-- THANKS-filter: add a name typo
-
-Stefan Eissing (7 Aug 2023)
-
-- http3/ngtcp2: shorten handshake, trace cleanup
-
- - shorten handshake timing by delayed x509 store load (OpenSSL)
- as we do for HTTP/2
- - cleanup of trace output, align with HTTP/2 output
-
- Closes #11609
-
-Daniel Stenberg (7 Aug 2023)
-
-- headers: accept leading whitespaces on first response header
-
- This is a bad header fold, but since the popular browsers accept this
- violation, curl now does too. Unless built with hyper.
-
- Add test 1473 to verify and adjust test 2306.
-
- Reported-by: junsik on github
- Fixes #11605
- Closes #11607
-
-- include/curl/mprintf.h: add __attribute__ for the prototypes
-
- - if gcc or clang is used
- - if __STDC_VERSION__ >= 199901L, which means greater than C90
- - if not using mingw
- - if CURL_NO_FMT_CHECKS is not defined
-
- Closes #11589
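A hedged sketch of the kind of guarded format-check attribute this adds; the macro and function names below are hypothetical, only the guard conditions mirror the commit message.

```c
/* hypothetical names, for illustration only */
#if defined(__GNUC__) && !defined(__MINGW32__) && \
    defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    !defined(CURL_NO_FMT_CHECKS)
#define EXAMPLE_PRINTF_CHECK(fmt, arg) __attribute__((format(printf, fmt, arg)))
#else
#define EXAMPLE_PRINTF_CHECK(fmt, arg)
#endif

/* the compiler can now warn about mismatched format strings and arguments */
int example_msprintf(char *buffer, const char *format, ...)
  EXAMPLE_PRINTF_CHECK(2, 3);
```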
-
-- tests: fix bad printf format flags in test code
-
-- tests: fix header scan tools for attribute edits in mprintf.h
-
-- cf-socket: log successful interface bind
-
- When the setsockopt SO_BINDTODEVICE operation succeeds, output that in
- the verbose output.
-
- Ref: #11599
- Closes #11608
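For reference, a minimal sketch of the kind of setup that triggers this bind; the interface name is an example and SO_BINDTODEVICE is Linux-specific.

```c
#include <curl/curl.h>

static void bind_transfer_to_interface(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_INTERFACE, "eth0"); /* example interface name */
  curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);       /* the successful bind now shows up here */
}
```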
-
-- CURLOPT_SSL_VERIFYPEER.3: mention it does not load CA certs when disabled
-
- Ref: #11457
- Closes #11606
-
-- CURLOPT_SSL_VERIFYPEER.3: add two more see also options
-
- CURLINFO_CAINFO and CURLINFO_CAPATH
-
- Closes #11603
-
-- KNOWN_BUGS: aws-sigv4 does not behave well with AWS VPC Lattice
-
- Closes #11007
-
-Graham Campbell (6 Aug 2023)
-
-- CI: use openssl 3.0.10+quic, nghttp3 0.14.0, ngtcp2 0.18.0
-
- Closes #11585
-
-Daniel Stenberg (6 Aug 2023)
-
-- TODO: add *5* entries for aws-sigv4
-
- Closes #7559
- Closes #8107
- Closes #8810
- Closes #9717
- Closes #10129
-
-- TODO: LDAP Certificate-Based Authentication
-
- Closes #9641
-
-Stefan Eissing (6 Aug 2023)
-
-- http2: cleanup trace messages
-
- - more compact format with bracketed stream id
- - all frames traced in and out
-
- Closes #11592
-
-Daniel Stenberg (6 Aug 2023)
-
-- tests/tftpd+mqttd: make variables static to silence picky warnings
-
- Closes #11594
-
-- docs/cmdline: remove repeated wording for negotiate + ntlm
-
- The extra wording is added automatically by the gen.pl tool
-
- Closes #11597
-
-- docs/cmdline: add small "warning" to verbose options
-
- "Note that verbose output of curl activities and network traffic might
- contain sensitive data, including user names, credentials or secret data
- content. Be aware and be careful when sharing trace logs with others."
-
- Closes #11596
-
-- RELEASE-NOTES: synced
-
-- pingpong: don't use *bump_headersize
-
- We use that for HTTP(S) only.
-
- Follow-up to 3ee79c1674fd6
-
- Closes #11590
-
-- urldata: remove spurious parenthesis to unbreak no-proxy build
-
- Follow-up to e12b39e13382
-
- Closes #11591
-
-- easy: don't call Curl_trc_opt() in disabled-verbose builds
-
- Follow-up to e12b39e133822c6a0
-
- Closes #11588
-
-- http: use %u for printfing int
-
- Follow-up to 3ee79c1674fd6f99e8efca5
-
- Closes #11587
-
-Goro FUJI (3 Aug 2023)
-
-- vquic: show stringified messages for errno
-
- Closes #11584
-
-Stefan Eissing (3 Aug 2023)
-
-- trace: make tracing available in non-debug builds
-
- Add --trace-config to curl
-
- Add curl_global_trace() to libcurl
-
- Closes #11421
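A minimal usage sketch of the new libcurl API; the "all" config token follows the curl_global_trace(3) documentation, and the accepted component names may vary by build.

```c
#include <curl/curl.h>

int main(void)
{
  curl_global_init(CURL_GLOBAL_DEFAULT);
  curl_global_trace("all");   /* enable detailed tracing for all components */
  /* ... transfers run here inherit the trace configuration ... */
  curl_global_cleanup();
  return 0;
}
```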
-
-Daniel Stenberg (3 Aug 2023)
-
-- TODO: remove "Support intermediate & root pinning for PINNEDPUBLICKEY"
-
- See also https://github.com/curl/curl/pull/7507
-
-- TODO: add "WebSocket read callback"
-
- remove "Upgrade to websockets" as we already have this
-
- Closes #11402
-
-- test497: verify rejecting too large incoming headers
-
-- http: return error when receiving too large header set
-
- To avoid abuse. The limit is set to 300 KB for the accumulated size of
- all received HTTP headers for a single response. Incomplete research
- suggests that Chrome uses a 256-300 KB limit, while Firefox allows up to
- 1MB.
-
- Closes #11582
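A simplified sketch of the idea, not the actual libcurl implementation: keep a running total of received header bytes and fail once it exceeds the cap.

```c
#include <stddef.h>

#define MAX_HTTP_RESP_HEADER_SIZE (300 * 1024)  /* 300 KB, as described above */

/* returns 0 while under the limit, -1 once the accumulated size is too big */
static int account_header_bytes(size_t *total, size_t chunk)
{
  *total += chunk;
  return (*total > MAX_HTTP_RESP_HEADER_SIZE) ? -1 : 0;
}
```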
-
-Stefan Eissing (3 Aug 2023)
-
-- http2: upgrade tests and add fix for non-existing stream
-
- - check in h2 filter recv that stream actually exists
- and return error if not
- - add test for parallel, extreme h2 upgrades that fail if
- connections get reused before fully switched
- - add h2 upgrade upload test just for completeness
-
- Closes #11563
-
-Viktor Szakats (3 Aug 2023)
-
-- tests: ensure `libcurl.def` contains all exports
-
- Add `test1279` to verify that `libcurl.def` lists all exported API
- functions found in libcurl headers.
-
- Also:
-
- - extend test suite XML `stdout` tag with the `loadfile` attribute.
-
- - fix `tests/extern-scan.pl` and `test1135` to include websocket API.
-
- - use all headers (sorted) in `test1135` instead of a manual list.
-
- - add options `--sort`, `--heading=` to `tests/extern-scan.pl`.
-
- - add `libcurl.def` to the auto-labeler GHA task.
-
- Follow-up to 2ebc74c36a19a1700af394c16855ce144d9878e3
-
- Closes #11570
-
-Daniel Stenberg (2 Aug 2023)
-
-- url: change default value for CURLOPT_MAXREDIRS to 30
-
- It was previously unlimited by default, but that's not a sensible
- default. While changing this has a remote risk of breaking an existing
- use case, I figure it is more likely to actually save users from loops.
-
- Closes #11581
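A small sketch for applications affected by the new default: set the limit explicitly. The values shown are examples; -1 restores the old unlimited behavior.

```c
#include <curl/curl.h>

static void follow_redirects(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
  curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 50L); /* or -1L for unlimited */
}
```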
-
-- lib: fix a few *printf() flag mistakes
-
- Reported-by: Gisle Vanem
- Ref: #11574
- Closes #11579
-
-Samuel Chiang (2 Aug 2023)
-
-- openssl: make aws-lc version support OCSP
-
- And bump version in CI
-
- Closes #11568
-
-Daniel Stenberg (2 Aug 2023)
-
-- tool: make the length argument an int for printf()-.* flags
-
- Closes #11578
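A short illustration of the rule this change enforces: the '*' width/precision argument to printf() must be an int, so wider types need an explicit cast.

```c
#include <stdio.h>

static void print_prefix(const char *s, size_t len)
{
  printf("%.*s\n", (int)len, s); /* '*' consumes an int, hence the cast */
}
```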
-
-- tool_operate: fix memory leak when SSL_CERT_DIR is used
-
- Detected by Coverity
-
- Follow-up to 29bce9857a12b6cfa726a5
-
- Closes #11577
-
-- tool/var: free memory on OOM
-
- Coverity detected this memory leak in OOM situation
-
- Follow-up to 2e160c9c652504e
-
- Closes #11575
-
-Viktor Szakats (2 Aug 2023)
-
-- gha: bump libressl and mbedtls versions
-
- Closes #11573
-
-Jay Satiro (2 Aug 2023)
-
-- schannel: fix user-set legacy algorithms in Windows 10 & 11
-
- - If the user set a legacy algorithm list (CURLOPT_SSL_CIPHER_LIST) then
- use the SCHANNEL_CRED legacy structure to pass the list to Schannel.
-
- - If the user set both a legacy algorithm list and a TLS 1.3 cipher list
- then abort.
-
- Although MS doesn't document it, Schannel will not negotiate TLS 1.3
- when SCHANNEL_CRED is used. That means setting a legacy algorithm list
- limits the user to earlier versions of TLS.
-
- Prior to this change, since 8beff435 (precedes 7.85.0), libcurl would
- ignore legacy algorithms in Windows 10 1809 and later.
-
- Reported-by: zhihaoy@users.noreply.github.com
-
- Fixes https://github.com/curl/curl/pull/10741
- Closes https://github.com/curl/curl/pull/10746
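Illustrative only: selecting a legacy algorithm on the Schannel backend. The algorithm name is an example in the style of the CURLOPT_SSL_CIPHER_LIST documentation; per this change such a list now goes through SCHANNEL_CRED, so TLS 1.3 is not negotiated.

```c
#include <curl/curl.h>

static void use_legacy_schannel_ciphers(CURL *curl)
{
  /* example value; combining this with a TLS 1.3 cipher list now aborts */
  curl_easy_setopt(curl, CURLOPT_SSL_CIPHER_LIST, "CALG_AES_256");
}
```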
-
-Daniel Stenberg (2 Aug 2023)
-
-- variable.d: setting a variable again overwrites it
-
- Reported-by: Niall McGee
- Bug: https://twitter.com/niallmcgee/status/1686523075423322113
- Closes #11571
-
-Jay Satiro (2 Aug 2023)
-
-- CURLOPT_PROXY_SSL_OPTIONS.3: sync formatting
-
- - Re-wrap CURLSSLOPT_ALLOW_BEAST description.
-
-Daniel Stenberg (2 Aug 2023)
-
-- RELEASE-NOTES: synced
-
-- resolve: use PF_INET6 family lookups when CURL_IPRESOLVE_V6 is set
-
- Previously it would always do PF_UNSPEC if CURL_IPRESOLVE_V4 is not
- used, thus unnecessarily asking for addresses that will not be used.
-
- Reported-by: Joseph Tharayil
- Fixes #11564
- Closes #11565
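A minimal sketch of the option this fix concerns: with CURL_IPRESOLVE_V6 set, the resolver now asks for PF_INET6 addresses only instead of PF_UNSPEC.

```c
#include <curl/curl.h>

static void resolve_ipv6_only(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_IPRESOLVE, (long)CURL_IPRESOLVE_V6);
}
```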
-
-- docs: link to the website versions instead of markdowns
-
- ... to make the links work when the markdown is converted to webpages on
- https://curl.se
-
- Reported-by: Maurício Meneghini Fauth
- Fixes https://github.com/curl/curl-www/issues/272
- Closes #11569
-
-Viktor Szakats (1 Aug 2023)
-
-- cmake: cache more config and delete unused ones
-
- - cache more Windows config results for faster initialization.
-
- - delete unused config macros `HAVE_SYS_UTSNAME_H`, `HAVE_SSL_H`.
-
- - delete dead references to `sys/utsname.h`.
-
- Closes #11551
-
-- egd: delete feature detection and related source code
-
- EGD is Entropy Gathering Daemon, a socket-based entropy source supported
- by pre-OpenSSL v1.1 versions and now deprecated. curl also deprecated it
- a while ago.
-
- Its detection in CMake was broken all along because OpenSSL libs were
- not linked at the point of feature check.
-
- Delete detection from both cmake and autotools, along with the related
- source snippet, and the `--with-egd-socket=` `./configure` option.
-
- Closes #11556
-
-Stefan Eissing (1 Aug 2023)
-
-- tests: fix h3 server check and parallel instances
-
- - fix check for availability of nghttpx server
- - add `tcp` frontend config for same port as quic, as
- without this, port 3000 is bound which clashes for parallel
- testing
-
- Closes #11553
-
-Daniel Stenberg (1 Aug 2023)
-
-- docs/cmdline-opts: spellfixes, typos and polish
-
- To make them accepted by the spell checker
-
- Closes #11562
-
-- CI/spellcheck: build curl.1 and spellcheck it
-
- Added acceptable words
-
- Closes #11562
-
-Alexander Jaeger (1 Aug 2023)
-
-- misc: fix various typos
-
- Closes #11561
-
-Daniel Stenberg (1 Aug 2023)
-
-- http2: avoid too early connection re-use/multiplexing
-
- HTTP/1 connections that are upgraded to HTTP/2 should not be picked up
- for reuse and multiplexing by other handles until the 101 switching
- process is completed.
-
- Lots-of-debugging-by: Stefan Eissing
- Reported-by: Richard W.M. Jones
- Bug: https://curl.se/mail/lib-2023-07/0045.html
- Closes #11557
-
-- Revert "KNOWN_BUGS: build for iOS simulator on macOS 13.2 with Xcode 14"
-
- This reverts commit 2e8a3d7cb73c85a9aa151e263315f8a496dbb9d4.
-
- It's a user error to supply incomplete information to the build system.
-
- Reported-by: Ryan Schmidt
- Ref: https://github.com/curl/curl/issues/11215#issuecomment-1658729367
-
-Viktor Szakats (1 Aug 2023)
-
-- cmake: add support for single libcurl compilation pass
-
- Before this patch CMake builds used two separate compilation passes to
- build the shared and static libcurl respectively. This patch allows to
- reduce that to a single pass if the target platform and build settings
- allow it.
-
- This reduces CMake build times when building both static and shared
- libcurl at the same time, making these dual builds an almost zero-cost
- option.
-
- Enable this feature for Windows builds, where the difference between the
- two passes was the use of `__declspec(dllexport)` attribute for exported
- API functions for the shared builds. This patch replaces this method
- with the use of `libcurl.def` at DLL link time.
-
- Also update `Makefile.mk` to use `libcurl.def` to export libcurl API
- symbols on Windows. This simplifies (or fixes) this build method (e.g.
- in curl-for-win, which generated a `libcurl.def` from `.h` files using
- an elaborate set of transformations).
-
- `libcurl.def` has the maintenance cost of keeping the list of public
- libcurl API symbols up-to-date. This list seldom changes, so the cost
- is low.
-
- Closes #11546
-
-- cmake: detect `SSL_set0_wbio` in OpenSSL
-
- Present in OpenSSL 1.1.0 and BoringSSL.
- Missing from LibreSSL 3.8.0.
-
- Follow-up to f39472ea9f4f4e12cfbc0500c4580a8d52ce4a59
-
- While here, also fix `RAND_egd()` detection which was broken, likely all
- along. This feature is probably broken with CMake builds and also
- requires a sufficiently obsolete OpenSSL version, so this part of the
- update was not tested.
-
- Closes #11555
diff --git a/libs/libcurl/docs/THANKS b/libs/libcurl/docs/THANKS index 0c1d0c7589..ecd6e9eb9a 100644 --- a/libs/libcurl/docs/THANKS +++ b/libs/libcurl/docs/THANKS @@ -11,6 +11,7 @@ 1ocalhost on github
3dyd on github
3eka on github
+5533asdg on github
8U61ife on github
a1346054 on github
Aaro Koskinen
@@ -47,11 +48,13 @@ ahodesuka on github ajak in #curl
Ajit Dhumale
Akhil Kedia
+Akhilesh Nema
Aki Koskinen
Akos Pasztory
Akshay Vernekar
Alain Danteny
Alain Miniussi
+Alan Coopersmith
Alan Jenkins
Alan Pinstein
Albert Chin-A-Young
@@ -155,6 +158,7 @@ Andreas Falkenhahn Andreas Farber
Andreas Fischer
Andreas Huebner
+Andreas Kiefer
Andreas Kostyrka
Andreas Malzahn
Andreas Ntaflos
@@ -186,6 +190,7 @@ Andrew de los Reyes Andrew Francis
Andrew Fuller
Andrew Ishchuk
+Andrew Kaster
Andrew Krieger
Andrew Kurushin
Andrew Lambert
@@ -238,6 +243,7 @@ apparentorder on github April King
arainchik on github
Archangel_SDY on github
+Arjan van de Ven
Arkadiusz Miskiewicz
Armel Asselin
Arnaud Compan
@@ -262,7 +268,9 @@ Ates Goral atjg on github
Augustus Saunders
Austin Green
+av223119 on github
Avery Fay
+awesomekosm on github
awesomenode on github
Axel Chong
Axel Morawietz
@@ -378,12 +386,14 @@ BratSinot on github Brendan Jurd
Brennan Kinney
Brent Beardsley
+Brett Buddin
Brian Akins
Brian Bergeron
Brian Carpenter
Brian Chaplin
Brian Childs
Brian Chrisman
+Brian Clemens
Brian Dessent
Brian E. Gallew
Brian Green
@@ -453,6 +463,7 @@ Charles Kerr Charles Romestant
Charlie C
Chen Prog
+chensong1211 on github
Cherish98 on github
Chester Liu
Chih-Chung Chang
@@ -475,6 +486,7 @@ Chris Roberts Chris Sauer
Chris Smowton
Chris Talbot
+Chris Webb
Chris Young
Christian Fillion
Christian Grothoff
@@ -502,6 +514,7 @@ Christopher R. Palmer Christopher Reid
Christopher Sauer
Christopher Stone
+chrysos349 on github
Chungtsun Li
Ciprian Badescu
civodul on github
@@ -575,6 +588,7 @@ Dan Fandrich Dan Johnson
Dan Kenigsberg
Dan Locks
+Dan McDonald
Dan McNulty
Dan Nelson
Dan Petitt
@@ -608,6 +622,7 @@ Daniel Shahaf Daniel Silverstone
Daniel Steinberg
Daniel Stenberg
+Daniel Szmulewicz
Daniel Theron
Daniel Valenzuela
Daniel Woelfel
@@ -618,6 +633,7 @@ Darryl House Darshan Mody
Darío Hereñú
dasimx on github
+DasKutti on github
Dave Cottlehuber
Dave Dribin
Dave Halbakken
@@ -689,12 +705,13 @@ Denis Laxalde Denis Ollier
Dennis Clarke
Dennis Felsing
-dependabot[bot]
Derek Higgins
Derzsi Dániel
Desmond O. Chang
destman on github
Detlef Schmier
+Dexter Gerig
+dfdity on github
Dheeraj Sangamkar
Didier Brisebourg
Diego Bes
@@ -714,6 +731,7 @@ Diogo Teles Sant'Anna Dion Williams
Dirk Eddelbuettel
Dirk Feytons
+Dirk Hünniger
Dirk Manske
Dirk Rosenkranz
Dirk Wetter
@@ -738,6 +756,7 @@ Dmitry Mikhirev Dmitry Popov
Dmitry Rechkin
Dmitry S. Baikov
+Dmitry Tretyakov
Dmitry Wagin
dnivras on github
Dolbneff A.V
@@ -788,6 +807,7 @@ ed0d2b2ce19451f2 Eddie Lumpkin
Edgaras Janušauskas
Edin Kadribasic
+edmcln on github
Edmond Yu
Edoardo Lolletti
Eduard Bloch
@@ -860,6 +880,7 @@ Erik Janssen Erik Johansson
Erik Minekus
Erik Olsson
+Erik Schnetter
Erik Stenlund
Ernest Beinrohr
Ernst Sjöstrand
@@ -884,6 +905,7 @@ Fabian Frank Fabian Hiernaux
Fabian Keil
Fabian Ruff
+Fabian Vogt
Fabian Yamaguchi
Fabrice Fontaine
Fabrizio Ammollo
@@ -957,6 +979,7 @@ Gabriel Corona Gabriel Kuri
Gabriel Simmer
Gabriel Sjoberg
+Gaelan Steele
Gambit Communications
Ganesh Kamath
gaoxingwang on github
@@ -1018,6 +1041,7 @@ Gou Lingfeng Graham Campbell
Grant Erickson
Grant Pannell
+graywolf on github
Greg Hewgill
Greg Morse
Greg Onufer
@@ -1111,6 +1135,7 @@ Hongyi Zhao Howard Blaise
Howard Chu
hsiao yi
+HsiehYuho on github
htasta on github
Hubert Kario
Hugh Macdonald
@@ -1312,6 +1337,7 @@ Jesse Noller Jesse Tan
jethrogb on github
jhoyla on github
+Jiawen Geng
Jie He
Jiehong on github
Jilayne Lovejoy
@@ -1327,6 +1353,7 @@ Jiri Dvorak Jiri Hruska
Jiri Jaburek
Jishan Shaikh
+Jiří Bok
Jiří Malák
jmdavitt on github
jnbr on github
@@ -1416,6 +1443,7 @@ Jonathan Cardoso Machado Jonathan Hseu
Jonathan Moerman
Jonathan Nieder
+Jonathan Perkin
Jonathan Watt
Jonathan Wernberg
Jongki Suwandi
@@ -1536,6 +1564,7 @@ Kev Jackson Kevin Adler
Kevin Baughman
Kevin Burke
+Kevin Daudt
Kevin Fisk
Kevin Ji
Kevin Lussier
@@ -1563,10 +1592,12 @@ Koichi Shiraishi kokke on github
Konstantin Isakov
Konstantin Kushnir
+Konstantin Vlasov
KotlinIsland on github
kotoriのねこ
kouzhudong on github
Kovalkov Dmitrii
+kpcyrd
kreshano on github
Kris Kennaway
Krishnendu Majumdar
@@ -1606,6 +1637,7 @@ Lars Francke Lars Gustafsson
Lars J. Aas
Lars Johannesen
+Lars Kellogg-Stedman
Lars Nilsson
Lars Torben Wilson
Lau
@@ -1620,6 +1652,7 @@ Lawrence Wagerfield Leah Neukirchen
Lealem Amedie
Leandro Coutinho
+LeeRiva
Legoff Vincent
Lehel Bernadt
Leif W
@@ -1666,6 +1699,7 @@ Loic Dachary LoRd_MuldeR
Loren Kirkby
Lorenzo Miniero
+Louis Solofrizzo
Loïc Yhuel
lRoccoon on github
Luan Cestari
@@ -1690,6 +1724,7 @@ Luke Amery Luke Call
Luke Dashjr
Luke Granger-Brown
+Lukáš Zaoral
luminixinc on github
Luo Jinghua
Luong Dinh Dung
@@ -1746,6 +1781,7 @@ Marco Maggi Marcos Diazr
Marcus Hoffmann
Marcus Klein
+Marcus Müller
Marcus Sundberg
Marcus T
Marcus Webster
@@ -2060,6 +2096,7 @@ Nicolás Ojeda Bär Niels Martignène
Niels van Tongeren
Nikita Schmidt
+Nikita Taranov
Nikitinskit Dmitriy
Niklas Angebrand
Niklas Hambüchen
@@ -2132,6 +2169,7 @@ Oscar Norlander Oskar Liljeblad
Oskar Sigvardsson
Oumph on github
+Outvi V
ovidiu-benea on github
Ozan Cansel
P R Schaffner
@@ -2198,6 +2236,7 @@ Pawel A. Gajda Pawel Kierski
Paweł Kowalski
Paweł Wegner
+Paweł Witas
PBudmark on github
Pedro Henrique
Pedro Larroy
@@ -2219,6 +2258,7 @@ Peter Goodman Peter Heuchert
Peter Hjalmarsson
Peter Korsgaard
+Peter Krefting
Peter Körner
Peter Lamare
Peter Lamberg
@@ -2293,6 +2333,7 @@ pszemus on github pszlazak on github
puckipedia on github
Puneet Pawaia
+Pēteris Caune
qiandu2006 on github
Quagmire
Quanah Gibson-Mount
@@ -2315,6 +2356,7 @@ Rainer Canavan Rainer Jung
Rainer Koenig
Rainer Müller
+RainRat
Raito Bezarius
Rajesh Naganathan
Rajkumar Mandal
@@ -2323,6 +2365,7 @@ Ralph Beckmann Ralph Langendam
Ralph Mitchell
Ram Krushna Mishra
+Ramiro Garcia
ramsay-jones on github
Ran Mozes
RanBarLavie on github
@@ -2428,6 +2471,7 @@ Robert Foreman Robert Iakobashvili
Robert Kolcun
Robert Linden
+Robert Moreton
Robert Olson
Robert Prag
Robert Ronto
@@ -2476,6 +2520,7 @@ roughtex on github Roy Bellingan
Roy Li
Roy Shan
+Rudi Heitbaum
Rui LIU
Rui Pinheiro
Rune Kleveland
@@ -2488,6 +2533,7 @@ RuurdBeerstra on github rwmjones on github
Ryan Beck-Buysse
Ryan Braud
+Ryan Carsten Schmidt
Ryan Chan
Ryan Mast
Ryan Nelson
@@ -2545,6 +2591,8 @@ Scott Barrett Scott Cantor
Scott Davis
Scott McCreary
+Scott Mutter
+Scott Talbert
sd0 on hackerone
Sean Boudreau
Sean Burford
@@ -2555,6 +2603,7 @@ Sean Molenaar Sebastiaan van Erk
Sebastian Haglund
Sebastian Mundry
+Sebastian Neubauer
Sebastian Pohlschmidt
Sebastian Rasmussen
Sebastian Sterk
@@ -2618,6 +2667,7 @@ Simon Chalifoux Simon Dick
Simon H.
Simon Josefsson
+Simon K
Simon Legner
Simon Liu
Simon Warta
@@ -2731,6 +2781,7 @@ T200proX7 on github Tadej Vengust
Tae Hyoung Ahn
Taiyu Len
+Tal Regev
Taneli Vähäkangas
Tanguy Fautre
Taras Kushnir
@@ -2766,6 +2817,7 @@ Thomas L. Shinnick Thomas Lopatic
Thomas M. DuBuisson
Thomas Petazzoni
+Thomas Pyle
Thomas Ruecker
Thomas Schwinge
Thomas Taylor
@@ -2778,6 +2830,7 @@ Thorsten Klein Thorsten Schöning
Tiit Pikma
Till Maas
+Till Wegmüller
Tim Ansell
Tim Baker
Tim Bartley
@@ -2948,6 +3001,7 @@ Vojtěch Král Volker Schmid
Vsevolod Novikov
vshmuk on hackerone
+vulnerabilityspotter on hackerone
vvb2060
vvb2060 on github
Vyron Tsingaras
@@ -2981,6 +3035,7 @@ William A. Rowe Jr William Ahern
William Desportes
William Tang
+Winni Neessen
wmsch on github
wncboy on github
Wojciech Zwiefka
diff --git a/libs/libcurl/include/curl/curl.h b/libs/libcurl/include/curl/curl.h index e69cb4fc17..3775ac8af9 100644 --- a/libs/libcurl/include/curl/curl.h +++ b/libs/libcurl/include/curl/curl.h @@ -2938,7 +2938,8 @@ typedef enum { CURLINFO_XFER_ID = CURLINFO_OFF_T + 63,
CURLINFO_CONN_ID = CURLINFO_OFF_T + 64,
CURLINFO_QUEUE_TIME_T = CURLINFO_OFF_T + 65,
- CURLINFO_LASTONE = 65
+ CURLINFO_USED_PROXY = CURLINFO_LONG + 66,
+ CURLINFO_LASTONE = 66
} CURLINFO;
/* CURLINFO_RESPONSE_CODE is the new name for the option previously known as
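A hedged usage sketch for the newly added CURLINFO_USED_PROXY (introduced in 8.7.0): after a transfer it reports, as a long, whether a proxy was actually used.

```c
#include <curl/curl.h>
#include <stdio.h>

static void report_proxy_use(CURL *curl)
{
  long used = 0;
  if(curl_easy_getinfo(curl, CURLINFO_USED_PROXY, &used) == CURLE_OK)
    printf("proxy used: %s\n", used ? "yes" : "no");
}
```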
diff --git a/libs/libcurl/include/curl/curlver.h b/libs/libcurl/include/curl/curlver.h index 8d3879a30a..b2ac5a66d6 100644 --- a/libs/libcurl/include/curl/curlver.h +++ b/libs/libcurl/include/curl/curlver.h @@ -32,13 +32,13 @@ /* This is the version number of the libcurl package from which this header
file origins: */
-#define LIBCURL_VERSION "8.6.0"
+#define LIBCURL_VERSION "8.7.1"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 8
-#define LIBCURL_VERSION_MINOR 6
-#define LIBCURL_VERSION_PATCH 0
+#define LIBCURL_VERSION_MINOR 7
+#define LIBCURL_VERSION_PATCH 1
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
@@ -59,7 +59,7 @@ CURL_VERSION_BITS() macro since curl's own configure script greps for it
and needs it to contain the full number.
*/
-#define LIBCURL_VERSION_NUM 0x080600
+#define LIBCURL_VERSION_NUM 0x080701
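As a usage note, a minimal sketch of compile-time gating on this numeric version, e.g. to rely on features introduced in 8.7.0 only when the headers are new enough.

```c
#include <curl/curl.h>

#if defined(CURL_AT_LEAST_VERSION) && CURL_AT_LEAST_VERSION(8, 7, 0)
#define HAVE_CURLINFO_USED_PROXY 1  /* safe to query the new info value */
#else
#define HAVE_CURLINFO_USED_PROXY 0
#endif
```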
/*
* This is the date and time when the full source package was created. The
@@ -70,7 +70,7 @@ *
* "2007-11-23"
*/
-#define LIBCURL_TIMESTAMP "2024-01-31"
+#define LIBCURL_TIMESTAMP "2024-03-27"
#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|(z))
#define CURL_AT_LEAST_VERSION(x,y,z) \
diff --git a/libs/libcurl/libcurl.vcxproj b/libs/libcurl/libcurl.vcxproj index 32801244aa..d4e3f36ede 100644 --- a/libs/libcurl/libcurl.vcxproj +++ b/libs/libcurl/libcurl.vcxproj @@ -125,6 +125,9 @@ <ClCompile Include="src\curl_sasl.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
+ <ClCompile Include="src\curl_sha512_256.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="src\curl_sspi.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -134,6 +137,9 @@ <ClCompile Include="src\curl_trc.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
+ <ClCompile Include="src\cw-out.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="src\dict.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -320,6 +326,9 @@ <ClCompile Include="src\rename.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
+ <ClCompile Include="src\request.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="src\rtsp.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -529,9 +538,11 @@ <ClInclude Include="src\curl_setup.h" />
<ClInclude Include="src\curl_setup_once.h" />
<ClInclude Include="src\curl_sha256.h" />
+ <ClInclude Include="src\curl_sha512_256.h" />
<ClInclude Include="src\curl_sspi.h" />
<ClInclude Include="src\curl_threads.h" />
<ClInclude Include="src\curl_trc.h" />
+ <ClInclude Include="src\cw-out.h" />
<ClInclude Include="src\dict.h" />
<ClInclude Include="src\doh.h" />
<ClInclude Include="src\dotdot.h" />
@@ -585,6 +596,7 @@ <ClInclude Include="src\psl.h" />
<ClInclude Include="src\rand.h" />
<ClInclude Include="src\rename.h" />
+ <ClInclude Include="src\request.h" />
<ClInclude Include="src\rtsp.h" />
<ClInclude Include="src\select.h" />
<ClInclude Include="src\sendf.h" />
diff --git a/libs/libcurl/libcurl.vcxproj.filters b/libs/libcurl/libcurl.vcxproj.filters index aced79ad6c..e5c1ab0b62 100644 --- a/libs/libcurl/libcurl.vcxproj.filters +++ b/libs/libcurl/libcurl.vcxproj.filters @@ -101,6 +101,9 @@ <ClCompile Include="src\curl_sasl.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="src\curl_sha512_256.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="src\curl_sspi.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -110,6 +113,9 @@ <ClCompile Include="src\curl_trc.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="src\cw-out.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="src\dict.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -296,6 +302,9 @@ <ClCompile Include="src\rename.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="src\request.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="src\rtsp.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -646,6 +655,9 @@ <ClInclude Include="src\curl_sha256.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="src\curl_sha512_256.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
<ClInclude Include="src\curl_sspi.h">
<Filter>Header Files</Filter>
</ClInclude>
@@ -655,6 +667,9 @@ <ClInclude Include="src\curl_trc.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="src\cw-out.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
<ClInclude Include="src\dict.h">
<Filter>Header Files</Filter>
</ClInclude>
@@ -814,6 +829,9 @@ <ClInclude Include="src\rename.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="src\request.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
<ClInclude Include="src\rtsp.h">
<Filter>Header Files</Filter>
</ClInclude>
diff --git a/libs/libcurl/src/Makefile.in b/libs/libcurl/src/Makefile.in index 662f586563..7940068ba5 100644 --- a/libs/libcurl/src/Makefile.in +++ b/libs/libcurl/src/Makefile.in @@ -219,17 +219,18 @@ am__libcurl_la_SOURCES_DIST = altsvc.c amigaos.c asyn-ares.c \ curl_get_line.c curl_gethostname.c curl_gssapi.c \
curl_memrchr.c curl_multibyte.c curl_ntlm_core.c \
curl_ntlm_wb.c curl_path.c curl_range.c curl_rtmp.c \
- curl_sasl.c curl_sspi.c curl_threads.c curl_trc.c dict.c doh.c \
- dynbuf.c dynhds.c easy.c easygetopt.c easyoptions.c escape.c \
- file.c fileinfo.c fopen.c formdata.c ftp.c ftplistparser.c \
- getenv.c getinfo.c gopher.c hash.c headers.c hmac.c hostasyn.c \
- hostip.c hostip4.c hostip6.c hostsyn.c hsts.c http.c http1.c \
- http2.c http_aws_sigv4.c http_chunks.c http_digest.c \
- http_negotiate.c http_ntlm.c http_proxy.c idn.c if2ip.c imap.c \
- inet_ntop.c inet_pton.c krb5.c ldap.c llist.c macos.c md4.c \
- md5.c memdebug.c mime.c mprintf.c mqtt.c multi.c netrc.c \
- nonblock.c noproxy.c openldap.c parsedate.c pingpong.c pop3.c \
- progress.c psl.c rand.c rename.c rtsp.c select.c sendf.c \
+ curl_sasl.c curl_sha512_256.c curl_sspi.c curl_threads.c \
+ curl_trc.c cw-out.c dict.c doh.c dynbuf.c dynhds.c easy.c \
+ easygetopt.c easyoptions.c escape.c file.c fileinfo.c fopen.c \
+ formdata.c ftp.c ftplistparser.c getenv.c getinfo.c gopher.c \
+ hash.c headers.c hmac.c hostasyn.c hostip.c hostip4.c \
+ hostip6.c hostsyn.c hsts.c http.c http1.c http2.c \
+ http_aws_sigv4.c http_chunks.c http_digest.c http_negotiate.c \
+ http_ntlm.c http_proxy.c idn.c if2ip.c imap.c inet_ntop.c \
+ inet_pton.c krb5.c ldap.c llist.c macos.c md4.c md5.c \
+ memdebug.c mime.c mprintf.c mqtt.c multi.c netrc.c nonblock.c \
+ noproxy.c openldap.c parsedate.c pingpong.c pop3.c progress.c \
+ psl.c rand.c rename.c request.c rtsp.c select.c sendf.c \
setopt.c sha256.c share.c slist.c smb.c smtp.c socketpair.c \
socks.c socks_gssapi.c socks_sspi.c speedcheck.c splay.c \
strcase.c strdup.c strerror.c strtok.c strtoofft.c \
@@ -256,27 +257,27 @@ am__libcurl_la_SOURCES_DIST = altsvc.c amigaos.c asyn-ares.c \ curl_md5.h curl_memory.h curl_memrchr.h curl_multibyte.h \
curl_ntlm_core.h curl_ntlm_wb.h curl_path.h curl_printf.h \
curl_range.h curl_rtmp.h curl_sasl.h curl_setup.h \
- curl_setup_once.h curl_sha256.h curl_sspi.h curl_threads.h \
- curl_trc.h curlx.h dict.h doh.h dynbuf.h dynhds.h easy_lock.h \
- easyif.h easyoptions.h escape.h file.h fileinfo.h fopen.h \
- formdata.h ftp.h ftplistparser.h functypes.h getinfo.h \
- gopher.h hash.h headers.h hostip.h hsts.h http.h http1.h \
- http2.h http_aws_sigv4.h http_chunks.h http_digest.h \
- http_negotiate.h http_ntlm.h http_proxy.h idn.h if2ip.h imap.h \
- inet_ntop.h inet_pton.h llist.h macos.h memdebug.h mime.h \
- mqtt.h multihandle.h multiif.h netrc.h nonblock.h noproxy.h \
- parsedate.h pingpong.h pop3.h progress.h psl.h rand.h rename.h \
- rtsp.h select.h sendf.h setopt.h setup-vms.h share.h sigpipe.h \
- slist.h smb.h smtp.h sockaddr.h socketpair.h socks.h \
- speedcheck.h splay.h strcase.h strdup.h strerror.h strtok.h \
- strtoofft.h system_win32.h telnet.h tftp.h timediff.h \
- timeval.h transfer.h url.h urlapi-int.h urldata.h \
- version_win32.h warnless.h ws.h vauth/digest.h vauth/ntlm.h \
- vauth/vauth.h vtls/bearssl.h vtls/gtls.h vtls/hostcheck.h \
- vtls/keylog.h vtls/mbedtls.h vtls/mbedtls_threadlock.h \
- vtls/openssl.h vtls/rustls.h vtls/schannel.h \
- vtls/schannel_int.h vtls/sectransp.h vtls/vtls.h \
- vtls/vtls_int.h vtls/wolfssl.h vtls/x509asn1.h \
+ curl_setup_once.h curl_sha256.h curl_sha512_256.h curl_sspi.h \
+ curl_threads.h curl_trc.h curlx.h cw-out.h dict.h doh.h \
+ dynbuf.h dynhds.h easy_lock.h easyif.h easyoptions.h escape.h \
+ file.h fileinfo.h fopen.h formdata.h ftp.h ftplistparser.h \
+ functypes.h getinfo.h gopher.h hash.h headers.h hostip.h \
+ hsts.h http.h http1.h http2.h http_aws_sigv4.h http_chunks.h \
+ http_digest.h http_negotiate.h http_ntlm.h http_proxy.h idn.h \
+ if2ip.h imap.h inet_ntop.h inet_pton.h llist.h macos.h \
+ memdebug.h mime.h mqtt.h multihandle.h multiif.h netrc.h \
+ nonblock.h noproxy.h parsedate.h pingpong.h pop3.h progress.h \
+ psl.h rand.h rename.h request.h rtsp.h select.h sendf.h \
+ setopt.h setup-vms.h share.h sigpipe.h slist.h smb.h smtp.h \
+ sockaddr.h socketpair.h socks.h speedcheck.h splay.h strcase.h \
+ strdup.h strerror.h strtok.h strtoofft.h system_win32.h \
+ telnet.h tftp.h timediff.h timeval.h transfer.h url.h \
+ urlapi-int.h urldata.h version_win32.h warnless.h ws.h \
+ vauth/digest.h vauth/ntlm.h vauth/vauth.h vtls/bearssl.h \
+ vtls/gtls.h vtls/hostcheck.h vtls/keylog.h vtls/mbedtls.h \
+ vtls/mbedtls_threadlock.h vtls/openssl.h vtls/rustls.h \
+ vtls/schannel.h vtls/schannel_int.h vtls/sectransp.h \
+ vtls/vtls.h vtls/vtls_int.h vtls/wolfssl.h vtls/x509asn1.h \
vquic/curl_msh3.h vquic/curl_ngtcp2.h vquic/curl_osslq.h \
vquic/curl_quiche.h vquic/vquic.h vquic/vquic_int.h \
vquic/vquic-tls.h vssh/ssh.h libcurl.rc
@@ -296,8 +297,9 @@ am__objects_1 = libcurl_la-altsvc.lo libcurl_la-amigaos.lo \ libcurl_la-curl_ntlm_core.lo libcurl_la-curl_ntlm_wb.lo \
libcurl_la-curl_path.lo libcurl_la-curl_range.lo \
libcurl_la-curl_rtmp.lo libcurl_la-curl_sasl.lo \
- libcurl_la-curl_sspi.lo libcurl_la-curl_threads.lo \
- libcurl_la-curl_trc.lo libcurl_la-dict.lo libcurl_la-doh.lo \
+ libcurl_la-curl_sha512_256.lo libcurl_la-curl_sspi.lo \
+ libcurl_la-curl_threads.lo libcurl_la-curl_trc.lo \
+ libcurl_la-cw-out.lo libcurl_la-dict.lo libcurl_la-doh.lo \
libcurl_la-dynbuf.lo libcurl_la-dynhds.lo libcurl_la-easy.lo \
libcurl_la-easygetopt.lo libcurl_la-easyoptions.lo \
libcurl_la-escape.lo libcurl_la-file.lo libcurl_la-fileinfo.lo \
@@ -322,13 +324,14 @@ am__objects_1 = libcurl_la-altsvc.lo libcurl_la-amigaos.lo \ libcurl_la-noproxy.lo libcurl_la-openldap.lo \
libcurl_la-parsedate.lo libcurl_la-pingpong.lo \
libcurl_la-pop3.lo libcurl_la-progress.lo libcurl_la-psl.lo \
- libcurl_la-rand.lo libcurl_la-rename.lo libcurl_la-rtsp.lo \
- libcurl_la-select.lo libcurl_la-sendf.lo libcurl_la-setopt.lo \
- libcurl_la-sha256.lo libcurl_la-share.lo libcurl_la-slist.lo \
- libcurl_la-smb.lo libcurl_la-smtp.lo libcurl_la-socketpair.lo \
- libcurl_la-socks.lo libcurl_la-socks_gssapi.lo \
- libcurl_la-socks_sspi.lo libcurl_la-speedcheck.lo \
- libcurl_la-splay.lo libcurl_la-strcase.lo libcurl_la-strdup.lo \
+ libcurl_la-rand.lo libcurl_la-rename.lo libcurl_la-request.lo \
+ libcurl_la-rtsp.lo libcurl_la-select.lo libcurl_la-sendf.lo \
+ libcurl_la-setopt.lo libcurl_la-sha256.lo libcurl_la-share.lo \
+ libcurl_la-slist.lo libcurl_la-smb.lo libcurl_la-smtp.lo \
+ libcurl_la-socketpair.lo libcurl_la-socks.lo \
+ libcurl_la-socks_gssapi.lo libcurl_la-socks_sspi.lo \
+ libcurl_la-speedcheck.lo libcurl_la-splay.lo \
+ libcurl_la-strcase.lo libcurl_la-strdup.lo \
libcurl_la-strerror.lo libcurl_la-strtok.lo \
libcurl_la-strtoofft.lo libcurl_la-system_win32.lo \
libcurl_la-telnet.lo libcurl_la-tftp.lo libcurl_la-timediff.lo \
@@ -392,8 +395,9 @@ am__objects_11 = libcurlu_la-altsvc.lo libcurlu_la-amigaos.lo \ libcurlu_la-curl_multibyte.lo libcurlu_la-curl_ntlm_core.lo \
libcurlu_la-curl_ntlm_wb.lo libcurlu_la-curl_path.lo \
libcurlu_la-curl_range.lo libcurlu_la-curl_rtmp.lo \
- libcurlu_la-curl_sasl.lo libcurlu_la-curl_sspi.lo \
- libcurlu_la-curl_threads.lo libcurlu_la-curl_trc.lo \
+ libcurlu_la-curl_sasl.lo libcurlu_la-curl_sha512_256.lo \
+ libcurlu_la-curl_sspi.lo libcurlu_la-curl_threads.lo \
+ libcurlu_la-curl_trc.lo libcurlu_la-cw-out.lo \
libcurlu_la-dict.lo libcurlu_la-doh.lo libcurlu_la-dynbuf.lo \
libcurlu_la-dynhds.lo libcurlu_la-easy.lo \
libcurlu_la-easygetopt.lo libcurlu_la-easyoptions.lo \
@@ -421,8 +425,8 @@ am__objects_11 = libcurlu_la-altsvc.lo libcurlu_la-amigaos.lo \ libcurlu_la-openldap.lo libcurlu_la-parsedate.lo \
libcurlu_la-pingpong.lo libcurlu_la-pop3.lo \
libcurlu_la-progress.lo libcurlu_la-psl.lo libcurlu_la-rand.lo \
- libcurlu_la-rename.lo libcurlu_la-rtsp.lo \
- libcurlu_la-select.lo libcurlu_la-sendf.lo \
+ libcurlu_la-rename.lo libcurlu_la-request.lo \
+ libcurlu_la-rtsp.lo libcurlu_la-select.lo libcurlu_la-sendf.lo \
libcurlu_la-setopt.lo libcurlu_la-sha256.lo \
libcurlu_la-share.lo libcurlu_la-slist.lo libcurlu_la-smb.lo \
libcurlu_la-smtp.lo libcurlu_la-socketpair.lo \
@@ -517,9 +521,11 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurl_la-curl_range.Plo \
./$(DEPDIR)/libcurl_la-curl_rtmp.Plo \
./$(DEPDIR)/libcurl_la-curl_sasl.Plo \
+ ./$(DEPDIR)/libcurl_la-curl_sha512_256.Plo \
./$(DEPDIR)/libcurl_la-curl_sspi.Plo \
./$(DEPDIR)/libcurl_la-curl_threads.Plo \
./$(DEPDIR)/libcurl_la-curl_trc.Plo \
+ ./$(DEPDIR)/libcurl_la-cw-out.Plo \
./$(DEPDIR)/libcurl_la-dict.Plo ./$(DEPDIR)/libcurl_la-doh.Plo \
./$(DEPDIR)/libcurl_la-dynbuf.Plo \
./$(DEPDIR)/libcurl_la-dynhds.Plo \
@@ -579,6 +585,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurl_la-progress.Plo \
./$(DEPDIR)/libcurl_la-psl.Plo ./$(DEPDIR)/libcurl_la-rand.Plo \
./$(DEPDIR)/libcurl_la-rename.Plo \
+ ./$(DEPDIR)/libcurl_la-request.Plo \
./$(DEPDIR)/libcurl_la-rtsp.Plo \
./$(DEPDIR)/libcurl_la-select.Plo \
./$(DEPDIR)/libcurl_la-sendf.Plo \
@@ -643,9 +650,11 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurlu_la-curl_range.Plo \
./$(DEPDIR)/libcurlu_la-curl_rtmp.Plo \
./$(DEPDIR)/libcurlu_la-curl_sasl.Plo \
+ ./$(DEPDIR)/libcurlu_la-curl_sha512_256.Plo \
./$(DEPDIR)/libcurlu_la-curl_sspi.Plo \
./$(DEPDIR)/libcurlu_la-curl_threads.Plo \
./$(DEPDIR)/libcurlu_la-curl_trc.Plo \
+ ./$(DEPDIR)/libcurlu_la-cw-out.Plo \
./$(DEPDIR)/libcurlu_la-dict.Plo \
./$(DEPDIR)/libcurlu_la-doh.Plo \
./$(DEPDIR)/libcurlu_la-dynbuf.Plo \
@@ -708,6 +717,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurlu_la-psl.Plo \
./$(DEPDIR)/libcurlu_la-rand.Plo \
./$(DEPDIR)/libcurlu_la-rename.Plo \
+ ./$(DEPDIR)/libcurlu_la-request.Plo \
./$(DEPDIR)/libcurlu_la-rtsp.Plo \
./$(DEPDIR)/libcurlu_la-select.Plo \
./$(DEPDIR)/libcurlu_la-sendf.Plo \
@@ -965,11 +975,9 @@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
-MANOPT = @MANOPT@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
-NROFF = @NROFF@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
@@ -1005,6 +1013,7 @@ USE_ARES = @USE_ARES@ USE_BEARSSL = @USE_BEARSSL@
USE_GNUTLS = @USE_GNUTLS@
USE_HYPER = @USE_HYPER@
+USE_LIBPSL = @USE_LIBPSL@
USE_LIBRTMP = @USE_LIBRTMP@
USE_LIBSSH = @USE_LIBSSH@
USE_LIBSSH2 = @USE_LIBSSH2@
@@ -1256,9 +1265,11 @@ LIB_CFILES = \ curl_range.c \
curl_rtmp.c \
curl_sasl.c \
+ curl_sha512_256.c \
curl_sspi.c \
curl_threads.c \
curl_trc.c \
+ cw-out.c \
dict.c \
doh.c \
dynbuf.c \
@@ -1321,6 +1332,7 @@ LIB_CFILES = \ psl.c \
rand.c \
rename.c \
+ request.c \
rtsp.c \
select.c \
sendf.c \
@@ -1399,10 +1411,12 @@ LIB_HFILES = \ curl_setup.h \
curl_setup_once.h \
curl_sha256.h \
+ curl_sha512_256.h \
curl_sspi.h \
curl_threads.h \
curl_trc.h \
curlx.h \
+ cw-out.h \
dict.h \
doh.h \
dynbuf.h \
@@ -1455,6 +1469,7 @@ LIB_HFILES = \ psl.h \
rand.h \
rename.h \
+ request.h \
rtsp.h \
select.h \
sendf.h \
@@ -1839,9 +1854,11 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-curl_range.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-curl_rtmp.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-curl_sasl.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-curl_sha512_256.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-curl_sspi.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-curl_threads.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-curl_trc.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-cw-out.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-dict.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-doh.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-dynbuf.Plo@am__quote@ # am--include-marker
@@ -1904,6 +1921,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-psl.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-rand.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-rename.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-request.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-rtsp.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-select.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-sendf.Plo@am__quote@ # am--include-marker
@@ -1969,9 +1987,11 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-curl_range.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-curl_rtmp.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-curl_sasl.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-curl_sha512_256.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-curl_sspi.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-curl_threads.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-curl_trc.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-cw-out.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-dict.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-doh.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-dynbuf.Plo@am__quote@ # am--include-marker
@@ -2034,6 +2054,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-psl.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-rand.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-rename.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-request.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-rtsp.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-select.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-sendf.Plo@am__quote@ # am--include-marker
@@ -2400,6 +2421,13 @@ libcurl_la-curl_sasl.lo: curl_sasl.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-curl_sasl.lo `test -f 'curl_sasl.c' || echo '$(srcdir)/'`curl_sasl.c
+libcurl_la-curl_sha512_256.lo: curl_sha512_256.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-curl_sha512_256.lo -MD -MP -MF $(DEPDIR)/libcurl_la-curl_sha512_256.Tpo -c -o libcurl_la-curl_sha512_256.lo `test -f 'curl_sha512_256.c' || echo '$(srcdir)/'`curl_sha512_256.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-curl_sha512_256.Tpo $(DEPDIR)/libcurl_la-curl_sha512_256.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='curl_sha512_256.c' object='libcurl_la-curl_sha512_256.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-curl_sha512_256.lo `test -f 'curl_sha512_256.c' || echo '$(srcdir)/'`curl_sha512_256.c
+
libcurl_la-curl_sspi.lo: curl_sspi.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-curl_sspi.lo -MD -MP -MF $(DEPDIR)/libcurl_la-curl_sspi.Tpo -c -o libcurl_la-curl_sspi.lo `test -f 'curl_sspi.c' || echo '$(srcdir)/'`curl_sspi.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-curl_sspi.Tpo $(DEPDIR)/libcurl_la-curl_sspi.Plo
@@ -2421,6 +2449,13 @@ libcurl_la-curl_trc.lo: curl_trc.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-curl_trc.lo `test -f 'curl_trc.c' || echo '$(srcdir)/'`curl_trc.c
+libcurl_la-cw-out.lo: cw-out.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-cw-out.lo -MD -MP -MF $(DEPDIR)/libcurl_la-cw-out.Tpo -c -o libcurl_la-cw-out.lo `test -f 'cw-out.c' || echo '$(srcdir)/'`cw-out.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-cw-out.Tpo $(DEPDIR)/libcurl_la-cw-out.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cw-out.c' object='libcurl_la-cw-out.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-cw-out.lo `test -f 'cw-out.c' || echo '$(srcdir)/'`cw-out.c
+
libcurl_la-dict.lo: dict.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-dict.lo -MD -MP -MF $(DEPDIR)/libcurl_la-dict.Tpo -c -o libcurl_la-dict.lo `test -f 'dict.c' || echo '$(srcdir)/'`dict.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-dict.Tpo $(DEPDIR)/libcurl_la-dict.Plo
@@ -2855,6 +2890,13 @@ libcurl_la-rename.lo: rename.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-rename.lo `test -f 'rename.c' || echo '$(srcdir)/'`rename.c
+libcurl_la-request.lo: request.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-request.lo -MD -MP -MF $(DEPDIR)/libcurl_la-request.Tpo -c -o libcurl_la-request.lo `test -f 'request.c' || echo '$(srcdir)/'`request.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-request.Tpo $(DEPDIR)/libcurl_la-request.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='request.c' object='libcurl_la-request.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-request.lo `test -f 'request.c' || echo '$(srcdir)/'`request.c
+
libcurl_la-rtsp.lo: rtsp.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-rtsp.lo -MD -MP -MF $(DEPDIR)/libcurl_la-rtsp.Tpo -c -o libcurl_la-rtsp.lo `test -f 'rtsp.c' || echo '$(srcdir)/'`rtsp.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-rtsp.Tpo $(DEPDIR)/libcurl_la-rtsp.Plo
@@ -3562,6 +3604,13 @@ libcurlu_la-curl_sasl.lo: curl_sasl.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-curl_sasl.lo `test -f 'curl_sasl.c' || echo '$(srcdir)/'`curl_sasl.c
+libcurlu_la-curl_sha512_256.lo: curl_sha512_256.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-curl_sha512_256.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-curl_sha512_256.Tpo -c -o libcurlu_la-curl_sha512_256.lo `test -f 'curl_sha512_256.c' || echo '$(srcdir)/'`curl_sha512_256.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-curl_sha512_256.Tpo $(DEPDIR)/libcurlu_la-curl_sha512_256.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='curl_sha512_256.c' object='libcurlu_la-curl_sha512_256.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-curl_sha512_256.lo `test -f 'curl_sha512_256.c' || echo '$(srcdir)/'`curl_sha512_256.c
+
libcurlu_la-curl_sspi.lo: curl_sspi.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-curl_sspi.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-curl_sspi.Tpo -c -o libcurlu_la-curl_sspi.lo `test -f 'curl_sspi.c' || echo '$(srcdir)/'`curl_sspi.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-curl_sspi.Tpo $(DEPDIR)/libcurlu_la-curl_sspi.Plo
@@ -3583,6 +3632,13 @@ libcurlu_la-curl_trc.lo: curl_trc.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-curl_trc.lo `test -f 'curl_trc.c' || echo '$(srcdir)/'`curl_trc.c
+libcurlu_la-cw-out.lo: cw-out.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-cw-out.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-cw-out.Tpo -c -o libcurlu_la-cw-out.lo `test -f 'cw-out.c' || echo '$(srcdir)/'`cw-out.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-cw-out.Tpo $(DEPDIR)/libcurlu_la-cw-out.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cw-out.c' object='libcurlu_la-cw-out.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-cw-out.lo `test -f 'cw-out.c' || echo '$(srcdir)/'`cw-out.c
+
libcurlu_la-dict.lo: dict.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-dict.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-dict.Tpo -c -o libcurlu_la-dict.lo `test -f 'dict.c' || echo '$(srcdir)/'`dict.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-dict.Tpo $(DEPDIR)/libcurlu_la-dict.Plo
@@ -4017,6 +4073,13 @@ libcurlu_la-rename.lo: rename.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-rename.lo `test -f 'rename.c' || echo '$(srcdir)/'`rename.c
+libcurlu_la-request.lo: request.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-request.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-request.Tpo -c -o libcurlu_la-request.lo `test -f 'request.c' || echo '$(srcdir)/'`request.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-request.Tpo $(DEPDIR)/libcurlu_la-request.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='request.c' object='libcurlu_la-request.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-request.lo `test -f 'request.c' || echo '$(srcdir)/'`request.c
+
libcurlu_la-rtsp.lo: rtsp.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-rtsp.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-rtsp.Tpo -c -o libcurlu_la-rtsp.lo `test -f 'rtsp.c' || echo '$(srcdir)/'`rtsp.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-rtsp.Tpo $(DEPDIR)/libcurlu_la-rtsp.Plo
@@ -4672,9 +4735,11 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurl_la-curl_range.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_rtmp.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_sasl.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-curl_sha512_256.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_sspi.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_threads.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_trc.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cw-out.Plo
-rm -f ./$(DEPDIR)/libcurl_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurl_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurl_la-dynbuf.Plo
@@ -4737,6 +4802,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurl_la-psl.Plo
-rm -f ./$(DEPDIR)/libcurl_la-rand.Plo
-rm -f ./$(DEPDIR)/libcurl_la-rename.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-request.Plo
-rm -f ./$(DEPDIR)/libcurl_la-rtsp.Plo
-rm -f ./$(DEPDIR)/libcurl_la-select.Plo
-rm -f ./$(DEPDIR)/libcurl_la-sendf.Plo
@@ -4802,9 +4868,11 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurlu_la-curl_range.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_rtmp.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_sasl.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-curl_sha512_256.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_sspi.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_threads.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_trc.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cw-out.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-dynbuf.Plo
@@ -4867,6 +4935,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurlu_la-psl.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-rand.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-rename.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-request.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-rtsp.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-select.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-sendf.Plo
@@ -5049,9 +5118,11 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurl_la-curl_range.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_rtmp.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_sasl.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-curl_sha512_256.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_sspi.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_threads.Plo
-rm -f ./$(DEPDIR)/libcurl_la-curl_trc.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cw-out.Plo
-rm -f ./$(DEPDIR)/libcurl_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurl_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurl_la-dynbuf.Plo
@@ -5114,6 +5185,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurl_la-psl.Plo
-rm -f ./$(DEPDIR)/libcurl_la-rand.Plo
-rm -f ./$(DEPDIR)/libcurl_la-rename.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-request.Plo
-rm -f ./$(DEPDIR)/libcurl_la-rtsp.Plo
-rm -f ./$(DEPDIR)/libcurl_la-select.Plo
-rm -f ./$(DEPDIR)/libcurl_la-sendf.Plo
@@ -5179,9 +5251,11 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurlu_la-curl_range.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_rtmp.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_sasl.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-curl_sha512_256.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_sspi.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_threads.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-curl_trc.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cw-out.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-dynbuf.Plo
@@ -5244,6 +5318,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurlu_la-psl.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-rand.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-rename.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-request.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-rtsp.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-select.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-sendf.Plo
diff --git a/libs/libcurl/src/Makefile.inc b/libs/libcurl/src/Makefile.inc
index a607bee30c..a6393054b7 100644
--- a/libs/libcurl/src/Makefile.inc
+++ b/libs/libcurl/src/Makefile.inc
@@ -134,9 +134,11 @@ LIB_CFILES = \
curl_range.c \
curl_rtmp.c \
curl_sasl.c \
+ curl_sha512_256.c \
curl_sspi.c \
curl_threads.c \
curl_trc.c \
+ cw-out.c \
dict.c \
doh.c \
dynbuf.c \
@@ -199,6 +201,7 @@ LIB_CFILES = \ psl.c \
rand.c \
rename.c \
+ request.c \
rtsp.c \
select.c \
sendf.c \
@@ -277,10 +280,12 @@ LIB_HFILES = \ curl_setup.h \
curl_setup_once.h \
curl_sha256.h \
+ curl_sha512_256.h \
curl_sspi.h \
curl_threads.h \
curl_trc.h \
curlx.h \
+ cw-out.h \
dict.h \
doh.h \
dynbuf.h \
@@ -333,6 +338,7 @@ LIB_HFILES = \ psl.h \
rand.h \
rename.h \
+ request.h \
rtsp.h \
select.h \
sendf.h \
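Three new source files enter both the libcurl and libcurlu builds in the hunks above: curl_sha512_256.c (a SHA-512/256 implementation backing the new digest-auth algorithm support), plus cw-out.c and request.c (the client-writer and request machinery referenced elsewhere in this patch, e.g. Curl_req_soft_reset() and Curl_req_abort_sending()). As a rough illustration of the digest itself, not of how curl_sha512_256.c is implemented internally, SHA-512/256 can be computed with OpenSSL's EVP interface:

    /* Illustration only: a SHA-512/256 digest via OpenSSL's EVP API.
       This is not how libcurl's curl_sha512_256.c is wired internally. */
    #include <stdio.h>
    #include <openssl/evp.h>

    int main(void)
    {
      const unsigned char msg[] = "abc";
      unsigned char md[EVP_MAX_MD_SIZE];
      unsigned int mdlen = 0;
      unsigned int i;

      /* EVP_sha512_256() exists in OpenSSL 1.1.1 and later */
      if(!EVP_Digest(msg, sizeof(msg) - 1, md, &mdlen, EVP_sha512_256(), NULL))
        return 1;
      for(i = 0; i < mdlen; i++)
        printf("%02x", md[i]);   /* 32 bytes: SHA-512 truncated to 256 bits */
      printf("\n");
      return 0;
    }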
diff --git a/libs/libcurl/src/altsvc.c b/libs/libcurl/src/altsvc.c
index f410c2e7b2..0414ab1150 100644
--- a/libs/libcurl/src/altsvc.c
+++ b/libs/libcurl/src/altsvc.c
@@ -209,7 +209,6 @@ static CURLcode altsvc_add(struct altsvcinfo *asi, char *line)
static CURLcode altsvc_load(struct altsvcinfo *asi, const char *file)
{
CURLcode result = CURLE_OK;
- char *line = NULL;
FILE *fp;
/* we need a private copy of the file name so that the altsvc cache file
@@ -221,11 +220,10 @@ static CURLcode altsvc_load(struct altsvcinfo *asi, const char *file) fp = fopen(file, FOPEN_READTEXT);
if(fp) {
- line = malloc(MAX_ALTSVC_LINE);
- if(!line)
- goto fail;
- while(Curl_get_line(line, MAX_ALTSVC_LINE, fp)) {
- char *lineptr = line;
+ struct dynbuf buf;
+ Curl_dyn_init(&buf, MAX_ALTSVC_LINE);
+ while(Curl_get_line(&buf, fp)) {
+ char *lineptr = Curl_dyn_ptr(&buf);
while(*lineptr && ISBLANK(*lineptr))
lineptr++;
if(*lineptr == '#')
@@ -234,16 +232,10 @@ static CURLcode altsvc_load(struct altsvcinfo *asi, const char *file) altsvc_add(asi, lineptr);
}
- free(line); /* free the line buffer */
+ Curl_dyn_free(&buf); /* free the line buffer */
fclose(fp);
}
return result;
-
-fail:
- Curl_safefree(asi->filename);
- free(line);
- fclose(fp);
- return CURLE_OUT_OF_MEMORY;
}
/*
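altsvc_load() above stops pre-allocating a fixed MAX_ALTSVC_LINE buffer and instead lets Curl_get_line() fill a growable dynbuf per line, which also removes the out-of-memory unwind label. A standalone sketch of the same idea, with POSIX getline() standing in for curl's dynbuf/Curl_get_line() and the parsing helper invented:

    /* Sketch: read arbitrarily long lines with one growable buffer instead
       of a fixed-size malloc. getline() stands in for Curl_get_line(). */
    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <stdlib.h>
    #include <ctype.h>
    #include <sys/types.h>

    static void load_lines(const char *file)
    {
      FILE *fp = fopen(file, "r");
      char *line = NULL;   /* getline() allocates and reuses this buffer */
      size_t cap = 0;
      ssize_t len;

      if(!fp)
        return;
      while((len = getline(&line, &cap, fp)) != -1) {
        char *p = line;
        while(*p && isblank((unsigned char)*p))
          p++;               /* skip leading blanks */
        if(*p == '#')
          continue;          /* skip comment lines */
        /* parse_entry(p); -- hypothetical per-line parser */
      }
      free(line);
      fclose(fp);
    }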
diff --git a/libs/libcurl/src/asyn-ares.c b/libs/libcurl/src/asyn-ares.c
index 69d22ea9fe..baae83218f 100644
--- a/libs/libcurl/src/asyn-ares.c
+++ b/libs/libcurl/src/asyn-ares.c
@@ -122,6 +122,8 @@ struct thread_data {
#define CARES_TIMEOUT_PER_ATTEMPT 2000
+static int ares_ver = 0;
+
/*
* Curl_resolver_global_init() - the generic low-level asynchronous name
* resolve API. Called from curl_global_init() to initialize global resolver
@@ -134,6 +136,7 @@ int Curl_resolver_global_init(void) return CURLE_FAILED_INIT;
}
#endif
+ ares_version(&ares_ver);
return CURLE_OK;
}
@@ -173,16 +176,8 @@ CURLcode Curl_resolver_init(struct Curl_easy *easy, void **resolver) int status;
struct ares_options options;
int optmask = ARES_OPT_SOCK_STATE_CB;
- static int ares_ver = 0;
options.sock_state_cb = sock_state_cb;
options.sock_state_cb_data = easy;
- if(ares_ver == 0)
- ares_version(&ares_ver);
-
- if(ares_ver < 0x011400) { /* c-ares included similar change since 1.20.0 */
- options.timeout = CARES_TIMEOUT_PER_ATTEMPT;
- optmask |= ARES_OPT_TIMEOUTMS;
- }
/*
if c ares < 1.20.0: curl set timeout to CARES_TIMEOUT_PER_ATTEMPT (2s)
@@ -193,6 +188,11 @@ CURLcode Curl_resolver_init(struct Curl_easy *easy, void **resolver) if c-ares >= 1.24.0, user can set the timeout via /etc/resolv.conf to
overwrite c-ares' timeout.
*/
+ DEBUGASSERT(ares_ver);
+ if(ares_ver < 0x011400) {
+ options.timeout = CARES_TIMEOUT_PER_ATTEMPT;
+ optmask |= ARES_OPT_TIMEOUTMS;
+ }
status = ares_init_options((ares_channel*)resolver, &options, optmask);
if(status != ARES_SUCCESS) {
@@ -850,7 +850,7 @@ CURLcode Curl_set_dns_servers(struct Curl_easy *data, /* If server is NULL or empty, this would purge all DNS servers
* from ares library, which will cause any and all queries to fail.
* So, just return OK if none are configured and don't actually make
- * any changes to c-ares. This lets c-ares use it's defaults, which
+ * any changes to c-ares. This lets c-ares use its defaults, which
* it gets from the OS (for instance from /etc/resolv.conf on Linux).
*/
if(!(servers && servers[0]))
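The resolver now queries ares_version() once in Curl_resolver_global_init() and merely asserts the cached value in Curl_resolver_init(), keeping the 2-second per-attempt timeout override only for c-ares older than 1.20.0. c-ares packs its runtime version as 0xMMmmpp, so the 0x011400 threshold is 1.20.0; a small standalone sketch of that comparison (the PACK_VER helper is invented for illustration):

    /* Sketch: c-ares reports its version packed as 0xMMmmpp, so
       1.20.0 == 0x011400. */
    #include <stdio.h>

    #define PACK_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))

    int main(void)
    {
      int ares_ver = PACK_VER(1, 22, 1);   /* pretend runtime version */

      if(ares_ver < PACK_VER(1, 20, 0))
        printf("old c-ares: apply the 2s per-attempt timeout override\n");
      else
        printf("1.20.0+: let c-ares manage retry timeouts itself\n");
      return 0;
    }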
diff --git a/libs/libcurl/src/asyn-thread.c b/libs/libcurl/src/asyn-thread.c
index c0ee113098..fe5e10ebc1 100644
--- a/libs/libcurl/src/asyn-thread.c
+++ b/libs/libcurl/src/asyn-thread.c
@@ -581,7 +581,7 @@ static void destroy_async_data(struct Curl_async *async)
* before the FD is invalidated to avoid EBADF on EPOLL_CTL_DEL
*/
Curl_multi_closed(data, sock_rd);
- sclose(sock_rd);
+ wakeup_close(sock_rd);
#endif
}
async->tdata = NULL;
diff --git a/libs/libcurl/src/bufq.c b/libs/libcurl/src/bufq.c
index a8c6b73c75..2bcad8a57f 100644
--- a/libs/libcurl/src/bufq.c
+++ b/libs/libcurl/src/bufq.c
@@ -396,7 +396,7 @@ ssize_t Curl_bufq_write(struct bufq *q,
while(len) {
tail = get_non_full_tail(q);
if(!tail) {
- if(q->chunk_count < q->max_chunks) {
+ if((q->chunk_count < q->max_chunks) || (q->opts & BUFQ_OPT_SOFT_LIMIT)) {
*err = CURLE_OUT_OF_MEMORY;
return -1;
}
@@ -417,6 +417,17 @@ ssize_t Curl_bufq_write(struct bufq *q, return nwritten;
}
+CURLcode Curl_bufq_cwrite(struct bufq *q,
+ const char *buf, size_t len,
+ size_t *pnwritten)
+{
+ ssize_t n;
+ CURLcode result;
+ n = Curl_bufq_write(q, (const unsigned char *)buf, len, &result);
+ *pnwritten = (n < 0)? 0 : (size_t)n;
+ return result;
+}
+
ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
CURLcode *err)
{
@@ -440,6 +451,16 @@ ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len, return nread;
}
+CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len,
+ size_t *pnread)
+{
+ ssize_t n;
+ CURLcode result;
+ n = Curl_bufq_read(q, (unsigned char *)buf, len, &result);
+ *pnread = (n < 0)? 0 : (size_t)n;
+ return result;
+}
+
bool Curl_bufq_peek(struct bufq *q,
const unsigned char **pbuf, size_t *plen)
{
diff --git a/libs/libcurl/src/bufq.h b/libs/libcurl/src/bufq.h
index bdde1137da..c7aa10a75d 100644
--- a/libs/libcurl/src/bufq.h
+++ b/libs/libcurl/src/bufq.h
@@ -178,6 +178,10 @@ ssize_t Curl_bufq_write(struct bufq *q,
const unsigned char *buf, size_t len,
CURLcode *err);
+CURLcode Curl_bufq_cwrite(struct bufq *q,
+ const char *buf, size_t len,
+ size_t *pnwritten);
+
/**
* Read buf from the start of the buffer queue. The buf is copied
* and the amount of copied bytes is returned.
@@ -187,6 +191,9 @@ ssize_t Curl_bufq_write(struct bufq *q, ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
CURLcode *err);
+CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len,
+ size_t *pnread);
+
/**
* Peek at the head chunk in the buffer queue. Returns a pointer to
* the chunk buf (at the current offset) and its length. Does not
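The new Curl_bufq_cwrite()/Curl_bufq_cread() wrappers fold the older "ssize_t result plus CURLcode out-parameter" convention into a CURLcode return plus an unsigned byte count. A hedged usage sketch of the pair; this is libcurl-internal API, and Curl_bufq_init()/Curl_bufq_free() are assumed from the rest of this header rather than shown in the hunk above:

    /* Sketch only: round-trip a few bytes through a bufq using the new
       CURLcode-style wrappers. Internal API, not for applications. */
    static CURLcode bufq_roundtrip(void)
    {
      struct bufq q;
      char out[64];
      size_t nwritten = 0, nread = 0;
      CURLcode r;

      Curl_bufq_init(&q, 512, 4);      /* assumed: 4 chunks of 512 bytes */
      r = Curl_bufq_cwrite(&q, "hello", 5, &nwritten);
      if(!r)
        r = Curl_bufq_cread(&q, out, sizeof(out), &nread);
      Curl_bufq_free(&q);              /* assumed cleanup helper */
      return r;                        /* CURLE_OK when both calls worked */
    }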
diff --git a/libs/libcurl/src/c-hyper.c b/libs/libcurl/src/c-hyper.c
index 4b479733ab..72619006b6 100644
--- a/libs/libcurl/src/c-hyper.c
+++ b/libs/libcurl/src/c-hyper.c
@@ -53,7 +53,9 @@
#include <hyper.h>
#include "urldata.h"
+#include "cfilters.h"
#include "sendf.h"
+#include "headers.h"
#include "transfer.h"
#include "multiif.h"
#include "progress.h"
@@ -65,6 +67,9 @@ #include "curl_memory.h"
#include "memdebug.h"
+
+static CURLcode cr_hyper_add(struct Curl_easy *data);
+
typedef enum {
USERDATA_NOT_SET = 0, /* for tasks with no userdata set; must be zero */
USERDATA_RESP_BODY
@@ -73,7 +78,8 @@ typedef enum { size_t Curl_hyper_recv(void *userp, hyper_context *ctx,
uint8_t *buf, size_t buflen)
{
- struct Curl_easy *data = userp;
+ struct hyp_io_ctx *io_ctx = userp;
+ struct Curl_easy *data = io_ctx->data;
struct connectdata *conn = data->conn;
CURLcode result;
ssize_t nread;
@@ -81,7 +87,8 @@ size_t Curl_hyper_recv(void *userp, hyper_context *ctx, (void)ctx;
DEBUGF(infof(data, "Curl_hyper_recv(%zu)", buflen));
- result = Curl_read(data, conn->sockfd, (char *)buf, buflen, &nread);
+ result = Curl_conn_recv(data, io_ctx->sockindex,
+ (char *)buf, buflen, &nread);
if(result == CURLE_AGAIN) {
/* would block, register interest */
DEBUGF(infof(data, "Curl_hyper_recv(%zu) -> EAGAIN", buflen));
@@ -105,15 +112,14 @@ size_t Curl_hyper_recv(void *userp, hyper_context *ctx, size_t Curl_hyper_send(void *userp, hyper_context *ctx,
const uint8_t *buf, size_t buflen)
{
- struct Curl_easy *data = userp;
- struct connectdata *conn = data->conn;
+ struct hyp_io_ctx *io_ctx = userp;
+ struct Curl_easy *data = io_ctx->data;
CURLcode result;
- ssize_t nwrote;
+ size_t nwrote;
DEBUGF(infof(data, "Curl_hyper_send(%zu)", buflen));
- result = Curl_write(data, conn->sockfd, (void *)buf, buflen, &nwrote);
- if(!result && !nwrote)
- result = CURLE_AGAIN;
+ result = Curl_conn_send(data, io_ctx->sockindex,
+ (void *)buf, buflen, &nwrote);
if(result == CURLE_AGAIN) {
DEBUGF(infof(data, "Curl_hyper_send(%zu) -> EAGAIN", buflen));
/* would block, register interest */
@@ -152,9 +158,6 @@ static int hyper_each_header(void *userdata, return HYPER_ITER_BREAK;
}
- if(!data->req.bytecount)
- Curl_pgrsTime(data, TIMER_STARTTRANSFER);
-
Curl_dyn_reset(&data->state.headerb);
if(name_len) {
if(Curl_dyn_addf(&data->state.headerb, "%.*s: %.*s\r\n",
@@ -168,7 +171,7 @@ static int hyper_each_header(void *userdata, len = Curl_dyn_len(&data->state.headerb);
headp = Curl_dyn_ptr(&data->state.headerb);
- result = Curl_http_header(data, data->conn, headp);
+ result = Curl_http_header(data, data->conn, headp, len);
if(result) {
data->state.hresult = result;
return HYPER_ITER_BREAK;
@@ -204,7 +207,6 @@ static int hyper_body_chunk(void *userdata, const hyper_buf *chunk) CURLcode result = CURLE_OK;
if(0 == k->bodywrites) {
- bool done = FALSE;
#if defined(USE_NTLM)
struct connectdata *conn = data->conn;
if(conn->bits.close &&
@@ -217,27 +219,26 @@ static int hyper_body_chunk(void *userdata, const hyper_buf *chunk) Curl_safefree(data->req.newurl);
}
#endif
- if(data->state.expect100header) {
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
+ if(Curl_http_exp100_is_selected(data)) {
if(data->req.httpcode < 400) {
- k->exp100 = EXP100_SEND_DATA;
- if(data->hyp.exp100_waker) {
- hyper_waker_wake(data->hyp.exp100_waker);
- data->hyp.exp100_waker = NULL;
+ Curl_http_exp100_got100(data);
+ if(data->hyp.send_body_waker) {
+ hyper_waker_wake(data->hyp.send_body_waker);
+ data->hyp.send_body_waker = NULL;
}
}
else { /* >= 4xx */
- k->exp100 = EXP100_FAILED;
+ Curl_req_abort_sending(data);
}
}
if(data->state.hconnect && (data->req.httpcode/100 != 2) &&
data->state.authproxy.done) {
- done = TRUE;
+ data->req.done = TRUE;
result = CURLE_OK;
}
else
- result = Curl_http_firstwrite(data, data->conn, &done);
- if(result || done) {
+ result = Curl_http_firstwrite(data);
+ if(result || data->req.done) {
infof(data, "Return early from hyper_body_chunk");
data->state.hresult = result;
return HYPER_ITER_BREAK;
@@ -273,14 +274,13 @@ static CURLcode status_line(struct Curl_easy *data, /* We need to set 'httpcodeq' for functions that check the response code in
a single place. */
data->req.httpcode = http_status;
-
+ data->req.httpversion = http_version == HYPER_HTTP_VERSION_1_1? 11 :
+ (http_version == HYPER_HTTP_VERSION_2 ? 20 : 10);
if(data->state.hconnect)
/* CONNECT */
data->info.httpproxycode = http_status;
else {
- conn->httpversion =
- http_version == HYPER_HTTP_VERSION_1_1 ? 11 :
- (http_version == HYPER_HTTP_VERSION_2 ? 20 : 10);
+ conn->httpversion = (unsigned char)data->req.httpversion;
if(http_version == HYPER_HTTP_VERSION_1_0)
data->state.httpwant = CURL_HTTP_VERSION_1_0;
@@ -335,7 +335,6 @@ static CURLcode empty_header(struct Curl_easy *data) CURLcode Curl_hyper_stream(struct Curl_easy *data,
struct connectdata *conn,
int *didwhat,
- bool *done,
int select_res)
{
hyper_response *resp = NULL;
@@ -352,20 +351,9 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data, struct SingleRequest *k = &data->req;
(void)conn;
- if(k->exp100 > EXP100_SEND_DATA) {
- struct curltime now = Curl_now();
- timediff_t ms = Curl_timediff(now, k->start100);
- if(ms >= data->set.expect_100_timeout) {
- /* we've waited long enough, continue anyway */
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- infof(data, "Done waiting for 100-continue");
- if(data->hyp.exp100_waker) {
- hyper_waker_wake(data->hyp.exp100_waker);
- data->hyp.exp100_waker = NULL;
- }
- }
+ if(data->hyp.send_body_waker) {
+ hyper_waker_wake(data->hyp.send_body_waker);
+ data->hyp.send_body_waker = NULL;
}
if(select_res & CURL_CSELECT_IN) {
@@ -379,7 +367,6 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data, h->write_waker = NULL;
}
- *done = FALSE;
do {
hyper_task_return_type t;
task = hyper_executor_poll(h->exec);
@@ -422,7 +409,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data, break;
}
}
- *done = TRUE;
+ data->req.done = TRUE;
hyper_error_free(hypererr);
break;
}
@@ -431,12 +418,11 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data, hyper_task_free(task);
if((userdata_t)userdata == USERDATA_RESP_BODY) {
/* end of transfer */
- *done = TRUE;
+ data->req.done = TRUE;
infof(data, "hyperstream is done");
if(!k->bodywrites) {
/* hyper doesn't always call the body write callback */
- bool stilldone;
- result = Curl_http_firstwrite(data, data->conn, &stilldone);
+ result = Curl_http_firstwrite(data);
}
break;
}
@@ -462,11 +448,11 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data, reasonp = hyper_response_reason_phrase(resp);
reason_len = hyper_response_reason_phrase_len(resp);
- if(http_status == 417 && data->state.expect100header) {
+ if(http_status == 417 && Curl_http_exp100_is_selected(data)) {
infof(data, "Got 417 while waiting for a 100");
data->state.disableexpect = TRUE;
data->req.newurl = strdup(data->state.url);
- Curl_done_sending(data, k);
+ Curl_req_abort_sending(data);
}
result = status_line(data, conn,
@@ -654,115 +640,66 @@ static CURLcode request_target(struct Curl_easy *data, return result;
}
-static int uploadpostfields(void *userdata, hyper_context *ctx,
- hyper_buf **chunk)
-{
- struct Curl_easy *data = (struct Curl_easy *)userdata;
- (void)ctx;
- if(data->req.exp100 > EXP100_SEND_DATA) {
- if(data->req.exp100 == EXP100_FAILED)
- return HYPER_POLL_ERROR;
-
- /* still waiting confirmation */
- if(data->hyp.exp100_waker)
- hyper_waker_free(data->hyp.exp100_waker);
- data->hyp.exp100_waker = hyper_context_waker(ctx);
- return HYPER_POLL_PENDING;
- }
- if(data->req.upload_done)
- *chunk = NULL; /* nothing more to deliver */
- else {
- /* send everything off in a single go */
- hyper_buf *copy = hyper_buf_copy(data->set.postfields,
- (size_t)data->req.p.http->postsize);
- if(copy)
- *chunk = copy;
- else {
- data->state.hresult = CURLE_OUT_OF_MEMORY;
- return HYPER_POLL_ERROR;
- }
- /* increasing the writebytecount here is a little premature but we
- don't know exactly when the body is sent */
- data->req.writebytecount += (size_t)data->req.p.http->postsize;
- Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
- data->req.upload_done = TRUE;
- }
- return HYPER_POLL_READY;
-}
-
static int uploadstreamed(void *userdata, hyper_context *ctx,
hyper_buf **chunk)
{
size_t fillcount;
struct Curl_easy *data = (struct Curl_easy *)userdata;
- struct connectdata *conn = (struct connectdata *)data->conn;
CURLcode result;
+ char *xfer_ulbuf;
+ size_t xfer_ulblen;
+ bool eos;
+ int rc = HYPER_POLL_ERROR;
(void)ctx;
- if(data->req.exp100 > EXP100_SEND_DATA) {
- if(data->req.exp100 == EXP100_FAILED)
- return HYPER_POLL_ERROR;
+ result = Curl_multi_xfer_ulbuf_borrow(data, &xfer_ulbuf, &xfer_ulblen);
+ if(result)
+ goto out;
- /* still waiting confirmation */
- if(data->hyp.exp100_waker)
- hyper_waker_free(data->hyp.exp100_waker);
- data->hyp.exp100_waker = hyper_context_waker(ctx);
- return HYPER_POLL_PENDING;
- }
+ result = Curl_client_read(data, xfer_ulbuf, xfer_ulblen, &fillcount, &eos);
+ if(result)
+ goto out;
- if(data->req.upload_chunky && conn->bits.authneg) {
- fillcount = 0;
- data->req.upload_chunky = FALSE;
- result = CURLE_OK;
- }
- else {
- result = Curl_fillreadbuffer(data, data->set.upload_buffer_size,
- &fillcount);
- }
- if(result) {
- data->state.hresult = result;
- return HYPER_POLL_ERROR;
- }
- if(!fillcount) {
- if((data->req.keepon & KEEP_SEND_PAUSE) != KEEP_SEND_PAUSE)
- /* done! */
- *chunk = NULL;
- else {
- /* paused, save a waker */
- if(data->hyp.send_body_waker)
- hyper_waker_free(data->hyp.send_body_waker);
- data->hyp.send_body_waker = hyper_context_waker(ctx);
- return HYPER_POLL_PENDING;
- }
- }
- else {
- hyper_buf *copy = hyper_buf_copy((uint8_t *)data->state.ulbuf, fillcount);
+ if(fillcount) {
+ hyper_buf *copy = hyper_buf_copy((uint8_t *)xfer_ulbuf, fillcount);
if(copy)
*chunk = copy;
else {
- data->state.hresult = CURLE_OUT_OF_MEMORY;
- return HYPER_POLL_ERROR;
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
}
/* increasing the writebytecount here is a little premature but we
don't know exactly when the body is sent */
data->req.writebytecount += fillcount;
Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
+ rc = HYPER_POLL_READY;
+ }
+ else if(eos) {
+ *chunk = NULL;
+ rc = HYPER_POLL_READY;
}
- return HYPER_POLL_READY;
+ else {
+ /* paused, save a waker */
+ if(data->hyp.send_body_waker)
+ hyper_waker_free(data->hyp.send_body_waker);
+ data->hyp.send_body_waker = hyper_context_waker(ctx);
+ rc = HYPER_POLL_PENDING;
+ }
+
+out:
+ Curl_multi_xfer_ulbuf_release(data, xfer_ulbuf);
+ data->state.hresult = result;
+ return rc;
}
/*
- * bodysend() sets up headers in the outgoing request for an HTTP transfer that
- * sends a body
+ * finalize_request() sets up last headers and optional body settings
*/
-
-static CURLcode bodysend(struct Curl_easy *data,
- struct connectdata *conn,
- hyper_headers *headers,
- hyper_request *hyperreq,
- Curl_HttpReq httpreq)
+static CURLcode finalize_request(struct Curl_easy *data,
+ hyper_headers *headers,
+ hyper_request *hyperreq,
+ Curl_HttpReq httpreq)
{
- struct HTTP *http = data->req.p.http;
CURLcode result = CURLE_OK;
struct dynbuf req;
if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD))
@@ -770,34 +707,31 @@ static CURLcode bodysend(struct Curl_easy *data, else {
hyper_body *body;
Curl_dyn_init(&req, DYN_HTTP_REQUEST);
- result = Curl_http_bodysend(data, conn, &req, httpreq);
+ result = Curl_http_req_complete(data, &req, httpreq);
+ if(result)
+ return result;
- if(!result)
+ /* if the "complete" above did produce more than the closing line,
+ parse the added headers */
+ if(Curl_dyn_len(&req) != 2 || strcmp(Curl_dyn_ptr(&req), "\r\n")) {
result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&req));
+ if(result)
+ return result;
+ }
Curl_dyn_free(&req);
body = hyper_body_new();
hyper_body_set_userdata(body, data);
- if(data->set.postfields)
- hyper_body_set_data_func(body, uploadpostfields);
- else {
- result = Curl_get_upload_buffer(data);
- if(result) {
- hyper_body_free(body);
- return result;
- }
- /* init the "upload from here" pointer */
- data->req.upload_fromhere = data->state.ulbuf;
- hyper_body_set_data_func(body, uploadstreamed);
- }
+ hyper_body_set_data_func(body, uploadstreamed);
+
if(HYPERE_OK != hyper_request_set_body(hyperreq, body)) {
/* fail */
result = CURLE_OUT_OF_MEMORY;
}
}
- http->sending = HTTPSEND_BODY;
- return result;
+
+ return cr_hyper_add(data);
}
static CURLcode cookies(struct Curl_easy *data,
@@ -885,7 +819,16 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) may be parts of the request that is not yet sent, since we can deal with
the rest of the request in the PERFORM phase. */
*done = TRUE;
- Curl_client_cleanup(data);
+ result = Curl_client_start(data);
+ if(result)
+ return result;
+
+ /* Add collecting of headers written to client. For a new connection,
+ * we might have done that already, but reuse
+ * or multiplex needs it here as well. */
+ result = Curl_headers_init(data);
+ if(result)
+ return result;
infof(data, "Time for the Hyper dance");
memset(h, 0, sizeof(struct hyptransfer));
@@ -913,9 +856,9 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) return result;
}
- result = Curl_http_resume(data, conn, httpreq);
+ result = Curl_http_req_set_reader(data, httpreq, &te);
if(result)
- return result;
+ goto error;
result = Curl_http_range(data, httpreq);
if(result)
@@ -932,7 +875,9 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) goto error;
}
/* tell Hyper how to read/write network data */
- hyper_io_set_userdata(io, data);
+ h->io_ctx.data = data;
+ h->io_ctx.sockindex = FIRSTSOCKET;
+ hyper_io_set_userdata(io, &h->io_ctx);
hyper_io_set_read(io, Curl_hyper_recv);
hyper_io_set_write(io, Curl_hyper_send);
@@ -1005,11 +950,6 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) goto error;
}
}
- else {
- if(!data->state.disableexpect) {
- data->state.expect100header = TRUE;
- }
- }
if(hyper_request_set_method(req, (uint8_t *)method, strlen(method))) {
failf(data, "error setting method");
@@ -1034,10 +974,6 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) goto error;
}
- result = Curl_http_body(data, conn, httpreq, &te);
- if(result)
- goto error;
-
if(data->state.aptr.host) {
result = Curl_hyper_header(data, headers, data->state.aptr.host);
if(result)
@@ -1160,13 +1096,13 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) if(result)
goto error;
- result = bodysend(data, conn, headers, req, httpreq);
+ result = finalize_request(data, headers, req, httpreq);
if(result)
goto error;
Curl_debug(data, CURLINFO_HEADER_OUT, (char *)"\r\n", 2);
- if(data->req.upload_chunky && conn->bits.authneg) {
+ if(data->req.upload_chunky && data->req.authneg) {
data->req.upload_chunky = TRUE;
}
else {
@@ -1193,13 +1129,10 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) {
/* HTTP GET/HEAD download */
Curl_pgrsSetUploadSize(data, 0); /* nothing */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
}
+
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
conn->datastream = Curl_hyper_stream;
- if(data->state.expect100header)
- /* Timeout count starts now since with Hyper we don't know exactly when
- the full request has been sent. */
- data->req.start100 = Curl_now();
/* clear userpwd and proxyuserpwd to avoid reusing old credentials
* from reused connections */
@@ -1241,10 +1174,51 @@ void Curl_hyper_done(struct Curl_easy *data) hyper_waker_free(h->write_waker);
h->write_waker = NULL;
}
- if(h->exp100_waker) {
- hyper_waker_free(h->exp100_waker);
- h->exp100_waker = NULL;
+ if(h->send_body_waker) {
+ hyper_waker_free(h->send_body_waker);
+ h->send_body_waker = NULL;
+ }
+}
+
+static CURLcode cr_hyper_unpause(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ (void)reader;
+ if(data->hyp.send_body_waker) {
+ hyper_waker_wake(data->hyp.send_body_waker);
+ data->hyp.send_body_waker = NULL;
}
+ return CURLE_OK;
+}
+
+/* Hyper client reader, handling unpausing */
+static const struct Curl_crtype cr_hyper_protocol = {
+ "cr-hyper",
+ Curl_creader_def_init,
+ Curl_creader_def_read,
+ Curl_creader_def_close,
+ Curl_creader_def_needs_rewind,
+ Curl_creader_def_total_length,
+ Curl_creader_def_resume_from,
+ Curl_creader_def_rewind,
+ cr_hyper_unpause,
+ Curl_creader_def_done,
+ sizeof(struct Curl_creader)
+};
+
+static CURLcode cr_hyper_add(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = NULL;
+ CURLcode result;
+
+ result = Curl_creader_create(&reader, data, &cr_hyper_protocol,
+ CURL_CR_PROTOCOL);
+ if(!result)
+ result = Curl_creader_add(data, reader);
+
+ if(result && reader)
+ Curl_creader_free(data, reader);
+ return result;
}
#endif /* !defined(CURL_DISABLE_HTTP) && defined(USE_HYPER) */
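The new cr_hyper_protocol reader above shows the client-reader vtable idiom: every slot points at the shared Curl_creader_def_* defaults except unpause, which wakes Hyper's stored send_body_waker so a paused upload resumes. A generic, self-contained sketch of that "defaults plus one override" pattern, with all names invented:

    /* Sketch of a function-pointer table filled with defaults and one
       backend-specific override, as cr_hyper_protocol does above. */
    #include <stdio.h>

    struct reader_vtbl {
      int (*init)(void *self);
      int (*read)(void *self, char *buf, int len);
      int (*unpause)(void *self);
    };

    static int def_init(void *self) { (void)self; return 0; }
    static int def_read(void *self, char *buf, int len)
    { (void)self; (void)buf; (void)len; return 0; }

    static int hyper_like_unpause(void *self)
    {
      (void)self;
      printf("wake the stored body-send waker\n");  /* the overridden bit */
      return 0;
    }

    static const struct reader_vtbl hyper_like_reader = {
      def_init,
      def_read,
      hyper_like_unpause,   /* only this slot differs from the defaults */
    };

    int main(void)
    {
      return hyper_like_reader.unpause(NULL);
    }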
diff --git a/libs/libcurl/src/c-hyper.h b/libs/libcurl/src/c-hyper.h
index 244427f9e7..422e6e4935 100644
--- a/libs/libcurl/src/c-hyper.h
+++ b/libs/libcurl/src/c-hyper.h
@@ -29,13 +29,18 @@
#include <hyper.h>
+struct hyp_io_ctx {
+ struct Curl_easy *data;
+ int sockindex;
+};
+
/* per-transfer data for the Hyper backend */
struct hyptransfer {
hyper_waker *write_waker;
hyper_waker *read_waker;
const hyper_executor *exec;
- hyper_waker *exp100_waker;
hyper_waker *send_body_waker;
+ struct hyp_io_ctx io_ctx;
};
size_t Curl_hyper_recv(void *userp, hyper_context *ctx,
@@ -45,7 +50,6 @@ size_t Curl_hyper_send(void *userp, hyper_context *ctx, CURLcode Curl_hyper_stream(struct Curl_easy *data,
struct connectdata *conn,
int *didwhat,
- bool *done,
int select_res);
CURLcode Curl_hyper_header(struct Curl_easy *data, hyper_headers *headers,
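struct hyp_io_ctx replaces the bare Curl_easy pointer as Hyper's I/O userdata: bundling the handle with a socket index lets Curl_hyper_recv()/Curl_hyper_send() call Curl_conn_recv()/Curl_conn_send() on the right socket, FIRSTSOCKET for normal transfers and the proxy filter's index during CONNECT. A minimal sketch of that "context struct as callback userdata" pattern, with names invented:

    /* Sketch: pass a small context struct as callback userdata so the
       callback knows both the handle and which socket index to use. */
    #include <stdio.h>
    #include <stddef.h>

    struct io_ctx {
      void *handle;     /* the transfer handle */
      int sockindex;    /* which of the connection's sockets to use */
    };

    static size_t my_recv(void *userdata, char *buf, size_t len)
    {
      struct io_ctx *ctx = userdata;    /* recover the full context */
      (void)buf;
      printf("recv up to %zu bytes on socket index %d\n", len, ctx->sockindex);
      return 0;
    }

    int main(void)
    {
      struct io_ctx ctx = { NULL, 0 };
      char buf[128];
      return (int)my_recv(&ctx, buf, sizeof(buf));
    }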
diff --git a/libs/libcurl/src/cf-h1-proxy.c b/libs/libcurl/src/cf-h1-proxy.c
index 1725c0e509..5c359e7ddc 100644
--- a/libs/libcurl/src/cf-h1-proxy.c
+++ b/libs/libcurl/src/cf-h1-proxy.c
@@ -114,18 +114,12 @@ static CURLcode tunnel_init(struct Curl_cfilter *cf,
struct h1_tunnel_state **pts)
{
struct h1_tunnel_state *ts;
- CURLcode result;
if(cf->conn->handler->flags & PROTOPT_NOTCPPROXY) {
failf(data, "%s cannot be done over CONNECT", cf->conn->handler->scheme);
return CURLE_UNSUPPORTED_PROTOCOL;
}
- /* we might need the upload buffer for streaming a partial request */
- result = Curl_get_upload_buffer(data);
- if(result)
- return result;
-
ts = calloc(1, sizeof(*ts));
if(!ts)
return CURLE_OUT_OF_MEMORY;
@@ -212,6 +206,11 @@ static void tunnel_free(struct Curl_cfilter *cf, }
}
+static bool tunnel_want_send(struct h1_tunnel_state *ts)
+{
+ return (ts->tunnel_state == H1_TUNNEL_CONNECT);
+}
+
#ifndef USE_HYPER
static CURLcode start_CONNECT(struct Curl_cfilter *cf,
struct Curl_easy *data,
@@ -238,6 +237,8 @@ static CURLcode start_CONNECT(struct Curl_cfilter *cf, http_minor = (cf->conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) ? 0 : 1;
result = Curl_h1_req_write_head(req, http_minor, &ts->request_data);
+ if(!result)
+ result = Curl_creader_set_null(data);
out:
if(result)
@@ -366,7 +367,6 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, {
CURLcode result = CURLE_OK;
struct SingleRequest *k = &data->req;
- curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data);
char *linep;
size_t line_len;
int error, writetype;
@@ -386,7 +386,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, /* Read one byte at a time to avoid a race condition. Wait at most one
second before looping to ensure continuous pgrsUpdates. */
- result = Curl_read(data, tunnelsocket, &byte, 1, &nread);
+ result = Curl_conn_recv(data, cf->sockindex, &byte, 1, &nread);
if(result == CURLE_AGAIN)
/* socket buffer drained, return */
return CURLE_OK;
@@ -593,7 +593,9 @@ static CURLcode start_CONNECT(struct Curl_cfilter *cf, goto error;
}
/* tell Hyper how to read/write network data */
- hyper_io_set_userdata(io, data);
+ h->io_ctx.data = data;
+ h->io_ctx.sockindex = cf->sockindex;
+ hyper_io_set_userdata(io, &h->io_ctx);
hyper_io_set_read(io, Curl_hyper_recv);
hyper_io_set_write(io, Curl_hyper_send);
conn->sockfd = tunnelsocket;
@@ -749,6 +751,10 @@ static CURLcode start_CONNECT(struct Curl_cfilter *cf, if(result)
goto error;
+ result = Curl_creader_set_null(data);
+ if(result)
+ goto error;
+
sendtask = hyper_clientconn_send(client, req);
if(!sendtask) {
failf(data, "hyper_clientconn_send");
@@ -832,9 +838,9 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, int didwhat;
(void)ts;
- *done = FALSE;
- result = Curl_hyper_stream(data, cf->conn, &didwhat, done,
+ result = Curl_hyper_stream(data, cf->conn, &didwhat,
CURL_CSELECT_IN | CURL_CSELECT_OUT);
+ *done = data->req.done;
if(result || !*done)
return result;
if(h->exec) {
@@ -918,6 +924,7 @@ static CURLcode H1_CONNECT(struct Curl_cfilter *cf, * If the other side indicated a connection close, or if someone
* else told us to close this connection, do so now.
*/
+ Curl_req_soft_reset(&data->req, data);
if(ts->close_connection || conn->bits.close) {
/* Close this filter and the sub-chain, re-connect the
* sub-chain and continue. Closing this filter will
@@ -1003,11 +1010,9 @@ out: *done = (result == CURLE_OK) && tunnel_is_established(cf->ctx);
if(*done) {
cf->connected = TRUE;
- /* Restore `data->req` fields that may habe been touched */
- data->req.header = TRUE; /* assume header */
- data->req.bytecount = 0;
- data->req.ignorebody = FALSE;
- Curl_client_cleanup(data);
+ /* The real request will follow the CONNECT, reset request partially */
+ Curl_req_soft_reset(&data->req, data);
+ Curl_client_reset(data);
Curl_pgrsSetUploadCounter(data, 0);
Curl_pgrsSetDownloadCounter(data, 0);
@@ -1031,7 +1036,7 @@ static void cf_h1_proxy_adjust_pollset(struct Curl_cfilter *cf, wait for the socket to become readable to be able to get the
response headers or if we're still sending the request, wait
for write. */
- if(ts->CONNECT.sending == HTTPSEND_REQUEST)
+ if(tunnel_want_send(ts))
Curl_pollset_set_out_only(data, ps, sock);
else
Curl_pollset_set_in_only(data, ps, sock);
diff --git a/libs/libcurl/src/cf-h2-proxy.c b/libs/libcurl/src/cf-h2-proxy.c
index 83915a6e26..c302f603d9 100644
--- a/libs/libcurl/src/cf-h2-proxy.c
+++ b/libs/libcurl/src/cf-h2-proxy.c
@@ -38,6 +38,7 @@
#include "http2.h"
#include "http_proxy.h"
#include "multiif.h"
+#include "sendf.h"
#include "cf-h2-proxy.h"
/* The last 3 #include files should be in this order */
@@ -956,6 +957,9 @@ static CURLcode submit_CONNECT(struct Curl_cfilter *cf, result = Curl_http_proxy_create_CONNECT(&req, cf, data, 2);
if(result)
goto out;
+ result = Curl_creader_set_null(data);
+ if(result)
+ goto out;
infof(data, "Establish HTTP/2 proxy tunnel to %s", req->authority);
@@ -1125,7 +1129,12 @@ static CURLcode cf_h2_proxy_connect(struct Curl_cfilter *cf, out:
*done = (result == CURLE_OK) && (ts->state == H2_TUNNEL_ESTABLISHED);
- cf->connected = *done;
+ if(*done) {
+ cf->connected = TRUE;
+ /* The real request will follow the CONNECT, reset request partially */
+ Curl_req_soft_reset(&data->req, data);
+ Curl_client_reset(data);
+ }
CF_DATA_RESTORE(cf, save);
return result;
}
diff --git a/libs/libcurl/src/cf-haproxy.c b/libs/libcurl/src/cf-haproxy.c
index 902de36345..a6e1499d8a 100644
--- a/libs/libcurl/src/cf-haproxy.c
+++ b/libs/libcurl/src/cf-haproxy.c
@@ -86,14 +86,14 @@ static CURLcode cf_haproxy_date_out_set(struct Curl_cfilter*cf,
if(data->set.str[STRING_HAPROXY_CLIENT_IP])
client_ip = data->set.str[STRING_HAPROXY_CLIENT_IP];
else
- client_ip = data->info.conn_local_ip;
+ client_ip = data->info.primary.local_ip;
result = Curl_dyn_addf(&ctx->data_out, "PROXY %s %s %s %i %i\r\n",
tcp_version,
client_ip,
- data->info.conn_primary_ip,
- data->info.conn_local_port,
- data->info.conn_primary_port);
+ data->info.primary.remote_ip,
+ data->info.primary.local_port,
+ data->info.primary.remote_port);
#ifdef USE_UNIX_SOCKETS
}
@@ -129,12 +129,17 @@ static CURLcode cf_haproxy_connect(struct Curl_cfilter *cf, case HAPROXY_SEND:
len = Curl_dyn_len(&ctx->data_out);
if(len > 0) {
- ssize_t written = Curl_conn_send(data, cf->sockindex,
- Curl_dyn_ptr(&ctx->data_out),
- len, &result);
- if(written < 0)
+ size_t written;
+ result = Curl_conn_send(data, cf->sockindex,
+ Curl_dyn_ptr(&ctx->data_out),
+ len, &written);
+ if(result == CURLE_AGAIN) {
+ result = CURLE_OK;
+ written = 0;
+ }
+ else if(result)
goto out;
- Curl_dyn_tail(&ctx->data_out, len - (size_t)written);
+ Curl_dyn_tail(&ctx->data_out, len - written);
if(Curl_dyn_len(&ctx->data_out) > 0) {
result = CURLE_OK;
goto out;
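The HAProxy preamble is now sent with the CURLcode-returning Curl_conn_send(): CURLE_AGAIN becomes "zero bytes written, stay in HAPROXY_SEND and retry", and Curl_dyn_tail() drops whatever part already went out. A standalone sketch of that partial-send loop against a nonblocking socket, with plain POSIX send() standing in for the connection-filter call:

    /* Sketch: send a buffer over a nonblocking socket, tolerating partial
       writes and EAGAIN, in the spirit of the HAPROXY_SEND state above. */
    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Returns 0 when the caller may continue (possibly with *off < len,
       meaning "retry when the socket is writable"), -1 on a hard error. */
    static int send_some(int fd, const char *buf, size_t len, size_t *off)
    {
      while(*off < len) {
        ssize_t n = send(fd, buf + *off, len - *off, 0);
        if(n < 0) {
          if(errno == EAGAIN || errno == EWOULDBLOCK)
            return 0;            /* wrote nothing more, try again later */
          return -1;             /* real error */
        }
        *off += (size_t)n;       /* advance past the bytes that went out */
      }
      return 0;                  /* everything sent */
    }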
diff --git a/libs/libcurl/src/cf-socket.c b/libs/libcurl/src/cf-socket.c
index 31b0291579..20ea147875 100644
--- a/libs/libcurl/src/cf-socket.c
+++ b/libs/libcurl/src/cf-socket.c
@@ -776,10 +776,7 @@ struct cf_socket_ctx {
struct Curl_sockaddr_ex addr; /* address to connect to */
curl_socket_t sock; /* current attempt socket */
struct bufq recvbuf; /* used when `buffer_recv` is set */
- char r_ip[MAX_IPADR_LEN]; /* remote IP as string */
- int r_port; /* remote port number */
- char l_ip[MAX_IPADR_LEN]; /* local IP as string */
- int l_port; /* local port number */
+ struct ip_quadruple ip; /* The IP quadruple 2x(addr+port) */
struct curltime started_at; /* when socket was created */
struct curltime connected_at; /* when socket connected/got first byte */
struct curltime first_byte_at; /* when first byte was recvd */
@@ -880,8 +877,9 @@ static ssize_t nw_in_read(void *reader_ctx, nread = -1;
}
}
- CURL_TRC_CF(rctx->data, rctx->cf, "nw_in_read(len=%zu) -> %d, err=%d",
- len, (int)nread, *err);
+ CURL_TRC_CF(rctx->data, rctx->cf, "nw_in_read(len=%zu, fd=%"
+ CURL_FORMAT_SOCKET_T ") -> %d, err=%d",
+ len, ctx->sock, (int)nread, *err);
return nread;
}
@@ -940,7 +938,7 @@ static CURLcode set_local_ip(struct Curl_cfilter *cf, return CURLE_FAILED_INIT;
}
if(!Curl_addr2string((struct sockaddr*)&ssloc, slen,
- ctx->l_ip, &ctx->l_port)) {
+ ctx->ip.local_ip, &ctx->ip.local_port)) {
failf(data, "ssloc inet_ntop() failed with errno %d: %s",
errno, Curl_strerror(errno, buffer, sizeof(buffer)));
return CURLE_FAILED_INIT;
@@ -961,7 +959,7 @@ static CURLcode set_remote_ip(struct Curl_cfilter *cf, /* store remote address and port used in this connection attempt */
if(!Curl_addr2string(&ctx->addr.sa_addr, ctx->addr.addrlen,
- ctx->r_ip, &ctx->r_port)) {
+ ctx->ip.remote_ip, &ctx->ip.remote_port)) {
char buffer[STRERROR_LEN];
ctx->error = errno;
@@ -996,11 +994,11 @@ static CURLcode cf_socket_open(struct Curl_cfilter *cf, #ifdef ENABLE_IPV6
if(ctx->addr.family == AF_INET6) {
set_ipv6_v6only(ctx->sock, 0);
- infof(data, " Trying [%s]:%d...", ctx->r_ip, ctx->r_port);
+ infof(data, " Trying [%s]:%d...", ctx->ip.remote_ip, ctx->ip.remote_port);
}
else
#endif
- infof(data, " Trying %s:%d...", ctx->r_ip, ctx->r_port);
+ infof(data, " Trying %s:%d...", ctx->ip.remote_ip, ctx->ip.remote_port);
#ifdef ENABLE_IPV6
is_tcp = (ctx->addr.family == AF_INET
@@ -1166,9 +1164,9 @@ static CURLcode cf_tcp_connect(struct Curl_cfilter *cf, error = SOCKERRNO;
set_local_ip(cf, data);
CURL_TRC_CF(data, cf, "local address %s port %d...",
- ctx->l_ip, ctx->l_port);
+ ctx->ip.local_ip, ctx->ip.local_port);
if(-1 == rc) {
- result = socket_connect_result(data, ctx->r_ip, error);
+ result = socket_connect_result(data, ctx->ip.remote_ip, error);
goto out;
}
}
@@ -1213,7 +1211,8 @@ out: {
char buffer[STRERROR_LEN];
infof(data, "connect to %s port %u from %s port %d failed: %s",
- ctx->r_ip, ctx->r_port, ctx->l_ip, ctx->l_port,
+ ctx->ip.remote_ip, ctx->ip.remote_port,
+ ctx->ip.local_ip, ctx->ip.local_port,
Curl_strerror(ctx->error, buffer, sizeof(buffer)));
}
#endif
@@ -1233,10 +1232,11 @@ static void cf_socket_get_host(struct Curl_cfilter *cf, const char **pdisplay_host,
int *pport)
{
+ struct cf_socket_ctx *ctx = cf->ctx;
(void)data;
*phost = cf->conn->host.name;
*pdisplay_host = cf->conn->host.dispname;
- *pport = cf->conn->port;
+ *pport = ctx->ip.remote_port;
}
static void cf_socket_adjust_pollset(struct Curl_cfilter *cf,
@@ -1248,11 +1248,13 @@ static void cf_socket_adjust_pollset(struct Curl_cfilter *cf, if(ctx->sock != CURL_SOCKET_BAD) {
if(!cf->connected) {
Curl_pollset_set_out_only(data, ps, ctx->sock);
- CURL_TRC_CF(data, cf, "adjust_pollset(!connected) -> %d socks", ps->num);
+ CURL_TRC_CF(data, cf, "adjust_pollset, !connected, POLLOUT fd=%"
+ CURL_FORMAT_SOCKET_T, ctx->sock);
}
else if(!ctx->active) {
Curl_pollset_add_in(data, ps, ctx->sock);
- CURL_TRC_CF(data, cf, "adjust_pollset(!active) -> %d socks", ps->num);
+ CURL_TRC_CF(data, cf, "adjust_pollset, !active, POLLIN fd=%"
+ CURL_FORMAT_SOCKET_T, ctx->sock);
}
}
}
@@ -1433,31 +1435,24 @@ out: return nread;
}
-static void conn_set_primary_ip(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct cf_socket_ctx *ctx = cf->ctx;
-
- (void)data;
- DEBUGASSERT(sizeof(ctx->r_ip) == sizeof(cf->conn->primary_ip));
- memcpy(cf->conn->primary_ip, ctx->r_ip, sizeof(cf->conn->primary_ip));
-}
-
static void cf_socket_active(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_socket_ctx *ctx = cf->ctx;
/* use this socket from now on */
cf->conn->sock[cf->sockindex] = ctx->sock;
- /* the first socket info gets set at conn and data */
+ set_local_ip(cf, data);
+ if(cf->sockindex == SECONDARYSOCKET)
+ cf->conn->secondary = ctx->ip;
+ else
+ cf->conn->primary = ctx->ip;
+ /* the first socket info gets some specials */
if(cf->sockindex == FIRSTSOCKET) {
cf->conn->remote_addr = &ctx->addr;
#ifdef ENABLE_IPV6
cf->conn->bits.ipv6 = (ctx->addr.family == AF_INET6)? TRUE : FALSE;
#endif
- conn_set_primary_ip(cf, data);
- set_local_ip(cf, data);
- Curl_persistconninfo(data, cf->conn, ctx->l_ip, ctx->l_port);
+ Curl_persistconninfo(data, cf->conn, &ctx->ip);
/* buffering is currently disabled by default because we have stalls
* in parallel transfers where not all buffered data is consumed and no
* socket events happen.
@@ -1480,7 +1475,7 @@ static CURLcode cf_socket_cntrl(struct Curl_cfilter *cf, cf_socket_active(cf, data);
break;
case CF_CTRL_DATA_SETUP:
- Curl_persistconninfo(data, cf->conn, ctx->l_ip, ctx->l_port);
+ Curl_persistconninfo(data, cf->conn, &ctx->ip);
break;
case CF_CTRL_FORGET_SOCKET:
ctx->sock = CURL_SOCKET_BAD;
@@ -1637,7 +1632,7 @@ static CURLcode cf_udp_setup_quic(struct Curl_cfilter *cf, #else
rc = connect(ctx->sock, &ctx->addr.sa_addr, ctx->addr.addrlen);
if(-1 == rc) {
- return socket_connect_result(data, ctx->r_ip, SOCKERRNO);
+ return socket_connect_result(data, ctx->ip.remote_ip, SOCKERRNO);
}
ctx->sock_connected = TRUE;
#endif
@@ -1645,7 +1640,8 @@ static CURLcode cf_udp_setup_quic(struct Curl_cfilter *cf, CURL_TRC_CF(data, cf, "%s socket %" CURL_FORMAT_SOCKET_T
" connected: [%s:%d] -> [%s:%d]",
(ctx->transport == TRNSPRT_QUIC)? "QUIC" : "UDP",
- ctx->sock, ctx->l_ip, ctx->l_port, ctx->r_ip, ctx->r_port);
+ ctx->sock, ctx->ip.local_ip, ctx->ip.local_port,
+ ctx->ip.remote_ip, ctx->ip.remote_port);
(void)curlx_nonblock(ctx->sock, TRUE);
switch(ctx->addr.family) {
@@ -1695,7 +1691,7 @@ static CURLcode cf_udp_connect(struct Curl_cfilter *cf, goto out;
CURL_TRC_CF(data, cf, "cf_udp_connect(), opened socket=%"
CURL_FORMAT_SOCKET_T " (%s:%d)",
- ctx->sock, ctx->l_ip, ctx->l_port);
+ ctx->sock, ctx->ip.local_ip, ctx->ip.local_port);
}
else {
CURL_TRC_CF(data, cf, "cf_udp_connect(), opened socket=%"
@@ -1891,8 +1887,8 @@ static void set_accepted_remote_ip(struct Curl_cfilter *cf, struct Curl_sockaddr_storage ssrem;
curl_socklen_t plen;
- ctx->r_ip[0] = 0;
- ctx->r_port = 0;
+ ctx->ip.remote_ip[0] = 0;
+ ctx->ip.remote_port = 0;
plen = sizeof(ssrem);
memset(&ssrem, 0, plen);
if(getpeername(ctx->sock, (struct sockaddr*) &ssrem, &plen)) {
@@ -1902,14 +1898,14 @@ static void set_accepted_remote_ip(struct Curl_cfilter *cf, return;
}
if(!Curl_addr2string((struct sockaddr*)&ssrem, plen,
- ctx->r_ip, &ctx->r_port)) {
+ ctx->ip.remote_ip, &ctx->ip.remote_port)) {
failf(data, "ssrem inet_ntop() failed with errno %d: %s",
errno, Curl_strerror(errno, buffer, sizeof(buffer)));
return;
}
#else
- ctx->r_ip[0] = 0;
- ctx->r_port = 0;
+ ctx->ip.remote_ip[0] = 0;
+ ctx->ip.remote_port = 0;
(void)data;
#endif
}
@@ -1938,7 +1934,7 @@ CURLcode Curl_conn_tcp_accepted_set(struct Curl_easy *data, cf->connected = TRUE;
CURL_TRC_CF(data, cf, "accepted_set(sock=%" CURL_FORMAT_SOCKET_T
", remote=%s port=%d)",
- ctx->sock, ctx->r_ip, ctx->r_port);
+ ctx->sock, ctx->ip.remote_ip, ctx->ip.remote_port);
return CURLE_OK;
}
@@ -1958,9 +1954,9 @@ CURLcode Curl_cf_socket_peek(struct Curl_cfilter *cf, struct Curl_easy *data,
curl_socket_t *psock,
const struct Curl_sockaddr_ex **paddr,
- const char **pr_ip_str, int *pr_port,
- const char **pl_ip_str, int *pl_port)
+ struct ip_quadruple *pip)
{
+ (void)data;
if(cf_is_socket(cf) && cf->ctx) {
struct cf_socket_ctx *ctx = cf->ctx;
@@ -1968,17 +1964,8 @@ CURLcode Curl_cf_socket_peek(struct Curl_cfilter *cf, *psock = ctx->sock;
if(paddr)
*paddr = &ctx->addr;
- if(pr_ip_str)
- *pr_ip_str = ctx->r_ip;
- if(pr_port)
- *pr_port = ctx->r_port;
- if(pl_port ||pl_ip_str) {
- set_local_ip(cf, data);
- if(pl_ip_str)
- *pl_ip_str = ctx->l_ip;
- if(pl_port)
- *pl_port = ctx->l_port;
- }
+ if(pip)
+ *pip = ctx->ip;
return CURLE_OK;
}
return CURLE_FAILED_INIT;
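cf-socket.c collapses the separate r_ip/r_port/l_ip/l_port fields into a single struct ip_quadruple that is filled via Curl_addr2string() and copied into conn->primary/secondary and the transfer info in one assignment. A self-contained sketch of gathering such a quadruple for an already-connected TCP socket, with getnameinfo() standing in for Curl_addr2string() and the struct name invented:

    /* Sketch: collect local and remote address/port (an "IP quadruple")
       for a connected socket. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netdb.h>

    struct ip_quad {
      char remote_ip[NI_MAXHOST];
      char local_ip[NI_MAXHOST];
      int remote_port;
      int local_port;
    };

    static int fill_quad(int fd, struct ip_quad *q)
    {
      struct sockaddr_storage ss;
      socklen_t slen = sizeof(ss);
      char serv[NI_MAXSERV];

      memset(q, 0, sizeof(*q));
      if(getpeername(fd, (struct sockaddr *)&ss, &slen) ||
         getnameinfo((struct sockaddr *)&ss, slen, q->remote_ip,
                     sizeof(q->remote_ip), serv, sizeof(serv),
                     NI_NUMERICHOST | NI_NUMERICSERV))
        return -1;
      q->remote_port = atoi(serv);

      slen = sizeof(ss);
      if(getsockname(fd, (struct sockaddr *)&ss, &slen) ||
         getnameinfo((struct sockaddr *)&ss, slen, q->local_ip,
                     sizeof(q->local_ip), serv, sizeof(serv),
                     NI_NUMERICHOST | NI_NUMERICSERV))
        return -1;
      q->local_port = atoi(serv);
      return 0;
    }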
diff --git a/libs/libcurl/src/cf-socket.h b/libs/libcurl/src/cf-socket.h
index 6031506526..2e175d1a30 100644
--- a/libs/libcurl/src/cf-socket.h
+++ b/libs/libcurl/src/cf-socket.h
@@ -33,6 +33,7 @@ struct Curl_cfilter;
struct Curl_easy;
struct connectdata;
struct Curl_sockaddr_ex;
+struct ip_quadruple;
/*
* The Curl_sockaddr_ex structure is basically libcurl's external API
@@ -153,18 +154,14 @@ CURLcode Curl_conn_tcp_accepted_set(struct Curl_easy *data, * The filter owns all returned values.
* @param psock pointer to hold socket descriptor or NULL
* @param paddr pointer to hold addr reference or NULL
- * @param pr_ip_str pointer to hold remote addr as string or NULL
- * @param pr_port pointer to hold remote port number or NULL
- * @param pl_ip_str pointer to hold local addr as string or NULL
- * @param pl_port pointer to hold local port number or NULL
+ * @param pip pointer to get IP quadruple or NULL
* Returns error if the filter is of invalid type.
*/
CURLcode Curl_cf_socket_peek(struct Curl_cfilter *cf,
struct Curl_easy *data,
curl_socket_t *psock,
const struct Curl_sockaddr_ex **paddr,
- const char **pr_ip_str, int *pr_port,
- const char **pl_ip_str, int *pl_port);
+ struct ip_quadruple *pip);
extern struct Curl_cftype Curl_cft_tcp;
extern struct Curl_cftype Curl_cft_udp;
diff --git a/libs/libcurl/src/cfilters.c b/libs/libcurl/src/cfilters.c
index 653220ddb7..2d1fbe1b1d 100644
--- a/libs/libcurl/src/cfilters.c
+++ b/libs/libcurl/src/cfilters.c
@@ -67,7 +67,7 @@ void Curl_cf_def_get_host(struct Curl_cfilter *cf, struct Curl_easy *data,
else {
*phost = cf->conn->host.name;
*pdisplay_host = cf->conn->host.dispname;
- *pport = cf->conn->port;
+ *pport = cf->conn->primary.remote_port;
}
}
@@ -168,38 +168,46 @@ void Curl_conn_close(struct Curl_easy *data, int index) }
}
-ssize_t Curl_conn_recv(struct Curl_easy *data, int num, char *buf,
- size_t len, CURLcode *code)
+ssize_t Curl_cf_recv(struct Curl_easy *data, int num, char *buf,
+ size_t len, CURLcode *code)
{
struct Curl_cfilter *cf;
DEBUGASSERT(data);
DEBUGASSERT(data->conn);
+ *code = CURLE_OK;
cf = data->conn->cfilter[num];
while(cf && !cf->connected) {
cf = cf->next;
}
if(cf) {
- return cf->cft->do_recv(cf, data, buf, len, code);
+ ssize_t nread = cf->cft->do_recv(cf, data, buf, len, code);
+ DEBUGASSERT(nread >= 0 || *code);
+ DEBUGASSERT(nread < 0 || !*code);
+ return nread;
}
failf(data, "recv: no filter connected");
*code = CURLE_FAILED_INIT;
return -1;
}
-ssize_t Curl_conn_send(struct Curl_easy *data, int num,
- const void *mem, size_t len, CURLcode *code)
+ssize_t Curl_cf_send(struct Curl_easy *data, int num,
+ const void *mem, size_t len, CURLcode *code)
{
struct Curl_cfilter *cf;
DEBUGASSERT(data);
DEBUGASSERT(data->conn);
+ *code = CURLE_OK;
cf = data->conn->cfilter[num];
while(cf && !cf->connected) {
cf = cf->next;
}
if(cf) {
- return cf->cft->do_send(cf, data, mem, len, code);
+ ssize_t nwritten = cf->cft->do_send(cf, data, mem, len, code);
+ DEBUGASSERT(nwritten >= 0 || *code);
+ DEBUGASSERT(nwritten < 0 || !*code || !len);
+ return nwritten;
}
failf(data, "send: no filter connected");
DEBUGASSERT(0);
@@ -662,6 +670,58 @@ size_t Curl_conn_get_max_concurrent(struct Curl_easy *data, return (result || n <= 0)? 1 : (size_t)n;
}
+int Curl_conn_sockindex(struct Curl_easy *data, curl_socket_t sockfd)
+{
+ if(data && data->conn &&
+ sockfd != CURL_SOCKET_BAD && sockfd == data->conn->sock[SECONDARYSOCKET])
+ return SECONDARYSOCKET;
+ return FIRSTSOCKET;
+}
+
+CURLcode Curl_conn_recv(struct Curl_easy *data, int sockindex,
+ char *buf, size_t blen, ssize_t *n)
+{
+ CURLcode result = CURLE_OK;
+ ssize_t nread;
+
+ DEBUGASSERT(data->conn);
+ nread = data->conn->recv[sockindex](data, sockindex, buf, blen, &result);
+ DEBUGASSERT(nread >= 0 || result);
+ DEBUGASSERT(nread < 0 || !result);
+ *n = (nread >= 0)? (size_t)nread : 0;
+ return result;
+}
+
+CURLcode Curl_conn_send(struct Curl_easy *data, int sockindex,
+ const void *buf, size_t blen,
+ size_t *pnwritten)
+{
+ ssize_t nwritten;
+ CURLcode result = CURLE_OK;
+ struct connectdata *conn;
+
+ DEBUGASSERT(sockindex >= 0 && sockindex < 2);
+ DEBUGASSERT(pnwritten);
+ DEBUGASSERT(data);
+ DEBUGASSERT(data->conn);
+ conn = data->conn;
+#ifdef CURLDEBUG
+ {
+ /* Allow debug builds to override this logic to force short sends
+ */
+ char *p = getenv("CURL_SMALLSENDS");
+ if(p) {
+ size_t altsize = (size_t)strtoul(p, NULL, 10);
+ if(altsize)
+ blen = CURLMIN(blen, altsize);
+ }
+ }
+#endif
+ nwritten = conn->send[sockindex](data, sockindex, buf, blen, &result);
+ DEBUGASSERT((nwritten >= 0) || result);
+ *pnwritten = (nwritten < 0)? 0 : (size_t)nwritten;
+ return result;
+}
void Curl_pollset_reset(struct Curl_easy *data,
struct easy_pollset *ps)
diff --git a/libs/libcurl/src/cfilters.h b/libs/libcurl/src/cfilters.h
index 5fc11be79c..016b05ea71 100644
--- a/libs/libcurl/src/cfilters.h
+++ b/libs/libcurl/src/cfilters.h
@@ -402,11 +402,11 @@ void Curl_conn_adjust_pollset(struct Curl_easy *data,
/**
* Receive data through the filter chain at `sockindex` for connection
* `data->conn`. Copy at most `len` bytes into `buf`. Return the
- * actuel number of bytes copied or a negative value on error.
+ * actual number of bytes copied or a negative value on error.
* The error code is placed into `*code`.
*/
-ssize_t Curl_conn_recv(struct Curl_easy *data, int sockindex, char *buf,
- size_t len, CURLcode *code);
+ssize_t Curl_cf_recv(struct Curl_easy *data, int sockindex, char *buf,
+ size_t len, CURLcode *code);
/**
* Send `len` bytes of data from `buf` through the filter chain `sockindex`
@@ -414,8 +414,8 @@ ssize_t Curl_conn_recv(struct Curl_easy *data, int sockindex, char *buf, * or a negative value on error.
* The error code is placed into `*code`.
*/
-ssize_t Curl_conn_send(struct Curl_easy *data, int sockindex,
- const void *buf, size_t len, CURLcode *code);
+ssize_t Curl_cf_send(struct Curl_easy *data, int sockindex,
+ const void *buf, size_t len, CURLcode *code);
/**
* The easy handle `data` is being attached to `conn`. This does
@@ -497,6 +497,30 @@ size_t Curl_conn_get_max_concurrent(struct Curl_easy *data, int sockindex);
+/**
+ * Get the index of the given socket in the connection's sockets.
+ * Useful in calling `Curl_conn_send()/Curl_conn_recv()` with the
+ * correct socket index.
+ */
+int Curl_conn_sockindex(struct Curl_easy *data, curl_socket_t sockfd);
+
+/*
+ * Receive data on the connection, using FIRSTSOCKET/SECONDARYSOCKET.
+ * Will return CURLE_AGAIN iff blocked on receiving.
+ */
+CURLcode Curl_conn_recv(struct Curl_easy *data, int sockindex,
+ char *buf, size_t buffersize,
+ ssize_t *pnread);
+
+/*
+ * Send data on the connection, using FIRSTSOCKET/SECONDARYSOCKET.
+ * Will return CURLE_AGAIN iff blocked on sending.
+ */
+CURLcode Curl_conn_send(struct Curl_easy *data, int sockindex,
+ const void *buf, size_t blen,
+ size_t *pnwritten);
+
+
void Curl_pollset_reset(struct Curl_easy *data,
struct easy_pollset *ps);
diff --git a/libs/libcurl/src/config-win32.h b/libs/libcurl/src/config-win32.h
index ce440ddda3..2514a09c33 100644
--- a/libs/libcurl/src/config-win32.h
+++ b/libs/libcurl/src/config-win32.h
@@ -158,10 +158,6 @@
/* Define if you have the socket function. */
#define HAVE_SOCKET 1
-/* Define if libSSH2 is in use */
-#define USE_LIBSSH2 1
-#define HAVE_LIBSSH2_H 1
-
/* Define if you have the strcasecmp function. */
#if defined(__MINGW32__)
#define HAVE_STRCASECMP 1
@@ -478,9 +474,6 @@ Vista #define USE_WIN32_LDAP 1
#endif
-/* if SSL is enabled */
-#define USE_OPENSSL 1
-
/* Define to use the Windows crypto library. */
#if !defined(CURL_WINDOWS_APP)
#define USE_WIN32_CRYPTO
diff --git a/libs/libcurl/src/conncache.c b/libs/libcurl/src/conncache.c
index de05e7494f..c3212450a4 100644
--- a/libs/libcurl/src/conncache.c
+++ b/libs/libcurl/src/conncache.c
@@ -131,7 +131,7 @@ static void hashkey(struct connectdata *conn, char *buf, size_t len)
#ifndef CURL_DISABLE_PROXY
if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
hostname = conn->http_proxy.host.name;
- port = conn->port;
+ port = conn->primary.remote_port;
}
else
#endif
@@ -395,8 +395,6 @@ bool Curl_conncache_return_conn(struct Curl_easy *data, important that details from this (unrelated) disconnect does not
taint meta-data in the data handle. */
struct conncache *connc = data->state.conn_cache;
- connc->closure_handle->state.buffer = data->state.buffer;
- connc->closure_handle->set.buffer_size = data->set.buffer_size;
Curl_disconnect(connc->closure_handle, conn_candidate,
/* dead_connection */ FALSE);
}
@@ -522,12 +520,9 @@ Curl_conncache_extract_oldest(struct Curl_easy *data) void Curl_conncache_close_all_connections(struct conncache *connc)
{
struct connectdata *conn;
- char buffer[READBUFFER_MIN + 1];
SIGPIPE_VARIABLE(pipe_st);
if(!connc->closure_handle)
return;
- connc->closure_handle->state.buffer = buffer;
- connc->closure_handle->set.buffer_size = READBUFFER_MIN;
conn = conncache_find_first_connection(connc);
while(conn) {
@@ -541,7 +536,6 @@ void Curl_conncache_close_all_connections(struct conncache *connc) conn = conncache_find_first_connection(connc);
}
- connc->closure_handle->state.buffer = NULL;
sigpipe_ignore(connc->closure_handle, &pipe_st);
Curl_hostcache_clean(connc->closure_handle,
diff --git a/libs/libcurl/src/connect.c b/libs/libcurl/src/connect.c
index eccdc4a7db..c391b0d99b 100644
--- a/libs/libcurl/src/connect.c
+++ b/libs/libcurl/src/connect.c
@@ -94,7 +94,7 @@
* infinite time left). If the value is negative, the timeout time has already
* elapsed.
* @param data the transfer to check on
- * @param nowp timestamp to use for calculdation, NULL to use Curl_now()
+ * @param nowp timestamp to use for calculation, NULL to use Curl_now()
* @param duringconnect TRUE iff connect timeout is also taken into account.
* @unittest: 1303
*/
@@ -145,19 +145,26 @@ timediff_t Curl_timeleft(struct Curl_easy *data, /* Copies connection info into the transfer handle to make it available when
the transfer handle is no longer associated with the connection. */
void Curl_persistconninfo(struct Curl_easy *data, struct connectdata *conn,
- char *local_ip, int local_port)
+ struct ip_quadruple *ip)
{
- memcpy(data->info.conn_primary_ip, conn->primary_ip, MAX_IPADR_LEN);
- if(local_ip && local_ip[0])
- memcpy(data->info.conn_local_ip, local_ip, MAX_IPADR_LEN);
- else
- data->info.conn_local_ip[0] = 0;
+ if(ip)
+ data->info.primary = *ip;
+ else {
+ memset(&data->info.primary, 0, sizeof(data->info.primary));
+ data->info.primary.remote_port = -1;
+ data->info.primary.local_port = -1;
+ }
data->info.conn_scheme = conn->handler->scheme;
/* conn_protocol can only provide "old" protocols */
data->info.conn_protocol = (conn->handler->protocol) & CURLPROTO_MASK;
- data->info.conn_primary_port = conn->port;
data->info.conn_remote_port = conn->remote_port;
- data->info.conn_local_port = local_port;
+ data->info.used_proxy =
+#ifdef CURL_DISABLE_PROXY
+ 0
+#else
+ conn->bits.proxy
+#endif
+ ;
}
static const struct Curl_addrinfo *
@@ -721,7 +728,7 @@ evaluate: failf(data, "Failed to connect to %s port %u after "
"%" CURL_FORMAT_TIMEDIFF_T " ms: %s",
- hostname, conn->port,
+ hostname, conn->primary.remote_port,
Curl_timediff(now, data->progress.t_startsingle),
curl_easy_strerror(result));
@@ -911,7 +918,7 @@ static CURLcode cf_he_connect(struct Curl_cfilter *cf, if(cf->conn->handler->protocol & PROTO_FAMILY_SSH)
Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
- Curl_verboseconnect(data, cf->conn);
+ Curl_verboseconnect(data, cf->conn, cf->sockindex);
data->info.numconnects++; /* to track the # of connections made */
}
break;
diff --git a/libs/libcurl/src/connect.h b/libs/libcurl/src/connect.h index f9961085a6..ce14ea845a 100644 --- a/libs/libcurl/src/connect.h +++ b/libs/libcurl/src/connect.h @@ -30,6 +30,7 @@ #include "timeval.h"
struct Curl_dns_entry;
+struct ip_quadruple;
/* generic function that returns how much time there's left to run, according
to the timeouts set */
@@ -52,7 +53,7 @@ bool Curl_addr2string(struct sockaddr *sa, curl_socklen_t salen, char *addr, int *port);
void Curl_persistconninfo(struct Curl_easy *data, struct connectdata *conn,
- char *local_ip, int local_port);
+ struct ip_quadruple *ip);
/*
* Curl_conncontrol() marks the end of a connection/stream. The 'closeit'
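
For orientation on the signature change above: the separate ip/port arguments are folded into one struct ip_quadruple that travels with the connection info. A minimal standalone sketch of the idea; only remote_port/local_port are taken from the diff, the remaining field names and sizes are assumptions for illustration:

    /* Sketch only -- field names beyond remote_port/local_port are
     * assumptions for illustration, not copied from curl's headers. */
    #include <string.h>

    struct ip_quadruple {
      char remote_ip[46];   /* text form, large enough for an IPv6 address */
      char local_ip[46];
      int  remote_port;
      int  local_port;
    };

    /* mirrors the logic in the hunk above: copy the quadruple when known,
     * otherwise clear the addresses and reset both ports to -1 */
    static void persist_ip_info(struct ip_quadruple *dst,
                                const struct ip_quadruple *src)
    {
      if(src)
        *dst = *src;
      else {
        memset(dst, 0, sizeof(*dst));
        dst->remote_port = -1;
        dst->local_port = -1;
      }
    }
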
diff --git a/libs/libcurl/src/cookie.c b/libs/libcurl/src/cookie.c index 73ca6282d5..1689f7fa41 100644 --- a/libs/libcurl/src/cookie.c +++ b/libs/libcurl/src/cookie.c @@ -426,6 +426,7 @@ static void remove_expired(struct CookieInfo *cookies) }
}
+#ifndef USE_LIBPSL
/* Make sure domain contains a dot or is localhost. */
static bool bad_domain(const char *domain, size_t len)
{
@@ -443,6 +444,7 @@ static bool bad_domain(const char *domain, size_t len) }
return TRUE;
}
+#endif
/*
RFC 6265 section 4.1.1 says a server should accept this range:
@@ -1040,7 +1042,7 @@ Curl_cookie_add(struct Curl_easy *data, Curl_psl_release(data);
}
else
- acceptable = !bad_domain(domain, strlen(domain));
+ infof(data, "libpsl problem, rejecting cookie for safety");
}
if(!acceptable) {
@@ -1205,7 +1207,6 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data, bool newsession)
{
struct CookieInfo *c;
- char *line = NULL;
FILE *handle = NULL;
if(!inc) {
@@ -1241,16 +1242,14 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data, c->running = FALSE; /* this is not running, this is init */
if(fp) {
-
- line = malloc(MAX_COOKIE_LINE);
- if(!line)
- goto fail;
- while(Curl_get_line(line, MAX_COOKIE_LINE, fp)) {
- char *lineptr = line;
+ struct dynbuf buf;
+ Curl_dyn_init(&buf, MAX_COOKIE_LINE);
+ while(Curl_get_line(&buf, fp)) {
+ char *lineptr = Curl_dyn_ptr(&buf);
bool headerline = FALSE;
- if(checkprefix("Set-Cookie:", line)) {
+ if(checkprefix("Set-Cookie:", lineptr)) {
/* This is a cookie line, get it! */
- lineptr = &line[11];
+ lineptr += 11;
headerline = TRUE;
while(*lineptr && ISBLANK(*lineptr))
lineptr++;
@@ -1258,7 +1257,7 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data, Curl_cookie_add(data, c, headerline, TRUE, lineptr, NULL, NULL, TRUE);
}
- free(line); /* free the line buffer */
+ Curl_dyn_free(&buf); /* free the line buffer */
/*
* Remove expired cookies from the hash. We must make sure to run this
@@ -1274,18 +1273,6 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data, c->running = TRUE; /* now, we're running */
return c;
-
-fail:
- free(line);
- /*
- * Only clean up if we allocated it here, as the original could still be in
- * use by a share handle.
- */
- if(!inc)
- Curl_cookie_cleanup(c);
- if(handle)
- fclose(handle);
- return NULL; /* out of memory */
}
/*
diff --git a/libs/libcurl/src/curl_config.h.cmake b/libs/libcurl/src/curl_config.h.cmake index f0f6b95c98..d88710fed3 100644 --- a/libs/libcurl/src/curl_config.h.cmake +++ b/libs/libcurl/src/curl_config.h.cmake @@ -720,6 +720,9 @@ ${SIZEOF_TIME_T_CODE} /* to enable quiche */
#cmakedefine USE_QUICHE 1
+/* to enable openssl + nghttp3 */
+#cmakedefine USE_OPENSSL_QUIC 1
+
/* Define to 1 if you have the quiche_conn_set_qlog_fd function. */
#cmakedefine HAVE_QUICHE_CONN_SET_QLOG_FD 1
diff --git a/libs/libcurl/src/curl_config.h.in b/libs/libcurl/src/curl_config.h.in index d41e70b937..47ac9ec690 100644 --- a/libs/libcurl/src/curl_config.h.in +++ b/libs/libcurl/src/curl_config.h.in @@ -396,6 +396,9 @@ /* Define to 1 if you have the `idn2' library (-lidn2). */
#undef HAVE_LIBIDN2
+/* Define to 1 if you have the <libpsl.h> header file. */
+#undef HAVE_LIBPSL_H
+
/* Define to 1 if using libressl. */
#undef HAVE_LIBRESSL
@@ -848,7 +851,7 @@ /* if hyper is in use */
#undef USE_HYPER
-/* PSL support enabled */
+/* if libpsl is in use */
#undef USE_LIBPSL
/* if librtmp is in use */
diff --git a/libs/libcurl/src/curl_des.c b/libs/libcurl/src/curl_des.c index c92b1de174..d8df054017 100644 --- a/libs/libcurl/src/curl_des.c +++ b/libs/libcurl/src/curl_des.c @@ -36,7 +36,7 @@ * Curl_des_set_odd_parity()
*
* This is used to apply odd parity to the given byte array. It is typically
- * used by when a cryptography engines doesn't have it's own version.
+ * used when a cryptography engine doesn't have its own version.
*
* The function is a port of the Java based oddParity() function over at:
*
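
As a brief aside on the comment corrected above: "odd parity" here means forcing the low bit of each DES key byte so the byte ends up with an odd number of set bits. A standalone sketch of that rule (illustrative only, not curl's Curl_des_set_odd_parity()):

    #include <stddef.h>

    /* Force each byte to odd parity: keep the top seven (key) bits and pick
     * the low bit so the total number of 1-bits in the byte is odd. */
    static void set_odd_parity(unsigned char *bytes, size_t len)
    {
      size_t i;
      for(i = 0; i < len; i++) {
        unsigned char key = (unsigned char)(bytes[i] >> 1);
        unsigned char parity = 0;
        int bit;
        for(bit = 0; bit < 7; bit++)
          parity ^= (key >> bit) & 1;   /* 1 if the key bits have odd parity */
        bytes[i] = (unsigned char)((bytes[i] & 0xFE) | (parity ^ 1));
      }
    }
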
diff --git a/libs/libcurl/src/curl_get_line.c b/libs/libcurl/src/curl_get_line.c index 3ffd7702a4..dda1d63c1e 100644 --- a/libs/libcurl/src/curl_get_line.c +++ b/libs/libcurl/src/curl_get_line.c @@ -33,14 +33,16 @@ #include "memdebug.h"
/*
- * Curl_get_line() makes sure to only return complete whole lines that fit in
- * 'len' bytes and end with a newline.
+ * Curl_get_line() makes sure to only return complete whole lines that end
+ * in newlines.
*/
-char *Curl_get_line(char *buf, int len, FILE *input)
+int Curl_get_line(struct dynbuf *buf, FILE *input)
{
- bool partial = FALSE;
+ CURLcode result;
+ char buffer[128];
+ Curl_dyn_reset(buf);
while(1) {
- char *b = fgets(buf, len, input);
+ char *b = fgets(buffer, sizeof(buffer), input);
if(b) {
size_t rlen = strlen(b);
@@ -48,39 +50,28 @@ char *Curl_get_line(char *buf, int len, FILE *input) if(!rlen)
break;
- if(b[rlen-1] == '\n') {
- /* b is \n terminated */
- if(partial) {
- partial = FALSE;
- continue;
- }
- return b;
- }
- else if(feof(input)) {
- if(partial)
- /* Line is already too large to return, ignore rest */
- break;
+ result = Curl_dyn_addn(buf, b, rlen);
+ if(result)
+ /* too long line or out of memory */
+ return 0; /* error */
- if(rlen + 1 < (size_t) len) {
- /* b is EOF terminated, insert missing \n */
- b[rlen] = '\n';
- b[rlen + 1] = '\0';
- return b;
- }
- else
- /* Maximum buffersize reached + EOF
- * This line is impossible to add a \n to so we'll ignore it
- */
- break;
+ else if(b[rlen-1] == '\n')
+ /* end of the line */
+ return 1; /* all good */
+
+ else if(feof(input)) {
+ /* append a newline */
+ result = Curl_dyn_addn(buf, "\n", 1);
+ if(result)
+ /* too long line or out of memory */
+ return 0; /* error */
+ return 1; /* all good */
}
- else
- /* Maximum buffersize reached */
- partial = TRUE;
}
else
break;
}
- return NULL;
+ return 0;
}
#endif /* if not disabled */
diff --git a/libs/libcurl/src/curl_get_line.h b/libs/libcurl/src/curl_get_line.h index cb2160f00e..9b8c0d8410 100644 --- a/libs/libcurl/src/curl_get_line.h +++ b/libs/libcurl/src/curl_get_line.h @@ -24,8 +24,9 @@ *
***************************************************************************/
-/* get_line() makes sure to only return complete whole lines that fit in 'len'
- * bytes and end with a newline. */
-char *Curl_get_line(char *buf, int len, FILE *input);
+#include "dynbuf.h"
+
+/* Curl_get_line() returns complete lines that end with a newline. */
+int Curl_get_line(struct dynbuf *buf, FILE *input);
#endif /* HEADER_CURL_GET_LINE_H */
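
The reworked Curl_get_line() no longer needs a caller-supplied fixed buffer; the caller owns a growable dynbuf instead, as the cookie-loading change earlier in this patch shows. A hedged sketch of the calling pattern (internal curl APIs, include paths assumed):

    #include <stdio.h>
    #include "dynbuf.h"          /* internal curl header: struct dynbuf */
    #include "curl_get_line.h"   /* internal curl header: Curl_get_line() */

    static void read_all_lines(FILE *fp)
    {
      struct dynbuf buf;
      Curl_dyn_init(&buf, 8192);          /* maximum accepted line length */
      while(Curl_get_line(&buf, fp)) {    /* 1 = complete line, 0 = EOF/error */
        const char *line = Curl_dyn_ptr(&buf); /* newline and NUL terminated */
        /* ... parse 'line' here ... */
        (void)line;
      }
      Curl_dyn_free(&buf);                /* release the line buffer */
    }
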
diff --git a/libs/libcurl/src/curl_ntlm_wb.c b/libs/libcurl/src/curl_ntlm_wb.c index 68d2994154..5afdb310d6 100644 --- a/libs/libcurl/src/curl_ntlm_wb.c +++ b/libs/libcurl/src/curl_ntlm_wb.c @@ -266,7 +266,7 @@ static CURLcode ntlm_wb_response(struct Curl_easy *data, struct ntlmdata *ntlm, size_t len_in = strlen(input), len_out = 0;
struct dynbuf b;
char *ptr = NULL;
- usigned char buf[1024]
+ unsigned char buf[1024];
Curl_dyn_init(&b, MAX_NTLM_WB_RESPONSE);
while(len_in > 0) {
diff --git a/libs/libcurl/src/curl_rtmp.c b/libs/libcurl/src/curl_rtmp.c index a4b60175e7..467bde6002 100644 --- a/libs/libcurl/src/curl_rtmp.c +++ b/libs/libcurl/src/curl_rtmp.c @@ -265,10 +265,10 @@ static CURLcode rtmp_do(struct Curl_easy *data, bool *done) if(data->state.upload) {
Curl_pgrsSetUploadSize(data, data->state.infilesize);
- Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
}
else
- Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
*done = TRUE;
return CURLE_OK;
}
diff --git a/libs/libcurl/src/curl_setup.h b/libs/libcurl/src/curl_setup.h index 2c53b742b2..eb67b29776 100644 --- a/libs/libcurl/src/curl_setup.h +++ b/libs/libcurl/src/curl_setup.h @@ -60,6 +60,16 @@ # ifndef NOGDI
# define NOGDI
# endif
+/* Detect Windows App environment which has a restricted access
+ * to the Win32 APIs. */
+# if (defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0602)) || \
+ defined(WINAPI_FAMILY)
+# include <winapifamily.h>
+# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+# define CURL_WINDOWS_APP
+# endif
+# endif
#endif
/*
@@ -256,6 +266,13 @@ #include <curl/system.h>
+/* Helper macro to expand and concatenate two macros.
+ * Direct macros concatenation does not work because macros
+ * are not expanded before direct concatenation.
+ */
+#define CURL_CONC_MACROS_(A,B) A ## B
+#define CURL_CONC_MACROS(A,B) CURL_CONC_MACROS_(A,B)
+
/* curl uses its own printf() function internally. It understands the GNU
* format. Use this format, so that it matches the GNU format attribute we
* use with the mingw compiler, allowing it to verify them at compile-time.
@@ -485,6 +502,17 @@ #endif
#define CURL_OFF_T_MIN (-CURL_OFF_T_MAX - CURL_OFF_T_C(1))
+#if (SIZEOF_CURL_OFF_T != 8)
+# error "curl_off_t must be exactly 64 bits"
+#else
+ typedef unsigned CURL_TYPEOF_CURL_OFF_T curl_uint64_t;
+# ifndef CURL_SUFFIX_CURL_OFF_TU
+# error "CURL_SUFFIX_CURL_OFF_TU must be defined"
+# endif
+# define CURL_UINT64_SUFFIX CURL_SUFFIX_CURL_OFF_TU
+# define CURL_UINT64_C(val) CURL_CONC_MACROS(val,CURL_UINT64_SUFFIX)
+#endif
+
#if (SIZEOF_TIME_T == 4)
# ifdef HAVE_TIME_T_UNSIGNED
# define TIME_T_MAX UINT_MAX
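
The CURL_CONC_MACROS indirection added above is the standard two-level token-pasting trick: ## pastes its operands before they are macro-expanded, so a direct paste would glue the literal onto the unexpanded suffix macro name. A small self-contained illustration of the same technique (the ULL suffix is an assumption for the example, not curl's configured value):

    #include <stdio.h>

    #define CONC_(A,B) A ## B
    #define CONC(A,B)  CONC_(A,B)   /* expand A and B first, then paste */

    #define U64_SUFFIX ULL          /* stand-in for CURL_SUFFIX_CURL_OFF_TU */
    #define U64_C(val) CONC(val, U64_SUFFIX)

    int main(void)
    {
      /* U64_C(0x22312194FC2BF72C) becomes 0x22312194FC2BF72CULL; a direct
       * val ## U64_SUFFIX would paste the literal onto the unexpanded token
       * "U64_SUFFIX" and fail to compile. */
      unsigned long long v = U64_C(0x22312194FC2BF72C);
      printf("%llx\n", v);
      return 0;
    }
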
diff --git a/libs/libcurl/src/curl_sha512_256.c b/libs/libcurl/src/curl_sha512_256.c new file mode 100644 index 0000000000..e1d6c4bc99 --- /dev/null +++ b/libs/libcurl/src/curl_sha512_256.c @@ -0,0 +1,844 @@ +/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Evgeny Grin (Karlson2k), <k2k@narod.ru>.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if !defined(CURL_DISABLE_DIGEST_AUTH) && !defined(CURL_DISABLE_SHA512_256)
+
+#include "curl_sha512_256.h"
+#include "warnless.h"
+
+/* The recommended order of the TLS backends:
+ * * OpenSSL
+ * * GnuTLS
+ * * wolfSSL
+ * * Schannel SSPI
+ * * SecureTransport (Darwin)
+ * * mbedTLS
+ * * BearSSL
+ * * rustls
+ * Skip the backend if it does not support the required algorithm */
+
+#if defined(USE_OPENSSL)
+# include <openssl/opensslv.h>
+# if (!defined(LIBRESSL_VERSION_NUMBER) && \
+ defined(OPENSSL_VERSION_NUMBER) && \
+ (OPENSSL_VERSION_NUMBER >= 0x10100010L)) || \
+ (defined(LIBRESSL_VERSION_NUMBER) && \
+ (LIBRESSL_VERSION_NUMBER >= 0x3080000fL))
+# include <openssl/opensslconf.h>
+# if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA512)
+# include <openssl/evp.h>
+# define USE_OPENSSL_SHA512_256 1
+# define HAS_SHA512_256_IMPLEMENTATION 1
+# endif
+# endif
+#endif /* USE_OPENSSL */
+
+
+#if !defined(HAS_SHA512_256_IMPLEMENTATION) && defined(USE_GNUTLS)
+# include <nettle/sha.h>
+# if defined(SHA512_256_DIGEST_SIZE)
+# define USE_GNUTLS_SHA512_256 1
+# define HAS_SHA512_256_IMPLEMENTATION 1
+# endif
+#endif /* ! HAS_SHA512_256_IMPLEMENTATION && USE_GNUTLS */
+
+#if defined(USE_OPENSSL_SHA512_256)
+
+/* OpenSSL does not provide macros for SHA-512/256 sizes */
+
+/**
+ * Size of the SHA-512/256 single processing block in bytes.
+ */
+#define SHA512_256_BLOCK_SIZE 128
+
+/**
+ * Size of the SHA-512/256 resulting digest in bytes.
+ * This is the final digest size, not intermediate hash.
+ */
+#define SHA512_256_DIGEST_SIZE SHA512_256_DIGEST_LENGTH
+
+/**
+ * Context type used for SHA-512/256 calculations
+ */
+typedef EVP_MD_CTX *Curl_sha512_256_ctx;
+
+/**
+ * Initialise structure for SHA-512/256 calculation.
+ *
+ * @param context the calculation context
+ * @return CURLE_OK if succeed,
+ * error code otherwise
+ */
+static CURLcode
+Curl_sha512_256_init(void *context)
+{
+ Curl_sha512_256_ctx *const ctx = (Curl_sha512_256_ctx *)context;
+
+ *ctx = EVP_MD_CTX_create();
+ if(!*ctx)
+ return CURLE_OUT_OF_MEMORY;
+
+ if(EVP_DigestInit_ex(*ctx, EVP_sha512_256(), NULL)) {
+ /* Check whether the header and this file use the same numbers */
+ DEBUGASSERT(EVP_MD_CTX_size(*ctx) == SHA512_256_DIGEST_SIZE);
+ /* Check whether the block size is correct */
+ DEBUGASSERT(EVP_MD_CTX_block_size(*ctx) == SHA512_256_BLOCK_SIZE);
+
+ return CURLE_OK; /* Success */
+ }
+
+ /* Cleanup */
+ EVP_MD_CTX_destroy(*ctx);
+ return CURLE_FAILED_INIT;
+}
+
+
+/**
+ * Process portion of bytes.
+ *
+ * @param context the calculation context
+ * @param data bytes to add to hash
+ * @return CURLE_OK if succeed,
+ * error code otherwise
+ */
+static CURLcode
+Curl_sha512_256_update(void *context,
+ const unsigned char *data,
+ size_t length)
+{
+ Curl_sha512_256_ctx *const ctx = (Curl_sha512_256_ctx *)context;
+
+ if(!EVP_DigestUpdate(*ctx, data, length))
+ return CURLE_SSL_CIPHER;
+
+ return CURLE_OK;
+}
+
+
+/**
+ * Finalise SHA-512/256 calculation, return digest.
+ *
+ * @param context the calculation context
+ * @param[out] digest set to the hash, must be #SHA512_256_DIGEST_SIZE bytes
+ * @return CURLE_OK if succeed,
+ * error code otherwise
+ */
+static CURLcode
+Curl_sha512_256_finish(unsigned char *digest,
+ void *context)
+{
+ CURLcode ret;
+ Curl_sha512_256_ctx *const ctx = (Curl_sha512_256_ctx *)context;
+
+#ifdef __NetBSD__
+ /* Use a larger buffer to work around a bug in NetBSD:
+ https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=58039 */
+ unsigned char tmp_digest[SHA512_256_DIGEST_SIZE * 2];
+ ret = EVP_DigestFinal_ex(*ctx,
+ tmp_digest, NULL) ? CURLE_OK : CURLE_SSL_CIPHER;
+ if(ret == CURLE_OK)
+ memcpy(digest, tmp_digest, SHA512_256_DIGEST_SIZE);
+#else /* ! __NetBSD__ */
+ ret = EVP_DigestFinal_ex(*ctx, digest, NULL) ? CURLE_OK : CURLE_SSL_CIPHER;
+#endif /* ! __NetBSD__ */
+
+ EVP_MD_CTX_destroy(*ctx);
+ *ctx = NULL;
+
+ return ret;
+}
+
+#elif defined(USE_GNUTLS_SHA512_256)
+
+/**
+ * Context type used for SHA-512/256 calculations
+ */
+typedef struct sha512_256_ctx Curl_sha512_256_ctx;
+
+/**
+ * Initialise structure for SHA-512/256 calculation.
+ *
+ * @param context the calculation context
+ * @return always CURLE_OK
+ */
+static CURLcode
+Curl_sha512_256_init(void *context)
+{
+ Curl_sha512_256_ctx *const ctx = (Curl_sha512_256_ctx *)context;
+
+ /* Check whether the header and this file use the same numbers */
+ DEBUGASSERT(SHA512_256_DIGEST_LENGTH == SHA512_256_DIGEST_SIZE);
+
+ sha512_256_init(ctx);
+
+ return CURLE_OK;
+}
+
+
+/**
+ * Process portion of bytes.
+ *
+ * @param context the calculation context
+ * @param data bytes to add to hash
+ * @param length number of bytes in @a data
+ * @return always CURLE_OK
+ */
+static CURLcode
+Curl_sha512_256_update(void *context,
+ const unsigned char *data,
+ size_t length)
+{
+ Curl_sha512_256_ctx *const ctx = (Curl_sha512_256_ctx *)context;
+
+ DEBUGASSERT((data != NULL) || (length == 0));
+
+ sha512_256_update(ctx, length, (const uint8_t *)data);
+
+ return CURLE_OK;
+}
+
+
+/**
+ * Finalise SHA-512/256 calculation, return digest.
+ *
+ * @param context the calculation context
+ * @param[out] digest set to the hash, must be #SHA512_256_DIGEST_SIZE bytes
+ * @return always CURLE_OK
+ */
+static CURLcode
+Curl_sha512_256_finish(unsigned char *digest,
+ void *context)
+{
+ Curl_sha512_256_ctx *const ctx = (Curl_sha512_256_ctx *)context;
+
+ sha512_256_digest(ctx, (size_t)SHA512_256_DIGEST_SIZE, (uint8_t *)digest);
+
+ return CURLE_OK;
+}
+
+#else /* No system or TLS backend SHA-512/256 implementation available */
+
+/* Use local implementation */
+#define HAS_SHA512_256_IMPLEMENTATION 1
+
+/* ** This implementation of SHA-512/256 hash calculation was originally ** *
+ * ** written by Evgeny Grin (Karlson2k) for GNU libmicrohttpd. ** *
+ * ** The author ported the code to libcurl. The ported code is provided ** *
+ * ** under curl license. ** *
+ * ** This is a minimal version with minimal optimisations. Performance ** *
+ * ** can be significantly improved. Big-endian store and load macros ** *
+ * ** are obvious targets for optimisation. ** */
+
+#ifdef __GNUC__
+# if defined(__has_attribute) && defined(__STDC_VERSION__)
+# if __has_attribute(always_inline) && __STDC_VERSION__ >= 199901
+# define MHDX_INLINE inline __attribute__((always_inline))
+# endif
+# endif
+#endif
+
+#if !defined(MHDX_INLINE) && \
+ defined(_MSC_VER) && !defined(__GNUC__) && !defined(__clang__)
+# if _MSC_VER >= 1400
+# define MHDX_INLINE __forceinline
+# else
+# define MHDX_INLINE /* empty */
+# endif
+#endif
+
+#if !defined(MHDX_INLINE)
+# if defined(inline)
+ /* Assume that 'inline' macro was already defined correctly by
+ * the build system. */
+# define MHDX_INLINE inline
+# elif defined(__cplusplus)
+ /* The code is compiled with C++ compiler.
+ * C++ always supports 'inline'. */
+# define MHDX_INLINE inline
+# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901
+ /* C99 (and later) supports 'inline' keyword */
+# define MHDX_INLINE inline
+# elif defined(__GNUC__) && __GNUC__ >= 3
+ /* GCC supports '__inline__' as an extension */
+# define MHDX_INLINE __inline__
+# else
+# define MHDX_INLINE /* empty */
+# endif
+#endif
+
+/* Bits manipulation macros and functions.
+ Can be moved to other headers to reuse. */
+
+#define MHDX_GET_64BIT_BE(ptr) \
+ ( ((curl_uint64_t)(((const unsigned char*)(ptr))[0]) << 56) | \
+ ((curl_uint64_t)(((const unsigned char*)(ptr))[1]) << 48) | \
+ ((curl_uint64_t)(((const unsigned char*)(ptr))[2]) << 40) | \
+ ((curl_uint64_t)(((const unsigned char*)(ptr))[3]) << 32) | \
+ ((curl_uint64_t)(((const unsigned char*)(ptr))[4]) << 24) | \
+ ((curl_uint64_t)(((const unsigned char*)(ptr))[5]) << 16) | \
+ ((curl_uint64_t)(((const unsigned char*)(ptr))[6]) << 8) | \
+ (curl_uint64_t)(((const unsigned char*)(ptr))[7]) )
+
+#define MHDX_PUT_64BIT_BE(ptr,val) do { \
+ ((unsigned char*)(ptr))[7]=(unsigned char)((curl_uint64_t)(val)); \
+ ((unsigned char*)(ptr))[6]=(unsigned char)(((curl_uint64_t)(val)) >> 8); \
+ ((unsigned char*)(ptr))[5]=(unsigned char)(((curl_uint64_t)(val)) >> 16); \
+ ((unsigned char*)(ptr))[4]=(unsigned char)(((curl_uint64_t)(val)) >> 24); \
+ ((unsigned char*)(ptr))[3]=(unsigned char)(((curl_uint64_t)(val)) >> 32); \
+ ((unsigned char*)(ptr))[2]=(unsigned char)(((curl_uint64_t)(val)) >> 40); \
+ ((unsigned char*)(ptr))[1]=(unsigned char)(((curl_uint64_t)(val)) >> 48); \
+ ((unsigned char*)(ptr))[0]=(unsigned char)(((curl_uint64_t)(val)) >> 56); \
+ } while(0)
+
+/* Defined as a function. The macro version may duplicate the binary code
+ * size as each argument is used twice, so if any calculation is used
+ * as an argument, the calculation could be done twice. */
+static MHDX_INLINE curl_uint64_t
+MHDx_rotr64(curl_uint64_t value, unsigned int bits)
+{
+ bits %= 64;
+ if(0 == bits)
+ return value;
+ /* Defined in a form which modern compiler could optimise. */
+ return (value >> bits) | (value << (64 - bits));
+}
+
+/* SHA-512/256 specific data */
+
+/**
+ * Number of bits in a single SHA-512/256 word.
+ */
+#define SHA512_256_WORD_SIZE_BITS 64
+
+/**
+ * Number of bytes in a single SHA-512/256 word.
+ */
+#define SHA512_256_BYTES_IN_WORD (SHA512_256_WORD_SIZE_BITS / 8)
+
+/**
+ * Hash is kept internally as 8 64-bit words.
+ * This is the intermediate hash size, used during computing the final digest.
+ */
+#define SHA512_256_HASH_SIZE_WORDS 8
+
+/**
+ * Size of the SHA-512/256 resulting digest in words.
+ * This is the final digest size, not intermediate hash.
+ */
+#define SHA512_256_DIGEST_SIZE_WORDS (SHA512_256_HASH_SIZE_WORDS / 2)
+
+/**
+ * Size of the SHA-512/256 resulting digest in bytes
+ * This is the final digest size, not intermediate hash.
+ */
+#define SHA512_256_DIGEST_SIZE \
+ (SHA512_256_DIGEST_SIZE_WORDS * SHA512_256_BYTES_IN_WORD)
+
+/**
+ * Size of the SHA-512/256 single processing block in bits.
+ */
+#define SHA512_256_BLOCK_SIZE_BITS 1024
+
+/**
+ * Size of the SHA-512/256 single processing block in bytes.
+ */
+#define SHA512_256_BLOCK_SIZE (SHA512_256_BLOCK_SIZE_BITS / 8)
+
+/**
+ * Size of the SHA-512/256 single processing block in words.
+ */
+#define SHA512_256_BLOCK_SIZE_WORDS \
+ (SHA512_256_BLOCK_SIZE_BITS / SHA512_256_WORD_SIZE_BITS)
+
+/**
+ * SHA-512/256 calculation context
+ */
+struct mhdx_sha512_256ctx
+{
+ /**
+ * Intermediate hash value. The variable is properly aligned. Smart
+ * compilers may automatically use fast load/store instruction for big
+ * endian data on little endian machine.
+ */
+ curl_uint64_t H[SHA512_256_HASH_SIZE_WORDS];
+ /**
+ * SHA-512/256 input data buffer. The buffer is properly aligned. Smart
+ * compilers may automatically use fast load/store instruction for big
+ * endian data on little endian machine.
+ */
+ curl_uint64_t buffer[SHA512_256_BLOCK_SIZE_WORDS];
+ /**
+ * The number of bytes, lower part
+ */
+ curl_uint64_t count;
+ /**
+ * The number of bits, high part. Unlike lower part, this counts the number
+ * of bits, not bytes.
+ */
+ curl_uint64_t count_bits_hi;
+};
+
+/**
+ * Context type used for SHA-512/256 calculations
+ */
+typedef struct mhdx_sha512_256ctx Curl_sha512_256_ctx;
+
+
+/**
+ * Initialise structure for SHA-512/256 calculation.
+ *
+ * @param context the calculation context
+ * @return always CURLE_OK
+ */
+static CURLcode
+MHDx_sha512_256_init(void *context)
+{
+ struct mhdx_sha512_256ctx *const ctx = (struct mhdx_sha512_256ctx *) context;
+
+ /* Check whether the header and this file use the same numbers */
+ DEBUGASSERT(SHA512_256_DIGEST_LENGTH == SHA512_256_DIGEST_SIZE);
+
+ DEBUGASSERT(sizeof(curl_uint64_t) == 8);
+
+ /* Initial hash values, see FIPS PUB 180-4 section 5.3.6.2 */
+ /* Values generated by "IV Generation Function" as described in
+ * section 5.3.6 */
+ ctx->H[0] = CURL_UINT64_C(0x22312194FC2BF72C);
+ ctx->H[1] = CURL_UINT64_C(0x9F555FA3C84C64C2);
+ ctx->H[2] = CURL_UINT64_C(0x2393B86B6F53B151);
+ ctx->H[3] = CURL_UINT64_C(0x963877195940EABD);
+ ctx->H[4] = CURL_UINT64_C(0x96283EE2A88EFFE3);
+ ctx->H[5] = CURL_UINT64_C(0xBE5E1E2553863992);
+ ctx->H[6] = CURL_UINT64_C(0x2B0199FC2C85B8AA);
+ ctx->H[7] = CURL_UINT64_C(0x0EB72DDC81C52CA2);
+
+ /* Initialise number of bytes and high part of number of bits. */
+ ctx->count = CURL_UINT64_C(0);
+ ctx->count_bits_hi = CURL_UINT64_C(0);
+
+ return CURLE_OK;
+}
+
+
+/**
+ * Base of the SHA-512/256 transformation.
+ * Gets a full 128 bytes block of data and updates hash values;
+ * @param H hash values
+ * @param data the data buffer with #SHA512_256_BLOCK_SIZE bytes block
+ */
+static void
+MHDx_sha512_256_transform(curl_uint64_t H[SHA512_256_HASH_SIZE_WORDS],
+ const void *data)
+{
+ /* Working variables,
+ see FIPS PUB 180-4 section 6.7, 6.4. */
+ curl_uint64_t a = H[0];
+ curl_uint64_t b = H[1];
+ curl_uint64_t c = H[2];
+ curl_uint64_t d = H[3];
+ curl_uint64_t e = H[4];
+ curl_uint64_t f = H[5];
+ curl_uint64_t g = H[6];
+ curl_uint64_t h = H[7];
+
+ /* Data buffer, used as a cyclic buffer.
+ See FIPS PUB 180-4 section 5.2.2, 6.7, 6.4. */
+ curl_uint64_t W[16];
+
+ /* 'Ch' and 'Maj' macro functions are defined with widely-used optimisation.
+ See FIPS PUB 180-4 formulae 4.8, 4.9. */
+#define Ch(x,y,z) ( (z) ^ ((x) & ((y) ^ (z))) )
+#define Maj(x,y,z) ( ((x) & (y)) ^ ((z) & ((x) ^ (y))) )
+
+ /* Four 'Sigma' macro functions.
+ See FIPS PUB 180-4 formulae 4.10, 4.11, 4.12, 4.13. */
+#define SIG0(x) \
+ ( MHDx_rotr64((x), 28) ^ MHDx_rotr64((x), 34) ^ MHDx_rotr64((x), 39) )
+#define SIG1(x) \
+ ( MHDx_rotr64((x), 14) ^ MHDx_rotr64((x), 18) ^ MHDx_rotr64((x), 41) )
+#define sig0(x) \
+ ( MHDx_rotr64((x), 1) ^ MHDx_rotr64((x), 8) ^ ((x) >> 7) )
+#define sig1(x) \
+ ( MHDx_rotr64((x), 19) ^ MHDx_rotr64((x), 61) ^ ((x) >> 6) )
+
+ if(1) {
+ unsigned int t;
+ /* K constants array.
+ See FIPS PUB 180-4 section 4.2.3 for K values. */
+ static const curl_uint64_t K[80] = {
+ CURL_UINT64_C(0x428a2f98d728ae22), CURL_UINT64_C(0x7137449123ef65cd),
+ CURL_UINT64_C(0xb5c0fbcfec4d3b2f), CURL_UINT64_C(0xe9b5dba58189dbbc),
+ CURL_UINT64_C(0x3956c25bf348b538), CURL_UINT64_C(0x59f111f1b605d019),
+ CURL_UINT64_C(0x923f82a4af194f9b), CURL_UINT64_C(0xab1c5ed5da6d8118),
+ CURL_UINT64_C(0xd807aa98a3030242), CURL_UINT64_C(0x12835b0145706fbe),
+ CURL_UINT64_C(0x243185be4ee4b28c), CURL_UINT64_C(0x550c7dc3d5ffb4e2),
+ CURL_UINT64_C(0x72be5d74f27b896f), CURL_UINT64_C(0x80deb1fe3b1696b1),
+ CURL_UINT64_C(0x9bdc06a725c71235), CURL_UINT64_C(0xc19bf174cf692694),
+ CURL_UINT64_C(0xe49b69c19ef14ad2), CURL_UINT64_C(0xefbe4786384f25e3),
+ CURL_UINT64_C(0x0fc19dc68b8cd5b5), CURL_UINT64_C(0x240ca1cc77ac9c65),
+ CURL_UINT64_C(0x2de92c6f592b0275), CURL_UINT64_C(0x4a7484aa6ea6e483),
+ CURL_UINT64_C(0x5cb0a9dcbd41fbd4), CURL_UINT64_C(0x76f988da831153b5),
+ CURL_UINT64_C(0x983e5152ee66dfab), CURL_UINT64_C(0xa831c66d2db43210),
+ CURL_UINT64_C(0xb00327c898fb213f), CURL_UINT64_C(0xbf597fc7beef0ee4),
+ CURL_UINT64_C(0xc6e00bf33da88fc2), CURL_UINT64_C(0xd5a79147930aa725),
+ CURL_UINT64_C(0x06ca6351e003826f), CURL_UINT64_C(0x142929670a0e6e70),
+ CURL_UINT64_C(0x27b70a8546d22ffc), CURL_UINT64_C(0x2e1b21385c26c926),
+ CURL_UINT64_C(0x4d2c6dfc5ac42aed), CURL_UINT64_C(0x53380d139d95b3df),
+ CURL_UINT64_C(0x650a73548baf63de), CURL_UINT64_C(0x766a0abb3c77b2a8),
+ CURL_UINT64_C(0x81c2c92e47edaee6), CURL_UINT64_C(0x92722c851482353b),
+ CURL_UINT64_C(0xa2bfe8a14cf10364), CURL_UINT64_C(0xa81a664bbc423001),
+ CURL_UINT64_C(0xc24b8b70d0f89791), CURL_UINT64_C(0xc76c51a30654be30),
+ CURL_UINT64_C(0xd192e819d6ef5218), CURL_UINT64_C(0xd69906245565a910),
+ CURL_UINT64_C(0xf40e35855771202a), CURL_UINT64_C(0x106aa07032bbd1b8),
+ CURL_UINT64_C(0x19a4c116b8d2d0c8), CURL_UINT64_C(0x1e376c085141ab53),
+ CURL_UINT64_C(0x2748774cdf8eeb99), CURL_UINT64_C(0x34b0bcb5e19b48a8),
+ CURL_UINT64_C(0x391c0cb3c5c95a63), CURL_UINT64_C(0x4ed8aa4ae3418acb),
+ CURL_UINT64_C(0x5b9cca4f7763e373), CURL_UINT64_C(0x682e6ff3d6b2b8a3),
+ CURL_UINT64_C(0x748f82ee5defb2fc), CURL_UINT64_C(0x78a5636f43172f60),
+ CURL_UINT64_C(0x84c87814a1f0ab72), CURL_UINT64_C(0x8cc702081a6439ec),
+ CURL_UINT64_C(0x90befffa23631e28), CURL_UINT64_C(0xa4506cebde82bde9),
+ CURL_UINT64_C(0xbef9a3f7b2c67915), CURL_UINT64_C(0xc67178f2e372532b),
+ CURL_UINT64_C(0xca273eceea26619c), CURL_UINT64_C(0xd186b8c721c0c207),
+ CURL_UINT64_C(0xeada7dd6cde0eb1e), CURL_UINT64_C(0xf57d4f7fee6ed178),
+ CURL_UINT64_C(0x06f067aa72176fba), CURL_UINT64_C(0x0a637dc5a2c898a6),
+ CURL_UINT64_C(0x113f9804bef90dae), CURL_UINT64_C(0x1b710b35131c471b),
+ CURL_UINT64_C(0x28db77f523047d84), CURL_UINT64_C(0x32caab7b40c72493),
+ CURL_UINT64_C(0x3c9ebe0a15c9bebc), CURL_UINT64_C(0x431d67c49c100d4c),
+ CURL_UINT64_C(0x4cc5d4becb3e42b6), CURL_UINT64_C(0x597f299cfc657e2a),
+ CURL_UINT64_C(0x5fcb6fab3ad6faec), CURL_UINT64_C(0x6c44198c4a475817)
+ };
+
+ /* One step of SHA-512/256 computation,
+ see FIPS PUB 180-4 section 6.4.2 step 3.
+ * Note: this macro updates working variables in-place, without rotation.
+ * Note: the first (vH += SIG1(vE) + Ch(vE,vF,vG) + kt + wt) equals T1 in
+ FIPS PUB 180-4 section 6.4.2 step 3.
+ the second (vH += SIG0(vA) + Maj(vA,vB,vC)) equals T1 + T2 in
+ FIPS PUB 180-4 section 6.4.2 step 3.
+ * Note: 'wt' must be used exactly one time in this macro as macro for
+ 'wt' calculation may change other data as well every time when
+ used. */
+#define SHA2STEP64(vA,vB,vC,vD,vE,vF,vG,vH,kt,wt) do { \
+ (vD) += ((vH) += SIG1 ((vE)) + Ch ((vE),(vF),(vG)) + (kt) + (wt)); \
+ (vH) += SIG0 ((vA)) + Maj ((vA),(vB),(vC)); } while (0)
+
+ /* One step of SHA-512/256 computation with working variables rotation,
+ see FIPS PUB 180-4 section 6.4.2 step 3. This macro version reassigns
+ all working variables on each step. */
+#define SHA2STEP64RV(vA,vB,vC,vD,vE,vF,vG,vH,kt,wt) do { \
+ curl_uint64_t tmp_h_ = (vH); \
+ SHA2STEP64((vA),(vB),(vC),(vD),(vE),(vF),(vG),tmp_h_,(kt),(wt)); \
+ (vH) = (vG); \
+ (vG) = (vF); \
+ (vF) = (vE); \
+ (vE) = (vD); \
+ (vD) = (vC); \
+ (vC) = (vB); \
+ (vB) = (vA); \
+ (vA) = tmp_h_; } while(0)
+
+ /* Get value of W(t) from input data buffer for 0 <= t <= 15,
+ See FIPS PUB 180-4 section 6.2.
+ Input data must be read in big-endian bytes order,
+ see FIPS PUB 180-4 section 3.1.2. */
+#define SHA512_GET_W_FROM_DATA(buf,t) \
+ MHDX_GET_64BIT_BE( \
+ ((const unsigned char*) (buf)) + (t) * SHA512_256_BYTES_IN_WORD)
+
+ /* During first 16 steps, before making any calculation on each step, the
+ W element is read from the input data buffer as a big-endian value and
+ stored in the array of W elements. */
+ for(t = 0; t < 16; ++t) {
+ SHA2STEP64RV(a, b, c, d, e, f, g, h, K[t], \
+ W[t] = SHA512_GET_W_FROM_DATA(data, t));
+ }
+
+ /* 'W' generation and assignment for 16 <= t <= 79.
+ See FIPS PUB 180-4 section 6.4.2.
+ As only the last 16 'W' are used in calculations, it is possible to
+ use 16 elements array of W as a cyclic buffer.
+ Note: ((t-16) & 15) have same value as (t & 15) */
+#define Wgen(w,t) \
+ (curl_uint64_t)( (w)[(t - 16) & 15] + sig1((w)[((t) - 2) & 15]) \
+ + (w)[((t) - 7) & 15] + sig0((w)[((t) - 15) & 15]) )
+
+ /* During the last 64 steps, before making any calculation on each step,
+ current W element is generated from other W elements of the cyclic
+ buffer and the generated value is stored back in the cyclic buffer. */
+ for(t = 16; t < 80; ++t) {
+ SHA2STEP64RV(a, b, c, d, e, f, g, h, K[t], \
+ W[t & 15] = Wgen(W, t));
+ }
+ }
+
+ /* Compute and store the intermediate hash.
+ See FIPS PUB 180-4 section 6.4.2 step 4. */
+ H[0] += a;
+ H[1] += b;
+ H[2] += c;
+ H[3] += d;
+ H[4] += e;
+ H[5] += f;
+ H[6] += g;
+ H[7] += h;
+}
+
+
+/**
+ * Process portion of bytes.
+ *
+ * @param context the calculation context
+ * @param data bytes to add to hash
+ * @param length number of bytes in @a data
+ * @return always CURLE_OK
+ */
+static CURLcode
+MHDx_sha512_256_update(void *context,
+ const unsigned char *data,
+ size_t length)
+{
+ unsigned int bytes_have; /**< Number of bytes in the context buffer */
+ struct mhdx_sha512_256ctx *const ctx = (struct mhdx_sha512_256ctx *)context;
+ /* the void pointer here is required to mute Intel compiler warning */
+ void *const ctx_buf = ctx->buffer;
+
+ DEBUGASSERT((data != NULL) || (length == 0));
+
+ if(0 == length)
+ return CURLE_OK; /* Shortcut, do nothing */
+
+ /* Note: (count & (SHA512_256_BLOCK_SIZE-1))
+ equals (count % SHA512_256_BLOCK_SIZE) for this block size. */
+ bytes_have = (unsigned int) (ctx->count & (SHA512_256_BLOCK_SIZE - 1));
+ ctx->count += length;
+ if(length > ctx->count)
+ ctx->count_bits_hi += 1U << 3; /* Value wrap */
+ ctx->count_bits_hi += ctx->count >> 61;
+ ctx->count &= CURL_UINT64_C(0x1FFFFFFFFFFFFFFF);
+
+ if(0 != bytes_have) {
+ unsigned int bytes_left = SHA512_256_BLOCK_SIZE - bytes_have;
+ if(length >= bytes_left) {
+ /* Combine new data with data in the buffer and process the full
+ block. */
+ memcpy(((unsigned char *) ctx_buf) + bytes_have,
+ data,
+ bytes_left);
+ data += bytes_left;
+ length -= bytes_left;
+ MHDx_sha512_256_transform(ctx->H, ctx->buffer);
+ bytes_have = 0;
+ }
+ }
+
+ while(SHA512_256_BLOCK_SIZE <= length) {
+ /* Process any full blocks of new data directly,
+ without copying to the buffer. */
+ MHDx_sha512_256_transform(ctx->H, data);
+ data += SHA512_256_BLOCK_SIZE;
+ length -= SHA512_256_BLOCK_SIZE;
+ }
+
+ if(0 != length) {
+ /* Copy incomplete block of new data (if any)
+ to the buffer. */
+ memcpy(((unsigned char *) ctx_buf) + bytes_have, data, length);
+ }
+
+ return CURLE_OK;
+}
+
+
+
+/**
+ * Size of "length" insertion in bits.
+ * See FIPS PUB 180-4 section 5.1.2.
+ */
+#define SHA512_256_SIZE_OF_LEN_ADD_BITS 128
+
+/**
+ * Size of "length" insertion in bytes.
+ */
+#define SHA512_256_SIZE_OF_LEN_ADD (SHA512_256_SIZE_OF_LEN_ADD_BITS / 8)
+
+/**
+ * Finalise SHA-512/256 calculation, return digest.
+ *
+ * @param context the calculation context
+ * @param[out] digest set to the hash, must be #SHA512_256_DIGEST_SIZE bytes
+ * @return always CURLE_OK
+ */
+static CURLcode
+MHDx_sha512_256_finish(unsigned char *digest,
+ void *context)
+{
+ struct mhdx_sha512_256ctx *const ctx = (struct mhdx_sha512_256ctx *)context;
+ curl_uint64_t num_bits; /**< Number of processed bits */
+ unsigned int bytes_have; /**< Number of bytes in the context buffer */
+ /* the void pointer here is required to mute Intel compiler warning */
+ void *const ctx_buf = ctx->buffer;
+
+ /* Memorise the number of processed bits.
+ The padding and other data added here during the postprocessing must
+ not change the amount of hashed data. */
+ num_bits = ctx->count << 3;
+
+ /* Note: (count & (SHA512_256_BLOCK_SIZE-1))
+ equals (count % SHA512_256_BLOCK_SIZE) for this block size. */
+ bytes_have = (unsigned int) (ctx->count & (SHA512_256_BLOCK_SIZE - 1));
+
+ /* Input data must be padded with a single bit "1", then with zeros and
+ finally the length of data in bits must be added as the final bytes
+ of the last block.
+ See FIPS PUB 180-4 section 5.1.2. */
+
+ /* Data is always processed in form of bytes (not by individual bits),
+ therefore position of the first padding bit in byte is always
+ predefined (0x80). */
+ /* Buffer always have space at least for one byte (as full buffers are
+ processed when formed). */
+ ((unsigned char *) ctx_buf)[bytes_have++] = 0x80U;
+
+ if(SHA512_256_BLOCK_SIZE - bytes_have < SHA512_256_SIZE_OF_LEN_ADD) {
+ /* No space in the current block to put the total length of message.
+ Pad the current block with zeros and process it. */
+ if(bytes_have < SHA512_256_BLOCK_SIZE)
+ memset(((unsigned char *) ctx_buf) + bytes_have, 0,
+ SHA512_256_BLOCK_SIZE - bytes_have);
+ /* Process the full block. */
+ MHDx_sha512_256_transform(ctx->H, ctx->buffer);
+ /* Start the new block. */
+ bytes_have = 0;
+ }
+
+ /* Pad the rest of the buffer with zeros. */
+ memset(((unsigned char *) ctx_buf) + bytes_have, 0,
+ SHA512_256_BLOCK_SIZE - SHA512_256_SIZE_OF_LEN_ADD - bytes_have);
+ /* Put high part of number of bits in processed message and then lower
+ part of number of bits as big-endian values.
+ See FIPS PUB 180-4 section 5.1.2. */
+ /* Note: the target location is predefined and buffer is always aligned */
+ MHDX_PUT_64BIT_BE(((unsigned char *) ctx_buf) \
+ + SHA512_256_BLOCK_SIZE \
+ - SHA512_256_SIZE_OF_LEN_ADD, \
+ ctx->count_bits_hi);
+ MHDX_PUT_64BIT_BE(((unsigned char *) ctx_buf) \
+ + SHA512_256_BLOCK_SIZE \
+ - SHA512_256_SIZE_OF_LEN_ADD \
+ + SHA512_256_BYTES_IN_WORD, \
+ num_bits);
+ /* Process the full final block. */
+ MHDx_sha512_256_transform(ctx->H, ctx->buffer);
+
+ /* Put in BE mode the leftmost part of the hash as the final digest.
+ See FIPS PUB 180-4 section 6.7. */
+
+ MHDX_PUT_64BIT_BE((digest + 0 * SHA512_256_BYTES_IN_WORD), ctx->H[0]);
+ MHDX_PUT_64BIT_BE((digest + 1 * SHA512_256_BYTES_IN_WORD), ctx->H[1]);
+ MHDX_PUT_64BIT_BE((digest + 2 * SHA512_256_BYTES_IN_WORD), ctx->H[2]);
+ MHDX_PUT_64BIT_BE((digest + 3 * SHA512_256_BYTES_IN_WORD), ctx->H[3]);
+
+ /* Erase potentially sensitive data. */
+ memset(ctx, 0, sizeof(struct mhdx_sha512_256ctx));
+
+ return CURLE_OK;
+}
+
+/* Map to the local implementation */
+#define Curl_sha512_256_init MHDx_sha512_256_init
+#define Curl_sha512_256_update MHDx_sha512_256_update
+#define Curl_sha512_256_finish MHDx_sha512_256_finish
+
+#endif /* Local SHA-512/256 code */
+
+
+/**
+ * Compute SHA-512/256 hash for the given data in one function call
+ * @param[out] output the pointer to put the hash
+ * @param[in] input the pointer to the data to process
+ * @param input_size the size of the data pointed by @a input
+ * @return always #CURLE_OK
+ */
+CURLcode
+Curl_sha512_256it(unsigned char *output, const unsigned char *input,
+ size_t input_size)
+{
+ Curl_sha512_256_ctx ctx;
+ CURLcode res;
+
+ res = Curl_sha512_256_init(&ctx);
+ if(res != CURLE_OK)
+ return res;
+
+ res = Curl_sha512_256_update(&ctx, (const void *) input, input_size);
+
+ if(res != CURLE_OK) {
+ (void) Curl_sha512_256_finish(output, &ctx);
+ return res;
+ }
+
+ return Curl_sha512_256_finish(output, &ctx);
+}
+
+/* Wrapper function, takes 'unsigned int' as length type, returns void */
+static void
+Curl_sha512_256_update_i(void *context,
+ const unsigned char *data,
+ unsigned int length)
+{
+ /* Hypothetically the function may fail, but assume it does not */
+ (void) Curl_sha512_256_update(context, data, length);
+}
+
+/* Wrapper function, returns void */
+static void
+Curl_sha512_256_finish_v(unsigned char *result,
+ void *context)
+{
+ /* Hypothetically the function may fail, but assume it does not */
+ (void) Curl_sha512_256_finish(result, context);
+}
+
+/* Wrapper function, takes 'unsigned int' as length type, returns void */
+
+const struct HMAC_params Curl_HMAC_SHA512_256[] = {
+ {
+ /* Initialize context procedure. */
+ Curl_sha512_256_init,
+ /* Update context with data. */
+ Curl_sha512_256_update_i,
+ /* Get final result procedure. */
+ Curl_sha512_256_finish_v,
+ /* Context structure size. */
+ sizeof(Curl_sha512_256_ctx),
+ /* Maximum key length (bytes). */
+ SHA512_256_BLOCK_SIZE,
+ /* Result length (bytes). */
+ SHA512_256_DIGEST_SIZE
+ }
+};
+
+#endif /* !CURL_DISABLE_DIGEST_AUTH && !CURL_DISABLE_SHA512_256 */
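
A short sketch of how the one-shot helper defined above can be exercised; this assumes the internal header is on the include path and uses the published FIPS test vector for "abc" as a sanity check:

    #include <stdio.h>
    #include <string.h>
    #include "curl_sha512_256.h"   /* internal header; provides the prototype */

    int main(void)
    {
      const char *msg = "abc";
      unsigned char digest[SHA512_256_DIGEST_LENGTH];   /* 32 bytes */
      size_t i;

      if(Curl_sha512_256it(digest, (const unsigned char *)msg, strlen(msg)))
        return 1;                  /* a non-zero CURLcode means failure */

      for(i = 0; i < sizeof(digest); i++)
        printf("%02x", digest[i]);
      printf("\n");
      /* expected (FIPS test vector for "abc"):
         53048e2681941ef99b2e29b76b4c7dabe4c2d0c634fc6d46e0e2f13107e7af23 */
      return 0;
    }
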
diff --git a/libs/libcurl/src/curl_sha512_256.h b/libs/libcurl/src/curl_sha512_256.h new file mode 100644 index 0000000000..3acdc90a01 --- /dev/null +++ b/libs/libcurl/src/curl_sha512_256.h @@ -0,0 +1,44 @@ +#ifndef HEADER_CURL_SHA512_256_H
+#define HEADER_CURL_SHA512_256_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Evgeny Grin (Karlson2k), <k2k@narod.ru>.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#if !defined(CURL_DISABLE_DIGEST_AUTH) && !defined(CURL_DISABLE_SHA512_256)
+
+#include <curl/curl.h>
+#include "curl_hmac.h"
+
+#define CURL_HAVE_SHA512_256
+
+extern const struct HMAC_params Curl_HMAC_SHA512_256[1];
+
+#define SHA512_256_DIGEST_LENGTH 32
+
+CURLcode
+Curl_sha512_256it(unsigned char *output, const unsigned char *input,
+ size_t input_size);
+
+#endif /* !CURL_DISABLE_DIGEST_AUTH && !CURL_DISABLE_SHA512_256 */
+
+#endif /* HEADER_CURL_SHA512_256_H */
diff --git a/libs/libcurl/src/curl_trc.c b/libs/libcurl/src/curl_trc.c index 4e9b099f16..9dc9fddea8 100644 --- a/libs/libcurl/src/curl_trc.c +++ b/libs/libcurl/src/curl_trc.c @@ -36,6 +36,7 @@ #include "cf-socket.h"
#include "connect.h"
+#include "doh.h"
#include "http2.h"
#include "http_proxy.h"
#include "cf-h1-proxy.h"
@@ -113,12 +114,14 @@ void Curl_failf(struct Curl_easy *data, const char *fmt, ...) void Curl_infof(struct Curl_easy *data, const char *fmt, ...)
{
DEBUGASSERT(!strchr(fmt, '\n'));
- if(data && data->set.verbose) {
+ if(Curl_trc_is_verbose(data)) {
va_list ap;
- int len;
+ int len = 0;
char buffer[MAXINFO + 2];
+ if(data->state.feat)
+ len = msnprintf(buffer, MAXINFO, "[%s] ", data->state.feat->name);
va_start(ap, fmt);
- len = mvsnprintf(buffer, MAXINFO, fmt, ap);
+ len += mvsnprintf(buffer + len, MAXINFO - len, fmt, ap);
va_end(ap);
buffer[len++] = '\n';
buffer[len] = '\0';
@@ -132,9 +135,16 @@ void Curl_trc_cf_infof(struct Curl_easy *data, struct Curl_cfilter *cf, DEBUGASSERT(cf);
if(Curl_trc_cf_is_verbose(cf, data)) {
va_list ap;
- int len;
+ int len = 0;
char buffer[MAXINFO + 2];
- len = msnprintf(buffer, MAXINFO, "[%s] ", cf->cft->name);
+ if(data->state.feat)
+ len += msnprintf(buffer + len, MAXINFO - len, "[%s] ",
+ data->state.feat->name);
+ if(cf->sockindex)
+ len += msnprintf(buffer + len, MAXINFO - len, "[%s-%d] ",
+ cf->cft->name, cf->sockindex);
+ else
+ len += msnprintf(buffer + len, MAXINFO - len, "[%s] ", cf->cft->name);
va_start(ap, fmt);
len += mvsnprintf(buffer + len, MAXINFO - len, fmt, ap);
va_end(ap);
@@ -144,6 +154,12 @@ void Curl_trc_cf_infof(struct Curl_easy *data, struct Curl_cfilter *cf, }
}
+static struct curl_trc_feat *trc_feats[] = {
+#ifndef CURL_DISABLE_DOH
+ &Curl_doh_trc,
+#endif
+ NULL,
+};
static struct Curl_cftype *cf_types[] = {
&Curl_cft_tcp,
@@ -215,6 +231,15 @@ CURLcode Curl_trc_opt(const char *config) break;
}
}
+ for(i = 0; trc_feats[i]; ++i) {
+ if(strcasecompare(token, "all")) {
+ trc_feats[i]->log_level = lvl;
+ }
+ else if(strcasecompare(token, trc_feats[i]->name)) {
+ trc_feats[i]->log_level = lvl;
+ break;
+ }
+ }
token = strtok_r(NULL, ", ", &tok_buf);
}
free(tmp);
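
The feature table above hooks the new per-feature log levels into the same configuration string already used for connection filters. From an application, the public curl_global_trace() entry point feeds this parser, so scoped tracing looks roughly like this (the "doh" token matching the feature name is an assumption based on the table above):

    #include <curl/curl.h>

    int main(void)
    {
      curl_global_init(CURL_GLOBAL_DEFAULT);

      /* raise the log level for the DoH feature and the TCP filter only;
       * "all" would enable every filter and feature. Output still requires
       * CURLOPT_VERBOSE on the transfer handles. */
      curl_global_trace("doh,tcp");

      /* ... create easy/multi handles and run transfers ... */

      curl_global_cleanup();
      return 0;
    }
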
diff --git a/libs/libcurl/src/curl_trc.h b/libs/libcurl/src/curl_trc.h index 3da00690e2..3949c5027f 100644 --- a/libs/libcurl/src/curl_trc.h +++ b/libs/libcurl/src/curl_trc.h @@ -86,10 +86,21 @@ void Curl_failf(struct Curl_easy *data, #ifndef CURL_DISABLE_VERBOSE_STRINGS
/* informational messages enabled */
-#define Curl_trc_is_verbose(data) ((data) && (data)->set.verbose)
+struct curl_trc_feat {
+ const char *name;
+ int log_level;
+};
+
+#define Curl_trc_is_verbose(data) \
+ ((data) && (data)->set.verbose && \
+ (!(data)->state.feat || \
+ ((data)->state.feat->log_level >= CURL_LOG_LVL_INFO)))
#define Curl_trc_cf_is_verbose(cf, data) \
- ((data) && (data)->set.verbose && \
- (cf) && (cf)->cft->log_level >= CURL_LOG_LVL_INFO)
+ (Curl_trc_is_verbose(data) && \
+ (cf) && (cf)->cft->log_level >= CURL_LOG_LVL_INFO)
+#define Curl_trc_ft_is_verbose(data, ft) \
+ (Curl_trc_is_verbose(data) && \
+ (ft)->log_level >= CURL_LOG_LVL_INFO)
/**
* Output an informational message when transfer's verbose logging is enabled.
@@ -109,6 +120,7 @@ void Curl_trc_cf_infof(struct Curl_easy *data, struct Curl_cfilter *cf, #define Curl_trc_is_verbose(d) ((void)(d), FALSE)
#define Curl_trc_cf_is_verbose(x,y) ((void)(x), (void)(y), FALSE)
+#define Curl_trc_ft_is_verbose(x,y) ((void)(x), (void)(y), FALSE)
static void Curl_infof(struct Curl_easy *data, const char *fmt, ...)
{
diff --git a/libs/libcurl/src/cw-out.c b/libs/libcurl/src/cw-out.c new file mode 100644 index 0000000000..a2f6904051 --- /dev/null +++ b/libs/libcurl/src/cw-out.c @@ -0,0 +1,437 @@ +/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#include <curl/curl.h>
+
+#include "urldata.h"
+#include "cfilters.h"
+#include "headers.h"
+#include "multiif.h"
+#include "sendf.h"
+#include "cw-out.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+
+/**
+ * OVERALL DESIGN of this client writer
+ *
+ * The 'cw-out' writer is supposed to be the last writer in a transfer's
+ * stack. It is always added when that stack is initialized. Its purpose
+ * is to pass BODY and HEADER bytes to the client-installed callback
+ * functions.
+ *
+ * These callback may return `CURL_WRITEFUNC_PAUSE` to indicate that the
+ * data had not been written and the whole transfer should stop receiving
+ * new data. Or at least, stop calling the functions. When the transfer
+ * is "unpaused" by the client, the previous data shall be passed as
+ * if nothing happened.
+ *
+ * The `cw-out` writer therefore manages buffers for bytes that could
+ * not be written. Data that was already in flight from the server also
+ * needs buffering on paused transfer when it arrives.
+ *
+ * In addition, the writer allows buffering of "small" body writes,
+ * so client functions are called less often. That is only enabled on a
+ * number of conditions.
+ *
+ * HEADER and BODY data may arrive in any order. For paused transfers,
+ * a list of `struct cw_out_buf` is kept for `cw_out_type` types. The
+ * list may be: [BODY]->[HEADER]->[BODY]->[HEADER]....
+ * When unpausing, this list is "played back" to the client callbacks.
+ *
+ * The amount of bytes being buffered is limited by `DYN_PAUSE_BUFFER`
+ * and when that is exceeded `CURLE_TOO_LARGE` is returned as error.
+ */
+typedef enum {
+ CW_OUT_NONE,
+ CW_OUT_BODY,
+ CW_OUT_HDS
+} cw_out_type;
+
+struct cw_out_buf {
+ struct cw_out_buf *next;
+ struct dynbuf b;
+ cw_out_type type;
+};
+
+static struct cw_out_buf *cw_out_buf_create(cw_out_type otype)
+{
+ struct cw_out_buf *cwbuf = calloc(1, sizeof(*cwbuf));
+ if(cwbuf) {
+ cwbuf->type = otype;
+ Curl_dyn_init(&cwbuf->b, DYN_PAUSE_BUFFER);
+ }
+ return cwbuf;
+}
+
+static void cw_out_buf_free(struct cw_out_buf *cwbuf)
+{
+ if(cwbuf) {
+ Curl_dyn_free(&cwbuf->b);
+ free(cwbuf);
+ }
+}
+
+struct cw_out_ctx {
+ struct Curl_cwriter super;
+ struct cw_out_buf *buf;
+};
+
+static CURLcode cw_out_write(struct Curl_easy *data,
+ struct Curl_cwriter *writer, int type,
+ const char *buf, size_t nbytes);
+static void cw_out_close(struct Curl_easy *data, struct Curl_cwriter *writer);
+static CURLcode cw_out_init(struct Curl_easy *data,
+ struct Curl_cwriter *writer);
+
+struct Curl_cwtype Curl_cwt_out = {
+ "cw-out",
+ NULL,
+ cw_out_init,
+ cw_out_write,
+ cw_out_close,
+ sizeof(struct cw_out_ctx)
+};
+
+static CURLcode cw_out_init(struct Curl_easy *data,
+ struct Curl_cwriter *writer)
+{
+ struct cw_out_ctx *ctx = writer->ctx;
+ (void)data;
+ ctx->buf = NULL;
+ return CURLE_OK;
+}
+
+static void cw_out_bufs_free(struct cw_out_ctx *ctx)
+{
+ while(ctx->buf) {
+ struct cw_out_buf *next = ctx->buf->next;
+ cw_out_buf_free(ctx->buf);
+ ctx->buf = next;
+ }
+}
+
+static size_t cw_out_bufs_len(struct cw_out_ctx *ctx)
+{
+ struct cw_out_buf *cwbuf = ctx->buf;
+ size_t len = 0;
+ while(cwbuf) {
+ len += Curl_dyn_len(&cwbuf->b);
+ cwbuf = cwbuf->next;
+ }
+ return len;
+}
+
+static void cw_out_close(struct Curl_easy *data, struct Curl_cwriter *writer)
+{
+ struct cw_out_ctx *ctx = writer->ctx;
+
+ (void)data;
+ cw_out_bufs_free(ctx);
+}
+
+/**
+ * Return the current curl_write_callback and user_data for the buf type
+ */
+static void cw_get_writefunc(struct Curl_easy *data, cw_out_type otype,
+ curl_write_callback *pwcb, void **pwcb_data,
+ size_t *pmax_write, size_t *pmin_write)
+{
+ switch(otype) {
+ case CW_OUT_BODY:
+ *pwcb = data->set.fwrite_func;
+ *pwcb_data = data->set.out;
+ *pmax_write = CURL_MAX_WRITE_SIZE;
+ /* if we ever want buffering of BODY output, we can set `min_write`
+ * the preferred size. The default should always be to pass data
+ * to the client as it comes without delay */
+ *pmin_write = 0;
+ break;
+ case CW_OUT_HDS:
+ *pwcb = data->set.fwrite_header? data->set.fwrite_header :
+ (data->set.writeheader? data->set.fwrite_func : NULL);
+ *pwcb_data = data->set.writeheader;
+ *pmax_write = 0; /* do not chunk-write headers, write them as they are */
+ *pmin_write = 0;
+ break;
+ default:
+ *pwcb = NULL;
+ *pwcb_data = NULL;
+ *pmax_write = CURL_MAX_WRITE_SIZE;
+ *pmin_write = 0;
+ }
+}
+
+static CURLcode cw_out_ptr_flush(struct cw_out_ctx *ctx,
+ struct Curl_easy *data,
+ cw_out_type otype,
+ bool flush_all,
+ const char *buf, size_t blen,
+ size_t *pconsumed)
+{
+ curl_write_callback wcb;
+ void *wcb_data;
+ size_t max_write, min_write;
+ size_t wlen, nwritten;
+
+ (void)ctx;
+ /* write callbacks may get NULLed by the client between calls. */
+ cw_get_writefunc(data, otype, &wcb, &wcb_data, &max_write, &min_write);
+ if(!wcb) {
+ *pconsumed = blen;
+ return CURLE_OK;
+ }
+
+ *pconsumed = 0;
+ while(blen && !(data->req.keepon & KEEP_RECV_PAUSE)) {
+ if(!flush_all && blen < min_write)
+ break;
+ wlen = max_write? CURLMIN(blen, max_write) : blen;
+ Curl_set_in_callback(data, TRUE);
+ nwritten = wcb((char *)buf, 1, wlen, wcb_data);
+ Curl_set_in_callback(data, FALSE);
+ if(CURL_WRITEFUNC_PAUSE == nwritten) {
+ if(data->conn && data->conn->handler->flags & PROTOPT_NONETWORK) {
+ /* Protocols that work without network cannot be paused. This is
+ actually only FILE:// just now, and it can't pause since the
+ transfer isn't done using the "normal" procedure. */
+ failf(data, "Write callback asked for PAUSE when not supported");
+ return CURLE_WRITE_ERROR;
+ }
+ /* mark the connection as RECV paused */
+ data->req.keepon |= KEEP_RECV_PAUSE;
+ break;
+ }
+ if(nwritten != wlen) {
+ failf(data, "Failure writing output to destination, "
+ "passed %zu returned %zd", wlen, nwritten);
+ return CURLE_WRITE_ERROR;
+ }
+ *pconsumed += nwritten;
+ blen -= nwritten;
+ buf += nwritten;
+ }
+ return CURLE_OK;
+}
+
+static CURLcode cw_out_buf_flush(struct cw_out_ctx *ctx,
+ struct Curl_easy *data,
+ struct cw_out_buf *cwbuf,
+ bool flush_all)
+{
+ CURLcode result = CURLE_OK;
+
+ if(Curl_dyn_len(&cwbuf->b)) {
+ size_t consumed;
+
+ result = cw_out_ptr_flush(ctx, data, cwbuf->type, flush_all,
+ Curl_dyn_ptr(&cwbuf->b),
+ Curl_dyn_len(&cwbuf->b),
+ &consumed);
+ if(result)
+ return result;
+
+ if(consumed) {
+ if(consumed == Curl_dyn_len(&cwbuf->b)) {
+ Curl_dyn_free(&cwbuf->b);
+ }
+ else {
+ DEBUGASSERT(consumed < Curl_dyn_len(&cwbuf->b));
+ result = Curl_dyn_tail(&cwbuf->b, Curl_dyn_len(&cwbuf->b) - consumed);
+ if(result)
+ return result;
+ }
+ }
+ }
+ return result;
+}
+
+static CURLcode cw_out_flush_chain(struct cw_out_ctx *ctx,
+ struct Curl_easy *data,
+ struct cw_out_buf **pcwbuf,
+ bool flush_all)
+{
+ struct cw_out_buf *cwbuf = *pcwbuf;
+ CURLcode result;
+
+ if(!cwbuf)
+ return CURLE_OK;
+ if(data->req.keepon & KEEP_RECV_PAUSE)
+ return CURLE_OK;
+
+ /* write the end of the chain until it blocks or gets empty */
+ while(cwbuf->next) {
+ struct cw_out_buf **plast = &cwbuf->next;
+ while((*plast)->next)
+ plast = &(*plast)->next;
+ result = cw_out_flush_chain(ctx, data, plast, flush_all);
+ if(result)
+ return result;
+ if(*plast) {
+ /* could not write last, paused again? */
+ DEBUGASSERT(data->req.keepon & KEEP_RECV_PAUSE);
+ return CURLE_OK;
+ }
+ }
+
+ result = cw_out_buf_flush(ctx, data, cwbuf, flush_all);
+ if(result)
+ return result;
+ if(!Curl_dyn_len(&cwbuf->b)) {
+ cw_out_buf_free(cwbuf);
+ *pcwbuf = NULL;
+ }
+ return CURLE_OK;
+}
+
+static CURLcode cw_out_append(struct cw_out_ctx *ctx,
+ cw_out_type otype,
+ const char *buf, size_t blen)
+{
+ if(cw_out_bufs_len(ctx) + blen > DYN_PAUSE_BUFFER)
+ return CURLE_TOO_LARGE;
+
+ /* if we do not have a buffer, or it is of another type, make a new one.
+ * And for CW_OUT_HDS always make a new one, so we "replay" headers
+ * exactly as they came in */
+ if(!ctx->buf || (ctx->buf->type != otype) || (otype == CW_OUT_HDS)) {
+ struct cw_out_buf *cwbuf = cw_out_buf_create(otype);
+ if(!cwbuf)
+ return CURLE_OUT_OF_MEMORY;
+ cwbuf->next = ctx->buf;
+ ctx->buf = cwbuf;
+ }
+ DEBUGASSERT(ctx->buf && (ctx->buf->type == otype));
+ return Curl_dyn_addn(&ctx->buf->b, buf, blen);
+}
+
+static CURLcode cw_out_do_write(struct cw_out_ctx *ctx,
+ struct Curl_easy *data,
+ cw_out_type otype,
+ bool flush_all,
+ const char *buf, size_t blen)
+{
+ CURLcode result;
+
+ /* if we have buffered data and it is a different type than what
+ * we are writing now, try to flush all */
+ if(ctx->buf && ctx->buf->type != otype) {
+ result = cw_out_flush_chain(ctx, data, &ctx->buf, TRUE);
+ if(result)
+ return result;
+ }
+
+ if(ctx->buf) {
+ /* still have buffered data, append and flush */
+ result = cw_out_append(ctx, otype, buf, blen);
+ if(result)
+ return result;
+ result = cw_out_flush_chain(ctx, data, &ctx->buf, flush_all);
+ if(result)
+ return result;
+ }
+ else {
+ /* nothing buffered, try direct write */
+ size_t consumed;
+ result = cw_out_ptr_flush(ctx, data, otype, flush_all,
+ buf, blen, &consumed);
+ if(result)
+ return result;
+ if(consumed < blen) {
+ /* did not write all, append the rest */
+ result = cw_out_append(ctx, otype, buf + consumed, blen - consumed);
+ if(result)
+ return result;
+ }
+ }
+ return CURLE_OK;
+}
+
+static CURLcode cw_out_write(struct Curl_easy *data,
+ struct Curl_cwriter *writer, int type,
+ const char *buf, size_t blen)
+{
+ struct cw_out_ctx *ctx = writer->ctx;
+ CURLcode result;
+ bool flush_all;
+
+ flush_all = (type & CLIENTWRITE_EOS)? TRUE:FALSE;
+ if((type & CLIENTWRITE_BODY) ||
+ ((type & CLIENTWRITE_HEADER) && data->set.include_header)) {
+ result = cw_out_do_write(ctx, data, CW_OUT_BODY, flush_all, buf, blen);
+ if(result)
+ return result;
+ }
+
+ if(type & (CLIENTWRITE_HEADER|CLIENTWRITE_INFO)) {
+ result = cw_out_do_write(ctx, data, CW_OUT_HDS, flush_all, buf, blen);
+ if(result)
+ return result;
+ }
+
+ return CURLE_OK;
+}
+
+bool Curl_cw_out_is_paused(struct Curl_easy *data)
+{
+ struct Curl_cwriter *cw_out;
+ struct cw_out_ctx *ctx;
+
+ cw_out = Curl_cwriter_get_by_type(data, &Curl_cwt_out);
+ if(!cw_out)
+ return FALSE;
+
+ ctx = (struct cw_out_ctx *)cw_out;
+ return cw_out_bufs_len(ctx) > 0;
+}
+
+static CURLcode cw_out_flush(struct Curl_easy *data, bool flush_all)
+{
+ struct Curl_cwriter *cw_out;
+ CURLcode result = CURLE_OK;
+
+ cw_out = Curl_cwriter_get_by_type(data, &Curl_cwt_out);
+ if(cw_out) {
+ struct cw_out_ctx *ctx = (struct cw_out_ctx *)cw_out;
+
+ result = cw_out_flush_chain(ctx, data, &ctx->buf, flush_all);
+ }
+ return result;
+}
+
+CURLcode Curl_cw_out_flush(struct Curl_easy *data)
+{
+ return cw_out_flush(data, FALSE);
+}
+
+CURLcode Curl_cw_out_done(struct Curl_easy *data)
+{
+ return cw_out_flush(data, TRUE);
+}
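
The buffering implemented above is what backs the long-standing pause pattern on the public API side: a write callback may return CURL_WRITEFUNC_PAUSE and anything already received is parked by cw-out until the transfer is unpaused. A hedged application-side sketch:

    #include <curl/curl.h>

    /* Return CURL_WRITEFUNC_PAUSE when the application cannot take data right
     * now; libcurl (via cw-out) buffers what it has already received. */
    static size_t write_cb(char *ptr, size_t size, size_t nmemb, void *userp)
    {
      int *want_pause = (int *)userp;
      (void)ptr;
      if(*want_pause)
        return CURL_WRITEFUNC_PAUSE;
      /* ... consume size * nmemb bytes at ptr ... */
      return size * nmemb;
    }

    /* Later, resume delivery; buffered BODY/HEADER chunks are replayed to the
     * callbacks in their original order before new network data is read. */
    static void resume_transfer(CURL *easy)
    {
      curl_easy_pause(easy, CURLPAUSE_CONT);
    }
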
diff --git a/libs/libcurl/src/cw-out.h b/libs/libcurl/src/cw-out.h new file mode 100644 index 0000000000..ecdd8d4ce6 --- /dev/null +++ b/libs/libcurl/src/cw-out.h @@ -0,0 +1,53 @@ +#ifndef HEADER_CURL_CW_OUT_H
+#define HEADER_CURL_CW_OUT_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#include "sendf.h"
+
+/**
+ * The client writer type "cw-out" that does the actual writing to
+ * the client callbacks. Intended to be the last installed in the
+ * client writer stack of a transfer.
+ */
+extern struct Curl_cwtype Curl_cwt_out;
+
+/**
+ * Return TRUE iff the 'cw-out' client writer has paused data.
+ */
+bool Curl_cw_out_is_paused(struct Curl_easy *data);
+
+/**
+ * Flush any buffered data to the client, chunk collation still applies.
+ */
+CURLcode Curl_cw_out_flush(struct Curl_easy *data);
+
+/**
+ * Mark EndOfStream reached and flush ALL data to the client.
+ */
+CURLcode Curl_cw_out_done(struct Curl_easy *data);
+
+#endif /* HEADER_CURL_CW_OUT_H */
diff --git a/libs/libcurl/src/dict.c b/libs/libcurl/src/dict.c index 71549ccbb5..75649398f8 100644 --- a/libs/libcurl/src/dict.c +++ b/libs/libcurl/src/dict.c @@ -122,13 +122,12 @@ static char *unescape_word(const char *input) }
/* sendf() sends formatted data to the server */
-static CURLcode sendf(curl_socket_t sockfd, struct Curl_easy *data,
- const char *fmt, ...) CURL_PRINTF(3, 4);
+static CURLcode sendf(struct Curl_easy *data,
+ const char *fmt, ...) CURL_PRINTF(2, 3);
-static CURLcode sendf(curl_socket_t sockfd, struct Curl_easy *data,
- const char *fmt, ...)
+static CURLcode sendf(struct Curl_easy *data, const char *fmt, ...)
{
- ssize_t bytes_written;
+ size_t bytes_written;
size_t write_len;
CURLcode result = CURLE_OK;
char *s;
@@ -146,7 +145,7 @@ static CURLcode sendf(curl_socket_t sockfd, struct Curl_easy *data, for(;;) {
/* Write the buffer to the socket */
- result = Curl_write(data, sockfd, sptr, write_len, &bytes_written);
+ result = Curl_xfer_send(data, sptr, write_len, &bytes_written);
if(result)
break;
@@ -178,8 +177,6 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) char *nthdef = NULL; /* This is not part of the protocol, but required
by RFC 2229 */
CURLcode result;
- struct connectdata *conn = data->conn;
- curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
char *path;
@@ -228,7 +225,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) goto error;
}
- result = sendf(sockfd, data,
+ result = sendf(data,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\r\n"
"MATCH "
"%s " /* database */
@@ -243,7 +240,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) failf(data, "Failed sending DICT request");
goto error;
}
- Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, -1); /* no upload */
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1); /* no upload */
}
else if(strncasecompare(path, DICT_DEFINE, sizeof(DICT_DEFINE)-1) ||
strncasecompare(path, DICT_DEFINE2, sizeof(DICT_DEFINE2)-1) ||
@@ -276,7 +273,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) goto error;
}
- result = sendf(sockfd, data,
+ result = sendf(data,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\r\n"
"DEFINE "
"%s " /* database */
@@ -289,7 +286,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) failf(data, "Failed sending DICT request");
goto error;
}
- Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
}
else {
@@ -302,7 +299,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) if(ppath[i] == ':')
ppath[i] = ' ';
}
- result = sendf(sockfd, data,
+ result = sendf(data,
"CLIENT " LIBCURL_NAME " " LIBCURL_VERSION "\r\n"
"%s\r\n"
"QUIT\r\n", ppath);
@@ -311,7 +308,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) goto error;
}
- Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
}
}
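
Editor's note: sendf() above formats the DICT command once and then loops on Curl_xfer_send() until the whole request is on the wire. The same idiom with plain POSIX calls looks roughly like this; sendf_all and the 512-byte buffer are assumptions for the sketch, and a real caller would also handle EAGAIN/EINTR.

    #include <stdarg.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Format a request, then keep sending until all of it has been written,
     * since send() on a non-blocking socket may accept only part of it. */
    static int sendf_all(int fd, const char *fmt, ...)
    {
      char buf[512];
      va_list ap;
      int len;
      size_t off = 0;

      va_start(ap, fmt);
      len = vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      if(len < 0 || (size_t)len >= sizeof(buf))
        return -1;                     /* formatting error or truncation */

      while(off < (size_t)len) {
        ssize_t n = send(fd, buf + off, (size_t)len - off, 0);
        if(n < 0)
          return -1;                   /* a real loop would retry on EAGAIN */
        off += (size_t)n;
      }
      return 0;
    }

    /* usage: sendf_all(fd, "CLIENT %s\r\nQUIT\r\n", "demo"); */
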
diff --git a/libs/libcurl/src/doh.c b/libs/libcurl/src/doh.c index 6bc2c43b10..f63710b09d 100644 --- a/libs/libcurl/src/doh.c +++ b/libs/libcurl/src/doh.c @@ -69,7 +69,12 @@ static const char *doh_strerror(DOHcode code) return errors[code];
return "bad error code";
}
-#endif
+
+struct curl_trc_feat Curl_doh_trc = {
+ "DoH",
+ CURL_LOG_LVL_NONE,
+};
+#endif /* !CURL_DISABLE_VERBOSE_STRINGS */
/* @unittest 1655
*/
@@ -189,9 +194,9 @@ static int doh_done(struct Curl_easy *doh, CURLcode result) struct dohdata *dohp = data->req.doh;
/* so one of the DoH request done for the 'data' transfer is now complete! */
dohp->pending--;
- infof(data, "a DoH request is completed, %u to go", dohp->pending);
+ infof(doh, "a DoH request is completed, %u to go", dohp->pending);
if(result)
- infof(data, "DoH request %s", curl_easy_strerror(result));
+ infof(doh, "DoH request %s", curl_easy_strerror(result));
if(!dohp->pending) {
/* DoH completed */
@@ -242,6 +247,9 @@ static CURLcode dohprobe(struct Curl_easy *data, the gcc typecheck helpers */
struct dynbuf *resp = &p->serverdoh;
doh->state.internal = true;
+#ifndef CURL_DISABLE_VERBOSE_STRINGS
+ doh->state.feat = &Curl_doh_trc;
+#endif
ERROR_CHECK_SETOPT(CURLOPT_URL, url);
ERROR_CHECK_SETOPT(CURLOPT_DEFAULT_PROTOCOL, "https");
ERROR_CHECK_SETOPT(CURLOPT_WRITEFUNCTION, doh_write_cb);
@@ -264,7 +272,7 @@ static CURLcode dohprobe(struct Curl_easy *data, ERROR_CHECK_SETOPT(CURLOPT_SHARE, data->share);
if(data->set.err && data->set.err != stderr)
ERROR_CHECK_SETOPT(CURLOPT_STDERR, data->set.err);
- if(data->set.verbose)
+ if(Curl_trc_ft_is_verbose(data, &Curl_doh_trc))
ERROR_CHECK_SETOPT(CURLOPT_VERBOSE, 1L);
if(data->set.no_signal)
ERROR_CHECK_SETOPT(CURLOPT_NOSIGNAL, 1L);
@@ -741,11 +749,11 @@ static void showdoh(struct Curl_easy *data, const struct dohentry *d)
{
int i;
- infof(data, "TTL: %u seconds", d->ttl);
+ infof(data, "[DoH] TTL: %u seconds", d->ttl);
for(i = 0; i < d->numaddr; i++) {
const struct dohaddr *a = &d->addr[i];
if(a->type == DNS_TYPE_A) {
- infof(data, "DoH A: %u.%u.%u.%u",
+ infof(data, "[DoH] A: %u.%u.%u.%u",
a->ip.v4[0], a->ip.v4[1],
a->ip.v4[2], a->ip.v4[3]);
}
@@ -754,9 +762,9 @@ static void showdoh(struct Curl_easy *data, char buffer[128];
char *ptr;
size_t len;
- msnprintf(buffer, 128, "DoH AAAA: ");
- ptr = &buffer[10];
- len = 118;
+ len = msnprintf(buffer, 128, "[DoH] AAAA: ");
+ ptr = &buffer[len];
+ len = sizeof(buffer) - len;
for(j = 0; j < 16; j += 2) {
size_t l;
msnprintf(ptr, len, "%s%02x%02x", j?":":"", d->addr[i].ip.v6[j],
@@ -950,8 +958,11 @@ CURLcode Curl_doh_is_resolved(struct Curl_easy *data, struct Curl_dns_entry *dns;
struct Curl_addrinfo *ai;
- infof(data, "DoH Host name: %s", dohp->host);
- showdoh(data, &de);
+
+ if(Curl_trc_ft_is_verbose(data, &Curl_doh_trc)) {
+ infof(data, "[DoH] Host name: %s", dohp->host);
+ showdoh(data, &de);
+ }
result = doh2ai(&de, dohp->host, dohp->port, &ai);
if(result) {
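
Editor's note: the AAAA logging change above stops hard-coding the prefix length (the old offset 10 no longer matched once the prefix became "[DoH] AAAA: ") and instead advances by the msnprintf() return value. The same pattern with standard snprintf(), using a made-up address, could look like:

    #include <stdio.h>

    int main(void)
    {
      /* example 16-byte IPv6 address (2001:db8::1), two bytes per group */
      const unsigned char v6[16] = {
        0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01
      };
      char line[128];
      char *ptr;
      size_t used, left;
      int j;

      /* let snprintf() report how long the prefix is instead of keeping a
         hand-counted offset that silently breaks when the text changes */
      used = (size_t)snprintf(line, sizeof(line), "[DoH] AAAA: ");
      ptr = &line[used];
      left = sizeof(line) - used;

      for(j = 0; j < 16; j += 2) {
        int n = snprintf(ptr, left, "%s%02x%02x", j ? ":" : "",
                         v6[j], v6[j + 1]);
        ptr += n;
        left -= (size_t)n;
      }
      puts(line);  /* [DoH] AAAA: 2001:0db8:0000:0000:0000:0000:0000:0001 */
      return 0;
    }
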
diff --git a/libs/libcurl/src/doh.h b/libs/libcurl/src/doh.h index d50f60d458..d83e7b9a4a 100644 --- a/libs/libcurl/src/doh.h +++ b/libs/libcurl/src/doh.h @@ -120,6 +120,8 @@ void de_init(struct dohentry *d); void de_cleanup(struct dohentry *d);
#endif
+extern struct curl_trc_feat Curl_doh_trc;
+
#else /* if DoH is disabled */
#define Curl_doh(a,b,c,d) NULL
#define Curl_doh_is_resolved(x,y) CURLE_COULDNT_RESOLVE_HOST
diff --git a/libs/libcurl/src/easy.c b/libs/libcurl/src/easy.c index 5aa564c808..ff2cb6bd06 100644 --- a/libs/libcurl/src/easy.c +++ b/libs/libcurl/src/easy.c @@ -58,6 +58,7 @@ #include "multiif.h"
#include "select.h"
#include "cfilters.h"
+#include "cw-out.h"
#include "sendf.h" /* for failf function prototype */
#include "connect.h" /* for Curl_getconnectinfo */
#include "slist.h"
@@ -741,7 +742,6 @@ static CURLcode easy_perform(struct Curl_easy *data, bool events) multi = Curl_multi_handle(1, 3, 7);
if(!multi)
return CURLE_OUT_OF_MEMORY;
- data->multi_easy = multi;
}
if(multi->in_callback)
@@ -750,15 +750,18 @@ static CURLcode easy_perform(struct Curl_easy *data, bool events) /* Copy the MAXCONNECTS option to the multi handle */
curl_multi_setopt(multi, CURLMOPT_MAXCONNECTS, (long)data->set.maxconnects);
+ data->multi_easy = NULL; /* pretend it does not exist */
mcode = curl_multi_add_handle(multi, data);
if(mcode) {
curl_multi_cleanup(multi);
- data->multi_easy = NULL;
if(mcode == CURLM_OUT_OF_MEMORY)
return CURLE_OUT_OF_MEMORY;
return CURLE_FAILED_INIT;
}
+ /* assign this after curl_multi_add_handle() */
+ data->multi_easy = multi;
+
sigpipe_ignore(data, &pipe_st);
/* run the transfer */
@@ -1021,7 +1024,6 @@ fail: #ifndef CURL_DISABLE_COOKIES
free(outcurl->cookies);
#endif
- free(outcurl->state.buffer);
Curl_dyn_free(&outcurl->state.headerb);
Curl_altsvc_cleanup(&outcurl->asi);
Curl_hsts_cleanup(&outcurl->hsts);
@@ -1038,7 +1040,7 @@ fail: */
void curl_easy_reset(struct Curl_easy *data)
{
- Curl_free_request_state(data);
+ Curl_req_hard_reset(&data->req, data);
/* zero out UserDefined data: */
Curl_freeset(data);
@@ -1108,9 +1110,10 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action) /* Unpause parts in active mime tree. */
if((k->keepon & ~newstate & KEEP_SEND_PAUSE) &&
(data->mstate == MSTATE_PERFORMING ||
- data->mstate == MSTATE_RATELIMITING) &&
- data->state.fread_func == (curl_read_callback) Curl_mime_read) {
- Curl_mime_unpause(data->state.in);
+ data->mstate == MSTATE_RATELIMITING)) {
+ result = Curl_creader_unpause(data);
+ if(result)
+ return result;
}
/* put it back in the keepon */
@@ -1118,21 +1121,11 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action) if(!(newstate & KEEP_RECV_PAUSE)) {
Curl_conn_ev_data_pause(data, FALSE);
- result = Curl_client_unpause(data);
+ result = Curl_cw_out_flush(data);
if(result)
return result;
}
-#ifdef USE_HYPER
- if(!(newstate & KEEP_SEND_PAUSE)) {
- /* need to wake the send body waker */
- if(data->hyp.send_body_waker) {
- hyper_waker_wake(data->hyp.send_body_waker);
- data->hyp.send_body_waker = NULL;
- }
- }
-#endif
-
/* if there's no error and we're not pausing both directions, we want
to have this handle checked soon */
if((newstate & (KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)) !=
@@ -1142,7 +1135,7 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action) /* reset the too-slow time keeper */
data->state.keeps_speed.tv_sec = 0;
- if(!data->state.tempcount)
+ if(!Curl_cw_out_is_paused(data))
/* if not pausing again, force a recv/send check of this connection as
the data might've been read off the socket already */
data->state.select_bits = CURL_CSELECT_IN | CURL_CSELECT_OUT;
@@ -1166,9 +1159,11 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action) }
-static CURLcode easy_connection(struct Curl_easy *data, curl_socket_t *sfd,
+static CURLcode easy_connection(struct Curl_easy *data,
struct connectdata **connp)
{
+ curl_socket_t sfd;
+
if(!data)
return CURLE_BAD_FUNCTION_ARGUMENT;
@@ -1178,9 +1173,9 @@ static CURLcode easy_connection(struct Curl_easy *data, curl_socket_t *sfd, return CURLE_UNSUPPORTED_PROTOCOL;
}
- *sfd = Curl_getconnectinfo(data, connp);
+ sfd = Curl_getconnectinfo(data, connp);
- if(*sfd == CURL_SOCKET_BAD) {
+ if(sfd == CURL_SOCKET_BAD) {
failf(data, "Failed to get recent socket");
return CURLE_UNSUPPORTED_PROTOCOL;
}
@@ -1196,7 +1191,6 @@ static CURLcode easy_connection(struct Curl_easy *data, curl_socket_t *sfd, CURLcode curl_easy_recv(struct Curl_easy *data, void *buffer, size_t buflen,
size_t *n)
{
- curl_socket_t sfd;
CURLcode result;
ssize_t n1;
struct connectdata *c;
@@ -1204,7 +1198,7 @@ CURLcode curl_easy_recv(struct Curl_easy *data, void *buffer, size_t buflen, if(Curl_is_in_callback(data))
return CURLE_RECURSIVE_API_CALL;
- result = easy_connection(data, &sfd, &c);
+ result = easy_connection(data, &c);
if(result)
return result;
@@ -1214,7 +1208,7 @@ CURLcode curl_easy_recv(struct Curl_easy *data, void *buffer, size_t buflen, Curl_attach_connection(data, c);
*n = 0;
- result = Curl_read(data, sfd, buffer, buflen, &n1);
+ result = Curl_conn_recv(data, FIRSTSOCKET, buffer, buflen, &n1);
if(result)
return result;
@@ -1226,11 +1220,10 @@ CURLcode curl_easy_recv(struct Curl_easy *data, void *buffer, size_t buflen, #ifdef USE_WEBSOCKETS
CURLcode Curl_connect_only_attach(struct Curl_easy *data)
{
- curl_socket_t sfd;
CURLcode result;
struct connectdata *c = NULL;
- result = easy_connection(data, &sfd, &c);
+ result = easy_connection(data, &c);
if(result)
return result;
@@ -1249,15 +1242,14 @@ CURLcode Curl_connect_only_attach(struct Curl_easy *data) * This is the private internal version of curl_easy_send()
*/
CURLcode Curl_senddata(struct Curl_easy *data, const void *buffer,
- size_t buflen, ssize_t *n)
+ size_t buflen, size_t *n)
{
- curl_socket_t sfd;
CURLcode result;
- ssize_t n1;
struct connectdata *c = NULL;
SIGPIPE_VARIABLE(pipe_st);
- result = easy_connection(data, &sfd, &c);
+ *n = 0;
+ result = easy_connection(data, &c);
if(result)
return result;
@@ -1266,20 +1258,12 @@ CURLcode Curl_senddata(struct Curl_easy *data, const void *buffer, needs to be reattached */
Curl_attach_connection(data, c);
- *n = 0;
sigpipe_ignore(data, &pipe_st);
- result = Curl_write(data, sfd, buffer, buflen, &n1);
+ result = Curl_conn_send(data, FIRSTSOCKET, buffer, buflen, n);
sigpipe_restore(&pipe_st);
- if(n1 == -1)
+ if(result && result != CURLE_AGAIN)
return CURLE_SEND_ERROR;
-
- /* detect EAGAIN */
- if(!result && !n1)
- return CURLE_AGAIN;
-
- *n = n1;
-
return result;
}
@@ -1290,13 +1274,13 @@ CURLcode Curl_senddata(struct Curl_easy *data, const void *buffer, CURLcode curl_easy_send(struct Curl_easy *data, const void *buffer,
size_t buflen, size_t *n)
{
- ssize_t written = 0;
+ size_t written = 0;
CURLcode result;
if(Curl_is_in_callback(data))
return CURLE_RECURSIVE_API_CALL;
result = Curl_senddata(data, buffer, buflen, &written);
- *n = (size_t)written;
+ *n = written;
return result;
}
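
Editor's note: with Curl_senddata() above now mapping everything except CURLE_AGAIN to CURLE_SEND_ERROR, a caller of the public curl_easy_send() still sees the documented behavior: retry on CURLE_AGAIN, otherwise fail. A minimal usage sketch, assuming a handle already connected with CURLOPT_CONNECT_ONLY (waiting on the socket is left out):

    #include <curl/curl.h>

    /* Send a whole buffer over a CURLOPT_CONNECT_ONLY handle, retrying on
     * CURLE_AGAIN when the socket is not writable yet. */
    CURLcode send_all(CURL *curl, const char *buf, size_t len)
    {
      size_t off = 0;

      while(off < len) {
        size_t sent = 0;
        CURLcode rc = curl_easy_send(curl, buf + off, len - off, &sent);
        if(rc == CURLE_AGAIN)
          continue;        /* a real program would wait on the socket here */
        if(rc)
          return rc;
        off += sent;
      }
      return CURLE_OK;
    }

    /* usage, after curl_easy_perform() ran with CURLOPT_CONNECT_ONLY set:
     *   send_all(curl, "PING\r\n", 6);
     */
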
diff --git a/libs/libcurl/src/easygetopt.c b/libs/libcurl/src/easygetopt.c index fc59db8587..40498b1fc4 100644 --- a/libs/libcurl/src/easygetopt.c +++ b/libs/libcurl/src/easygetopt.c @@ -5,7 +5,7 @@ * | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) Daniel Stenberg, <daniel.se>, et al.
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
diff --git a/libs/libcurl/src/easyif.h b/libs/libcurl/src/easyif.h index b51ddc6693..af2c1fb151 100644 --- a/libs/libcurl/src/easyif.h +++ b/libs/libcurl/src/easyif.h @@ -28,7 +28,7 @@ * Prototypes for library-wide functions provided by easy.c
*/
CURLcode Curl_senddata(struct Curl_easy *data, const void *buffer,
- size_t buflen, ssize_t *n);
+ size_t buflen, size_t *n);
#ifdef USE_WEBSOCKETS
CURLcode Curl_connect_only_attach(struct Curl_easy *data);
diff --git a/libs/libcurl/src/easyoptions.c b/libs/libcurl/src/easyoptions.c index be6073a90c..e111406bc4 100644 --- a/libs/libcurl/src/easyoptions.c +++ b/libs/libcurl/src/easyoptions.c @@ -5,7 +5,7 @@ * | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) Daniel Stenberg, <daniel.se>, et al.
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
diff --git a/libs/libcurl/src/file.c b/libs/libcurl/src/file.c index b15b6ab408..640968a1a7 100644 --- a/libs/libcurl/src/file.c +++ b/libs/libcurl/src/file.c @@ -59,6 +59,7 @@ #include "file.h"
#include "speedcheck.h"
#include "getinfo.h"
+#include "multiif.h"
#include "transfer.h"
#include "url.h"
#include "parsedate.h" /* for the week day and month names */
@@ -290,10 +291,12 @@ static CURLcode file_upload(struct Curl_easy *data) int fd;
int mode;
CURLcode result = CURLE_OK;
- char buffer[8*1024], *uphere_save;
+ char *xfer_ulbuf;
+ size_t xfer_ulblen;
curl_off_t bytecount = 0;
struct_stat file_stat;
const char *sendbuf;
+ bool eos = FALSE;
/*
* Since FILE: doesn't do the full init, we need to provide some extra
@@ -337,15 +340,16 @@ static CURLcode file_upload(struct Curl_easy *data) data->state.resume_from = (curl_off_t)file_stat.st_size;
}
- /* Yikes! Curl_fillreadbuffer uses data->req.upload_fromhere to READ
- * client data to! Please, someone fix... */
- uphere_save = data->req.upload_fromhere;
- while(!result) {
+ result = Curl_multi_xfer_ulbuf_borrow(data, &xfer_ulbuf, &xfer_ulblen);
+ if(result)
+ goto out;
+
+ while(!result && !eos) {
size_t nread;
ssize_t nwrite;
size_t readcount;
- data->req.upload_fromhere = buffer;
- result = Curl_fillreadbuffer(data, sizeof(buffer), &readcount);
+
+ result = Curl_client_read(data, xfer_ulbuf, xfer_ulblen, &readcount, &eos);
if(result)
break;
@@ -359,16 +363,16 @@ static CURLcode file_upload(struct Curl_easy *data) if((curl_off_t)nread <= data->state.resume_from) {
data->state.resume_from -= nread;
nread = 0;
- sendbuf = buffer;
+ sendbuf = xfer_ulbuf;
}
else {
- sendbuf = buffer + data->state.resume_from;
+ sendbuf = xfer_ulbuf + data->state.resume_from;
nread -= (size_t)data->state.resume_from;
data->state.resume_from = 0;
}
}
else
- sendbuf = buffer;
+ sendbuf = xfer_ulbuf;
/* write the data to the target */
nwrite = write(fd, sendbuf, nread);
@@ -389,8 +393,9 @@ static CURLcode file_upload(struct Curl_easy *data) if(!result && Curl_pgrsUpdate(data))
result = CURLE_ABORTED_BY_CALLBACK;
+out:
close(fd);
- data->req.upload_fromhere = uphere_save;
+ Curl_multi_xfer_ulbuf_release(data, xfer_ulbuf);
return result;
}
@@ -419,6 +424,8 @@ static CURLcode file_do(struct Curl_easy *data, bool *done) bool fstated = FALSE;
int fd;
struct FILEPROTO *file;
+ char *xfer_buf;
+ size_t xfer_blen;
*done = TRUE; /* unconditionally */
@@ -541,25 +548,26 @@ static CURLcode file_do(struct Curl_easy *data, bool *done) return CURLE_BAD_DOWNLOAD_RESUME;
}
- Curl_pgrsTime(data, TIMER_STARTTRANSFER);
+ result = Curl_multi_xfer_buf_borrow(data, &xfer_buf, &xfer_blen);
+ if(result)
+ goto out;
while(!result) {
- char tmpbuf[8*1024];
ssize_t nread;
/* Don't fill a whole buffer if we want less than all data */
size_t bytestoread;
if(size_known) {
- bytestoread = (expected_size < (curl_off_t)(sizeof(tmpbuf)-1)) ?
- curlx_sotouz(expected_size) : (sizeof(tmpbuf)-1);
+ bytestoread = (expected_size < (curl_off_t)(xfer_blen-1)) ?
+ curlx_sotouz(expected_size) : (xfer_blen-1);
}
else
- bytestoread = sizeof(tmpbuf)-1;
+ bytestoread = xfer_blen-1;
- nread = read(fd, tmpbuf, bytestoread);
+ nread = read(fd, xfer_buf, bytestoread);
if(nread > 0)
- tmpbuf[nread] = 0;
+ xfer_buf[nread] = 0;
if(nread <= 0 || (size_known && (expected_size == 0)))
break;
@@ -567,18 +575,22 @@ static CURLcode file_do(struct Curl_easy *data, bool *done) if(size_known)
expected_size -= nread;
- result = Curl_client_write(data, CLIENTWRITE_BODY, tmpbuf, nread);
+ result = Curl_client_write(data, CLIENTWRITE_BODY, xfer_buf, nread);
if(result)
- return result;
+ goto out;
if(Curl_pgrsUpdate(data))
result = CURLE_ABORTED_BY_CALLBACK;
else
result = Curl_speedcheck(data, Curl_now());
+ if(result)
+ goto out;
}
if(Curl_pgrsUpdate(data))
result = CURLE_ABORTED_BY_CALLBACK;
+out:
+ Curl_multi_xfer_buf_release(data, xfer_buf);
return result;
}
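
Editor's note: file_do() and file_upload() above now borrow a shared transfer buffer from the multi handle and release it on every exit path via the new out: label, instead of keeping an 8K array on the stack. A stripped-down sketch of such a borrow/release pair is below; multi_like, buf_borrow and buf_release are invented names, not the libcurl functions.

    #include <stdlib.h>

    #define XFER_BUFSIZE (64 * 1024)

    struct multi_like {
      char *xfer_buf;     /* one lazily allocated scratch buffer, reused */
      int xfer_buf_busy;  /* borrowed by some transfer right now? */
    };

    static int buf_borrow(struct multi_like *m, char **pbuf, size_t *plen)
    {
      if(m->xfer_buf_busy)
        return -1;                     /* demo: only one borrower at a time */
      if(!m->xfer_buf) {
        m->xfer_buf = malloc(XFER_BUFSIZE);
        if(!m->xfer_buf)
          return -1;
      }
      m->xfer_buf_busy = 1;
      *pbuf = m->xfer_buf;
      *plen = XFER_BUFSIZE;
      return 0;
    }

    static void buf_release(struct multi_like *m, char *buf)
    {
      if(buf == m->xfer_buf)
        m->xfer_buf_busy = 0;          /* buffer stays allocated for reuse */
    }

    /* typical caller shape, mirroring the out:-label cleanup above */
    static int copy_loop(struct multi_like *m)
    {
      char *buf;
      size_t blen;
      int rc = buf_borrow(m, &buf, &blen);
      if(rc)
        return rc;
      /* ... read/write loop using buf/blen, setting rc on error ... */
      buf_release(m, buf);
      return rc;
    }
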
diff --git a/libs/libcurl/src/fopen.c b/libs/libcurl/src/fopen.c index 444ebd090c..4cbd03d1a6 100644 --- a/libs/libcurl/src/fopen.c +++ b/libs/libcurl/src/fopen.c @@ -129,7 +129,12 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename, }
result = CURLE_WRITE_ERROR;
+#if (defined(ANDROID) || defined(__ANDROID__)) && \
+ (defined(__i386__) || defined(__arm__))
+ fd = open(tempstore, O_WRONLY | O_CREAT | O_EXCL, (mode_t)(0600|sb.st_mode));
+#else
fd = open(tempstore, O_WRONLY | O_CREAT | O_EXCL, 0600|sb.st_mode);
+#endif
if(fd == -1)
goto fail;
diff --git a/libs/libcurl/src/ftp.c b/libs/libcurl/src/ftp.c index 03678ac20c..7ec0ed9247 100644 --- a/libs/libcurl/src/ftp.c +++ b/libs/libcurl/src/ftp.c @@ -85,6 +85,14 @@ #define INET_ADDRSTRLEN 16
#endif
+/* macro to check for a three-digit ftp status code at the start of the
+ given string */
+#define STATUSCODE(line) (ISDIGIT(line[0]) && ISDIGIT(line[1]) && \
+ ISDIGIT(line[2]))
+
+/* macro to check for the last line in an FTP server response */
+#define LASTLINE(line) (STATUSCODE(line) && (' ' == line[3]))
+
#ifdef CURL_DISABLE_VERBOSE_STRINGS
#define ftp_pasv_verbose(a,b,c,d) Curl_nop_stmt
#endif
@@ -143,7 +151,7 @@ static CURLcode wc_statemach(struct Curl_easy *data); static void wc_data_dtor(void *ptr);
static CURLcode ftp_state_retr(struct Curl_easy *data, curl_off_t filesize);
static CURLcode ftp_readresp(struct Curl_easy *data,
- curl_socket_t sockfd,
+ int sockindex,
struct pingpong *pp,
int *ftpcode,
size_t *size);
@@ -247,6 +255,98 @@ static void freedirs(struct ftp_conn *ftpc) Curl_safefree(ftpc->newhost);
}
+#ifdef CURL_DO_LINEEND_CONV
+/***********************************************************************
+ *
+ * Lineend Conversions
+ * On ASCII transfers, e.g. directory listings, we might get lines
+ * ending in '\r\n' and we prefer just '\n'.
+ * We might also get a lonely '\r' which we convert into a '\n'.
+ */
+struct ftp_cw_lc_ctx {
+ struct Curl_cwriter super;
+ bool newline_pending;
+};
+
+static CURLcode ftp_cw_lc_write(struct Curl_easy *data,
+ struct Curl_cwriter *writer, int type,
+ const char *buf, size_t blen)
+{
+ static const char nl = '\n';
+ struct ftp_cw_lc_ctx *ctx = writer->ctx;
+
+ if(!(type & CLIENTWRITE_BODY) ||
+ data->conn->proto.ftpc.transfertype != 'A')
+ return Curl_cwriter_write(data, writer->next, type, buf, blen);
+
+ /* ASCII mode BODY data, convert lineends */
+ while(blen) {
+ /* do not pass EOS when writing parts */
+ int chunk_type = (type & ~CLIENTWRITE_EOS);
+ const char *cp;
+ size_t chunk_len;
+ CURLcode result;
+
+ if(ctx->newline_pending) {
+ if(buf[0] != '\n') {
+ /* previous chunk ended in '\r' and we do not see a '\n' in this one,
+ * need to write a newline. */
+ result = Curl_cwriter_write(data, writer->next, chunk_type, &nl, 1);
+ if(result)
+ return result;
+ }
+ /* either we just wrote the newline or it is part of the next
+ * chunk of bytes we write. */
+ data->state.crlf_conversions++;
+ ctx->newline_pending = FALSE;
+ }
+
+ cp = memchr(buf, '\r', blen);
+ if(!cp)
+ break;
+
+ /* write the bytes before the '\r', excluding the '\r' */
+ chunk_len = cp - buf;
+ if(chunk_len) {
+ result = Curl_cwriter_write(data, writer->next, chunk_type,
+ buf, chunk_len);
+ if(result)
+ return result;
+ }
+ /* skip the '\r', we now have a newline pending */
+ buf = cp + 1;
+ blen = blen - chunk_len - 1;
+ ctx->newline_pending = TRUE;
+ }
+
+ /* Any remaining data does not contain a '\r' */
+ if(blen) {
+ DEBUGASSERT(!ctx->newline_pending);
+ return Curl_cwriter_write(data, writer->next, type, buf, blen);
+ }
+ else if(type & CLIENTWRITE_EOS) {
+ /* EndOfStream, if we have a trailing cr, now is the time to write it */
+ if(ctx->newline_pending) {
+ ctx->newline_pending = FALSE;
+ data->state.crlf_conversions++;
+ return Curl_cwriter_write(data, writer->next, type, &nl, 1);
+ }
+ /* Always pass on the EOS type indicator */
+ return Curl_cwriter_write(data, writer->next, type, buf, 0);
+ }
+ return CURLE_OK;
+}
+
+static const struct Curl_cwtype ftp_cw_lc = {
+ "ftp-lineconv",
+ NULL,
+ Curl_cwriter_def_init,
+ ftp_cw_lc_write,
+ Curl_cwriter_def_close,
+ sizeof(struct ftp_cw_lc_ctx)
+};
+
+#endif /* CURL_DO_LINEEND_CONV */
/***********************************************************************
*
* AcceptServerConnect()
@@ -412,8 +512,32 @@ static CURLcode ReceivedServerConnect(struct Curl_easy *data, bool *received) }
if(response) {
infof(data, "Ctrl conn has data while waiting for data conn");
+ if(pp->overflow > 3) {
+ char *r = Curl_dyn_ptr(&pp->recvbuf);
+
+ DEBUGASSERT((pp->overflow + pp->nfinal) <=
+ Curl_dyn_len(&pp->recvbuf));
+ /* move over the most recently handled response line */
+ r += pp->nfinal;
+
+ if(LASTLINE(r)) {
+ int status = curlx_sltosi(strtol(r, NULL, 10));
+ if(status == 226) {
+ /* funny timing situation where we get the final message on the
+ control connection before traffic on the data connection has been
+ noticed. Leave the 226 in there and use this as a trigger to read
+ the data socket. */
+ infof(data, "Got 226 before data activity");
+ *received = TRUE;
+ return CURLE_OK;
+ }
+ }
+ }
+
(void)Curl_GetFTPResponse(data, &nread, &ftpcode);
+ infof(data, "FTP code: %03d", ftpcode);
+
if(ftpcode/100 > 3)
return CURLE_FTP_ACCEPT_FAILED;
@@ -457,12 +581,12 @@ static CURLcode InitiateTransfer(struct Curl_easy *data) /* set the SO_SNDBUF for the secondary socket for those who need it */
Curl_sndbufset(conn->sock[SECONDARYSOCKET]);
- Curl_setup_transfer(data, -1, -1, FALSE, SECONDARYSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, SECONDARYSOCKET);
}
else {
/* FTP download: */
- Curl_setup_transfer(data, SECONDARYSOCKET,
- conn->proto.ftpc.retr_size_saved, FALSE, -1);
+ Curl_xfer_setup(data, SECONDARYSOCKET,
+ conn->proto.ftpc.retr_size_saved, FALSE, -1);
}
conn->proto.ftpc.pp.pending_resp = TRUE; /* expect server response */
@@ -525,14 +649,6 @@ out: return result;
}
-/* macro to check for a three-digit ftp status code at the start of the
- given string */
-#define STATUSCODE(line) (ISDIGIT(line[0]) && ISDIGIT(line[1]) && \
- ISDIGIT(line[2]))
-
-/* macro to check for the last line in an FTP server response */
-#define LASTLINE(line) (STATUSCODE(line) && (' ' == line[3]))
-
static bool ftp_endofresp(struct Curl_easy *data, struct connectdata *conn,
char *line, size_t len, int *code)
{
@@ -548,13 +664,13 @@ static bool ftp_endofresp(struct Curl_easy *data, struct connectdata *conn, }
static CURLcode ftp_readresp(struct Curl_easy *data,
- curl_socket_t sockfd,
+ int sockindex,
struct pingpong *pp,
int *ftpcode, /* return the ftp-code if done */
size_t *size) /* size of the response */
{
int code;
- CURLcode result = Curl_pp_readresp(data, sockfd, pp, &code, size);
+ CURLcode result = Curl_pp_readresp(data, sockindex, pp, &code, size);
#ifdef HAVE_GSSAPI
{
@@ -689,7 +805,7 @@ CURLcode Curl_GetFTPResponse(struct Curl_easy *data, break;
}
}
- result = ftp_readresp(data, sockfd, pp, ftpcode, &nread);
+ result = ftp_readresp(data, FIRSTSOCKET, pp, ftpcode, &nread);
if(result)
break;
@@ -821,24 +937,18 @@ static int ftp_domore_getsock(struct Curl_easy *data, * remote site, or we could wait for that site to connect to us. Or just
* handle ordinary commands.
*/
-
DEBUGF(infof(data, "ftp_domore_getsock()"));
- if(conn->cfilter[SECONDARYSOCKET]
- && !Curl_conn_is_connected(conn, SECONDARYSOCKET))
- return 0;
if(FTP_STOP == ftpc->state) {
- int bits = GETSOCK_READSOCK(0);
-
/* if stopped and still in this state, then we're also waiting for a
connect on the secondary connection */
+ DEBUGASSERT(conn->sock[SECONDARYSOCKET] != CURL_SOCKET_BAD ||
+ (conn->cfilter[SECONDARYSOCKET] &&
+ !Curl_conn_is_connected(conn, SECONDARYSOCKET)));
socks[0] = conn->sock[FIRSTSOCKET];
- if(conn->sock[SECONDARYSOCKET] != CURL_SOCKET_BAD) {
- socks[1] = conn->sock[SECONDARYSOCKET];
- bits |= GETSOCK_WRITESOCK(1) | GETSOCK_READSOCK(1);
- }
-
- return bits;
+ /* An unconnected SECONDARY will add its socket by itself
+ * via its adjust_pollset() */
+ return GETSOCK_READSOCK(0);
}
return Curl_pp_getsock(data, &conn->proto.ftpc.pp, socks);
}
@@ -1179,6 +1289,12 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data, conn->bits.ftp_use_eprt = TRUE;
#endif
+ /* Replace any filter on SECONDARY with one listening on this socket */
+ result = Curl_conn_tcp_listen_set(data, conn, SECONDARYSOCKET, &portsock);
+ if(result)
+ goto out;
+ portsock = CURL_SOCKET_BAD; /* now held in filter */
+
for(; fcmd != DONE; fcmd++) {
if(!conn->bits.ftp_use_eprt && (EPRT == fcmd))
@@ -1252,11 +1368,6 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data, /* store which command was sent */
ftpc->count1 = fcmd;
- /* Replace any filter on SECONDARY with one listening on this socket */
- result = Curl_conn_tcp_listen_set(data, conn, SECONDARYSOCKET, &portsock);
- if(result)
- goto out;
- portsock = CURL_SOCKET_BAD; /* now held in filter */
ftp_state(data, FTP_PORT);
out:
@@ -1572,10 +1683,10 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data, append = TRUE;
/* Let's read off the proper amount of bytes from the input. */
- if(conn->seek_func) {
+ if(data->set.seek_func) {
Curl_set_in_callback(data, true);
- seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
- SEEK_SET);
+ seekerr = data->set.seek_func(data->set.seek_client,
+ data->state.resume_from, SEEK_SET);
Curl_set_in_callback(data, false);
}
@@ -1614,7 +1725,7 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data, infof(data, "File already completely uploaded");
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
/* Set ->transfer so that we won't get any error in
* ftp_done() because we didn't transfer anything! */
@@ -1792,7 +1903,7 @@ static char *control_address(struct connectdata *conn) if(conn->bits.tunnel_proxy || conn->bits.socksproxy)
return conn->host.name;
#endif
- return conn->primary_ip;
+ return conn->primary.remote_ip;
}
static bool match_pasv_6nums(const char *p,
@@ -1929,14 +2040,14 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data, */
const char * const host_name = conn->bits.socksproxy ?
conn->socks_proxy.host.name : conn->http_proxy.host.name;
- rc = Curl_resolv(data, host_name, conn->port, FALSE, &addr);
+ rc = Curl_resolv(data, host_name, conn->primary.remote_port, FALSE, &addr);
if(rc == CURLRESOLV_PENDING)
/* BLOCKING, ignores the return code but 'addr' will be NULL in
case of failure */
(void)Curl_resolver_wait_resolv(data, &addr);
- connectport =
- (unsigned short)conn->port; /* we connect to the proxy's port */
+ /* we connect to the proxy's port */
+ connectport = (unsigned short)conn->primary.remote_port;
if(!addr) {
failf(data, "Can't resolve proxy host %s:%hu", host_name, connectport);
@@ -2285,7 +2396,7 @@ static CURLcode ftp_state_retr(struct Curl_easy *data, if(ftp->downloadsize == 0) {
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
infof(data, "File already completely downloaded");
/* Set ->transfer so that we won't get any error in ftp_done()
@@ -2692,7 +2803,6 @@ static CURLcode ftp_statemachine(struct Curl_easy *data, struct connectdata *conn)
{
CURLcode result;
- curl_socket_t sock = conn->sock[FIRSTSOCKET];
int ftpcode;
struct ftp_conn *ftpc = &conn->proto.ftpc;
struct pingpong *pp = &ftpc->pp;
@@ -2702,7 +2812,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data, if(pp->sendleft)
return Curl_pp_flushsend(data, pp);
- result = ftp_readresp(data, sock, pp, &ftpcode, &nread);
+ result = ftp_readresp(data, FIRSTSOCKET, pp, &ftpcode, &nread);
if(result)
return result;
@@ -3702,7 +3812,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep) }
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
if(!ftpc->wait_data_conn) {
/* no waiting for the data connection so this is now complete */
@@ -4010,6 +4120,24 @@ static CURLcode ftp_do(struct Curl_easy *data, bool *done) *done = FALSE; /* default to false */
ftpc->wait_data_conn = FALSE; /* default to no such wait */
+#ifdef CURL_DO_LINEEND_CONV
+ {
+ /* FTP data may need conversion. */
+ struct Curl_cwriter *ftp_lc_writer;
+
+ result = Curl_cwriter_create(&ftp_lc_writer, data, &ftp_cw_lc,
+ CURL_CW_CONTENT_DECODE);
+ if(result)
+ return result;
+
+ result = Curl_cwriter_add(data, ftp_lc_writer);
+ if(result) {
+ Curl_cwriter_free(data, ftp_lc_writer);
+ return result;
+ }
+ }
+#endif /* CURL_DO_LINEEND_CONV */
+
if(data->state.wildcardmatch) {
result = wc_statemach(data);
if(data->wildcard->state == CURLWC_SKIP ||
@@ -4286,7 +4414,7 @@ static CURLcode ftp_dophase_done(struct Curl_easy *data, bool connected) if(ftp->transfer != PPTRANSFER_BODY)
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
else if(!connected)
/* since we didn't connect now, we want do_more to get called */
conn->bits.do_more = TRUE;
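
Editor's note: the ftp_cw_lc writer added in ftp.c above converts "\r\n" and lone "\r" to "\n" while streaming ASCII-mode data, keeping a pending flag so a CR split from its LF across chunk boundaries is still handled. A self-contained sketch of that stateful conversion, writing to a FILE* instead of a writer chain (all names are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct lc_state {
      int cr_pending;   /* last chunk ended in '\r', decision deferred */
    };

    /* Convert "\r\n" and lone "\r" to "\n" across arbitrary chunk splits. */
    static void lc_write(struct lc_state *st, const char *buf, size_t len,
                         FILE *out)
    {
      while(len) {
        const char *cr;

        if(st->cr_pending) {
          if(buf[0] != '\n')
            fputc('\n', out);    /* lone CR from the previous chunk */
          st->cr_pending = 0;    /* CRLF case: the '\n' in buf covers it */
        }
        cr = memchr(buf, '\r', len);
        if(!cr) {
          fwrite(buf, 1, len, out);
          return;
        }
        fwrite(buf, 1, (size_t)(cr - buf), out); /* bytes before the CR */
        len -= (size_t)(cr - buf) + 1;
        buf = cr + 1;
        st->cr_pending = 1;
      }
    }

    static void lc_eos(struct lc_state *st, FILE *out)
    {
      if(st->cr_pending)
        fputc('\n', out);        /* trailing CR at end of stream */
      st->cr_pending = 0;
    }

    int main(void)
    {
      struct lc_state st = {0};
      lc_write(&st, "one\r", 4, stdout);          /* CR split from its LF */
      lc_write(&st, "\ntwo\rthree", 10, stdout);  /* ...and a lone CR */
      lc_eos(&st, stdout);                        /* prints one, two, three */
      return 0;
    }
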
diff --git a/libs/libcurl/src/getinfo.c b/libs/libcurl/src/getinfo.c index 0d72f1bdf2..e7162b3a9f 100644 --- a/libs/libcurl/src/getinfo.c +++ b/libs/libcurl/src/getinfo.c @@ -76,10 +76,10 @@ CURLcode Curl_initinfo(struct Curl_easy *data) free(info->wouldredirect);
info->wouldredirect = NULL;
- info->conn_primary_ip[0] = '\0';
- info->conn_local_ip[0] = '\0';
- info->conn_primary_port = 0;
- info->conn_local_port = 0;
+ info->primary.remote_ip[0] = '\0';
+ info->primary.local_ip[0] = '\0';
+ info->primary.remote_port = 0;
+ info->primary.local_port = 0;
info->retry_after = 0;
info->conn_scheme = 0;
@@ -153,12 +153,12 @@ static CURLcode getinfo_char(struct Curl_easy *data, CURLINFO info, break;
case CURLINFO_PRIMARY_IP:
/* Return the ip address of the most recent (primary) connection */
- *param_charp = data->info.conn_primary_ip;
+ *param_charp = data->info.primary.remote_ip;
break;
case CURLINFO_LOCAL_IP:
/* Return the source/local ip address of the most recent (primary)
connection */
- *param_charp = data->info.conn_local_ip;
+ *param_charp = data->info.primary.local_ip;
break;
case CURLINFO_RTSP_SESSION_ID:
*param_charp = data->set.str[STRING_RTSP_SESSION_ID];
@@ -180,7 +180,6 @@ static CURLcode getinfo_char(struct Curl_easy *data, CURLINFO info, *param_charp = NULL;
#endif
break;
-
default:
return CURLE_UNKNOWN_OPTION;
}
@@ -285,11 +284,11 @@ static CURLcode getinfo_long(struct Curl_easy *data, CURLINFO info, break;
case CURLINFO_PRIMARY_PORT:
/* Return the (remote) port of the most recent (primary) connection */
- *param_longp = data->info.conn_primary_port;
+ *param_longp = data->info.primary.remote_port;
break;
case CURLINFO_LOCAL_PORT:
/* Return the local port of the most recent (primary) connection */
- *param_longp = data->info.conn_local_port;
+ *param_longp = data->info.primary.local_port;
break;
case CURLINFO_PROXY_ERROR:
*param_longp = (long)data->info.pxcode;
@@ -334,6 +333,15 @@ static CURLcode getinfo_long(struct Curl_easy *data, CURLINFO info, case CURLINFO_PROTOCOL:
*param_longp = data->info.conn_protocol;
break;
+ case CURLINFO_USED_PROXY:
+ *param_longp =
+#ifdef CURL_DISABLE_PROXY
+ 0
+#else
+ data->info.used_proxy
+#endif
+ ;
+ break;
default:
return CURLE_UNKNOWN_OPTION;
}
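
Editor's note: getinfo.c above wires up CURLINFO_USED_PROXY (new in 8.7.0), a long that is zero when the transfer went directly to the host and non-zero when a configured proxy was actually used; it is always zero in proxy-disabled builds. A short sketch of querying it with the public API (URL and proxy values are placeholders):

    #include <curl/curl.h>
    #include <stdio.h>

    int main(void)
    {
      CURL *curl = curl_easy_init();

      if(curl) {
        long used_proxy = 0;

        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
        /* CURLOPT_NOPROXY may exclude this host even though a proxy is set */
        curl_easy_setopt(curl, CURLOPT_PROXY, "http://proxy.example:8080");
        curl_easy_setopt(curl, CURLOPT_NOPROXY, "example.com");

        if(curl_easy_perform(curl) == CURLE_OK) {
          curl_easy_getinfo(curl, CURLINFO_USED_PROXY, &used_proxy);
          printf("proxy was %sused\n", used_proxy ? "" : "not ");
        }
        curl_easy_cleanup(curl);
      }
      return 0;
    }
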
diff --git a/libs/libcurl/src/gopher.c b/libs/libcurl/src/gopher.c index 8adf741532..2c8bd18914 100644 --- a/libs/libcurl/src/gopher.c +++ b/libs/libcurl/src/gopher.c @@ -139,8 +139,8 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done) char *sel = NULL;
char *sel_org = NULL;
timediff_t timeout_ms;
- ssize_t amount, k;
- size_t len;
+ ssize_t k;
+ size_t amount, len;
int what;
*done = TRUE; /* unconditionally */
@@ -185,7 +185,7 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done) if(strlen(sel) < 1)
break;
- result = Curl_nwrite(data, FIRSTSOCKET, sel, k, &amount);
+ result = Curl_xfer_send(data, sel, k, &amount);
if(!result) { /* Which may not have written it all! */
result = Curl_client_write(data, CLIENTWRITE_HEADER, sel, amount);
if(result)
@@ -227,7 +227,7 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done) free(sel_org);
if(!result)
- result = Curl_nwrite(data, FIRSTSOCKET, "\r\n", 2, &amount);
+ result = Curl_xfer_send(data, "\r\n", 2, &amount);
if(result) {
failf(data, "Failed sending Gopher request");
return result;
@@ -236,7 +236,7 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done) if(result)
return result;
- Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
return CURLE_OK;
}
#endif /* CURL_DISABLE_GOPHER */
diff --git a/libs/libcurl/src/headers.c b/libs/libcurl/src/headers.c index 7999ca911d..35e27d6503 100644 --- a/libs/libcurl/src/headers.c +++ b/libs/libcurl/src/headers.c @@ -27,6 +27,7 @@ #include "urldata.h"
#include "strdup.h"
#include "strcase.h"
+#include "sendf.h"
#include "headers.h"
/* The last 3 #include files should be in this order */
@@ -337,14 +338,68 @@ CURLcode Curl_headers_push(struct Curl_easy *data, const char *header, }
/*
- * Curl_headers_init(). Init the headers subsystem.
+ * Curl_headers_reset(). Reset the headers subsystem.
*/
-static void headers_init(struct Curl_easy *data)
+static void headers_reset(struct Curl_easy *data)
{
Curl_llist_init(&data->state.httphdrs, NULL);
data->state.prevhead = NULL;
}
+struct hds_cw_collect_ctx {
+ struct Curl_cwriter super;
+};
+
+static CURLcode hds_cw_collect_write(struct Curl_easy *data,
+ struct Curl_cwriter *writer, int type,
+ const char *buf, size_t blen)
+{
+ if((type & CLIENTWRITE_HEADER) && !(type & CLIENTWRITE_STATUS)) {
+ unsigned char htype = (unsigned char)
+ (type & CLIENTWRITE_CONNECT ? CURLH_CONNECT :
+ (type & CLIENTWRITE_1XX ? CURLH_1XX :
+ (type & CLIENTWRITE_TRAILER ? CURLH_TRAILER :
+ CURLH_HEADER)));
+ CURLcode result = Curl_headers_push(data, buf, htype);
+ if(result)
+ return result;
+ }
+ return Curl_cwriter_write(data, writer->next, type, buf, blen);
+}
+
+static const struct Curl_cwtype hds_cw_collect = {
+ "hds-collect",
+ NULL,
+ Curl_cwriter_def_init,
+ hds_cw_collect_write,
+ Curl_cwriter_def_close,
+ sizeof(struct hds_cw_collect_ctx)
+};
+
+CURLcode Curl_headers_init(struct Curl_easy *data)
+{
+ struct Curl_cwriter *writer;
+ CURLcode result;
+
+ if(data->conn && (data->conn->handler->protocol & PROTO_FAMILY_HTTP)) {
+ /* avoid installing it twice */
+ if(Curl_cwriter_get_by_name(data, hds_cw_collect.name))
+ return CURLE_OK;
+
+ result = Curl_cwriter_create(&writer, data, &hds_cw_collect,
+ CURL_CW_PROTOCOL);
+ if(result)
+ return result;
+
+ result = Curl_cwriter_add(data, writer);
+ if(result) {
+ Curl_cwriter_free(data, writer);
+ return result;
+ }
+ }
+ return CURLE_OK;
+}
+
/*
* Curl_headers_cleanup(). Free all stored headers and associated memory.
*/
@@ -358,7 +413,7 @@ CURLcode Curl_headers_cleanup(struct Curl_easy *data) n = e->next;
free(hs);
}
- headers_init(data);
+ headers_reset(data);
return CURLE_OK;
}
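
Editor's note: the hds_cw_collect writer above pushes every received header into the header store and then hands the bytes on to the next writer, which is what backs the public header API. Fetching a stored header afterwards with curl_easy_header() looks roughly like this (the URL is a placeholder):

    #include <curl/curl.h>
    #include <stdio.h>

    int main(void)
    {
      CURL *curl = curl_easy_init();

      if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
        if(curl_easy_perform(curl) == CURLE_OK) {
          struct curl_header *h;
          /* origin CURLH_HEADER = normal response headers; request -1 =
             the last request, in case redirects were followed */
          if(curl_easy_header(curl, "Content-Type", 0, CURLH_HEADER, -1, &h)
             == CURLHE_OK)
            printf("Content-Type: %s\n", h->value);
        }
        curl_easy_cleanup(curl);
      }
      return 0;
    }
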
diff --git a/libs/libcurl/src/headers.h b/libs/libcurl/src/headers.h index a1c20852d0..f33e6c8585 100644 --- a/libs/libcurl/src/headers.h +++ b/libs/libcurl/src/headers.h @@ -37,6 +37,12 @@ struct Curl_header_store { };
/*
+ * Initialize header collecting for a transfer.
+ * Will add a client writer that catches CLIENTWRITE_HEADER writes.
+ */
+CURLcode Curl_headers_init(struct Curl_easy *data);
+
+/*
* Curl_headers_push() gets passed a full header to store.
*/
CURLcode Curl_headers_push(struct Curl_easy *data, const char *header,
@@ -48,6 +54,7 @@ CURLcode Curl_headers_push(struct Curl_easy *data, const char *header, CURLcode Curl_headers_cleanup(struct Curl_easy *data);
#else
+#define Curl_headers_init(x) CURLE_OK
#define Curl_headers_push(x,y,z) CURLE_OK
#define Curl_headers_cleanup(x) Curl_nop_stmt
#endif
diff --git a/libs/libcurl/src/hostip.c b/libs/libcurl/src/hostip.c index 70297ad60b..c7cc068606 100644 --- a/libs/libcurl/src/hostip.c +++ b/libs/libcurl/src/hostip.c @@ -288,7 +288,7 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data, size_t entry_len = create_hostcache_id(hostname, 0, port,
entry_id, sizeof(entry_id));
- /* See if its already in our dns cache */
+ /* See if it's already in our dns cache */
dns = Curl_hash_pick(data->dns.hostcache, entry_id, entry_len + 1);
/* No entry found in cache, check if we might have a wildcard entry */
diff --git a/libs/libcurl/src/hsts.c b/libs/libcurl/src/hsts.c index 96af618bde..6166bbdcb3 100644 --- a/libs/libcurl/src/hsts.c +++ b/libs/libcurl/src/hsts.c @@ -511,7 +511,6 @@ static CURLcode hsts_pull(struct Curl_easy *data, struct hsts *h) static CURLcode hsts_load(struct hsts *h, const char *file)
{
CURLcode result = CURLE_OK;
- char *line = NULL;
FILE *fp;
/* we need a private copy of the file name so that the hsts cache file
@@ -523,11 +522,10 @@ static CURLcode hsts_load(struct hsts *h, const char *file) fp = fopen(file, FOPEN_READTEXT);
if(fp) {
- line = malloc(MAX_HSTS_LINE);
- if(!line)
- goto fail;
- while(Curl_get_line(line, MAX_HSTS_LINE, fp)) {
- char *lineptr = line;
+ struct dynbuf buf;
+ Curl_dyn_init(&buf, MAX_HSTS_LINE);
+ while(Curl_get_line(&buf, fp)) {
+ char *lineptr = Curl_dyn_ptr(&buf);
while(*lineptr && ISBLANK(*lineptr))
lineptr++;
if(*lineptr == '#')
@@ -536,15 +534,10 @@ static CURLcode hsts_load(struct hsts *h, const char *file) hsts_add(h, lineptr);
}
- free(line); /* free the line buffer */
+ Curl_dyn_free(&buf); /* free the line buffer */
fclose(fp);
}
return result;
-
-fail:
- Curl_safefree(h->filename);
- fclose(fp);
- return CURLE_OUT_OF_MEMORY;
}
/*
diff --git a/libs/libcurl/src/http.c b/libs/libcurl/src/http.c index 6741425ae5..3d3a641f20 100644 --- a/libs/libcurl/src/http.c +++ b/libs/libcurl/src/http.c @@ -73,6 +73,7 @@ #include "hostip.h"
#include "dynhds.h"
#include "http.h"
+#include "headers.h"
#include "select.h"
#include "parsedate.h" /* for the week day and month names */
#include "strtoofft.h"
@@ -101,6 +102,9 @@ */
static bool http_should_fail(struct Curl_easy *data);
+static bool http_exp100_is_waiting(struct Curl_easy *data);
+static CURLcode http_exp100_add_reader(struct Curl_easy *data);
+static void http_exp100_send_anyway(struct Curl_easy *data);
/*
* HTTP handler interface.
@@ -404,150 +408,88 @@ static bool pickoneauth(struct auth *pick, unsigned long mask) /*
* http_perhapsrewind()
*
- * If we are doing POST or PUT {
- * If we have more data to send {
- * If we are doing NTLM {
- * Keep sending since we must not disconnect
- * }
- * else {
- * If there is more than just a little data left to send, close
- * the current connection by force.
- * }
- * }
- * If we have sent any data {
- * If we don't have track of all the data {
- * call app to tell it to rewind
- * }
- * else {
- * rewind internally so that the operation can restart fine
- * }
- * }
- * }
+ * The current request needs to be done again - maybe due to a follow
+ * or authentication negotiation. Check if:
+ * 1) a rewind of the data sent to the server is necessary
+ * 2) the current transfer should continue or be stopped early
*/
static CURLcode http_perhapsrewind(struct Curl_easy *data,
struct connectdata *conn)
{
- struct HTTP *http = data->req.p.http;
- curl_off_t bytessent;
- curl_off_t expectsend = -1; /* default is unknown */
-
- if(!http)
- /* If this is still NULL, we have not reach very far and we can safely
- skip this rewinding stuff */
+ curl_off_t bytessent = data->req.writebytecount;
+ curl_off_t expectsend = Curl_creader_total_length(data);
+ curl_off_t upload_remain = (expectsend >= 0)? (expectsend - bytessent) : -1;
+ bool little_upload_remains = (upload_remain >= 0 && upload_remain < 2000);
+ bool needs_rewind = Curl_creader_needs_rewind(data);
+ /* By default, we'd like to abort the transfer when little or
+ * unknown amount remains. But this may be overridden by authentications
+ * further below! */
+ bool abort_upload = (!data->req.upload_done && !little_upload_remains);
+ const char *ongoing_auth = NULL;
+
+ /* We need a rewind before uploading client read data again. The
+ * checks below just influence whether the upload is to be continued
+ * or aborted early.
+ * This depends on how much remains to be sent and in what state
+ * the authentication is. Some auth schemes such as NTLM do not work
+ * for a new connection. */
+ if(needs_rewind) {
+ infof(data, "Need to rewind upload for next request");
+ Curl_creader_set_rewind(data, TRUE);
+ }
+
+ if(conn->bits.close)
+ /* If we already decided to close this connection, we cannot veto. */
return CURLE_OK;
- switch(data->state.httpreq) {
- case HTTPREQ_GET:
- case HTTPREQ_HEAD:
- return CURLE_OK;
- default:
- break;
- }
-
- bytessent = data->req.writebytecount;
-
- if(conn->bits.authneg) {
- /* This is a state where we are known to be negotiating and we don't send
- any data then. */
- expectsend = 0;
- }
- else if(!conn->bits.protoconnstart) {
- /* HTTP CONNECT in progress: there is no body */
- expectsend = 0;
- }
- else {
- /* figure out how much data we are expected to send */
- switch(data->state.httpreq) {
- case HTTPREQ_POST:
- case HTTPREQ_PUT:
- if(data->state.infilesize != -1)
- expectsend = data->state.infilesize;
- break;
- case HTTPREQ_POST_FORM:
- case HTTPREQ_POST_MIME:
- expectsend = http->postsize;
- break;
- default:
- break;
- }
- }
-
- data->state.rewindbeforesend = FALSE; /* default */
-
- if((expectsend == -1) || (expectsend > bytessent)) {
+ if(abort_upload) {
+ /* We'd like to abort the upload - but should we? */
#if defined(USE_NTLM)
- /* There is still data left to send */
if((data->state.authproxy.picked == CURLAUTH_NTLM) ||
(data->state.authhost.picked == CURLAUTH_NTLM) ||
(data->state.authproxy.picked == CURLAUTH_NTLM_WB) ||
(data->state.authhost.picked == CURLAUTH_NTLM_WB)) {
- if(((expectsend - bytessent) < 2000) ||
- (conn->http_ntlm_state != NTLMSTATE_NONE) ||
+ ongoing_auth = "NTLM";
+ if((conn->http_ntlm_state != NTLMSTATE_NONE) ||
(conn->proxy_ntlm_state != NTLMSTATE_NONE)) {
- /* The NTLM-negotiation has started *OR* there is just a little (<2K)
- data left to send, keep on sending. */
-
- /* rewind data when completely done sending! */
- if(!conn->bits.authneg && (conn->writesockfd != CURL_SOCKET_BAD)) {
- data->state.rewindbeforesend = TRUE;
- infof(data, "Rewind stream before next send");
- }
-
- return CURLE_OK;
+ /* The NTLM-negotiation has started, keep on sending.
+ * Need to do further work on same connection */
+ abort_upload = FALSE;
}
-
- if(conn->bits.close)
- /* this is already marked to get closed */
- return CURLE_OK;
-
- infof(data, "NTLM send, close instead of sending %"
- CURL_FORMAT_CURL_OFF_T " bytes",
- (curl_off_t)(expectsend - bytessent));
}
#endif
#if defined(USE_SPNEGO)
/* There is still data left to send */
if((data->state.authproxy.picked == CURLAUTH_NEGOTIATE) ||
(data->state.authhost.picked == CURLAUTH_NEGOTIATE)) {
- if(((expectsend - bytessent) < 2000) ||
- (conn->http_negotiate_state != GSS_AUTHNONE) ||
+ ongoing_auth = "NEGOTIATE";
+ if((conn->http_negotiate_state != GSS_AUTHNONE) ||
(conn->proxy_negotiate_state != GSS_AUTHNONE)) {
- /* The NEGOTIATE-negotiation has started *OR*
- there is just a little (<2K) data left to send, keep on sending. */
-
- /* rewind data when completely done sending! */
- if(!conn->bits.authneg && (conn->writesockfd != CURL_SOCKET_BAD)) {
- data->state.rewindbeforesend = TRUE;
- infof(data, "Rewind stream before next send");
- }
-
- return CURLE_OK;
+ /* The NEGOTIATE-negotiation has started, keep on sending.
+ * Need to do further work on same connection */
+ abort_upload = FALSE;
}
-
- if(conn->bits.close)
- /* this is already marked to get closed */
- return CURLE_OK;
-
- infof(data, "NEGOTIATE send, close instead of sending %"
- CURL_FORMAT_CURL_OFF_T " bytes",
- (curl_off_t)(expectsend - bytessent));
}
#endif
+ }
- /* This is not NEGOTIATE/NTLM or many bytes left to send: close */
+ if(abort_upload) {
+ if(upload_remain >= 0)
+ infof(data, "%s%sclose instead of sending %"
+ CURL_FORMAT_CURL_OFF_T " more bytes",
+ ongoing_auth? ongoing_auth : "",
+ ongoing_auth? " send, " : "",
+ upload_remain);
+ else
+ infof(data, "%s%sclose instead of sending unknown amount "
+ "of more bytes",
+ ongoing_auth? ongoing_auth : "",
+ ongoing_auth? " send, " : "");
+ /* We decided to abort the ongoing transfer */
streamclose(conn, "Mid-auth HTTP and much data left to send");
+ /* FIXME: questionable manipulation here, can we do this differently? */
data->req.size = 0; /* don't download any more than 0 bytes */
-
- /* There still is data left to send, but this connection is marked for
- closure so we can safely do the rewind right now */
}
-
- if(bytessent) {
- /* mark for rewind since if we already sent something */
- data->state.rewindbeforesend = TRUE;
- infof(data, "Please rewind output before next send");
- }
-
return CURLE_OK;
}
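
Editor's note: the reworked http_perhapsrewind() above boils down to two decisions: request a rewind whenever request body data was already consumed, and abort the rest of the upload unless only a small known amount (< 2000 bytes) remains or a connection-bound auth scheme (NTLM/Negotiate) is mid-handshake. A compact sketch of that decision table, deliberately simplified and using invented names rather than the libcurl internals:

    #include <stdbool.h>
    #include <stdio.h>

    struct resend_plan {
      bool rewind_body;    /* replay already-sent request data */
      bool abort_upload;   /* stop sending the rest and close */
    };

    /* expectsend < 0 means the total request body size is unknown */
    static struct resend_plan plan_resend(long long bytessent,
                                          long long expectsend,
                                          bool conn_bound_auth_in_progress)
    {
      struct resend_plan p;
      long long remain = (expectsend >= 0) ? expectsend - bytessent : -1;
      bool little_remains = (remain >= 0 && remain < 2000);

      p.rewind_body = (bytessent > 0);
      p.abort_upload = !little_remains;
      if(conn_bound_auth_in_progress)
        p.abort_upload = false;  /* NTLM/Negotiate must stay on this connection */
      return p;
    }

    int main(void)
    {
      /* 512 bytes sent, unknown total, no connection-bound auth: rewind and
         abort the remaining upload */
      struct resend_plan p = plan_resend(512, -1, false);
      printf("rewind=%d abort=%d\n", p.rewind_body, p.abort_upload);
      return 0;
    }
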
@@ -578,7 +520,7 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data) if((data->state.aptr.user || data->set.str[STRING_BEARER]) &&
((data->req.httpcode == 401) ||
- (conn->bits.authneg && data->req.httpcode < 300))) {
+ (data->req.authneg && data->req.httpcode < 300))) {
pickhost = pickoneauth(&data->state.authhost, authmask);
if(!pickhost)
data->state.authproblem = TRUE;
@@ -592,7 +534,7 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data) #ifndef CURL_DISABLE_PROXY
if(conn->bits.proxy_user_passwd &&
((data->req.httpcode == 407) ||
- (conn->bits.authneg && data->req.httpcode < 300))) {
+ (data->req.authneg && data->req.httpcode < 300))) {
pickproxy = pickoneauth(&data->state.authproxy,
authmask & ~CURLAUTH_BEARER);
if(!pickproxy)
@@ -601,13 +543,10 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data) #endif
if(pickhost || pickproxy) {
- if((data->state.httpreq != HTTPREQ_GET) &&
- (data->state.httpreq != HTTPREQ_HEAD) &&
- !data->state.rewindbeforesend) {
- result = http_perhapsrewind(data, conn);
- if(result)
- return result;
- }
+ result = http_perhapsrewind(data, conn);
+ if(result)
+ return result;
+
/* In case this is GSS auth, the newurl field is already allocated so
we must make sure to free it before allocating a new one. As figured
out in bug #2284386 */
@@ -618,7 +557,7 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data) }
else if((data->req.httpcode < 300) &&
(!data->state.authhost.done) &&
- conn->bits.authneg) {
+ data->req.authneg) {
/* no (known) authentication available,
authentication is not "done" yet and
no authentication seems to be required and
@@ -863,10 +802,10 @@ Curl_http_output_auth(struct Curl_easy *data, (httpreq != HTTPREQ_HEAD)) {
/* Auth is required and we are not authenticated yet. Make a PUT or POST
with content-length zero as a "probe". */
- conn->bits.authneg = TRUE;
+ data->req.authneg = TRUE;
}
else
- conn->bits.authneg = FALSE;
+ data->req.authneg = FALSE;
return result;
}
@@ -1174,274 +1113,6 @@ static bool http_should_fail(struct Curl_easy *data) }
/*
- * readmoredata() is a "fread() emulation" to provide POST and/or request
- * data. It is used when a huge POST is to be made and the entire chunk wasn't
- * sent in the first send(). This function will then be called from the
- * transfer.c loop when more data is to be sent to the peer.
- *
- * Returns the amount of bytes it filled the buffer with.
- */
-static size_t readmoredata(char *buffer,
- size_t size,
- size_t nitems,
- void *userp)
-{
- struct HTTP *http = (struct HTTP *)userp;
- struct Curl_easy *data = http->backup.data;
- size_t fullsize = size * nitems;
-
- if(!http->postsize)
- /* nothing to return */
- return 0;
-
- /* make sure that an HTTP request is never sent away chunked! */
- data->req.forbidchunk = (http->sending == HTTPSEND_REQUEST)?TRUE:FALSE;
-
- if(data->set.max_send_speed &&
- (data->set.max_send_speed < (curl_off_t)fullsize) &&
- (data->set.max_send_speed < http->postsize))
- /* speed limit */
- fullsize = (size_t)data->set.max_send_speed;
-
- else if(http->postsize <= (curl_off_t)fullsize) {
- memcpy(buffer, http->postdata, (size_t)http->postsize);
- fullsize = (size_t)http->postsize;
-
- if(http->backup.postsize) {
- /* move backup data into focus and continue on that */
- http->postdata = http->backup.postdata;
- http->postsize = http->backup.postsize;
- data->state.fread_func = http->backup.fread_func;
- data->state.in = http->backup.fread_in;
-
- http->sending++; /* move one step up */
-
- http->backup.postsize = 0;
- }
- else
- http->postsize = 0;
-
- return fullsize;
- }
-
- memcpy(buffer, http->postdata, fullsize);
- http->postdata += fullsize;
- http->postsize -= fullsize;
-
- return fullsize;
-}
-
-/*
- * Curl_buffer_send() sends a header buffer and frees all associated
- * memory. Body data may be appended to the header data if desired.
- *
- * Returns CURLcode
- */
-CURLcode Curl_buffer_send(struct dynbuf *in,
- struct Curl_easy *data,
- struct HTTP *http,
- /* add the number of sent bytes to this
- counter */
- curl_off_t *bytes_written,
- /* how much of the buffer contains body data */
- curl_off_t included_body_bytes,
- int sockindex)
-{
- ssize_t amount;
- CURLcode result;
- char *ptr;
- size_t size;
- struct connectdata *conn = data->conn;
- size_t sendsize;
- size_t headersize;
-
- DEBUGASSERT(sockindex <= SECONDARYSOCKET && sockindex >= 0);
-
- /* The looping below is required since we use non-blocking sockets, but due
- to the circumstances we will just loop and try again and again etc */
-
- ptr = Curl_dyn_ptr(in);
- size = Curl_dyn_len(in);
-
- headersize = size - (size_t)included_body_bytes; /* the initial part that
- isn't body is header */
-
- DEBUGASSERT(size > (size_t)included_body_bytes);
-
- if((conn->handler->flags & PROTOPT_SSL
-#ifndef CURL_DISABLE_PROXY
- || IS_HTTPS_PROXY(conn->http_proxy.proxytype)
-#endif
- )
- && conn->httpversion < 20) {
- /* Make sure this doesn't send more body bytes than what the max send
- speed says. The request bytes do not count to the max speed.
- */
- if(data->set.max_send_speed &&
- (included_body_bytes > data->set.max_send_speed)) {
- curl_off_t overflow = included_body_bytes - data->set.max_send_speed;
- DEBUGASSERT((size_t)overflow < size);
- sendsize = size - (size_t)overflow;
- }
- else
- sendsize = size;
-
- /* OpenSSL is very picky and we must send the SAME buffer pointer to the
- library when we attempt to re-send this buffer. Sending the same data
- is not enough, we must use the exact same address. For this reason, we
- must copy the data to the uploadbuffer first, since that is the buffer
- we will be using if this send is retried later.
- */
- result = Curl_get_upload_buffer(data);
- if(result) {
- /* malloc failed, free memory and return to the caller */
- Curl_dyn_free(in);
- return result;
- }
- /* We never send more than upload_buffer_size bytes in one single chunk
- when we speak HTTPS, as if only a fraction of it is sent now, this data
- needs to fit into the normal read-callback buffer later on and that
- buffer is using this size.
- */
- if(sendsize > (size_t)data->set.upload_buffer_size)
- sendsize = (size_t)data->set.upload_buffer_size;
-
- memcpy(data->state.ulbuf, ptr, sendsize);
- ptr = data->state.ulbuf;
- }
- else {
-#ifdef CURLDEBUG
- /* Allow debug builds to override this logic to force short initial
- sends
- */
- char *p = getenv("CURL_SMALLREQSEND");
- if(p) {
- size_t altsize = (size_t)strtoul(p, NULL, 10);
- if(altsize)
- sendsize = CURLMIN(size, altsize);
- else
- sendsize = size;
- }
- else
-#endif
- {
- /* Make sure this doesn't send more body bytes than what the max send
- speed says. The request bytes do not count to the max speed.
- */
- if(data->set.max_send_speed &&
- (included_body_bytes > data->set.max_send_speed)) {
- curl_off_t overflow = included_body_bytes - data->set.max_send_speed;
- DEBUGASSERT((size_t)overflow < size);
- sendsize = size - (size_t)overflow;
- }
- else
- sendsize = size;
- }
-
- /* We currently cannot send more that this for http here:
- * - if sending blocks, it return 0 as amount
- * - we then whisk aside the `in` into the `http` struct
- * and install our own `data->state.fread_func` that
- * on subsequent calls reads `in` empty.
- * - when the whisked away `in` is empty, the `fread_func`
- * is restored to its original state.
- * The problem is that `fread_func` can only return
- * `upload_buffer_size` lengths. If the send we do here
- * is larger and blocks, we do re-sending with smaller
- * amounts of data and connection filters do not like
- * that.
- */
- if(http && (sendsize > (size_t)data->set.upload_buffer_size))
- sendsize = (size_t)data->set.upload_buffer_size;
- }
-
- result = Curl_nwrite(data, sockindex, ptr, sendsize, &amount);
-
- if(!result) {
- /*
- * Note that we may not send the entire chunk at once, and we have a set
- * number of data bytes at the end of the big buffer (out of which we may
- * only send away a part).
- */
- /* how much of the header that was sent */
- size_t headlen = (size_t)amount>headersize ? headersize : (size_t)amount;
- size_t bodylen = amount - headlen;
-
- /* this data _may_ contain binary stuff */
- Curl_debug(data, CURLINFO_HEADER_OUT, ptr, headlen);
- if(bodylen)
- /* there was body data sent beyond the initial header part, pass that on
- to the debug callback too */
- Curl_debug(data, CURLINFO_DATA_OUT, ptr + headlen, bodylen);
-
- /* 'amount' can never be a very large value here so typecasting it so a
- signed 31 bit value should not cause problems even if ssize_t is
- 64bit */
- *bytes_written += (long)amount;
-
- if(http) {
- /* if we sent a piece of the body here, up the byte counter for it
- accordingly */
- data->req.writebytecount += bodylen;
- Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
-
- if((size_t)amount != size) {
- /* The whole request could not be sent in one system call. We must
- queue it up and send it later when we get the chance. We must not
- loop here and wait until it might work again. */
-
- size -= amount;
-
- ptr = Curl_dyn_ptr(in) + amount;
-
- /* backup the currently set pointers */
- http->backup.fread_func = data->state.fread_func;
- http->backup.fread_in = data->state.in;
- http->backup.postdata = http->postdata;
- http->backup.postsize = http->postsize;
- http->backup.data = data;
-
- /* set the new pointers for the request-sending */
- data->state.fread_func = (curl_read_callback)readmoredata;
- data->state.in = (void *)http;
- http->postdata = ptr;
- http->postsize = (curl_off_t)size;
-
- /* this much data is remaining header: */
- data->req.pendingheader = headersize - headlen;
-
- http->send_buffer = *in; /* copy the whole struct */
- http->sending = HTTPSEND_REQUEST;
- return CURLE_OK;
- }
- http->sending = HTTPSEND_BODY;
- /* the full buffer was sent, clean up and return */
- }
- else {
- if((size_t)amount != size)
- /* We have no continue-send mechanism now, fail. This can only happen
- when this function is used from the CONNECT sending function. We
- currently (stupidly) assume that the whole request is always sent
- away in the first single chunk.
-
- This needs FIXing.
- */
- return CURLE_SEND_ERROR;
- }
- }
- Curl_dyn_free(in);
-
- /* no remaining header data */
- data->req.pendingheader = 0;
- return result;
-}
-
-/* end of the add_buffer functions */
-/* ------------------------------------------------------------------------- */
-
-
-
-/*
* Curl_compareheader()
*
* Returns TRUE if 'headerline' contains the 'header' with given 'content'.
@@ -1543,17 +1214,11 @@ CURLcode Curl_http_done(struct Curl_easy *data,
  data->state.authhost.multipass = FALSE;
data->state.authproxy.multipass = FALSE;
- /* set the proper values (possibly modified on POST) */
- conn->seek_func = data->set.seek_func; /* restore */
- conn->seek_client = data->set.seek_client; /* restore */
-
if(!http)
return CURLE_OK;
- Curl_dyn_free(&http->send_buffer);
Curl_dyn_reset(&data->state.headerb);
Curl_hyper_done(data);
- Curl_ws_done(data);
if(status)
return status;
@@ -1613,83 +1278,12 @@ static const char *get_http_string(const struct Curl_easy *data,
}
#endif
-/* check and possibly add an Expect: header */
-static CURLcode expect100(struct Curl_easy *data,
- struct connectdata *conn,
- struct dynbuf *req)
-{
- CURLcode result = CURLE_OK;
- if(!data->state.disableexpect && Curl_use_http_1_1plus(data, conn) &&
- (conn->httpversion < 20)) {
- /* if not doing HTTP 1.0 or version 2, or disabled explicitly, we add an
- Expect: 100-continue to the headers which actually speeds up post
- operations (as there is one packet coming back from the web server) */
- const char *ptr = Curl_checkheaders(data, STRCONST("Expect"));
- if(ptr) {
- data->state.expect100header =
- Curl_compareheader(ptr, STRCONST("Expect:"), STRCONST("100-continue"));
- }
- else {
- result = Curl_dyn_addn(req, STRCONST("Expect: 100-continue\r\n"));
- if(!result)
- data->state.expect100header = TRUE;
- }
- }
-
- return result;
-}
-
enum proxy_use {
HEADER_SERVER, /* direct to server */
HEADER_PROXY, /* regular request to proxy */
HEADER_CONNECT /* sending CONNECT to a proxy */
};
-/* used to compile the provided trailers into one buffer
- will return an error code if one of the headers is
- not formatted correctly */
-CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
- struct dynbuf *b,
- struct Curl_easy *handle)
-{
- char *ptr = NULL;
- CURLcode result = CURLE_OK;
- const char *endofline_native = NULL;
- const char *endofline_network = NULL;
-
- if(
-#ifdef CURL_DO_LINEEND_CONV
- (handle->state.prefer_ascii) ||
-#endif
- (handle->set.crlf)) {
- /* \n will become \r\n later on */
- endofline_native = "\n";
- endofline_network = "\x0a";
- }
- else {
- endofline_native = "\r\n";
- endofline_network = "\x0d\x0a";
- }
-
- while(trailers) {
- /* only add correctly formatted trailers */
- ptr = strchr(trailers->data, ':');
- if(ptr && *(ptr + 1) == ' ') {
- result = Curl_dyn_add(b, trailers->data);
- if(result)
- return result;
- result = Curl_dyn_add(b, endofline_native);
- if(result)
- return result;
- }
- else
- infof(handle, "Malformatted trailing header, skipping trailer");
- trailers = trailers->next;
- }
- result = Curl_dyn_add(b, endofline_network);
- return result;
-}
-
static bool hd_name_eq(const char *n1, size_t n1len,
const char *n2, size_t n2len)
{
@@ -1808,7 +1402,7 @@ CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
          /* this header is sent later */
hd_name_eq(name, namelen, STRCONST("Content-Type:")))
;
- else if(conn->bits.authneg &&
+ else if(data->req.authneg &&
/* while doing auth neg, don't allow the custom length since
we will force length zero then */
hd_name_eq(name, namelen, STRCONST("Content-Length:")))
@@ -1954,7 +1548,7 @@ CURLcode Curl_add_custom_headers(struct Curl_easy *data,
          /* this header is sent later */
checkprefix("Content-Type:", compare))
;
- else if(conn->bits.authneg &&
+ else if(data->req.authneg &&
/* while doing auth neg, don't allow the custom length since
we will force length zero then */
checkprefix("Content-Length:", compare))
@@ -2335,18 +1929,17 @@ CURLcode Curl_http_target(struct Curl_easy *data,
  return result;
}
-CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
- Curl_HttpReq httpreq, const char **tep)
+#if !defined(CURL_DISABLE_MIME) || !defined(CURL_DISABLE_FORM_API)
+static CURLcode set_post_reader(struct Curl_easy *data, Curl_HttpReq httpreq)
{
- CURLcode result = CURLE_OK;
- const char *ptr;
- struct HTTP *http = data->req.p.http;
- http->postsize = 0;
+ CURLcode result;
switch(httpreq) {
+#ifndef CURL_DISABLE_MIME
case HTTPREQ_POST_MIME:
data->state.mimepost = &data->set.mimepost;
break;
+#endif
#ifndef CURL_DISABLE_FORM_API
case HTTPREQ_POST_FORM:
/* Convert the form structure into a mime structure, then keep
@@ -2368,35 +1961,154 @@ CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
#endif
default:
data->state.mimepost = NULL;
+ break;
}
+ switch(httpreq) {
+ case HTTPREQ_POST_FORM:
+ case HTTPREQ_POST_MIME:
+ /* This is form posting using mime data. */
#ifndef CURL_DISABLE_MIME
- if(data->state.mimepost) {
- const char *cthdr = Curl_checkheaders(data, STRCONST("Content-Type"));
+ if(data->state.mimepost) {
+ const char *cthdr = Curl_checkheaders(data, STRCONST("Content-Type"));
- /* Read and seek body only. */
- data->state.mimepost->flags |= MIME_BODY_ONLY;
+ /* Read and seek body only. */
+ data->state.mimepost->flags |= MIME_BODY_ONLY;
- /* Prepare the mime structure headers & set content type. */
+ /* Prepare the mime structure headers & set content type. */
- if(cthdr)
- for(cthdr += 13; *cthdr == ' '; cthdr++)
- ;
- else if(data->state.mimepost->kind == MIMEKIND_MULTIPART)
- cthdr = "multipart/form-data";
+ if(cthdr)
+ for(cthdr += 13; *cthdr == ' '; cthdr++)
+ ;
+ else if(data->state.mimepost->kind == MIMEKIND_MULTIPART)
+ cthdr = "multipart/form-data";
- curl_mime_headers(data->state.mimepost, data->set.headers, 0);
- result = Curl_mime_prepare_headers(data, data->state.mimepost, cthdr,
- NULL, MIMESTRATEGY_FORM);
- curl_mime_headers(data->state.mimepost, NULL, 0);
- if(!result)
- result = Curl_mime_rewind(data->state.mimepost);
- if(result)
- return result;
- http->postsize = Curl_mime_size(data->state.mimepost);
+ curl_mime_headers(data->state.mimepost, data->set.headers, 0);
+ result = Curl_mime_prepare_headers(data, data->state.mimepost, cthdr,
+ NULL, MIMESTRATEGY_FORM);
+ if(result)
+ return result;
+ curl_mime_headers(data->state.mimepost, NULL, 0);
+ result = Curl_creader_set_mime(data, data->state.mimepost);
+ if(result)
+ return result;
+ }
+ else
+#endif
+ {
+ result = Curl_creader_set_null(data);
+ }
+ data->state.infilesize = Curl_creader_total_length(data);
+ return result;
+
+ default:
+ return Curl_creader_set_null(data);
}
+ /* never reached */
+}
#endif
+static CURLcode set_reader(struct Curl_easy *data, Curl_HttpReq httpreq)
+{
+ CURLcode result = CURLE_OK;
+ curl_off_t postsize = data->state.infilesize;
+
+ DEBUGASSERT(data->conn);
+
+ if(data->req.authneg) {
+ return Curl_creader_set_null(data);
+ }
+
+ switch(httpreq) {
+ case HTTPREQ_PUT: /* Let's PUT the data to the server! */
+ if(!postsize)
+ result = Curl_creader_set_null(data);
+ else
+ result = Curl_creader_set_fread(data, postsize);
+ return result;
+
+#if !defined(CURL_DISABLE_MIME) || !defined(CURL_DISABLE_FORM_API)
+ case HTTPREQ_POST_FORM:
+ case HTTPREQ_POST_MIME:
+ return set_post_reader(data, httpreq);
+#endif
+
+ case HTTPREQ_POST:
+ /* this is the simple POST, using x-www-form-urlencoded style */
+ /* the size of the post body */
+ if(!postsize) {
+ result = Curl_creader_set_null(data);
+ }
+ else if(data->set.postfields) {
+ if(postsize > 0)
+ result = Curl_creader_set_buf(data, data->set.postfields,
+ (size_t)postsize);
+ else
+ result = Curl_creader_set_null(data);
+ }
+ else { /* we read the bytes from the callback */
+ result = Curl_creader_set_fread(data, postsize);
+ }
+ return result;
+
+ default:
+ /* HTTP GET/HEAD download, has no body, needs no Content-Length */
+ data->state.infilesize = 0;
+ return Curl_creader_set_null(data);
+ }
+ /* not reached */
+}
+
+static CURLcode http_resume(struct Curl_easy *data, Curl_HttpReq httpreq)
+{
+ if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) &&
+ data->state.resume_from) {
+ /**********************************************************************
+ * Resuming upload in HTTP means that we PUT or POST and that we have
+ * got a resume_from value set. The resume value has already created
+ * a Range: header that will be passed along. We need to "fast forward"
+ * the file the given number of bytes and decrease the assume upload
+ * file size before we continue this venture in the dark lands of HTTP.
+ * Resuming mime/form posting at an offset > 0 has no sense and is ignored.
+ *********************************************************************/
+
+ if(data->state.resume_from < 0) {
+ /*
+ * This is meant to get the size of the present remote-file by itself.
+ * We don't support this now. Bail out!
+ */
+ data->state.resume_from = 0;
+ }
+
+ if(data->state.resume_from && !data->req.authneg) {
+ /* only act on the first request */
+ CURLcode result;
+ result = Curl_creader_resume_from(data, data->state.resume_from);
+ if(result) {
+ failf(data, "Unable to resume from offset %" CURL_FORMAT_CURL_OFF_T,
+ data->state.resume_from);
+ return result;
+ }
+ }
+ }
+ return CURLE_OK;
+}
+
+CURLcode Curl_http_req_set_reader(struct Curl_easy *data,
+ Curl_HttpReq httpreq,
+ const char **tep)
+{
+ CURLcode result = CURLE_OK;
+ const char *ptr;
+
+ result = set_reader(data, httpreq);
+ if(result)
+ return result;
+
+ result = http_resume(data, httpreq);
+ if(result)
+ return result;
+
ptr = Curl_checkheaders(data, STRCONST("Transfer-Encoding"));
if(ptr) {
/* Some kind of TE is requested, check if 'chunked' is chosen */
@@ -2405,18 +2117,14 @@ CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
                        STRCONST("Transfer-Encoding:"), STRCONST("chunked"));
}
else {
- if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
- (((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
- http->postsize < 0) ||
- ((data->state.upload || httpreq == HTTPREQ_POST) &&
- data->state.infilesize == -1))) {
- if(conn->bits.authneg)
- /* don't enable chunked during auth neg */
- ;
- else if(Curl_use_http_1_1plus(data, conn)) {
- if(conn->httpversion < 20)
- /* HTTP, upload, unknown file size and not HTTP 1.0 */
- data->req.upload_chunky = TRUE;
+ curl_off_t req_clen = Curl_creader_total_length(data);
+
+ if(req_clen < 0) {
+ /* indeterminate request content length */
+ if(Curl_use_http_1_1plus(data, data->conn)) {
+ /* On HTTP/1.1, enable chunked, on HTTP/2 and later we do not
+ * need it */
+ data->req.upload_chunky = (data->conn->httpversion < 20);
}
else {
failf(data, "Chunky upload is not supported by HTTP 1.0");
@@ -2434,330 +2142,126 @@ CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
  return result;
}
-static CURLcode addexpect(struct Curl_easy *data, struct connectdata *conn,
- struct dynbuf *r)
+static CURLcode addexpect(struct Curl_easy *data, struct dynbuf *r,
+ bool *announced_exp100)
{
- data->state.expect100header = FALSE;
+ CURLcode result;
+ char *ptr;
+
+ *announced_exp100 = FALSE;
/* Avoid Expect: 100-continue if Upgrade: is used */
- if(data->req.upgr101 == UPGR101_INIT) {
- struct HTTP *http = data->req.p.http;
- /* For really small puts we don't use Expect: headers at all, and for
- the somewhat bigger ones we allow the app to disable it. Just make
- sure that the expect100header is always set to the preferred value
- here. */
- char *ptr = Curl_checkheaders(data, STRCONST("Expect"));
- if(ptr) {
- data->state.expect100header =
- Curl_compareheader(ptr, STRCONST("Expect:"),
- STRCONST("100-continue"));
+ if(data->req.upgr101 != UPGR101_INIT)
+ return CURLE_OK;
+
+ /* For really small puts we don't use Expect: headers at all, and for
+ the somewhat bigger ones we allow the app to disable it. Just make
+ sure that the expect100header is always set to the preferred value
+ here. */
+ ptr = Curl_checkheaders(data, STRCONST("Expect"));
+ if(ptr) {
+ *announced_exp100 =
+ Curl_compareheader(ptr, STRCONST("Expect:"), STRCONST("100-continue"));
+ }
+ else if(!data->state.disableexpect &&
+ Curl_use_http_1_1plus(data, data->conn) &&
+ (data->conn->httpversion < 20)) {
+ /* if not doing HTTP 1.0 or version 2, or disabled explicitly, we add an
+ Expect: 100-continue to the headers which actually speeds up post
+ operations (as there is one packet coming back from the web server) */
+ curl_off_t client_len = Curl_creader_client_length(data);
+ if(client_len > EXPECT_100_THRESHOLD || client_len < 0) {
+ result = Curl_dyn_addn(r, STRCONST("Expect: 100-continue\r\n"));
+ if(result)
+ return result;
+ *announced_exp100 = TRUE;
}
- else if(http->postsize > EXPECT_100_THRESHOLD || http->postsize < 0)
- return expect100(data, conn, r);
}
return CURLE_OK;
}
-CURLcode Curl_http_bodysend(struct Curl_easy *data, struct connectdata *conn,
- struct dynbuf *r, Curl_HttpReq httpreq)
+CURLcode Curl_http_req_complete(struct Curl_easy *data,
+ struct dynbuf *r, Curl_HttpReq httpreq)
{
-#ifndef USE_HYPER
- /* Hyper always handles the body separately */
- curl_off_t included_body = 0;
-#else
- /* from this point down, this function should not be used */
-#define Curl_buffer_send(a,b,c,d,e,f) CURLE_OK
-#endif
CURLcode result = CURLE_OK;
- struct HTTP *http = data->req.p.http;
-
- switch(httpreq) {
- case HTTPREQ_PUT: /* Let's PUT the data to the server! */
+ curl_off_t req_clen;
+ bool announced_exp100 = FALSE;
- if(conn->bits.authneg)
- http->postsize = 0;
- else
- http->postsize = data->state.infilesize;
-
- if((http->postsize != -1) && !data->req.upload_chunky &&
- (conn->bits.authneg ||
- !Curl_checkheaders(data, STRCONST("Content-Length")))) {
- /* only add Content-Length if not uploading chunked */
- result = Curl_dyn_addf(r, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", http->postsize);
- if(result)
- return result;
- }
-
- result = addexpect(data, conn, r);
- if(result)
- return result;
-
- /* end of headers */
- result = Curl_dyn_addn(r, STRCONST("\r\n"));
- if(result)
- return result;
-
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, http->postsize);
-
- /* this sends the buffer and frees all the buffer resources */
- result = Curl_buffer_send(r, data, data->req.p.http,
- &data->info.request_size, 0,
- FIRSTSOCKET);
- if(result)
- failf(data, "Failed sending PUT request");
- else
- /* prepare for transfer */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- http->postsize?FIRSTSOCKET:-1);
+ DEBUGASSERT(data->conn);
+#ifndef USE_HYPER
+ if(data->req.upload_chunky) {
+ result = Curl_httpchunk_add_reader(data);
if(result)
return result;
- break;
+ }
+#endif
+ /* Get the request body length that has been set up */
+ req_clen = Curl_creader_total_length(data);
+ switch(httpreq) {
+ case HTTPREQ_PUT:
+ case HTTPREQ_POST:
+#if !defined(CURL_DISABLE_MIME) || !defined(CURL_DISABLE_FORM_API)
case HTTPREQ_POST_FORM:
case HTTPREQ_POST_MIME:
- /* This is form posting using mime data. */
- if(conn->bits.authneg) {
- /* nothing to post! */
- result = Curl_dyn_addn(r, STRCONST("Content-Length: 0\r\n\r\n"));
- if(result)
- return result;
-
- result = Curl_buffer_send(r, data, data->req.p.http,
- &data->info.request_size, 0,
- FIRSTSOCKET);
- if(result)
- failf(data, "Failed sending POST request");
- else
- /* setup variables for the upcoming transfer */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
- break;
- }
-
- data->state.infilesize = http->postsize;
-
+#endif
/* We only set Content-Length and allow a custom Content-Length if
we don't upload data chunked, as RFC2616 forbids us to set both
- kinds of headers (Transfer-Encoding: chunked and Content-Length) */
- if(http->postsize != -1 && !data->req.upload_chunky &&
- (!Curl_checkheaders(data, STRCONST("Content-Length")))) {
+ kinds of headers (Transfer-Encoding: chunked and Content-Length).
+ We do not override a custom "Content-Length" header, but during
+ authentication negotiation that header is suppressed.
+ */
+ if(req_clen >= 0 && !data->req.upload_chunky &&
+ (data->req.authneg ||
+ !Curl_checkheaders(data, STRCONST("Content-Length")))) {
/* we allow replacing this header if not during auth negotiation,
although it isn't very wise to actually set your own */
result = Curl_dyn_addf(r,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", http->postsize);
- if(result)
- return result;
+ "\r\n", req_clen);
}
+ if(result)
+ goto out;
#ifndef CURL_DISABLE_MIME
/* Output mime-generated headers. */
- {
+ if(data->state.mimepost &&
+ ((httpreq == HTTPREQ_POST_FORM) || (httpreq == HTTPREQ_POST_MIME))) {
struct curl_slist *hdr;
for(hdr = data->state.mimepost->curlheaders; hdr; hdr = hdr->next) {
result = Curl_dyn_addf(r, "%s\r\n", hdr->data);
if(result)
- return result;
- }
- }
-#endif
-
- result = addexpect(data, conn, r);
- if(result)
- return result;
-
- /* make the request end in a true CRLF */
- result = Curl_dyn_addn(r, STRCONST("\r\n"));
- if(result)
- return result;
-
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, http->postsize);
-
- /* Read from mime structure. */
- data->state.fread_func = (curl_read_callback) Curl_mime_read;
- data->state.in = (void *) data->state.mimepost;
- http->sending = HTTPSEND_BODY;
-
- /* this sends the buffer and frees all the buffer resources */
- result = Curl_buffer_send(r, data, data->req.p.http,
- &data->info.request_size, 0,
- FIRSTSOCKET);
- if(result)
- failf(data, "Failed sending POST request");
- else
- /* prepare for transfer */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- http->postsize?FIRSTSOCKET:-1);
- if(result)
- return result;
-
- break;
-
- case HTTPREQ_POST:
- /* this is the simple POST, using x-www-form-urlencoded style */
-
- if(conn->bits.authneg)
- http->postsize = 0;
- else
- /* the size of the post body */
- http->postsize = data->state.infilesize;
-
- /* We only set Content-Length and allow a custom Content-Length if
- we don't upload data chunked, as RFC2616 forbids us to set both
- kinds of headers (Transfer-Encoding: chunked and Content-Length) */
- if((http->postsize != -1) && !data->req.upload_chunky &&
- (conn->bits.authneg ||
- !Curl_checkheaders(data, STRCONST("Content-Length")))) {
- /* we allow replacing this header if not during auth negotiation,
- although it isn't very wise to actually set your own */
- result = Curl_dyn_addf(r, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", http->postsize);
- if(result)
- return result;
- }
-
- if(!Curl_checkheaders(data, STRCONST("Content-Type"))) {
- result = Curl_dyn_addn(r, STRCONST("Content-Type: application/"
- "x-www-form-urlencoded\r\n"));
- if(result)
- return result;
- }
-
- result = addexpect(data, conn, r);
- if(result)
- return result;
-
-#ifndef USE_HYPER
- /* With Hyper the body is always passed on separately */
- if(data->set.postfields) {
- if(!data->state.expect100header &&
- (http->postsize < MAX_INITIAL_POST_SIZE)) {
- /* if we don't use expect: 100 AND
- postsize is less than MAX_INITIAL_POST_SIZE
-
- then append the post data to the HTTP request header. This limit
- is no magic limit but only set to prevent really huge POSTs to
- get the data duplicated with malloc() and family. */
-
- /* end of headers! */
- result = Curl_dyn_addn(r, STRCONST("\r\n"));
- if(result)
- return result;
-
- if(!data->req.upload_chunky) {
- /* We're not sending it 'chunked', append it to the request
- already now to reduce the number of send() calls */
- result = Curl_dyn_addn(r, data->set.postfields,
- (size_t)http->postsize);
- included_body = http->postsize;
- }
- else {
- if(http->postsize) {
- char chunk[16];
- /* Append the POST data chunky-style */
- msnprintf(chunk, sizeof(chunk), "%x\r\n", (int)http->postsize);
- result = Curl_dyn_add(r, chunk);
- if(!result) {
- included_body = http->postsize + strlen(chunk);
- result = Curl_dyn_addn(r, data->set.postfields,
- (size_t)http->postsize);
- if(!result)
- result = Curl_dyn_addn(r, STRCONST("\r\n"));
- included_body += 2;
- }
- }
- if(!result) {
- result = Curl_dyn_addn(r, STRCONST("\x30\x0d\x0a\x0d\x0a"));
- /* 0 CR LF CR LF */
- included_body += 5;
- }
- }
- if(result)
- return result;
- /* Make sure the progress information is accurate */
- Curl_pgrsSetUploadSize(data, http->postsize);
- }
- else {
- /* A huge POST coming up, do data separate from the request */
- http->postdata = data->set.postfields;
- http->sending = HTTPSEND_BODY;
- http->backup.data = data;
- data->state.fread_func = (curl_read_callback)readmoredata;
- data->state.in = (void *)http;
-
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, http->postsize);
-
- /* end of headers! */
- result = Curl_dyn_addn(r, STRCONST("\r\n"));
- if(result)
- return result;
+ goto out;
}
}
- else
#endif
- {
- /* end of headers! */
- result = Curl_dyn_addn(r, STRCONST("\r\n"));
- if(result)
- return result;
-
- if(data->req.upload_chunky && conn->bits.authneg) {
- /* Chunky upload is selected and we're negotiating auth still, send
- end-of-data only */
- result = Curl_dyn_addn(r, (char *)STRCONST("\x30\x0d\x0a\x0d\x0a"));
- /* 0 CR LF CR LF */
+ if(httpreq == HTTPREQ_POST) {
+ if(!Curl_checkheaders(data, STRCONST("Content-Type"))) {
+ result = Curl_dyn_addn(r, STRCONST("Content-Type: application/"
+ "x-www-form-urlencoded\r\n"));
if(result)
- return result;
- }
-
- else if(data->state.infilesize) {
- /* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, http->postsize?http->postsize:-1);
-
- /* set the pointer to mark that we will send the post body using the
- read callback, but only if we're not in authenticate negotiation */
- if(!conn->bits.authneg)
- http->postdata = (char *)&http->postdata;
+ goto out;
}
}
- /* issue the request */
- result = Curl_buffer_send(r, data, data->req.p.http,
- &data->info.request_size, included_body,
- FIRSTSOCKET);
-
+ result = addexpect(data, r, &announced_exp100);
if(result)
- failf(data, "Failed sending HTTP POST request");
- else
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- http->postdata?FIRSTSOCKET:-1);
+ goto out;
break;
-
default:
- result = Curl_dyn_addn(r, STRCONST("\r\n"));
- if(result)
- return result;
-
- /* issue the request */
- result = Curl_buffer_send(r, data, data->req.p.http,
- &data->info.request_size, 0,
- FIRSTSOCKET);
- if(result)
- failf(data, "Failed sending HTTP request");
-#ifdef USE_WEBSOCKETS
- else if((conn->handler->protocol & (CURLPROTO_WS|CURLPROTO_WSS)) &&
- !(data->set.connect_only))
- /* Set up the transfer for two-way since without CONNECT_ONLY set, this
- request probably wants to send data too post upgrade */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
-#endif
- else
- /* HTTP GET/HEAD download: */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
+ break;
}
+ /* end of headers */
+ result = Curl_dyn_addn(r, STRCONST("\r\n"));
+ Curl_pgrsSetUploadSize(data, req_clen);
+ if(announced_exp100)
+ result = http_exp100_add_reader(data);
+
+out:
+ if(!result) {
+ /* setup variables for the upcoming transfer */
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
+ }
return result;
}
@@ -2857,7 +2361,7 @@ CURLcode Curl_http_range(struct Curl_easy *data,
  }
else if((httpreq == HTTPREQ_POST || httpreq == HTTPREQ_PUT) &&
!Curl_checkheaders(data, STRCONST("Content-Range"))) {
-
+ curl_off_t req_clen = Curl_creader_total_length(data);
/* if a line like this was already allocated, free the previous one */
free(data->state.aptr.rangeline);
@@ -2868,25 +2372,28 @@ CURLcode Curl_http_range(struct Curl_easy *data,
      data->state.aptr.rangeline =
aprintf("Content-Range: bytes 0-%" CURL_FORMAT_CURL_OFF_T
"/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.infilesize - 1, data->state.infilesize);
+ req_clen - 1, req_clen);
}
else if(data->state.resume_from) {
/* This is because "resume" was selected */
- curl_off_t total_expected_size =
- data->state.resume_from + data->state.infilesize;
+ /* TODO: not sure if we want to send this header during authentication
+ * negotiation, but test1084 checks for it. In which case we have a
+ * "null" client reader installed that gives an unexpected length. */
+ curl_off_t total_len = data->req.authneg?
+ data->state.infilesize :
+ (data->state.resume_from + req_clen);
data->state.aptr.rangeline =
aprintf("Content-Range: bytes %s%" CURL_FORMAT_CURL_OFF_T
"/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.range, total_expected_size-1,
- total_expected_size);
+ data->state.range, total_len-1, total_len);
}
else {
/* Range was selected and then we just pass the incoming range and
append total size */
data->state.aptr.rangeline =
aprintf("Content-Range: bytes %s/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.range, data->state.infilesize);
+ data->state.range, req_clen);
}
if(!data->state.aptr.rangeline)
return CURLE_OUT_OF_MEMORY;
@@ -2895,100 +2402,17 @@ CURLcode Curl_http_range(struct Curl_easy *data,
  return CURLE_OK;
}
-CURLcode Curl_http_resume(struct Curl_easy *data,
- struct connectdata *conn,
- Curl_HttpReq httpreq)
-{
- if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) &&
- data->state.resume_from) {
- /**********************************************************************
- * Resuming upload in HTTP means that we PUT or POST and that we have
- * got a resume_from value set. The resume value has already created
- * a Range: header that will be passed along. We need to "fast forward"
- * the file the given number of bytes and decrease the assume upload
- * file size before we continue this venture in the dark lands of HTTP.
- * Resuming mime/form posting at an offset > 0 has no sense and is ignored.
- *********************************************************************/
-
- if(data->state.resume_from < 0) {
- /*
- * This is meant to get the size of the present remote-file by itself.
- * We don't support this now. Bail out!
- */
- data->state.resume_from = 0;
- }
-
- if(data->state.resume_from && !data->state.followlocation) {
- /* only act on the first request */
-
- /* Now, let's read off the proper amount of bytes from the
- input. */
- int seekerr = CURL_SEEKFUNC_CANTSEEK;
- if(conn->seek_func) {
- Curl_set_in_callback(data, true);
- seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
- SEEK_SET);
- Curl_set_in_callback(data, false);
- }
-
- if(seekerr != CURL_SEEKFUNC_OK) {
- curl_off_t passed = 0;
-
- if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
- failf(data, "Could not seek stream");
- return CURLE_READ_ERROR;
- }
- /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
- do {
- char scratch[4*1024];
- size_t readthisamountnow =
- (data->state.resume_from - passed > (curl_off_t)sizeof(scratch)) ?
- sizeof(scratch) :
- curlx_sotouz(data->state.resume_from - passed);
-
- size_t actuallyread =
- data->state.fread_func(scratch, 1, readthisamountnow,
- data->state.in);
-
- passed += actuallyread;
- if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
- /* this checks for greater-than only to make sure that the
- CURL_READFUNC_ABORT return code still aborts */
- failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
- " bytes from the input", passed);
- return CURLE_READ_ERROR;
- }
- } while(passed < data->state.resume_from);
- }
-
- /* now, decrease the size of the read */
- if(data->state.infilesize>0) {
- data->state.infilesize -= data->state.resume_from;
-
- if(data->state.infilesize <= 0) {
- failf(data, "File already completely uploaded");
- return CURLE_PARTIAL_FILE;
- }
- }
- /* we've passed, proceed as normal */
- }
- }
- return CURLE_OK;
-}
-
-CURLcode Curl_http_firstwrite(struct Curl_easy *data,
- struct connectdata *conn,
- bool *done)
+CURLcode Curl_http_firstwrite(struct Curl_easy *data)
{
+ struct connectdata *conn = data->conn;
struct SingleRequest *k = &data->req;
- *done = FALSE;
if(data->req.newurl) {
if(conn->bits.close) {
/* Abort after the headers if "follow Location" is set
and we're set to close anyway. */
k->keepon &= ~KEEP_RECV;
- *done = TRUE;
+ k->done = TRUE;
return CURLE_OK;
}
/* We have a new url to load, but since we want to be able to reuse this
@@ -3007,7 +2431,7 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data,
      streamclose(conn, "already downloaded");
/* Abort download */
k->keepon &= ~KEEP_RECV;
- *done = TRUE;
+ k->done = TRUE;
return CURLE_OK;
}
@@ -3025,7 +2449,7 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data,
       action for an HTTP/1.1 client */
if(!Curl_meets_timecondition(data, k->timeofdoc)) {
- *done = TRUE;
+ k->done = TRUE;
/* We're simulating an HTTP 304 from server so we return
what should have been returned from the server */
data->info.httpcode = 304;
@@ -3083,7 +2507,6 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
{
struct connectdata *conn = data->conn;
CURLcode result = CURLE_OK;
- struct HTTP *http;
Curl_HttpReq httpreq;
const char *te = ""; /* transfer-encoding */
const char *request;
@@ -3128,8 +2551,12 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
    break;
}
- http = data->req.p.http;
- DEBUGASSERT(http);
+ /* Add collecting of headers written to client. For a new connection,
+ * we might have done that already, but reuse
+ * or multiplex needs it here as well. */
+ result = Curl_headers_init(data);
+ if(result)
+ goto fail;
result = Curl_http_host(data, conn);
if(result)
@@ -3181,17 +2608,13 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
    goto fail;
#endif
- result = Curl_http_body(data, conn, httpreq, &te);
+ result = Curl_http_req_set_reader(data, httpreq, &te);
if(result)
goto fail;
p_accept = Curl_checkheaders(data,
STRCONST("Accept"))?NULL:"Accept: */*\r\n";
- result = Curl_http_resume(data, conn, httpreq);
- if(result)
- goto fail;
-
result = Curl_http_range(data, httpreq);
if(result)
goto fail;
@@ -3309,46 +2732,14 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
  result = Curl_add_custom_headers(data, FALSE, &req);
if(!result) {
- http->postdata = NULL; /* nothing to post at this point */
- if((httpreq == HTTPREQ_GET) ||
- (httpreq == HTTPREQ_HEAD))
- Curl_pgrsSetUploadSize(data, 0); /* nothing */
-
- /* bodysend takes ownership of the 'req' memory on success */
- result = Curl_http_bodysend(data, conn, &req, httpreq);
+ /* req_send takes ownership of the 'req' memory on success */
+ result = Curl_http_req_complete(data, &req, httpreq);
+ if(!result)
+ result = Curl_req_send(data, &req);
}
- if(result) {
- Curl_dyn_free(&req);
+ Curl_dyn_free(&req);
+ if(result)
goto fail;
- }
-
- if((http->postsize > -1) &&
- (http->postsize <= data->req.writebytecount) &&
- (http->sending != HTTPSEND_REQUEST))
- data->req.upload_done = TRUE;
-
- if(data->req.writebytecount) {
- /* if a request-body has been sent off, we make sure this progress is noted
- properly */
- Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
- if(Curl_pgrsUpdate(data))
- result = CURLE_ABORTED_BY_CALLBACK;
-
- if(!http->postsize) {
- /* already sent the entire request body, mark the "upload" as
- complete */
- infof(data, "upload completely sent off: %" CURL_FORMAT_CURL_OFF_T
- " out of %" CURL_FORMAT_CURL_OFF_T " bytes",
- data->req.writebytecount, http->postsize);
- data->req.upload_done = TRUE;
- data->req.keepon &= ~KEEP_SEND; /* we're done writing */
- data->req.exp100 = EXP100_SEND_DATA; /* already sent */
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- }
- }
-
- if(data->req.upload_done)
- Curl_conn_ev_data_done_send(data);
if((conn->httpversion >= 20) && data->req.upload_chunky)
/* upload_chunky was set above to set up the request in a chunky fashion,
@@ -3433,325 +2824,368 @@ checkprotoprefix(struct Curl_easy *data, struct connectdata *conn,
  return checkhttpprefix(data, s, len);
}
+/* HTTP header has field name `n` (a string constant) */
+#define HD_IS(hd, hdlen, n) \
+ (((hdlen) >= (sizeof(n)-1)) && curl_strnequal((n), (hd), (sizeof(n)-1)))
+
+#define HD_VAL(hd, hdlen, n) \
+ ((((hdlen) >= (sizeof(n)-1)) && \
+ curl_strnequal((n), (hd), (sizeof(n)-1)))? (hd + (sizeof(n)-1)) : NULL)
+
+/* HTTP header has field name `n` (a string constant) and contains `v`
+ * (a string constant) in its value(s) */
+#define HD_IS_AND_SAYS(hd, hdlen, n, v) \
+ (HD_IS(hd, hdlen, n) && \
+ ((hdlen) > ((sizeof(n)-1) + (sizeof(v)-1))) && \
+ Curl_compareheader(hd, STRCONST(n), STRCONST(v)))
+
/*
* Curl_http_header() parses a single response header.
*/
CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn,
- char *headp)
+ char *hd, size_t hdlen)
{
CURLcode result;
struct SingleRequest *k = &data->req;
- /* Check for Content-Length: header lines to get size */
- if(!k->http_bodyless &&
- !data->set.ignorecl && checkprefix("Content-Length:", headp)) {
- curl_off_t contentlength;
- CURLofft offt = curlx_strtoofft(headp + strlen("Content-Length:"),
- NULL, 10, &contentlength);
-
- if(offt == CURL_OFFT_OK) {
- k->size = contentlength;
- k->maxdownload = k->size;
+ const char *v;
+
+ switch(hd[0]) {
+ case 'a':
+ case 'A':
+#ifndef CURL_DISABLE_ALTSVC
+ v = (data->asi &&
+ ((conn->handler->flags & PROTOPT_SSL) ||
+#ifdef CURLDEBUG
+ /* allow debug builds to circumvent the HTTPS restriction */
+ getenv("CURL_ALTSVC_HTTP")
+#else
+ 0
+#endif
+ ))? HD_VAL(hd, hdlen, "Alt-Svc:") : NULL;
+ if(v) {
+ /* the ALPN of the current request */
+ enum alpnid id = (conn->httpversion == 30)? ALPN_h3 :
+ (conn->httpversion == 20) ? ALPN_h2 : ALPN_h1;
+ return Curl_altsvc_parse(data, data->asi, v, id, conn->host.name,
+ curlx_uitous((unsigned int)conn->remote_port));
}
- else if(offt == CURL_OFFT_FLOW) {
- /* out of range */
- if(data->set.max_filesize) {
- failf(data, "Maximum file size exceeded");
- return CURLE_FILESIZE_EXCEEDED;
+#endif
+ break;
+ case 'c':
+ case 'C':
+ /* Check for Content-Length: header lines to get size */
+ v = (!k->http_bodyless && !data->set.ignorecl)?
+ HD_VAL(hd, hdlen, "Content-Length:") : NULL;
+ if(v) {
+ curl_off_t contentlength;
+ CURLofft offt = curlx_strtoofft(v, NULL, 10, &contentlength);
+
+ if(offt == CURL_OFFT_OK) {
+ k->size = contentlength;
+ k->maxdownload = k->size;
+ }
+ else if(offt == CURL_OFFT_FLOW) {
+ /* out of range */
+ if(data->set.max_filesize) {
+ failf(data, "Maximum file size exceeded");
+ return CURLE_FILESIZE_EXCEEDED;
+ }
+ streamclose(conn, "overflow content-length");
+ infof(data, "Overflow Content-Length: value");
}
- streamclose(conn, "overflow content-length");
- infof(data, "Overflow Content-Length: value");
+ else {
+ /* negative or just rubbish - bad HTTP */
+ failf(data, "Invalid Content-Length: value");
+ return CURLE_WEIRD_SERVER_REPLY;
+ }
+ return CURLE_OK;
}
- else {
- /* negative or just rubbish - bad HTTP */
- failf(data, "Invalid Content-Length: value");
- return CURLE_WEIRD_SERVER_REPLY;
+ v = (!k->http_bodyless && data->set.str[STRING_ENCODING])?
+ HD_VAL(hd, hdlen, "Content-Encoding:") : NULL;
+ if(v) {
+ /*
+ * Process Content-Encoding. Look for the values: identity,
+ * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
+ * x-compress are the same as gzip and compress. (Sec 3.5 RFC
+ * 2616). zlib cannot handle compress. However, errors are
+ * handled further down when the response body is processed
+ */
+ return Curl_build_unencoding_stack(data, v, FALSE);
}
- }
- /* check for Content-Type: header lines to get the MIME-type */
- else if(checkprefix("Content-Type:", headp)) {
- char *contenttype = Curl_copy_header_value(headp);
- if(!contenttype)
- return CURLE_OUT_OF_MEMORY;
- if(!*contenttype)
- /* ignore empty data */
- free(contenttype);
- else {
- Curl_safefree(data->info.contenttype);
- data->info.contenttype = contenttype;
+ /* check for Content-Type: header lines to get the MIME-type */
+ v = HD_VAL(hd, hdlen, "Content-Type:");
+ if(v) {
+ char *contenttype = Curl_copy_header_value(hd);
+ if(!contenttype)
+ return CURLE_OUT_OF_MEMORY;
+ if(!*contenttype)
+ /* ignore empty data */
+ free(contenttype);
+ else {
+ Curl_safefree(data->info.contenttype);
+ data->info.contenttype = contenttype;
+ }
+ return CURLE_OK;
}
- }
-#ifndef CURL_DISABLE_PROXY
- else if((conn->httpversion == 10) &&
- conn->bits.httpproxy &&
- Curl_compareheader(headp,
- STRCONST("Proxy-Connection:"),
- STRCONST("keep-alive"))) {
- /*
- * When an HTTP/1.0 reply comes when using a proxy, the
- * 'Proxy-Connection: keep-alive' line tells us the
- * connection will be kept alive for our pleasure.
- * Default action for 1.0 is to close.
- */
- connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
- infof(data, "HTTP/1.0 proxy connection set to keep alive");
- }
- else if((conn->httpversion == 11) &&
- conn->bits.httpproxy &&
- Curl_compareheader(headp,
- STRCONST("Proxy-Connection:"),
- STRCONST("close"))) {
- /*
- * We get an HTTP/1.1 response from a proxy and it says it'll
- * close down after this transfer.
- */
- connclose(conn, "Proxy-Connection: asked to close after done");
- infof(data, "HTTP/1.1 proxy connection set close");
- }
-#endif
- else if((conn->httpversion == 10) &&
- Curl_compareheader(headp,
- STRCONST("Connection:"),
- STRCONST("keep-alive"))) {
- /*
- * An HTTP/1.0 reply with the 'Connection: keep-alive' line
- * tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
- *
- * [RFC2068, section 19.7.1] */
- connkeep(conn, "Connection keep-alive");
- infof(data, "HTTP/1.0 connection set to keep alive");
- }
- else if(Curl_compareheader(headp,
- STRCONST("Connection:"), STRCONST("close"))) {
- /*
- * [RFC 2616, section 8.1.2.1]
- * "Connection: close" is HTTP/1.1 language and means that
- * the connection will close when this request has been
- * served.
- */
- streamclose(conn, "Connection: close used");
- }
- else if(!k->http_bodyless && checkprefix("Transfer-Encoding:", headp)) {
- /* One or more encodings. We check for chunked and/or a compression
- algorithm. */
- /*
- * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
- * means that the server will send a series of "chunks". Each
- * chunk starts with line with info (including size of the
- * coming block) (terminated with CRLF), then a block of data
- * with the previously mentioned size. There can be any amount
- * of chunks, and a chunk-data set to zero signals the
- * end-of-chunks. */
-
- result = Curl_build_unencoding_stack(data,
- headp + strlen("Transfer-Encoding:"),
- TRUE);
- if(result)
- return result;
- if(!k->chunk && data->set.http_transfer_encoding) {
- /* if this isn't chunked, only close can signal the end of this transfer
- as Content-Length is said not to be trusted for transfer-encoding! */
- connclose(conn, "HTTP/1.1 transfer-encoding without chunks");
- k->ignore_cl = TRUE;
+ if(HD_IS_AND_SAYS(hd, hdlen, "Connection:", "close")) {
+ /*
+ * [RFC 2616, section 8.1.2.1]
+ * "Connection: close" is HTTP/1.1 language and means that
+ * the connection will close when this request has been
+ * served.
+ */
+ streamclose(conn, "Connection: close used");
+ return CURLE_OK;
}
- }
- else if(!k->http_bodyless && checkprefix("Content-Encoding:", headp) &&
- data->set.str[STRING_ENCODING]) {
- /*
- * Process Content-Encoding. Look for the values: identity,
- * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
- * x-compress are the same as gzip and compress. (Sec 3.5 RFC
- * 2616). zlib cannot handle compress. However, errors are
- * handled further down when the response body is processed
- */
- result = Curl_build_unencoding_stack(data,
- headp + strlen("Content-Encoding:"),
- FALSE);
- if(result)
- return result;
- }
- else if(checkprefix("Retry-After:", headp)) {
- /* Retry-After = HTTP-date / delay-seconds */
- curl_off_t retry_after = 0; /* zero for unknown or "now" */
- /* Try it as a decimal number, if it works it is not a date */
- (void)curlx_strtoofft(headp + strlen("Retry-After:"),
- NULL, 10, &retry_after);
- if(!retry_after) {
- time_t date = Curl_getdate_capped(headp + strlen("Retry-After:"));
- if(-1 != date)
- /* convert date to number of seconds into the future */
- retry_after = date - time(NULL);
+ if((conn->httpversion == 10) &&
+ HD_IS_AND_SAYS(hd, hdlen, "Connection:", "keep-alive")) {
+ /*
+ * An HTTP/1.0 reply with the 'Connection: keep-alive' line
+ * tells us the connection will be kept alive for our
+ * pleasure. Default action for 1.0 is to close.
+ *
+ * [RFC2068, section 19.7.1] */
+ connkeep(conn, "Connection keep-alive");
+ infof(data, "HTTP/1.0 connection set to keep alive");
+ return CURLE_OK;
}
- data->info.retry_after = retry_after; /* store it */
- }
- else if(!k->http_bodyless && checkprefix("Content-Range:", headp)) {
- /* Content-Range: bytes [num]-
- Content-Range: bytes: [num]-
- Content-Range: [num]-
- Content-Range: [asterisk]/[total]
-
- The second format was added since Sun's webserver
- JavaWebServer/1.1.1 obviously sends the header this way!
- The third added since some servers use that!
- The fourth means the requested range was unsatisfied.
- */
-
- char *ptr = headp + strlen("Content-Range:");
-
- /* Move forward until first digit or asterisk */
- while(*ptr && !ISDIGIT(*ptr) && *ptr != '*')
- ptr++;
-
- /* if it truly stopped on a digit */
- if(ISDIGIT(*ptr)) {
- if(!curlx_strtoofft(ptr, NULL, 10, &k->offset)) {
- if(data->state.resume_from == k->offset)
- /* we asked for a resume and we got it */
- k->content_range = TRUE;
+ v = !k->http_bodyless? HD_VAL(hd, hdlen, "Content-Range:") : NULL;
+ if(v) {
+ /* Content-Range: bytes [num]-
+ Content-Range: bytes: [num]-
+ Content-Range: [num]-
+ Content-Range: [asterisk]/[total]
+
+ The second format was added since Sun's webserver
+ JavaWebServer/1.1.1 obviously sends the header this way!
+ The third added since some servers use that!
+ The fourth means the requested range was unsatisfied.
+ */
+
+ const char *ptr = v;
+
+ /* Move forward until first digit or asterisk */
+ while(*ptr && !ISDIGIT(*ptr) && *ptr != '*')
+ ptr++;
+
+ /* if it truly stopped on a digit */
+ if(ISDIGIT(*ptr)) {
+ if(!curlx_strtoofft(ptr, NULL, 10, &k->offset)) {
+ if(data->state.resume_from == k->offset)
+ /* we asked for a resume and we got it */
+ k->content_range = TRUE;
+ }
}
+ else if(k->httpcode < 300)
+ data->state.resume_from = 0; /* get everything */
}
- else if(k->httpcode < 300)
- data->state.resume_from = 0; /* get everything */
- }
-#if !defined(CURL_DISABLE_COOKIES)
- else if(data->cookies && data->state.cookie_engine &&
- checkprefix("Set-Cookie:", headp)) {
- /* If there is a custom-set Host: name, use it here, or else use real peer
- host name. */
- const char *host = data->state.aptr.cookiehost?
- data->state.aptr.cookiehost:conn->host.name;
- const bool secure_context =
- conn->handler->protocol&(CURLPROTO_HTTPS|CURLPROTO_WSS) ||
- strcasecompare("localhost", host) ||
- !strcmp(host, "127.0.0.1") ||
- !strcmp(host, "::1") ? TRUE : FALSE;
-
- Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
- CURL_LOCK_ACCESS_SINGLE);
- Curl_cookie_add(data, data->cookies, TRUE, FALSE,
- headp + strlen("Set-Cookie:"), host,
- data->state.up.path, secure_context);
- Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
- }
-#endif
- else if(!k->http_bodyless && checkprefix("Last-Modified:", headp) &&
- (data->set.timecondition || data->set.get_filetime) ) {
- k->timeofdoc = Curl_getdate_capped(headp + strlen("Last-Modified:"));
- if(data->set.get_filetime)
- data->info.filetime = k->timeofdoc;
- }
- else if((checkprefix("WWW-Authenticate:", headp) &&
- (401 == k->httpcode)) ||
- (checkprefix("Proxy-authenticate:", headp) &&
- (407 == k->httpcode))) {
-
- bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
- char *auth = Curl_copy_header_value(headp);
- if(!auth)
- return CURLE_OUT_OF_MEMORY;
+ break;
+ case 'l':
+ case 'L':
+ v = (!k->http_bodyless &&
+ (data->set.timecondition || data->set.get_filetime))?
+ HD_VAL(hd, hdlen, "Last-Modified:") : NULL;
+ if(v) {
+ k->timeofdoc = Curl_getdate_capped(v);
+ if(data->set.get_filetime)
+ data->info.filetime = k->timeofdoc;
+ return CURLE_OK;
+ }
+ if((k->httpcode >= 300 && k->httpcode < 400) &&
+ HD_IS(hd, hdlen, "Location:") &&
+ !data->req.location) {
+ /* this is the URL that the server advises us to use instead */
+ char *location = Curl_copy_header_value(hd);
+ if(!location)
+ return CURLE_OUT_OF_MEMORY;
+ if(!*location)
+ /* ignore empty data */
+ free(location);
+ else {
+ data->req.location = location;
- result = Curl_http_input_auth(data, proxy, auth);
+ if(data->set.http_follow_location) {
+ DEBUGASSERT(!data->req.newurl);
+ data->req.newurl = strdup(data->req.location); /* clone */
+ if(!data->req.newurl)
+ return CURLE_OUT_OF_MEMORY;
- free(auth);
+ /* some cases of POST and PUT etc needs to rewind the data
+ stream at this point */
+ result = http_perhapsrewind(data, conn);
+ if(result)
+ return result;
- if(result)
+ /* mark the next request as a followed location: */
+ data->state.this_is_a_follow = TRUE;
+ }
+ }
+ }
+ break;
+ case 'p':
+ case 'P':
+#ifndef CURL_DISABLE_PROXY
+ v = HD_VAL(hd, hdlen, "Proxy-Connection:");
+ if(v) {
+ if((conn->httpversion == 10) && conn->bits.httpproxy &&
+ HD_IS_AND_SAYS(hd, hdlen, "Proxy-Connection:", "keep-alive")) {
+ /*
+ * When an HTTP/1.0 reply comes when using a proxy, the
+ * 'Proxy-Connection: keep-alive' line tells us the
+ * connection will be kept alive for our pleasure.
+ * Default action for 1.0 is to close.
+ */
+ connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
+ infof(data, "HTTP/1.0 proxy connection set to keep alive");
+ }
+ else if((conn->httpversion == 11) && conn->bits.httpproxy &&
+ HD_IS_AND_SAYS(hd, hdlen, "Proxy-Connection:", "close")) {
+ /*
+ * We get an HTTP/1.1 response from a proxy and it says it'll
+ * close down after this transfer.
+ */
+ connclose(conn, "Proxy-Connection: asked to close after done");
+ infof(data, "HTTP/1.1 proxy connection set close");
+ }
+ return CURLE_OK;
+ }
+#endif
+ if((407 == k->httpcode) && HD_IS(hd, hdlen, "Proxy-authenticate:")) {
+ char *auth = Curl_copy_header_value(hd);
+ if(!auth)
+ return CURLE_OUT_OF_MEMORY;
+ result = Curl_http_input_auth(data, TRUE, auth);
+ free(auth);
return result;
- }
+ }
#ifdef USE_SPNEGO
- else if(checkprefix("Persistent-Auth:", headp)) {
- struct negotiatedata *negdata = &conn->negotiate;
- struct auth *authp = &data->state.authhost;
- if(authp->picked == CURLAUTH_NEGOTIATE) {
- char *persistentauth = Curl_copy_header_value(headp);
- if(!persistentauth)
- return CURLE_OUT_OF_MEMORY;
- negdata->noauthpersist = checkprefix("false", persistentauth)?
- TRUE:FALSE;
- negdata->havenoauthpersist = TRUE;
- infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
- negdata->noauthpersist, persistentauth);
- free(persistentauth);
+ if(HD_IS(hd, hdlen, "Persistent-Auth:")) {
+ struct negotiatedata *negdata = &conn->negotiate;
+ struct auth *authp = &data->state.authhost;
+ if(authp->picked == CURLAUTH_NEGOTIATE) {
+ char *persistentauth = Curl_copy_header_value(hd);
+ if(!persistentauth)
+ return CURLE_OUT_OF_MEMORY;
+ negdata->noauthpersist = checkprefix("false", persistentauth)?
+ TRUE:FALSE;
+ negdata->havenoauthpersist = TRUE;
+ infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
+ negdata->noauthpersist, persistentauth);
+ free(persistentauth);
+ }
}
- }
#endif
- else if((k->httpcode >= 300 && k->httpcode < 400) &&
- checkprefix("Location:", headp) &&
- !data->req.location) {
- /* this is the URL that the server advises us to use instead */
- char *location = Curl_copy_header_value(headp);
- if(!location)
- return CURLE_OUT_OF_MEMORY;
- if(!*location)
- /* ignore empty data */
- free(location);
- else {
- data->req.location = location;
-
- if(data->set.http_follow_location) {
- DEBUGASSERT(!data->req.newurl);
- data->req.newurl = strdup(data->req.location); /* clone */
- if(!data->req.newurl)
- return CURLE_OUT_OF_MEMORY;
-
- /* some cases of POST and PUT etc needs to rewind the data
- stream at this point */
- result = http_perhapsrewind(data, conn);
- if(result)
- return result;
-
- /* mark the next request as a followed location: */
- data->state.this_is_a_follow = TRUE;
+ break;
+ case 'r':
+ case 'R':
+ v = HD_VAL(hd, hdlen, "Retry-After:");
+ if(v) {
+ /* Retry-After = HTTP-date / delay-seconds */
+ curl_off_t retry_after = 0; /* zero for unknown or "now" */
+ /* Try it as a decimal number, if it works it is not a date */
+ (void)curlx_strtoofft(v, NULL, 10, &retry_after);
+ if(!retry_after) {
+ time_t date = Curl_getdate_capped(v);
+ if(-1 != date)
+ /* convert date to number of seconds into the future */
+ retry_after = date - time(NULL);
}
+ data->info.retry_after = retry_after; /* store it */
+ return CURLE_OK;
}
- }
+ break;
+ case 's':
+ case 'S':
+#if !defined(CURL_DISABLE_COOKIES)
+ v = (data->cookies && data->state.cookie_engine)?
+ HD_VAL(hd, hdlen, "Set-Cookie:") : NULL;
+ if(v) {
+ /* If there is a custom-set Host: name, use it here, or else use
+ * real peer host name. */
+ const char *host = data->state.aptr.cookiehost?
+ data->state.aptr.cookiehost:conn->host.name;
+ const bool secure_context =
+ conn->handler->protocol&(CURLPROTO_HTTPS|CURLPROTO_WSS) ||
+ strcasecompare("localhost", host) ||
+ !strcmp(host, "127.0.0.1") ||
+ !strcmp(host, "::1") ? TRUE : FALSE;
+ Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
+ CURL_LOCK_ACCESS_SINGLE);
+ Curl_cookie_add(data, data->cookies, TRUE, FALSE, v, host,
+ data->state.up.path, secure_context);
+ Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
+ return CURLE_OK;
+ }
+#endif
#ifndef CURL_DISABLE_HSTS
- /* If enabled, the header is incoming and this is over HTTPS */
- else if(data->hsts && checkprefix("Strict-Transport-Security:", headp) &&
- ((conn->handler->flags & PROTOPT_SSL) ||
+ /* If enabled, the header is incoming and this is over HTTPS */
+ v = (data->hsts &&
+ ((conn->handler->flags & PROTOPT_SSL) ||
#ifdef CURLDEBUG
/* allow debug builds to circumvent the HTTPS restriction */
getenv("CURL_HSTS_HTTP")
#else
0
#endif
- )) {
- CURLcode check =
- Curl_hsts_parse(data->hsts, conn->host.name,
- headp + strlen("Strict-Transport-Security:"));
- if(check)
- infof(data, "Illegal STS header skipped");
+ )
+ )? HD_VAL(hd, hdlen, "Strict-Transport-Security:") : NULL;
+ if(v) {
+ CURLcode check =
+ Curl_hsts_parse(data->hsts, conn->host.name, v);
+ if(check)
+ infof(data, "Illegal STS header skipped");
#ifdef DEBUGBUILD
- else
- infof(data, "Parsed STS header fine (%zu entries)",
- data->hsts->list.size);
-#endif
- }
+ else
+ infof(data, "Parsed STS header fine (%zu entries)",
+ data->hsts->list.size);
#endif
-#ifndef CURL_DISABLE_ALTSVC
- /* If enabled, the header is incoming and this is over HTTPS */
- else if(data->asi && checkprefix("Alt-Svc:", headp) &&
- ((conn->handler->flags & PROTOPT_SSL) ||
-#ifdef CURLDEBUG
- /* allow debug builds to circumvent the HTTPS restriction */
- getenv("CURL_ALTSVC_HTTP")
-#else
- 0
+ }
#endif
- )) {
- /* the ALPN of the current request */
- enum alpnid id = (conn->httpversion == 30)? ALPN_h3 :
- (conn->httpversion == 20) ? ALPN_h2 : ALPN_h1;
- result = Curl_altsvc_parse(data, data->asi,
- headp + strlen("Alt-Svc:"),
- id, conn->host.name,
- curlx_uitous((unsigned int)conn->remote_port));
- if(result)
+ break;
+ case 't':
+ case 'T':
+ v = !k->http_bodyless? HD_VAL(hd, hdlen, "Transfer-Encoding:") : NULL;
+ if(v) {
+ /* One or more encodings. We check for chunked and/or a compression
+ algorithm. */
+ /*
+ * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
+ * means that the server will send a series of "chunks". Each
+ * chunk starts with line with info (including size of the
+ * coming block) (terminated with CRLF), then a block of data
+ * with the previously mentioned size. There can be any amount
+ * of chunks, and a chunk-data set to zero signals the
+ * end-of-chunks. */
+
+ result = Curl_build_unencoding_stack(data, v, TRUE);
+ if(result)
+ return result;
+ if(!k->chunk && data->set.http_transfer_encoding) {
+ /* if this isn't chunked, only close can signal the end of this
+ * transfer as Content-Length is said not to be trusted for
+ * transfer-encoding! */
+ connclose(conn, "HTTP/1.1 transfer-encoding without chunks");
+ k->ignore_cl = TRUE;
+ }
+ return CURLE_OK;
+ }
+ break;
+ case 'w':
+ case 'W':
+ if((401 == k->httpcode) && HD_IS(hd, hdlen, "WWW-Authenticate:")) {
+ char *auth = Curl_copy_header_value(hd);
+ if(!auth)
+ return CURLE_OUT_OF_MEMORY;
+ result = Curl_http_input_auth(data, FALSE, auth);
+ free(auth);
return result;
+ }
+ break;
}
-#endif
- else if(conn->handler->protocol & CURLPROTO_RTSP) {
- result = Curl_rtsp_parseheader(data, headp);
+
+ if(conn->handler->protocol & CURLPROTO_RTSP) {
+ result = Curl_rtsp_parseheader(data, hd);
if(result)
return result;
}
@@ -3762,18 +3196,38 @@ CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn,
 * Called after the first HTTP response line (the status line) has been
* received and parsed.
*/
-
CURLcode Curl_http_statusline(struct Curl_easy *data,
struct connectdata *conn)
{
struct SingleRequest *k = &data->req;
+
+ switch(k->httpversion) {
+ case 10:
+ case 11:
+#ifdef USE_HTTP2
+ case 20:
+#endif
+#ifdef ENABLE_QUIC
+ case 30:
+#endif
+ /* TODO: we should verify that responses do not switch major
+ * HTTP version of the connection. Now, it seems we might accept
+ * a HTTP/2 response on a HTTP/1.1 connection, which is wrong. */
+ conn->httpversion = (unsigned char)k->httpversion;
+ break;
+ default:
+ failf(data, "Unsupported HTTP version (%u.%d) in response",
+ k->httpversion/10, k->httpversion%10);
+ return CURLE_UNSUPPORTED_PROTOCOL;
+ }
+
data->info.httpcode = k->httpcode;
+ data->info.httpversion = k->httpversion;
+ conn->httpversion = (unsigned char)k->httpversion;
- data->info.httpversion = conn->httpversion;
- if(!data->state.httpversion ||
- data->state.httpversion > conn->httpversion)
+ if(!data->state.httpversion || data->state.httpversion > k->httpversion)
/* store the lowest server version we encounter */
- data->state.httpversion = conn->httpversion;
+ data->state.httpversion = (unsigned char)k->httpversion;
/*
* This code executes as part of processing the header. As a
@@ -3790,25 +3244,23 @@ CURLcode Curl_http_statusline(struct Curl_easy *data,
    k->ignorebody = TRUE; /* Avoid appending error msg to good data. */
}
- if(conn->httpversion == 10) {
+ if(k->httpversion == 10) {
/* Default action for HTTP/1.0 must be to close, unless
we get one of those fancy headers that tell us the
server keeps it open for us! */
infof(data, "HTTP 1.0, assume close after body");
connclose(conn, "HTTP/1.0 close after body");
}
- else if(conn->httpversion == 20 ||
+ else if(k->httpversion == 20 ||
(k->upgr101 == UPGR101_H2 && k->httpcode == 101)) {
DEBUGF(infof(data, "HTTP/2 found, allow multiplexing"));
/* HTTP/2 cannot avoid multiplexing since it is a core functionality
of the protocol */
conn->bundle->multiuse = BUNDLE_MULTIPLEX;
}
- else if(conn->httpversion >= 11 &&
- !conn->bits.close) {
+ else if(k->httpversion >= 11 && !conn->bits.close) {
/* If HTTP version is >= 1.1 and connection is persistent */
- DEBUGF(infof(data,
- "HTTP 1.1 or later with persistent connection"));
+ DEBUGF(infof(data, "HTTP 1.1 or later with persistent connection"));
}
k->http_bodyless = k->httpcode >= 100 && k->httpcode < 200;
@@ -3916,6 +3368,285 @@ CURLcode Curl_bump_headersize(struct Curl_easy *data,
}
+static CURLcode http_on_response(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ size_t *pconsumed)
+{
+ struct connectdata *conn = data->conn;
+ CURLcode result = CURLE_OK;
+ struct SingleRequest *k = &data->req;
+ bool switch_to_h2 = FALSE;
+
+ (void)buf; /* not used without HTTP2 enabled */
+ *pconsumed = 0;
+
+ if(k->upgr101 == UPGR101_RECEIVED) {
+ /* supposedly upgraded to http2 now */
+ if(conn->httpversion != 20)
+ infof(data, "Lying server, not serving HTTP/2");
+ }
+ if(conn->httpversion < 20) {
+ conn->bundle->multiuse = BUNDLE_NO_MULTIUSE;
+ }
+
+ if(k->httpcode < 100) {
+ failf(data, "Unsupported response code in HTTP response");
+ return CURLE_UNSUPPORTED_PROTOCOL;
+ }
+ else if(k->httpcode < 200) {
+ /* "A user agent MAY ignore unexpected 1xx status responses." */
+ switch(k->httpcode) {
+ case 100:
+ /*
+ * We have made an HTTP PUT or POST and this is 1.1-lingo
+ * that tells us that the server is OK with this and ready
+ * to receive the data.
+ * However, we'll get more headers now so we must get
+ * back into the header-parsing state!
+ */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+
+ /* if we did wait for this do enable write now! */
+ Curl_http_exp100_got100(data);
+ break;
+ case 101:
+ if(conn->httpversion == 11) {
+ /* Switching Protocols only allowed from HTTP/1.1 */
+ if(k->upgr101 == UPGR101_H2) {
+ /* Switching to HTTP/2 */
+ infof(data, "Received 101, Switching to HTTP/2");
+ k->upgr101 = UPGR101_RECEIVED;
+
+ /* we'll get more headers (HTTP/2 response) */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+ switch_to_h2 = TRUE;
+ }
+#ifdef USE_WEBSOCKETS
+ else if(k->upgr101 == UPGR101_WS) {
+ /* verify the response */
+ result = Curl_ws_accept(data, buf, blen);
+ if(result)
+ return result;
+ k->header = FALSE; /* no more header to parse! */
+ *pconsumed += blen; /* ws accept handled the data */
+ blen = 0;
+ if(data->set.connect_only)
+ k->keepon &= ~KEEP_RECV; /* read no more content */
+ }
+#endif
+ else {
+ /* Not switching to another protocol */
+ k->header = FALSE; /* no more header to parse! */
+ }
+ }
+ else {
+ /* invalid for other HTTP versions */
+ failf(data, "unexpected 101 response code");
+ return CURLE_WEIRD_SERVER_REPLY;
+ }
+ break;
+ default:
+ /* the status code 1xx indicates a provisional response, so
+ we'll get another set of headers */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+ break;
+ }
+ }
+ else {
+ /* k->httpcode >= 200, final response */
+ k->header = FALSE;
+
+ if(k->upgr101 == UPGR101_H2) {
+ /* A requested upgrade was denied, poke the multi handle to possibly
+ allow a pending pipewait to continue */
+ Curl_multi_connchanged(data->multi);
+ }
+
+ if((k->size == -1) && !k->chunk && !conn->bits.close &&
+ (conn->httpversion == 11) &&
+ !(conn->handler->protocol & CURLPROTO_RTSP) &&
+ data->state.httpreq != HTTPREQ_HEAD) {
+ /* On HTTP 1.1, when connection is not to get closed, but no
+ Content-Length nor Transfer-Encoding chunked have been
+ received, according to RFC2616 section 4.4 point 5, we
+ assume that the server will close the connection to
+ signal the end of the document. */
+ infof(data, "no chunk, no close, no size. Assume close to "
+ "signal end");
+ streamclose(conn, "HTTP: No end-of-message indicator");
+ }
+ }
+
+ if(!k->header) {
+ result = Curl_http_size(data);
+ if(result)
+ return result;
+ }
+
+ /* At this point we have some idea about the fate of the connection.
+ If we are closing the connection it may result in an auth failure. */
+#if defined(USE_NTLM)
+ if(conn->bits.close &&
+ (((data->req.httpcode == 401) &&
+ (conn->http_ntlm_state == NTLMSTATE_TYPE2)) ||
+ ((data->req.httpcode == 407) &&
+ (conn->proxy_ntlm_state == NTLMSTATE_TYPE2)))) {
+ infof(data, "Connection closure while negotiating auth (HTTP 1.0?)");
+ data->state.authproblem = TRUE;
+ }
+#endif
+#if defined(USE_SPNEGO)
+ if(conn->bits.close &&
+ (((data->req.httpcode == 401) &&
+ (conn->http_negotiate_state == GSS_AUTHRECV)) ||
+ ((data->req.httpcode == 407) &&
+ (conn->proxy_negotiate_state == GSS_AUTHRECV)))) {
+ infof(data, "Connection closure while negotiating auth (HTTP 1.0?)");
+ data->state.authproblem = TRUE;
+ }
+ if((conn->http_negotiate_state == GSS_AUTHDONE) &&
+ (data->req.httpcode != 401)) {
+ conn->http_negotiate_state = GSS_AUTHSUCC;
+ }
+ if((conn->proxy_negotiate_state == GSS_AUTHDONE) &&
+ (data->req.httpcode != 407)) {
+ conn->proxy_negotiate_state = GSS_AUTHSUCC;
+ }
+#endif
+
+ /*
+ * When all the headers have been parsed, see if we should give
+ * up and return an error.
+ */
+ if(http_should_fail(data)) {
+ failf(data, "The requested URL returned error: %d",
+ k->httpcode);
+ return CURLE_HTTP_RETURNED_ERROR;
+ }
+
+#ifdef USE_WEBSOCKETS
+ /* All non-101 HTTP status codes are bad when wanting to upgrade to
+ websockets */
+ if(data->req.upgr101 == UPGR101_WS) {
+ failf(data, "Refused WebSockets upgrade: %d", k->httpcode);
+ return CURLE_HTTP_RETURNED_ERROR;
+ }
+#endif
+
+
+ /* Curl_http_auth_act() checks what authentication methods
+ * that are available and decides which one (if any) to
+ * use. It will set 'newurl' if an auth method was picked. */
+ result = Curl_http_auth_act(data);
+
+ if(result)
+ return result;
+
+ if(k->httpcode >= 300) {
+ if((!data->req.authneg) && !conn->bits.close &&
+ !Curl_creader_will_rewind(data)) {
+ /*
+ * General treatment of errors when about to send data. Including :
+ * "417 Expectation Failed", while waiting for 100-continue.
+ *
+ * The check for close above is done simply because if something
+ * else has already deemed the connection to get closed, then that
+ * something should also have considered the big picture, so we
+ * avoid repeating the check here.
+ *
+ */
+
+ switch(data->state.httpreq) {
+ case HTTPREQ_PUT:
+ case HTTPREQ_POST:
+ case HTTPREQ_POST_FORM:
+ case HTTPREQ_POST_MIME:
+ /* We got an error response. If this happened before the whole
+ * request body has been sent we stop sending and mark the
+ * connection for closure after we've read the entire response.
+ */
+ if(!Curl_req_done_sending(data)) {
+ if((k->httpcode == 417) && Curl_http_exp100_is_selected(data)) {
+ /* 417 Expectation Failed - try again without the Expect
+ header */
+ if(!k->writebytecount && http_exp100_is_waiting(data)) {
+ infof(data, "Got HTTP failure 417 while waiting for a 100");
+ }
+ else {
+ infof(data, "Got HTTP failure 417 while sending data");
+ streamclose(conn,
+ "Stop sending data before everything sent");
+ result = http_perhapsrewind(data, conn);
+ if(result)
+ return result;
+ }
+ data->state.disableexpect = TRUE;
+ DEBUGASSERT(!data->req.newurl);
+ data->req.newurl = strdup(data->state.url);
+ Curl_req_abort_sending(data);
+ }
+ else if(data->set.http_keep_sending_on_error) {
+ infof(data, "HTTP error before end of send, keep sending");
+ http_exp100_send_anyway(data);
+ }
+ else {
+ infof(data, "HTTP error before end of send, stop sending");
+ streamclose(conn, "Stop sending data before everything sent");
+ result = Curl_req_abort_sending(data);
+ if(result)
+ return result;
+ }
+ }
+ break;
+
+ default: /* default label present to avoid compiler warnings */
+ break;
+ }
+ }
+
+ if(Curl_creader_will_rewind(data) && !Curl_req_done_sending(data)) {
+ /* We rewind before next send, continue sending now */
+ infof(data, "Keep sending data to get tossed away");
+ k->keepon |= KEEP_SEND;
+ }
+ }
+
+ if(!k->header) {
+ /*
+ * really end-of-headers.
+ *
+ * If we requested a "no body", this is a good time to get
+ * out and return home.
+ */
+ if(data->req.no_body)
+ k->download_done = TRUE;
+
+ /* If max download size is *zero* (nothing) we already have
+ nothing and can safely return ok now! But for HTTP/2, we'd
+ like to call http2_handle_stream_close to properly close a
+ stream. In order to do this, we keep reading until we
+ close the stream. */
+ if(0 == k->maxdownload
+ && !Curl_conn_is_http2(data, conn, FIRSTSOCKET)
+ && !Curl_conn_is_http3(data, conn, FIRSTSOCKET))
+ k->download_done = TRUE;
+ }
+
+ if(switch_to_h2) {
+ /* Having handled the headers, we can do the HTTP/2 switch.
+ * Any remaining `buf` bytes are already HTTP/2 and passed to
+ * be processed. */
+ result = Curl_http2_upgrade(data, conn, FIRSTSOCKET, buf, blen);
+ if(result)
+ return result;
+ *pconsumed += blen;
+ }
+
+ return CURLE_OK;
+}
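+
+ /* A hypothetical caller sketch, not curl API: http_on_response() reports
+  * through *pconsumed how many of the passed-in bytes it already handled
+  * (for instance the HTTP/2 bytes following a 101 switch). A caller must
+  * skip those bytes and only treat the remainder as body data. */
+
  #include <stdio.h>
  #include <string.h>

  /* stand-in for the real handler: pretend it takes everything it was given */
  static int demo_on_response(const char *buf, size_t blen, size_t *pconsumed)
  {
    (void)buf;
    *pconsumed = blen;
    return 0;
  }

  int main(void)
  {
    const char *buf = "...h2 frames after the 101...";
    size_t blen = strlen(buf);
    size_t consumed = 0;

    if(demo_on_response(buf, blen, &consumed))
      return 1;
    buf += consumed;   /* never feed already-consumed bytes to a parser again */
    blen -= consumed;
    printf("left over as body: %zu bytes\n", blen);   /* prints 0 */
    return 0;
  }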
/*
* Read any HTTP header lines from the server and pass them to the client app.
*/
@@ -3926,7 +3657,8 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
struct connectdata *conn = data->conn;
CURLcode result = CURLE_OK;
struct SingleRequest *k = &data->req;
- char *headp;
+ char *hd;
+ size_t hdlen;
char *end_ptr;
bool leftover_body = FALSE;
@@ -3958,6 +3690,10 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
/* this is not the beginning of a protocol first header line */
k->header = FALSE;
streamclose(conn, "bad HTTP: No end-of-message indicator");
+ if(conn->httpversion >= 10) {
+ failf(data, "Invalid status line");
+ return CURLE_WEIRD_SERVER_REPLY;
+ }
if(!data->set.http09_allowed) {
failf(data, "Received HTTP/0.9 when not allowed");
return CURLE_UNSUPPORTED_PROTOCOL;
@@ -3991,6 +3727,10 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
if(st == STATUS_BAD) {
streamclose(conn, "bad HTTP: No end-of-message indicator");
/* this is not the beginning of a protocol first header line */
+ if(conn->httpversion >= 10) {
+ failf(data, "Invalid status line");
+ return CURLE_WEIRD_SERVER_REPLY;
+ }
if(!data->set.http09_allowed) {
failf(data, "Received HTTP/0.9 when not allowed");
return CURLE_UNSUPPORTED_PROTOCOL;
@@ -4003,308 +3743,44 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
/* headers are in network encoding so use 0x0a and 0x0d instead of '\n'
and '\r' */
- headp = Curl_dyn_ptr(&data->state.headerb);
- if((0x0a == *headp) || (0x0d == *headp)) {
- size_t headerlen;
- bool switch_to_h2 = FALSE;
- /* Zero-length header line means end of headers! */
-
- if('\r' == *headp)
- headp++; /* pass the \r byte */
- if('\n' == *headp)
- headp++; /* pass the \n byte */
-
- if(100 <= k->httpcode && 199 >= k->httpcode) {
- /* "A user agent MAY ignore unexpected 1xx status responses." */
- switch(k->httpcode) {
- case 100:
- /*
- * We have made an HTTP PUT or POST and this is 1.1-lingo
- * that tells us that the server is OK with this and ready
- * to receive the data.
- * However, we'll get more headers now so we must get
- * back into the header-parsing state!
- */
- k->header = TRUE;
- k->headerline = 0; /* restart the header line counter */
-
- /* if we did wait for this do enable write now! */
- if(k->exp100 > EXP100_SEND_DATA) {
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- }
- break;
- case 101:
- if(conn->httpversion == 11) {
- /* Switching Protocols only allowed from HTTP/1.1 */
- if(k->upgr101 == UPGR101_H2) {
- /* Switching to HTTP/2 */
- infof(data, "Received 101, Switching to HTTP/2");
- k->upgr101 = UPGR101_RECEIVED;
-
- /* we'll get more headers (HTTP/2 response) */
- k->header = TRUE;
- k->headerline = 0; /* restart the header line counter */
- switch_to_h2 = TRUE;
- }
-#ifdef USE_WEBSOCKETS
- else if(k->upgr101 == UPGR101_WS) {
- /* verify the response */
- result = Curl_ws_accept(data, buf, blen);
- if(result)
- return result;
- k->header = FALSE; /* no more header to parse! */
- *pconsumed += blen; /* ws accept handled the data */
- blen = 0;
- if(data->set.connect_only)
- k->keepon &= ~KEEP_RECV; /* read no more content */
- }
-#endif
- else {
- /* Not switching to another protocol */
- k->header = FALSE; /* no more header to parse! */
- }
- }
- else {
- /* invalid for other HTTP versions */
- failf(data, "unexpected 101 response code");
- return CURLE_WEIRD_SERVER_REPLY;
- }
- break;
- default:
- /* the status code 1xx indicates a provisional response, so
- we'll get another set of headers */
- k->header = TRUE;
- k->headerline = 0; /* restart the header line counter */
- break;
- }
- }
- else {
- if(k->upgr101 == UPGR101_H2) {
- /* A requested upgrade was denied, poke the multi handle to possibly
- allow a pending pipewait to continue */
- Curl_multi_connchanged(data->multi);
- }
- k->header = FALSE; /* no more header to parse! */
-
- if((k->size == -1) && !k->chunk && !conn->bits.close &&
- (conn->httpversion == 11) &&
- !(conn->handler->protocol & CURLPROTO_RTSP) &&
- data->state.httpreq != HTTPREQ_HEAD) {
- /* On HTTP 1.1, when connection is not to get closed, but no
- Content-Length nor Transfer-Encoding chunked have been
- received, according to RFC2616 section 4.4 point 5, we
- assume that the server will close the connection to
- signal the end of the document. */
- infof(data, "no chunk, no close, no size. Assume close to "
- "signal end");
- streamclose(conn, "HTTP: No end-of-message indicator");
- }
- }
-
- if(!k->header) {
- result = Curl_http_size(data);
- if(result)
- return result;
- }
-
- /* At this point we have some idea about the fate of the connection.
- If we are closing the connection it may result auth failure. */
-#if defined(USE_NTLM)
- if(conn->bits.close &&
- (((data->req.httpcode == 401) &&
- (conn->http_ntlm_state == NTLMSTATE_TYPE2)) ||
- ((data->req.httpcode == 407) &&
- (conn->proxy_ntlm_state == NTLMSTATE_TYPE2)))) {
- infof(data, "Connection closure while negotiating auth (HTTP 1.0?)");
- data->state.authproblem = TRUE;
- }
-#endif
-#if defined(USE_SPNEGO)
- if(conn->bits.close &&
- (((data->req.httpcode == 401) &&
- (conn->http_negotiate_state == GSS_AUTHRECV)) ||
- ((data->req.httpcode == 407) &&
- (conn->proxy_negotiate_state == GSS_AUTHRECV)))) {
- infof(data, "Connection closure while negotiating auth (HTTP 1.0?)");
- data->state.authproblem = TRUE;
- }
- if((conn->http_negotiate_state == GSS_AUTHDONE) &&
- (data->req.httpcode != 401)) {
- conn->http_negotiate_state = GSS_AUTHSUCC;
- }
- if((conn->proxy_negotiate_state == GSS_AUTHDONE) &&
- (data->req.httpcode != 407)) {
- conn->proxy_negotiate_state = GSS_AUTHSUCC;
- }
-#endif
+ hd = Curl_dyn_ptr(&data->state.headerb);
+ hdlen = Curl_dyn_len(&data->state.headerb);
+ if((0x0a == *hd) || (0x0d == *hd)) {
+ /* Empty header line means end of headers! */
+ size_t consumed;
/* now, only output this if the header AND body are requested:
*/
+ Curl_debug(data, CURLINFO_HEADER_IN, hd, hdlen);
+
writetype = CLIENTWRITE_HEADER |
((k->httpcode/100 == 1) ? CLIENTWRITE_1XX : 0);
- headerlen = Curl_dyn_len(&data->state.headerb);
- result = Curl_client_write(data, writetype,
- Curl_dyn_ptr(&data->state.headerb),
- headerlen);
+ result = Curl_client_write(data, writetype, hd, hdlen);
if(result)
return result;
- result = Curl_bump_headersize(data, headerlen, FALSE);
+ result = Curl_bump_headersize(data, hdlen, FALSE);
if(result)
return result;
-
- /*
- * When all the headers have been parsed, see if we should give
- * up and return an error.
- */
- if(http_should_fail(data)) {
- failf(data, "The requested URL returned error: %d",
- k->httpcode);
- return CURLE_HTTP_RETURNED_ERROR;
- }
-
-#ifdef USE_WEBSOCKETS
- /* All non-101 HTTP status codes are bad when wanting to upgrade to
- websockets */
- if(data->req.upgr101 == UPGR101_WS) {
- failf(data, "Refused WebSockets upgrade: %d", k->httpcode);
- return CURLE_HTTP_RETURNED_ERROR;
- }
-#endif
-
+ /* We are done with this line. We reset because response
+ * processing might switch to HTTP/2 and that might call us
+ * directly again. */
+ Curl_dyn_reset(&data->state.headerb);
data->req.deductheadercount =
(100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
- /* Curl_http_auth_act() checks what authentication methods
- * that are available and decides which one (if any) to
- * use. It will set 'newurl' if an auth method was picked. */
- result = Curl_http_auth_act(data);
-
+ /* analyze the response to find out what to do */
+ result = http_on_response(data, buf, blen, &consumed);
if(result)
return result;
+ *pconsumed += consumed;
+ blen -= consumed;
+ buf += consumed;
- if(k->httpcode >= 300) {
- if((!conn->bits.authneg) && !conn->bits.close &&
- !data->state.rewindbeforesend) {
- /*
- * General treatment of errors when about to send data. Including :
- * "417 Expectation Failed", while waiting for 100-continue.
- *
- * The check for close above is done simply because of something
- * else has already deemed the connection to get closed then
- * something else should've considered the big picture and we
- * avoid this check.
- *
- * rewindbeforesend indicates that something has told libcurl to
- * continue sending even if it gets discarded
- */
-
- switch(data->state.httpreq) {
- case HTTPREQ_PUT:
- case HTTPREQ_POST:
- case HTTPREQ_POST_FORM:
- case HTTPREQ_POST_MIME:
- /* We got an error response. If this happened before the whole
- * request body has been sent we stop sending and mark the
- * connection for closure after we've read the entire response.
- */
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- if(!k->upload_done) {
- if((k->httpcode == 417) && data->state.expect100header) {
- /* 417 Expectation Failed - try again without the Expect
- header */
- if(!k->writebytecount &&
- k->exp100 == EXP100_AWAITING_CONTINUE) {
- infof(data, "Got HTTP failure 417 while waiting for a 100");
- }
- else {
- infof(data, "Got HTTP failure 417 while sending data");
- streamclose(conn,
- "Stop sending data before everything sent");
- result = http_perhapsrewind(data, conn);
- if(result)
- return result;
- }
- data->state.disableexpect = TRUE;
- DEBUGASSERT(!data->req.newurl);
- data->req.newurl = strdup(data->state.url);
- Curl_done_sending(data, k);
- }
- else if(data->set.http_keep_sending_on_error) {
- infof(data, "HTTP error before end of send, keep sending");
- if(k->exp100 > EXP100_SEND_DATA) {
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- }
- }
- else {
- infof(data, "HTTP error before end of send, stop sending");
- streamclose(conn, "Stop sending data before everything sent");
- result = Curl_done_sending(data, k);
- if(result)
- return result;
- k->upload_done = TRUE;
- if(data->state.expect100header)
- k->exp100 = EXP100_FAILED;
- }
- }
- break;
-
- default: /* default label present to avoid compiler warnings */
- break;
- }
- }
-
- if(data->state.rewindbeforesend &&
- (conn->writesockfd != CURL_SOCKET_BAD)) {
- /* We rewind before next send, continue sending now */
- infof(data, "Keep sending data to get tossed away");
- k->keepon |= KEEP_SEND;
- }
- }
-
- if(!k->header) {
- /*
- * really end-of-headers.
- *
- * If we requested a "no body", this is a good time to get
- * out and return home.
- */
- if(data->req.no_body)
- k->download_done = TRUE;
-
- /* If max download size is *zero* (nothing) we already have
- nothing and can safely return ok now! But for HTTP/2, we'd
- like to call http2_handle_stream_close to properly close a
- stream. In order to do this, we keep reading until we
- close the stream. */
- if(0 == k->maxdownload
- && !Curl_conn_is_http2(data, conn, FIRSTSOCKET)
- && !Curl_conn_is_http3(data, conn, FIRSTSOCKET))
- k->download_done = TRUE;
-
- Curl_debug(data, CURLINFO_HEADER_IN,
- Curl_dyn_ptr(&data->state.headerb),
- Curl_dyn_len(&data->state.headerb));
+ if(!k->header || !blen)
goto out; /* exit header line loop */
- }
-
- /* We continue reading headers, reset the line-based header */
- Curl_dyn_reset(&data->state.headerb);
- if(switch_to_h2) {
- /* Having handled the headers, we can do the HTTP/2 switch.
- * Any remaining `buf` bytes are already HTTP/2 and passed to
- * be processed. */
- result = Curl_http2_upgrade(data, conn, FIRSTSOCKET, buf, blen);
- if(result)
- return result;
- *pconsumed += blen;
- blen = 0;
- }
continue;
}
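
 /* Standalone illustration, invented names: the branch above detects the end
  * of the header block. A header "line" that starts with a bare CR or LF in
  * network encoding is the empty line that terminates the headers, which is
  * why the code checks for 0x0a/0x0d explicitly. */

  #include <stdio.h>

  static int demo_is_end_of_headers(const char *hd)
  {
    return (*hd == 0x0a) || (*hd == 0x0d);   /* "\n" or "\r\n" */
  }

  int main(void)
  {
    printf("%d %d\n",
           demo_is_end_of_headers("\r\n"),                    /* 1 */
           demo_is_end_of_headers("Content-Length: 3\r\n"));  /* 0 */
    return 0;
  }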
@@ -4318,6 +3794,8 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
/* This is the first header, it MUST be the error code line
or else we consider this to be the body right away! */
bool fine_statusline = FALSE;
+
+ k->httpversion = 0; /* Don't know yet */
if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
/*
* https://datatracker.ietf.org/doc/html/rfc7230#section-3.1.2
@@ -4326,8 +3804,7 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
* says. We allow any three-digit number here, but we cannot make
* guarantees on future behaviors since it isn't within the protocol.
*/
- int httpversion = 0;
- char *p = headp;
+ char *p = hd;
while(*p && ISBLANK(*p))
p++;
@@ -4338,7 +3815,7 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
p++;
if((p[0] == '.') && (p[1] == '0' || p[1] == '1')) {
if(ISBLANK(p[2])) {
- httpversion = 10 + (p[1] - '0');
+ k->httpversion = 10 + (p[1] - '0');
p += 3;
if(ISDIGIT(p[0]) && ISDIGIT(p[1]) && ISDIGIT(p[2])) {
k->httpcode = (p[0] - '0') * 100 + (p[1] - '0') * 10 +
@@ -4358,7 +3835,7 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
case '3':
if(!ISBLANK(p[1]))
break;
- httpversion = (*p - '0') * 10;
+ k->httpversion = (*p - '0') * 10;
p += 2;
if(ISDIGIT(p[0]) && ISDIGIT(p[1]) && ISDIGIT(p[2])) {
k->httpcode = (p[0] - '0') * 100 + (p[1] - '0') * 10 +
@@ -4375,54 +3852,20 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
}
}
- if(fine_statusline) {
- if(k->httpcode < 100) {
- failf(data, "Unsupported response code in HTTP response");
- return CURLE_UNSUPPORTED_PROTOCOL;
- }
- switch(httpversion) {
- case 10:
- case 11:
-#ifdef USE_HTTP2
- case 20:
-#endif
-#ifdef ENABLE_QUIC
- case 30:
-#endif
- conn->httpversion = (unsigned char)httpversion;
- break;
- default:
- failf(data, "Unsupported HTTP version (%u.%d) in response",
- httpversion/10, httpversion%10);
- return CURLE_UNSUPPORTED_PROTOCOL;
- }
-
- if(k->upgr101 == UPGR101_RECEIVED) {
- /* supposedly upgraded to http2 now */
- if(conn->httpversion != 20)
- infof(data, "Lying server, not serving HTTP/2");
- }
- if(conn->httpversion < 20) {
- conn->bundle->multiuse = BUNDLE_NO_MULTIUSE;
- }
- }
- else {
+ if(!fine_statusline) {
/* If user has set option HTTP200ALIASES,
compare header line against list of aliases
*/
- statusline check =
- checkhttpprefix(data,
- Curl_dyn_ptr(&data->state.headerb),
- Curl_dyn_len(&data->state.headerb));
+ statusline check = checkhttpprefix(data, hd, hdlen);
if(check == STATUS_DONE) {
fine_statusline = TRUE;
k->httpcode = 200;
- conn->httpversion = 10;
+ k->httpversion = 10;
}
}
}
else if(conn->handler->protocol & CURLPROTO_RTSP) {
- char *p = headp;
+ char *p = hd;
while(*p && ISBLANK(*p))
p++;
if(!strncmp(p, "RTSP/", 5)) {
@@ -4438,7 +3881,7 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
p += 3;
if(ISSPACE(*p)) {
fine_statusline = TRUE;
- conn->httpversion = 11; /* RTSP acts like HTTP 1.1 */
+ k->httpversion = 11; /* RTSP acts like HTTP 1.1 */
}
}
}
@@ -4465,26 +3908,22 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
if(result)
return result;
- result = Curl_http_header(data, conn, headp);
+ result = Curl_http_header(data, conn, hd, hdlen);
if(result)
return result;
/*
- * End of header-checks. Write them to the client.
+ * Taken in one (more) header. Write it to the client.
*/
+ Curl_debug(data, CURLINFO_HEADER_IN, hd, hdlen);
+
if(k->httpcode/100 == 1)
writetype |= CLIENTWRITE_1XX;
-
- Curl_debug(data, CURLINFO_HEADER_IN, headp,
- Curl_dyn_len(&data->state.headerb));
-
- result = Curl_client_write(data, writetype, headp,
- Curl_dyn_len(&data->state.headerb));
+ result = Curl_client_write(data, writetype, hd, hdlen);
if(result)
return result;
- result = Curl_bump_headersize(data, Curl_dyn_len(&data->state.headerb),
- FALSE);
+ result = Curl_bump_headersize(data, hdlen, FALSE);
if(result)
return result;
@@ -4508,10 +3947,8 @@ out:
*/
CURLcode Curl_http_write_resp_hds(struct Curl_easy *data,
const char *buf, size_t blen,
- size_t *pconsumed,
- bool *done)
+ size_t *pconsumed)
{
- *done = FALSE;
if(!data->req.header) {
*pconsumed = 0;
return CURLE_OK;
@@ -4522,7 +3959,7 @@ CURLcode Curl_http_write_resp_hds(struct Curl_easy *data,
result = http_rw_headers(data, buf, blen, pconsumed);
if(!result && !data->req.header) {
/* we have successfully finished parsing the HEADERs */
- result = Curl_http_firstwrite(data, data->conn, done);
+ result = Curl_http_firstwrite(data);
if(!data->req.no_body && Curl_dyn_len(&data->state.headerb)) {
/* leftover from parsing something that turned out not
@@ -4540,23 +3977,21 @@ CURLcode Curl_http_write_resp_hds(struct Curl_easy *data,
CURLcode Curl_http_write_resp(struct Curl_easy *data,
const char *buf, size_t blen,
- bool is_eos,
- bool *done)
+ bool is_eos)
{
CURLcode result;
size_t consumed;
int flags;
- *done = FALSE;
- result = Curl_http_write_resp_hds(data, buf, blen, &consumed, done);
- if(result || *done)
+ result = Curl_http_write_resp_hds(data, buf, blen, &consumed);
+ if(result || data->req.done)
goto out;
DEBUGASSERT(consumed <= blen);
blen -= consumed;
buf += consumed;
/* either all was consumed in header parsing, or we have data left
- * and are done with heders, e.g. it is BODY data */
+ * and are done with headers, e.g. it is BODY data */
DEBUGASSERT(!blen || !data->req.header);
if(!data->req.header && (blen || is_eos)) {
/* BODY data after header been parsed, write and consume */
@@ -4938,4 +4373,142 @@ void Curl_http_resp_free(struct http_resp *resp)
}
}
+struct cr_exp100_ctx {
+ struct Curl_creader super;
+ struct curltime start; /* time started waiting */
+ enum expect100 state;
+};
+
+/* Expect: 100-continue client reader, blocking uploads */
+
+static void http_exp100_continue(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ if(ctx->state > EXP100_SEND_DATA) {
+ ctx->state = EXP100_SEND_DATA;
+ data->req.keepon |= KEEP_SEND;
+ data->req.keepon &= ~KEEP_SEND_TIMED;
+ Curl_expire_done(data, EXPIRE_100_TIMEOUT);
+ }
+}
+
+static CURLcode cr_exp100_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *nread, bool *eos)
+{
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ timediff_t ms;
+
+ switch(ctx->state) {
+ case EXP100_SENDING_REQUEST:
+ /* We are now waiting for a reply from the server or
+ * a timeout on our side */
+ DEBUGF(infof(data, "cr_exp100_read, start AWAITING_CONTINUE"));
+ ctx->state = EXP100_AWAITING_CONTINUE;
+ ctx->start = Curl_now();
+ Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
+ data->req.keepon &= ~KEEP_SEND;
+ data->req.keepon |= KEEP_SEND_TIMED;
+ *nread = 0;
+ *eos = FALSE;
+ return CURLE_OK;
+ case EXP100_FAILED:
+ DEBUGF(infof(data, "cr_exp100_read, expectation failed, error"));
+ *nread = 0;
+ *eos = FALSE;
+ return CURLE_READ_ERROR;
+ case EXP100_AWAITING_CONTINUE:
+ ms = Curl_timediff(Curl_now(), ctx->start);
+ if(ms < data->set.expect_100_timeout) {
+ DEBUGF(infof(data, "cr_exp100_read, AWAITING_CONTINUE, not expired"));
+ data->req.keepon &= ~KEEP_SEND;
+ data->req.keepon |= KEEP_SEND_TIMED;
+ *nread = 0;
+ *eos = FALSE;
+ return CURLE_OK;
+ }
+ /* we've waited long enough, continue anyway */
+ http_exp100_continue(data, reader);
+ infof(data, "Done waiting for 100-continue");
+ FALLTHROUGH();
+ default:
+ DEBUGF(infof(data, "cr_exp100_read, pass through"));
+ return Curl_creader_read(data, reader->next, buf, blen, nread, eos);
+ }
+}
+
+static void cr_exp100_done(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature)
+{
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ ctx->state = premature? EXP100_FAILED : EXP100_SEND_DATA;
+ data->req.keepon &= ~KEEP_SEND_TIMED;
+ Curl_expire_done(data, EXPIRE_100_TIMEOUT);
+}
+
+static const struct Curl_crtype cr_exp100 = {
+ "cr-exp100",
+ Curl_creader_def_init,
+ cr_exp100_read,
+ Curl_creader_def_close,
+ Curl_creader_def_needs_rewind,
+ Curl_creader_def_total_length,
+ Curl_creader_def_resume_from,
+ Curl_creader_def_rewind,
+ Curl_creader_def_unpause,
+ cr_exp100_done,
+ sizeof(struct cr_exp100_ctx)
+};
+
+static CURLcode http_exp100_add_reader(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = NULL;
+ CURLcode result;
+
+ result = Curl_creader_create(&reader, data, &cr_exp100,
+ CURL_CR_PROTOCOL);
+ if(!result)
+ result = Curl_creader_add(data, reader);
+ if(!result) {
+ struct cr_exp100_ctx *ctx = reader->ctx;
+ ctx->state = EXP100_SENDING_REQUEST;
+ }
+
+ if(result && reader)
+ Curl_creader_free(data, reader);
+ return result;
+}
+
+void Curl_http_exp100_got100(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ if(r)
+ http_exp100_continue(data, r);
+}
+
+static bool http_exp100_is_waiting(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ if(r) {
+ struct cr_exp100_ctx *ctx = r->ctx;
+ return (ctx->state == EXP100_AWAITING_CONTINUE);
+ }
+ return FALSE;
+}
+
+static void http_exp100_send_anyway(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ if(r)
+ http_exp100_continue(data, r);
+}
+
+bool Curl_http_exp100_is_selected(struct Curl_easy *data)
+{
+ struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
+ return r? TRUE : FALSE;
+}
+
#endif /* CURL_DISABLE_HTTP */
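
 /* Simplified sketch with invented names: the cr_exp100 reader above boils
  * down to a small state machine. After the request headers go out it waits
  * for a 100-continue, and it releases the body either when the 100 arrives
  * or when the expect-100 timeout expires. The real reader also toggles
  * KEEP_SEND/KEEP_SEND_TIMED and the expire timers; this only shows the
  * timeout decision. */

  #include <stdio.h>

  enum demo_exp100 { DEMO_SENDING, DEMO_AWAITING, DEMO_SEND_DATA, DEMO_FAILED };

  static enum demo_exp100 demo_step(enum demo_exp100 st, long waited_ms,
                                    long timeout_ms, int got100)
  {
    switch(st) {
    case DEMO_SENDING:
      return DEMO_AWAITING;             /* headers sent, start waiting */
    case DEMO_AWAITING:
      if(got100 || waited_ms >= timeout_ms)
        return DEMO_SEND_DATA;          /* server agreed, or we waited enough */
      return DEMO_AWAITING;
    default:
      return st;                        /* SEND_DATA / FAILED are terminal */
    }
  }

  int main(void)
  {
    enum demo_exp100 st = DEMO_SENDING;
    st = demo_step(st, 0, 1000, 0);     /* -> AWAITING */
    st = demo_step(st, 200, 1000, 0);   /* still AWAITING */
    st = demo_step(st, 1200, 1000, 0);  /* timed out -> SEND_DATA */
    printf("send data now: %d\n", st == DEMO_SEND_DATA);
    return 0;
  }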
diff --git a/libs/libcurl/src/http.h b/libs/libcurl/src/http.h
index 3ceb049693..86763d5ad5 100644
--- a/libs/libcurl/src/http.h
+++ b/libs/libcurl/src/http.h
@@ -74,12 +74,6 @@ char *Curl_checkProxyheaders(struct Curl_easy *data, const char *thisheader,
const size_t thislen);
struct HTTP; /* see below */
-CURLcode Curl_buffer_send(struct dynbuf *in,
- struct Curl_easy *data,
- struct HTTP *http,
- curl_off_t *bytes_written,
- curl_off_t included_body_bytes,
- int socketindex);
CURLcode Curl_add_timecondition(struct Curl_easy *data,
#ifndef USE_HYPER
@@ -100,10 +94,6 @@ CURLcode Curl_dynhds_add_custom(struct Curl_easy *data, bool is_connect,
struct dynhds *hds);
-CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
- struct dynbuf *buf,
- struct Curl_easy *handle);
-
void Curl_http_method(struct Curl_easy *data, struct connectdata *conn,
const char **method, Curl_HttpReq *);
CURLcode Curl_http_useragent(struct Curl_easy *data);
@@ -113,13 +103,13 @@ CURLcode Curl_http_target(struct Curl_easy *data, struct connectdata *conn,
CURLcode Curl_http_statusline(struct Curl_easy *data,
struct connectdata *conn);
CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn,
- char *headp);
+ char *headp, size_t hdlen);
CURLcode Curl_transferencode(struct Curl_easy *data);
-CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
- Curl_HttpReq httpreq,
- const char **teep);
-CURLcode Curl_http_bodysend(struct Curl_easy *data, struct connectdata *conn,
- struct dynbuf *r, Curl_HttpReq httpreq);
+CURLcode Curl_http_req_set_reader(struct Curl_easy *data,
+ Curl_HttpReq httpreq,
+ const char **tep);
+CURLcode Curl_http_req_complete(struct Curl_easy *data,
+ struct dynbuf *r, Curl_HttpReq httpreq);
bool Curl_use_http_1_1plus(const struct Curl_easy *data,
const struct connectdata *conn);
#ifndef CURL_DISABLE_COOKIES
@@ -129,14 +119,9 @@ CURLcode Curl_http_cookies(struct Curl_easy *data,
#else
#define Curl_http_cookies(a,b,c) CURLE_OK
#endif
-CURLcode Curl_http_resume(struct Curl_easy *data,
- struct connectdata *conn,
- Curl_HttpReq httpreq);
CURLcode Curl_http_range(struct Curl_easy *data,
Curl_HttpReq httpreq);
-CURLcode Curl_http_firstwrite(struct Curl_easy *data,
- struct connectdata *conn,
- bool *done);
+CURLcode Curl_http_firstwrite(struct Curl_easy *data);
/* protocol-specific functions set up to be called by the main engine */
CURLcode Curl_http_setup_conn(struct Curl_easy *data,
@@ -148,8 +133,7 @@ int Curl_http_getsock_do(struct Curl_easy *data, struct connectdata *conn,
curl_socket_t *socks);
CURLcode Curl_http_write_resp(struct Curl_easy *data,
const char *buf, size_t blen,
- bool is_eos,
- bool *done);
+ bool is_eos);
/* These functions are in http.c */
CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
@@ -192,34 +176,20 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data);
version. This count includes CONNECT response headers. */
#define MAX_HTTP_RESP_HEADER_SIZE (300*1024)
+bool Curl_http_exp100_is_selected(struct Curl_easy *data);
+void Curl_http_exp100_got100(struct Curl_easy *data);
+
#endif /* CURL_DISABLE_HTTP */
/****************************************************************************
* HTTP unique setup
***************************************************************************/
struct HTTP {
- curl_off_t postsize; /* off_t to handle large file sizes */
- const char *postdata;
- struct back {
- curl_read_callback fread_func; /* backup storage for fread pointer */
- void *fread_in; /* backup storage for fread_in pointer */
- const char *postdata;
- curl_off_t postsize;
- struct Curl_easy *data;
- } backup;
-
- enum {
- HTTPSEND_NADA, /* init */
- HTTPSEND_REQUEST, /* sending a request */
- HTTPSEND_BODY /* sending body */
- } sending;
-
#ifndef CURL_DISABLE_HTTP
void *h2_ctx; /* HTTP/2 implementation context */
void *h3_ctx; /* HTTP/3 implementation context */
- struct dynbuf send_buffer; /* used if the request couldn't be sent in one
- chunk, points to an allocated send_buffer
- struct */
+#else
+ char unused;
#endif
};
@@ -227,8 +197,7 @@ CURLcode Curl_http_size(struct Curl_easy *data); CURLcode Curl_http_write_resp_hds(struct Curl_easy *data,
const char *buf, size_t blen,
- size_t *pconsumed,
- bool *done);
+ size_t *pconsumed);
/**
* Curl_http_output_auth() setups the authentication headers for the
diff --git a/libs/libcurl/src/http2.c b/libs/libcurl/src/http2.c
index fbec0a7927..c7f369e1cb 100644
--- a/libs/libcurl/src/http2.c
+++ b/libs/libcurl/src/http2.c
@@ -121,7 +121,6 @@ static ssize_t populate_binsettings(uint8_t *binsettings,
struct cf_h2_ctx {
nghttp2_session *h2;
- uint32_t max_concurrent_streams;
/* The easy handle used in the current filter call, cleared at return */
struct cf_call_data call_data;
@@ -130,6 +129,7 @@ struct cf_h2_ctx { struct bufc_pool stream_bufcp; /* spares for stream buffers */
size_t drain_total; /* sum of all stream's UrlState drain */
+ uint32_t max_concurrent_streams;
int32_t goaway_error;
int32_t last_stream_id;
BIT(conn_closed);
@@ -169,11 +169,9 @@ static CURLcode h2_progress_egress(struct Curl_cfilter *cf, struct Curl_easy *data);
/**
- * All about the H3 internals of a stream
+ * All about the H2 internals of a stream
*/
-struct stream_ctx {
- /*********** for HTTP/2 we store stream-local data here *************/
- int32_t id; /* HTTP/2 protocol identifier for stream */
+struct h2_stream_ctx {
struct bufq recvbuf; /* response buffer */
struct bufq sendbuf; /* request buffer */
struct h1_req_parser h1; /* parsing the request */
@@ -181,6 +179,7 @@ struct stream_ctx { size_t resp_hds_len; /* amount of response header bytes in recvbuf */
size_t upload_blocked_len;
curl_off_t upload_left; /* number of request bytes left to upload */
+ curl_off_t nrcvd_data; /* number of DATA bytes received */
char **push_headers; /* allocated array */
size_t push_headers_used; /* number of entries filled in */
@@ -189,16 +188,18 @@ struct stream_ctx { int status_code; /* HTTP response status code */
uint32_t error; /* stream error code */
uint32_t local_window_size; /* the local recv window size */
- bool resp_hds_complete; /* we have a complete, final response */
- bool closed; /* TRUE on stream close */
- bool reset; /* TRUE on stream reset */
- bool close_handled; /* TRUE if stream closure is handled by libcurl */
- bool bodystarted;
- bool send_closed; /* transfer is done sending, we might have still
- buffered data in stream->sendbuf to upload. */
+ int32_t id; /* HTTP/2 protocol identifier for stream */
+ BIT(resp_hds_complete); /* we have a complete, final response */
+ BIT(closed); /* TRUE on stream close */
+ BIT(reset); /* TRUE on stream reset */
+ BIT(close_handled); /* TRUE if stream closure is handled by libcurl */
+ BIT(bodystarted);
+ BIT(send_closed); /* transfer is done sending, we might have still
+ buffered data in stream->sendbuf to upload. */
};
-#define H2_STREAM_CTX(d) ((struct stream_ctx *)(((d) && (d)->req.p.http)? \
+#define H2_STREAM_CTX(d) ((struct h2_stream_ctx *)(((d) && \
+ (d)->req.p.http)? \
((struct HTTP *)(d)->req.p.http)->h2_ctx \
: NULL))
#define H2_STREAM_LCTX(d) ((struct HTTP *)(d)->req.p.http)->h2_ctx
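
 /* Standalone illustration: the h2_stream_ctx change above turns several bool
  * members into one-bit bitfields (curl's BIT() macro) and packs them
  * together, which typically shrinks the struct. Exact sizes depend on the
  * compiler and ABI; the layout below only mimics the idea with made-up
  * struct names. */

  #include <stdio.h>
  #include <stdbool.h>

  struct demo_bools {           /* old shape: one byte per flag plus padding */
    int id;
    bool resp_hds_complete, closed, reset, close_handled, bodystarted,
         send_closed;
  };

  struct demo_bits {            /* new shape: one bit per flag */
    int id;
    unsigned int resp_hds_complete : 1;
    unsigned int closed : 1;
    unsigned int reset : 1;
    unsigned int close_handled : 1;
    unsigned int bodystarted : 1;
    unsigned int send_closed : 1;
  };

  int main(void)
  {
    printf("bools: %zu bytes, bitfields: %zu bytes\n",
           sizeof(struct demo_bools), sizeof(struct demo_bits));
    return 0;
  }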
@@ -210,7 +211,7 @@ struct stream_ctx { */
static void drain_stream(struct Curl_cfilter *cf,
struct Curl_easy *data,
- struct stream_ctx *stream)
+ struct h2_stream_ctx *stream)
{
unsigned char bits;
@@ -229,10 +230,10 @@ static void drain_stream(struct Curl_cfilter *cf, static CURLcode http2_data_setup(struct Curl_cfilter *cf,
struct Curl_easy *data,
- struct stream_ctx **pstream)
+ struct h2_stream_ctx **pstream)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream;
+ struct h2_stream_ctx *stream;
(void)cf;
DEBUGASSERT(data);
@@ -253,8 +254,6 @@ static CURLcode http2_data_setup(struct Curl_cfilter *cf, stream->id = -1;
Curl_bufq_initp(&stream->sendbuf, &ctx->stream_bufcp,
H2_STREAM_SEND_CHUNKS, BUFQ_OPT_NONE);
- Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
- H2_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
Curl_h1_req_parse_init(&stream->h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
Curl_dynhds_init(&stream->resp_trailers, 0, DYN_HTTP_REQUEST);
stream->resp_hds_len = 0;
@@ -265,20 +264,28 @@ static CURLcode http2_data_setup(struct Curl_cfilter *cf, stream->error = NGHTTP2_NO_ERROR;
stream->local_window_size = H2_STREAM_WINDOW_SIZE;
stream->upload_left = 0;
+ stream->nrcvd_data = 0;
H2_STREAM_LCTX(data) = stream;
*pstream = stream;
return CURLE_OK;
}
-static void http2_data_done(struct Curl_cfilter *cf,
- struct Curl_easy *data, bool premature)
+static void free_push_headers(struct h2_stream_ctx *stream)
+{
+ size_t i;
+ for(i = 0; i<stream->push_headers_used; i++)
+ free(stream->push_headers[i]);
+ Curl_safefree(stream->push_headers);
+ stream->push_headers_used = 0;
+}
+
+static void http2_data_done(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
DEBUGASSERT(ctx);
- (void)premature;
if(!stream)
return;
@@ -298,34 +305,15 @@ static void http2_data_done(struct Curl_cfilter *cf, stream->id, NGHTTP2_STREAM_CLOSED);
flush_egress = TRUE;
}
- if(!Curl_bufq_is_empty(&stream->recvbuf)) {
- /* Anything in the recvbuf is still being counted
- * in stream and connection window flow control. Need
- * to free that space or the connection window might get
- * exhausted eventually. */
- nghttp2_session_consume(ctx->h2, stream->id,
- Curl_bufq_len(&stream->recvbuf));
- /* give WINDOW_UPATE a chance to be sent, but ignore any error */
- flush_egress = TRUE;
- }
if(flush_egress)
nghttp2_session_send(ctx->h2);
}
Curl_bufq_free(&stream->sendbuf);
- Curl_bufq_free(&stream->recvbuf);
Curl_h1_req_parse_free(&stream->h1);
Curl_dynhds_free(&stream->resp_trailers);
- if(stream->push_headers) {
- /* if they weren't used and then freed before */
- for(; stream->push_headers_used > 0; --stream->push_headers_used) {
- free(stream->push_headers[stream->push_headers_used - 1]);
- }
- free(stream->push_headers);
- stream->push_headers = NULL;
- }
-
+ free_push_headers(stream);
free(stream);
H2_STREAM_LCTX(data) = NULL;
}
@@ -411,7 +399,7 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf, bool via_h1_upgrade)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream;
+ struct h2_stream_ctx *stream;
CURLcode result = CURLE_OUT_OF_MEMORY;
int rc;
nghttp2_session_callbacks *cbs = NULL;
@@ -731,7 +719,7 @@ char *curl_pushheader_bynum(struct curl_pushheaders *h, size_t num) if(!h || !GOOD_EASY_HANDLE(h->data))
return NULL;
else {
- struct stream_ctx *stream = H2_STREAM_CTX(h->data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(h->data);
if(stream && num < stream->push_headers_used)
return stream->push_headers[num];
}
@@ -743,7 +731,7 @@ char *curl_pushheader_bynum(struct curl_pushheaders *h, size_t num) */
char *curl_pushheader_byname(struct curl_pushheaders *h, const char *header)
{
- struct stream_ctx *stream;
+ struct h2_stream_ctx *stream;
size_t len;
size_t i;
/* Verify that we got a good easy handle in the push header struct,
@@ -783,7 +771,7 @@ static struct Curl_easy *h2_duphandle(struct Curl_cfilter *cf, (void)Curl_close(&second);
}
else {
- struct stream_ctx *second_stream;
+ struct h2_stream_ctx *second_stream;
second->req.p.http = http;
http2_data_setup(cf, second, &second_stream);
@@ -850,9 +838,8 @@ fail: static void discard_newhandle(struct Curl_cfilter *cf,
struct Curl_easy *newhandle)
{
- if(!newhandle->req.p.http) {
- http2_data_done(cf, newhandle, TRUE);
- newhandle->req.p.http = NULL;
+ if(newhandle->req.p.http) {
+ http2_data_done(cf, newhandle);
}
(void)Curl_close(&newhandle);
}
@@ -867,12 +854,11 @@ static int push_promise(struct Curl_cfilter *cf, CURL_TRC_CF(data, cf, "[%d] PUSH_PROMISE received",
frame->promised_stream_id);
if(data->multi->push_cb) {
- struct stream_ctx *stream;
- struct stream_ctx *newstream;
+ struct h2_stream_ctx *stream;
+ struct h2_stream_ctx *newstream;
struct curl_pushheaders heads;
CURLMcode rc;
CURLcode result;
- size_t i;
/* clone the parent */
struct Curl_easy *newhandle = h2_duphandle(cf, data);
if(!newhandle) {
@@ -917,11 +903,7 @@ static int push_promise(struct Curl_cfilter *cf, Curl_set_in_callback(data, false);
/* free the headers again */
- for(i = 0; i<stream->push_headers_used; i++)
- free(stream->push_headers[i]);
- free(stream->push_headers);
- stream->push_headers = NULL;
- stream->push_headers_used = 0;
+ free_push_headers(stream);
if(rv) {
DEBUGASSERT((rv > CURL_PUSH_OK) && (rv <= CURL_PUSH_ERROROUT));
@@ -967,18 +949,8 @@ static CURLcode recvbuf_write_hds(struct Curl_cfilter *cf, struct Curl_easy *data,
const char *buf, size_t blen)
{
- struct stream_ctx *stream = H2_STREAM_CTX(data);
- ssize_t nwritten;
- CURLcode result;
-
(void)cf;
- nwritten = Curl_bufq_write(&stream->recvbuf,
- (const unsigned char *)buf, blen, &result);
- if(nwritten < 0)
- return result;
- stream->resp_hds_len += (size_t)nwritten;
- DEBUGASSERT((size_t)nwritten == blen);
- return CURLE_OK;
+ return Curl_xfer_write_resp(data, (char *)buf, blen, FALSE);
}
static CURLcode on_stream_frame(struct Curl_cfilter *cf,
@@ -986,10 +958,9 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf, const nghttp2_frame *frame)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
int32_t stream_id = frame->hd.stream_id;
CURLcode result;
- size_t rbuflen;
int rv;
if(!stream) {
@@ -999,9 +970,8 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf, switch(frame->hd.type) {
case NGHTTP2_DATA:
- rbuflen = Curl_bufq_len(&stream->recvbuf);
- CURL_TRC_CF(data, cf, "[%d] DATA, buffered=%zu, window=%d/%d",
- stream_id, rbuflen,
+ CURL_TRC_CF(data, cf, "[%d] DATA, window=%d/%d",
+ stream_id,
nghttp2_session_get_stream_effective_recv_data_length(
ctx->h2, stream->id),
nghttp2_session_get_stream_effective_local_window_size(
@@ -1018,20 +988,6 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf, if(frame->hd.flags & NGHTTP2_FLAG_END_STREAM) {
drain_stream(cf, data, stream);
}
- else if(rbuflen > stream->local_window_size) {
- int32_t wsize = nghttp2_session_get_stream_local_window_size(
- ctx->h2, stream->id);
- if(wsize > 0 && (uint32_t)wsize != stream->local_window_size) {
- /* H2 flow control is not absolute, as the server might not have the
- * same view, yet. When we receive more than we want, we enforce
- * the local window size again to make nghttp2 send WINDOW_UPATEs
- * accordingly. */
- nghttp2_session_set_local_window_size(ctx->h2,
- NGHTTP2_FLAG_NONE,
- stream->id,
- stream->local_window_size);
- }
- }
break;
case NGHTTP2_HEADERS:
if(stream->bodystarted) {
@@ -1233,7 +1189,7 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame, * servers send an explicit WINDOW_UPDATE, but not all seem to do that.
* To be safe, we UNHOLD a stream in order not to stall. */
if(CURL_WANT_SEND(data)) {
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
if(stream)
drain_stream(cf, data, stream);
}
@@ -1270,9 +1226,9 @@ static int on_data_chunk_recv(nghttp2_session *session, uint8_t flags, const uint8_t *mem, size_t len, void *userp)
{
struct Curl_cfilter *cf = userp;
- struct stream_ctx *stream;
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct h2_stream_ctx *stream;
struct Curl_easy *data_s;
- ssize_t nwritten;
CURLcode result;
(void)flags;
@@ -1296,18 +1252,15 @@ static int on_data_chunk_recv(nghttp2_session *session, uint8_t flags, if(!stream)
return NGHTTP2_ERR_CALLBACK_FAILURE;
- nwritten = Curl_bufq_write(&stream->recvbuf, mem, len, &result);
- if(nwritten < 0) {
- if(result != CURLE_AGAIN)
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ result = Curl_xfer_write_resp(data_s, (char *)mem, len, FALSE);
+ if(result && result != CURLE_AGAIN)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
- nwritten = 0;
- }
+ nghttp2_session_consume(ctx->h2, stream_id, len);
+ stream->nrcvd_data += (curl_off_t)len;
/* if we receive data for another handle, wake that up */
drain_stream(cf, data_s, stream);
-
- DEBUGASSERT((size_t)nwritten == len);
return 0;
}
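
 /* Standalone sketch with invented names: the change above stops parking DATA
  * in a per-stream recvbuf. The bytes go straight to the transfer via
  * Curl_xfer_write_resp() and are acknowledged immediately with
  * nghttp2_session_consume(), so the receive window credit is returned right
  * away and WINDOW_UPDATE frames are not held back by unread buffers. The toy
  * below only shows the credit arithmetic. */

  #include <stdio.h>

  struct demo_window { long credit; };

  static void demo_on_data(struct demo_window *w, long len)
  {
    w->credit -= len;   /* the peer spent part of our receive window */
    /* ...hand the bytes to the transfer... */
    w->credit += len;   /* consumed immediately: the window stays open */
  }

  int main(void)
  {
    struct demo_window w = { 65535 };
    demo_on_data(&w, 16384);
    printf("window credit after DATA: %ld\n", w.credit);   /* 65535 again */
    return 0;
  }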
@@ -1316,7 +1269,7 @@ static int on_stream_close(nghttp2_session *session, int32_t stream_id, {
struct Curl_cfilter *cf = userp;
struct Curl_easy *data_s, *call_data = CF_DATA_CURRENT(cf);
- struct stream_ctx *stream;
+ struct h2_stream_ctx *stream;
int rv;
(void)session;
@@ -1374,7 +1327,7 @@ static int on_begin_headers(nghttp2_session *session, const nghttp2_frame *frame, void *userp)
{
struct Curl_cfilter *cf = userp;
- struct stream_ctx *stream;
+ struct h2_stream_ctx *stream;
struct Curl_easy *data_s = NULL;
(void)cf;
@@ -1403,7 +1356,7 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame, void *userp)
{
struct Curl_cfilter *cf = userp;
- struct stream_ctx *stream;
+ struct h2_stream_ctx *stream;
struct Curl_easy *data_s;
int32_t stream_id = frame->hd.stream_id;
CURLcode result;
@@ -1459,7 +1412,7 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame, stream->push_headers = malloc(stream->push_headers_alloc *
sizeof(char *));
if(!stream->push_headers)
- return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
stream->push_headers_used = 0;
}
else if(stream->push_headers_used ==
@@ -1468,15 +1421,15 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame, if(stream->push_headers_alloc > 1000) {
/* this is beyond crazy many headers, bail out */
failf(data_s, "Too many PUSH_PROMISE headers");
- Curl_safefree(stream->push_headers);
- return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ free_push_headers(stream);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
}
stream->push_headers_alloc *= 2;
- headp = Curl_saferealloc(stream->push_headers,
- stream->push_headers_alloc * sizeof(char *));
+ headp = realloc(stream->push_headers,
+ stream->push_headers_alloc * sizeof(char *));
if(!headp) {
- stream->push_headers = NULL;
- return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ free_push_headers(stream);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
}
stream->push_headers = headp;
}
@@ -1565,7 +1518,7 @@ static ssize_t req_body_read_callback(nghttp2_session *session, {
struct Curl_cfilter *cf = userp;
struct Curl_easy *data_s;
- struct stream_ctx *stream = NULL;
+ struct h2_stream_ctx *stream = NULL;
CURLcode result;
ssize_t nread;
(void)source;
@@ -1667,7 +1620,7 @@ static CURLcode http2_data_done_send(struct Curl_cfilter *cf, {
struct cf_h2_ctx *ctx = cf->ctx;
CURLcode result = CURLE_OK;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
if(!ctx || !ctx->h2 || !stream)
goto out;
@@ -1691,7 +1644,7 @@ out: static ssize_t http2_handle_stream_close(struct Curl_cfilter *cf,
struct Curl_easy *data,
- struct stream_ctx *stream,
+ struct h2_stream_ctx *stream,
CURLcode *err)
{
ssize_t rv = 0;
@@ -1713,7 +1666,7 @@ static ssize_t http2_handle_stream_close(struct Curl_cfilter *cf, }
else if(stream->reset) {
failf(data, "HTTP/2 stream %u was reset", stream->id);
- *err = stream->bodystarted? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
+ *err = data->req.bytecount? CURLE_PARTIAL_FILE : CURLE_HTTP2;
return -1;
}
@@ -1787,7 +1740,7 @@ static void h2_pri_spec(struct Curl_easy *data, nghttp2_priority_spec *pri_spec)
{
struct Curl_data_priority *prio = &data->set.priority;
- struct stream_ctx *depstream = H2_STREAM_CTX(prio->parent);
+ struct h2_stream_ctx *depstream = H2_STREAM_CTX(prio->parent);
int32_t depstream_id = depstream? depstream->id:0;
nghttp2_priority_spec_init(pri_spec, depstream_id,
sweight_wanted(data),
@@ -1805,7 +1758,7 @@ static CURLcode h2_progress_egress(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
int rv = 0;
if(stream && stream->id > 0 &&
@@ -1838,40 +1791,26 @@ out: }
static ssize_t stream_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
- struct stream_ctx *stream,
+ struct h2_stream_ctx *stream,
char *buf, size_t len, CURLcode *err)
{
struct cf_h2_ctx *ctx = cf->ctx;
ssize_t nread = -1;
+ (void)buf;
*err = CURLE_AGAIN;
- if(!Curl_bufq_is_empty(&stream->recvbuf)) {
- nread = Curl_bufq_read(&stream->recvbuf,
- (unsigned char *)buf, len, err);
- if(nread < 0)
- goto out;
- DEBUGASSERT(nread > 0);
- }
-
- if(nread < 0) {
- if(stream->closed) {
- CURL_TRC_CF(data, cf, "[%d] returning CLOSE", stream->id);
- nread = http2_handle_stream_close(cf, data, stream, err);
- }
- else if(stream->reset ||
- (ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) ||
- (ctx->goaway && ctx->last_stream_id < stream->id)) {
- CURL_TRC_CF(data, cf, "[%d] returning ERR", stream->id);
- *err = stream->bodystarted? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
- nread = -1;
- }
- }
- else if(nread == 0) {
- *err = CURLE_AGAIN;
+ if(stream->closed) {
+ CURL_TRC_CF(data, cf, "[%d] returning CLOSE", stream->id);
+ nread = http2_handle_stream_close(cf, data, stream, err);
+ }
+ else if(stream->reset ||
+ (ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) ||
+ (ctx->goaway && ctx->last_stream_id < stream->id)) {
+ CURL_TRC_CF(data, cf, "[%d] returning ERR", stream->id);
+ *err = data->req.bytecount? CURLE_PARTIAL_FILE : CURLE_HTTP2;
nread = -1;
}
-out:
if(nread < 0 && *err != CURLE_AGAIN)
CURL_TRC_CF(data, cf, "[%d] stream_recv(len=%zu) -> %zd, %d",
stream->id, len, nread, *err);
@@ -1879,10 +1818,11 @@ out: }
static CURLcode h2_progress_ingress(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+ struct Curl_easy *data,
+ size_t data_max_bytes)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream;
+ struct h2_stream_ctx *stream;
CURLcode result = CURLE_OK;
ssize_t nread;
@@ -1899,16 +1839,17 @@ static CURLcode h2_progress_ingress(struct Curl_cfilter *cf, * all network input */
while(!ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) {
stream = H2_STREAM_CTX(data);
- if(stream && (stream->closed || Curl_bufq_is_full(&stream->recvbuf))) {
+ if(stream && (stream->closed || !data_max_bytes)) {
/* We would like to abort here and stop processing, so that
* the transfer loop can handle the data/close here. However,
* this may leave data in underlying buffers that will not
* be consumed. */
if(!cf->next || !cf->next->cft->has_data_pending(cf->next, data))
- break;
+ drain_stream(cf, data, stream);
+ break;
}
- nread = Curl_bufq_slurp(&ctx->inbufq, nw_in_reader, cf, &result);
+ nread = Curl_bufq_sipn(&ctx->inbufq, 0, nw_in_reader, cf, &result);
if(nread < 0) {
if(result != CURLE_AGAIN) {
failf(data, "Failed receiving HTTP2 data: %d(%s)", result,
@@ -1923,8 +1864,9 @@ static CURLcode h2_progress_ingress(struct Curl_cfilter *cf, break;
}
else {
- CURL_TRC_CF(data, cf, "[0] ingress: read %zd bytes",
- nread);
+ CURL_TRC_CF(data, cf, "[0] ingress: read %zd bytes", nread);
+ data_max_bytes = (data_max_bytes > (size_t)nread)?
+ (data_max_bytes - (size_t)nread) : 0;
}
if(h2_process_pending_input(cf, data, &result))
@@ -1942,7 +1884,7 @@ static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data, char *buf, size_t len, CURLcode *err)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
ssize_t nread = -1;
CURLcode result;
struct cf_call_data save;
@@ -1966,7 +1908,7 @@ static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data, goto out;
if(nread < 0) {
- *err = h2_progress_ingress(cf, data);
+ *err = h2_progress_ingress(cf, data, len);
if(*err)
goto out;
@@ -2011,9 +1953,8 @@ out: nread = -1;
}
CURL_TRC_CF(data, cf, "[%d] cf_recv(len=%zu) -> %zd %d, "
- "buffered=%zu, window=%d/%d, connection %d/%d",
+ "window=%d/%d, connection %d/%d",
stream->id, len, nread, *err,
- Curl_bufq_len(&stream->recvbuf),
nghttp2_session_get_stream_effective_recv_data_length(
ctx->h2, stream->id),
nghttp2_session_get_stream_effective_local_window_size(
@@ -2025,12 +1966,13 @@ out: return nread;
}
-static ssize_t h2_submit(struct stream_ctx **pstream,
+static ssize_t h2_submit(struct h2_stream_ctx **pstream,
struct Curl_cfilter *cf, struct Curl_easy *data,
- const void *buf, size_t len, CURLcode *err)
+ const void *buf, size_t len,
+ size_t *phdslen, CURLcode *err)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = NULL;
+ struct h2_stream_ctx *stream = NULL;
struct dynhds h2_headers;
nghttp2_nv *nva = NULL;
const void *body = NULL;
@@ -2040,6 +1982,7 @@ static ssize_t h2_submit(struct stream_ctx **pstream, nghttp2_priority_spec pri_spec;
ssize_t nwritten;
+ *phdslen = 0;
Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
*err = http2_data_setup(cf, data, &stream);
@@ -2051,6 +1994,7 @@ static ssize_t h2_submit(struct stream_ctx **pstream, nwritten = Curl_h1_req_parse_read(&stream->h1, buf, len, NULL, 0, err);
if(nwritten < 0)
goto out;
+ *phdslen = (size_t)nwritten;
if(!stream->h1.done) {
/* need more data */
goto out;
@@ -2169,10 +2113,11 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data, const void *buf, size_t len, CURLcode *err)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
struct cf_call_data save;
int rv;
ssize_t nwritten;
+ size_t hdslen = 0;
CURLcode result;
int blocked = 0, was_blocked = 0;
@@ -2236,11 +2181,12 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data, }
}
else {
- nwritten = h2_submit(&stream, cf, data, buf, len, err);
+ nwritten = h2_submit(&stream, cf, data, buf, len, &hdslen, err);
if(nwritten < 0) {
goto out;
}
DEBUGASSERT(stream);
+ DEBUGASSERT(hdslen <= (size_t)nwritten);
}
/* Call the nghttp2 send loop and flush to write ALL buffered data,
@@ -2275,18 +2221,26 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data, * frame buffer or our network out buffer. */
size_t rwin = nghttp2_session_get_stream_remote_window_size(ctx->h2,
stream->id);
- /* Whatever the cause, we need to return CURL_EAGAIN for this call.
- * We have unwritten state that needs us being invoked again and EAGAIN
- * is the only way to ensure that. */
- stream->upload_blocked_len = nwritten;
+ /* At the start of a stream, we are called with request headers
+ * and, possibly, parts of the body. Later, only body data.
+ * If we cannot send pure body data, we EAGAIN. If there had been
+       * headers, we return that *they* have been written and remember
+       * the blocked amount for the body data only. */
+ stream->upload_blocked_len = ((size_t)nwritten) - hdslen;
CURL_TRC_CF(data, cf, "[%d] cf_send(len=%zu) BLOCK: win %u/%zu "
- "blocked_len=%zu",
+ "hds_len=%zu blocked_len=%zu",
stream->id, len,
nghttp2_session_get_remote_window_size(ctx->h2), rwin,
- nwritten);
- *err = CURLE_AGAIN;
- nwritten = -1;
- goto out;
+ hdslen, stream->upload_blocked_len);
+ if(hdslen) {
+ *err = CURLE_OK;
+ nwritten = hdslen;
+ }
+ else {
+ *err = CURLE_AGAIN;
+ nwritten = -1;
+ goto out;
+ }
}
else if(should_close_session(ctx)) {
/* nghttp2 thinks this session is done. If the stream has not been
@@ -2340,7 +2294,7 @@ static void cf_h2_adjust_pollset(struct Curl_cfilter *cf, sock = Curl_conn_cf_get_socket(cf, data);
Curl_pollset_check(data, ps, sock, &want_recv, &want_send);
if(want_recv || want_send) {
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
struct cf_call_data save;
bool c_exhaust, s_exhaust;
@@ -2387,7 +2341,7 @@ static CURLcode cf_h2_connect(struct Curl_cfilter *cf, goto out;
}
- result = h2_progress_ingress(cf, data);
+ result = h2_progress_ingress(cf, data, H2_CHUNK_SIZE);
if(result)
goto out;
@@ -2441,7 +2395,7 @@ static CURLcode http2_data_pause(struct Curl_cfilter *cf, {
#ifdef NGHTTP2_HAS_SET_LOCAL_WINDOW_SIZE
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
DEBUGASSERT(data);
if(ctx && ctx->h2 && stream) {
@@ -2510,10 +2464,10 @@ static CURLcode cf_h2_cntrl(struct Curl_cfilter *cf, result = http2_data_done_send(cf, data);
break;
case CF_CTRL_DATA_DETACH:
- http2_data_done(cf, data, TRUE);
+ http2_data_done(cf, data);
break;
case CF_CTRL_DATA_DONE:
- http2_data_done(cf, data, arg1 != 0);
+ http2_data_done(cf, data);
break;
default:
break;
@@ -2526,11 +2480,10 @@ static bool cf_h2_data_pending(struct Curl_cfilter *cf, const struct Curl_easy *data)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
if(ctx && (!Curl_bufq_is_empty(&ctx->inbufq)
- || (stream && !Curl_bufq_is_empty(&stream->sendbuf))
- || (stream && !Curl_bufq_is_empty(&stream->recvbuf))))
+ || (stream && !Curl_bufq_is_empty(&stream->sendbuf))))
return TRUE;
return cf->next? cf->next->cft->has_data_pending(cf->next, data) : FALSE;
}
@@ -2615,7 +2568,8 @@ struct Curl_cftype Curl_cft_nghttp2 = { static CURLcode http2_cfilter_add(struct Curl_cfilter **pcf,
struct Curl_easy *data,
struct connectdata *conn,
- int sockindex)
+ int sockindex,
+ bool via_h1_upgrade)
{
struct Curl_cfilter *cf = NULL;
struct cf_h2_ctx *ctx;
@@ -2630,8 +2584,9 @@ static CURLcode http2_cfilter_add(struct Curl_cfilter **pcf, if(result)
goto out;
+ ctx = NULL;
Curl_conn_cf_add(data, conn, sockindex, cf);
- result = CURLE_OK;
+ result = cf_h2_ctx_init(cf, data, via_h1_upgrade);
out:
if(result)
@@ -2641,7 +2596,8 @@ out: }
static CURLcode http2_cfilter_insert_after(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+ struct Curl_easy *data,
+ bool via_h1_upgrade)
{
struct Curl_cfilter *cf_h2 = NULL;
struct cf_h2_ctx *ctx;
@@ -2656,8 +2612,9 @@ static CURLcode http2_cfilter_insert_after(struct Curl_cfilter *cf, if(result)
goto out;
+ ctx = NULL;
Curl_conn_cf_insert_after(cf, cf_h2);
- result = CURLE_OK;
+ result = cf_h2_ctx_init(cf_h2, data, via_h1_upgrade);
out:
if(result)
@@ -2714,11 +2671,7 @@ CURLcode Curl_http2_switch(struct Curl_easy *data, DEBUGASSERT(!Curl_conn_is_http2(data, conn, sockindex));
DEBUGF(infof(data, "switching to HTTP/2"));
- result = http2_cfilter_add(&cf, data, conn, sockindex);
- if(result)
- return result;
-
- result = cf_h2_ctx_init(cf, data, FALSE);
+ result = http2_cfilter_add(&cf, data, conn, sockindex, FALSE);
if(result)
return result;
@@ -2741,15 +2694,11 @@ CURLcode Curl_http2_switch_at(struct Curl_cfilter *cf, struct Curl_easy *data) DEBUGASSERT(!Curl_cf_is_http2(cf, data));
- result = http2_cfilter_insert_after(cf, data);
+ result = http2_cfilter_insert_after(cf, data, FALSE);
if(result)
return result;
cf_h2 = cf->next;
- result = cf_h2_ctx_init(cf_h2, data, FALSE);
- if(result)
- return result;
-
cf->conn->httpversion = 20; /* we know we're on HTTP/2 now */
cf->conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
cf->conn->bundle->multiuse = BUNDLE_MULTIPLEX;
@@ -2774,17 +2723,13 @@ CURLcode Curl_http2_upgrade(struct Curl_easy *data, DEBUGF(infof(data, "upgrading to HTTP/2"));
DEBUGASSERT(data->req.upgr101 == UPGR101_RECEIVED);
- result = http2_cfilter_add(&cf, data, conn, sockindex);
+ result = http2_cfilter_add(&cf, data, conn, sockindex, TRUE);
if(result)
return result;
DEBUGASSERT(cf->cft == &Curl_cft_nghttp2);
ctx = cf->ctx;
- result = cf_h2_ctx_init(cf, data, TRUE);
- if(result)
- return result;
-
if(nread > 0) {
/* Remaining data from the protocol switch reply is already using
* the switched protocol, ie. HTTP/2. We add that to the network
@@ -2823,7 +2768,7 @@ CURLcode Curl_http2_upgrade(struct Curl_easy *data, CURLE_HTTP2_STREAM error! */
bool Curl_h2_http_1_1_error(struct Curl_easy *data)
{
- struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct h2_stream_ctx *stream = H2_STREAM_CTX(data);
return (stream && stream->error == NGHTTP2_HTTP_1_1_REQUIRED);
}
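
For readers skimming the cf_h2_send() hunks above: the new hdslen bookkeeping changes what the filter reports when nghttp2 accepts the request headers but flushing the body blocks. Instead of always answering EAGAIN, it now acknowledges the header bytes and only remembers the blocked body length for the next call. A minimal, self-contained sketch of that decision (illustrative only; the names and return codes below are stand-ins, not libcurl's):

#include <stdio.h>
#include <stddef.h>

#define SK_OK    0   /* stand-in for CURLE_OK */
#define SK_AGAIN 1   /* stand-in for CURLE_AGAIN */

/* 'accepted' bytes were buffered for the stream (headers first, then body),
   but flushing to the network blocked. Decide what to tell the caller. */
static int report_blocked_send(size_t accepted, size_t hdslen,
                               size_t *preported, size_t *pblocked_body)
{
  if(hdslen) {
    /* the request headers went through: acknowledge them now and remember
       how much body data is still blocked for the next call */
    *preported = hdslen;
    *pblocked_body = accepted - hdslen;
    return SK_OK;
  }
  /* pure body data could not be sent at all: ask the caller to retry */
  *preported = 0;
  *pblocked_body = accepted;
  return SK_AGAIN;
}

int main(void)
{
  size_t reported, blocked;
  int rc = report_blocked_send(1200, 300, &reported, &blocked);
  printf("rc=%d reported=%zu blocked_body=%zu\n", rc, reported, blocked);
  return 0;
}
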
diff --git a/libs/libcurl/src/http_chunks.c b/libs/libcurl/src/http_chunks.c
index 8f8098e015..eb05ea9190 100644
--- a/libs/libcurl/src/http_chunks.c
+++ b/libs/libcurl/src/http_chunks.c
@@ -27,10 +27,12 @@
#ifndef CURL_DISABLE_HTTP
#include "urldata.h" /* it includes http_chunks.h */
+#include "curl_printf.h"
#include "sendf.h" /* for the client write stuff */
#include "dynbuf.h"
#include "content_encoding.h"
#include "http.h"
+#include "multiif.h"
#include "strtoofft.h"
#include "warnless.h"
@@ -152,9 +154,9 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, ch->hexbuffer[ch->hexindex++] = *buf;
buf++;
blen--;
+ (*pconsumed)++;
}
else {
- char *endptr;
if(0 == ch->hexindex) {
/* This is illegal data, we received junk where we expected
a hexadecimal digit. */
@@ -166,7 +168,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, /* blen and buf are unmodified */
ch->hexbuffer[ch->hexindex] = 0;
- if(curlx_strtoofft(ch->hexbuffer, &endptr, 16, &ch->datasize)) {
+ if(curlx_strtoofft(ch->hexbuffer, NULL, 16, &ch->datasize)) {
failf(data, "chunk hex-length not valid: '%s'", ch->hexbuffer);
ch->state = CHUNK_FAILED;
ch->last_code = CHUNKE_ILLEGAL_HEX;
@@ -189,6 +191,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, buf++;
blen--;
+ (*pconsumed)++;
break;
case CHUNK_DATA:
@@ -236,6 +239,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, }
buf++;
blen--;
+ (*pconsumed)++;
break;
case CHUNK_TRAILER:
@@ -293,6 +297,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, }
buf++;
blen--;
+ (*pconsumed)++;
break;
case CHUNK_TRAILER_CR:
@@ -300,6 +305,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, ch->state = CHUNK_TRAILER_POSTCR;
buf++;
blen--;
+ (*pconsumed)++;
}
else {
ch->state = CHUNK_FAILED;
@@ -320,6 +326,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, /* skip if CR */
buf++;
blen--;
+ (*pconsumed)++;
}
/* now wait for the final LF */
ch->state = CHUNK_STOP;
@@ -328,6 +335,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data, case CHUNK_STOP:
if(*buf == 0x0a) {
blen--;
+ (*pconsumed)++;
/* Record the length of any data left in the end of the buffer
even if there's no more chunks to read */
ch->datasize = blen;
@@ -386,7 +394,7 @@ struct chunked_writer { static CURLcode cw_chunked_init(struct Curl_easy *data,
struct Curl_cwriter *writer)
{
- struct chunked_writer *ctx = (struct chunked_writer *)writer;
+ struct chunked_writer *ctx = writer->ctx;
data->req.chunk = TRUE; /* chunks coming our way. */
Curl_httpchunk_init(data, &ctx->ch, FALSE);
@@ -396,7 +404,7 @@ static CURLcode cw_chunked_init(struct Curl_easy *data, static void cw_chunked_close(struct Curl_easy *data,
struct Curl_cwriter *writer)
{
- struct chunked_writer *ctx = (struct chunked_writer *)writer;
+ struct chunked_writer *ctx = writer->ctx;
Curl_httpchunk_free(data, &ctx->ch);
}
@@ -404,7 +412,7 @@ static CURLcode cw_chunked_write(struct Curl_easy *data, struct Curl_cwriter *writer, int type,
const char *buf, size_t blen)
{
- struct chunked_writer *ctx = (struct chunked_writer *)writer;
+ struct chunked_writer *ctx = writer->ctx;
CURLcode result;
size_t consumed;
@@ -452,4 +460,207 @@ const struct Curl_cwtype Curl_httpchunk_unencoder = { sizeof(struct chunked_writer)
};
+/* max length of a HTTP chunk that we want to generate */
+#define CURL_CHUNKED_MINLEN (1024)
+#define CURL_CHUNKED_MAXLEN (64 * 1024)
+
+struct chunked_reader {
+ struct Curl_creader super;
+ struct bufq chunkbuf;
+ BIT(read_eos); /* we read an EOS from the next reader */
+ BIT(eos); /* we have returned an EOS */
+};
+
+static CURLcode cr_chunked_init(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct chunked_reader *ctx = reader->ctx;
+ (void)data;
+ Curl_bufq_init2(&ctx->chunkbuf, CURL_CHUNKED_MAXLEN, 2, BUFQ_OPT_SOFT_LIMIT);
+ return CURLE_OK;
+}
+
+static void cr_chunked_close(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct chunked_reader *ctx = reader->ctx;
+ (void)data;
+ Curl_bufq_free(&ctx->chunkbuf);
+}
+
+static CURLcode add_last_chunk(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct chunked_reader *ctx = reader->ctx;
+ struct curl_slist *trailers = NULL, *tr;
+ CURLcode result;
+ size_t n;
+ int rc;
+
+ if(!data->set.trailer_callback) {
+ return Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("0\r\n\r\n"), &n);
+ }
+
+ result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("0\r\n"), &n);
+ if(result)
+ goto out;
+
+ Curl_set_in_callback(data, true);
+ rc = data->set.trailer_callback(&trailers, data->set.trailer_data);
+ Curl_set_in_callback(data, false);
+
+ if(rc != CURL_TRAILERFUNC_OK) {
+ failf(data, "operation aborted by trailing headers callback");
+ result = CURLE_ABORTED_BY_CALLBACK;
+ goto out;
+ }
+
+ for(tr = trailers; tr; tr = tr->next) {
+ /* only add correctly formatted trailers */
+ char *ptr = strchr(tr->data, ':');
+ if(!ptr || *(ptr + 1) != ' ') {
+ infof(data, "Malformatted trailing header, skipping trailer");
+ continue;
+ }
+
+ result = Curl_bufq_cwrite(&ctx->chunkbuf, tr->data,
+ strlen(tr->data), &n);
+ if(!result)
+ result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("\r\n"), &n);
+ if(result)
+ goto out;
+ }
+
+ result = Curl_bufq_cwrite(&ctx->chunkbuf, STRCONST("\r\n"), &n);
+
+out:
+ curl_slist_free_all(trailers);
+ return result;
+}
+
+static CURLcode add_chunk(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen)
+{
+ struct chunked_reader *ctx = reader->ctx;
+ CURLcode result;
+ char tmp[CURL_CHUNKED_MINLEN];
+ size_t nread;
+ bool eos;
+
+ DEBUGASSERT(!ctx->read_eos);
+ blen = CURLMIN(blen, CURL_CHUNKED_MAXLEN); /* respect our buffer pref */
+ if(blen < sizeof(tmp)) {
+ /* small read, make a chunk of decent size */
+ buf = tmp;
+ blen = sizeof(tmp);
+ }
+ else {
+ /* larger read, make a chunk that will fit when read back */
+ blen -= (8 + 2 + 2); /* deduct max overhead, 8 hex + 2*crlf */
+ }
+
+ result = Curl_creader_read(data, reader->next, buf, blen, &nread, &eos);
+ if(result)
+ return result;
+ if(eos)
+ ctx->read_eos = TRUE;
+
+ if(nread) {
+ /* actually got bytes, wrap them into the chunkbuf */
+ char hd[11] = "";
+ int hdlen;
+ size_t n;
+
+ hdlen = msnprintf(hd, sizeof(hd), "%zx\r\n", nread);
+ if(hdlen <= 0)
+ return CURLE_READ_ERROR;
+ /* On a soft-limited bufq, we do not need to check that all was written */
+ result = Curl_bufq_cwrite(&ctx->chunkbuf, hd, hdlen, &n);
+ if(!result)
+ result = Curl_bufq_cwrite(&ctx->chunkbuf, buf, nread, &n);
+ if(!result)
+ result = Curl_bufq_cwrite(&ctx->chunkbuf, "\r\n", 2, &n);
+ if(result)
+ return result;
+ }
+
+ if(ctx->read_eos)
+ return add_last_chunk(data, reader);
+ return CURLE_OK;
+}
+
+static CURLcode cr_chunked_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *pnread, bool *peos)
+{
+ struct chunked_reader *ctx = reader->ctx;
+ CURLcode result = CURLE_READ_ERROR;
+
+ *pnread = 0;
+ *peos = ctx->eos;
+
+ if(!ctx->eos) {
+ if(!ctx->read_eos && Curl_bufq_is_empty(&ctx->chunkbuf)) {
+ /* Still getting data from the next reader, buffer is empty */
+ result = add_chunk(data, reader, buf, blen);
+ if(result)
+ return result;
+ }
+
+ if(!Curl_bufq_is_empty(&ctx->chunkbuf)) {
+ result = Curl_bufq_cread(&ctx->chunkbuf, buf, blen, pnread);
+ if(!result && ctx->read_eos && Curl_bufq_is_empty(&ctx->chunkbuf)) {
+ /* no more data, read all, done. */
+ ctx->eos = TRUE;
+ *peos = TRUE;
+ }
+ return result;
+ }
+ }
+ /* We may get here, because we are done or because callbacks paused */
+ DEBUGASSERT(ctx->eos || !ctx->read_eos);
+ return CURLE_OK;
+}
+
+static curl_off_t cr_chunked_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ /* this reader changes length depending on input */
+ (void)data;
+ (void)reader;
+ return -1;
+}
+
+/* HTTP chunked Transfer-Encoding encoder */
+const struct Curl_crtype Curl_httpchunk_encoder = {
+ "chunked",
+ cr_chunked_init,
+ cr_chunked_read,
+ cr_chunked_close,
+ Curl_creader_def_needs_rewind,
+ cr_chunked_total_length,
+ Curl_creader_def_resume_from,
+ Curl_creader_def_rewind,
+ Curl_creader_def_unpause,
+ Curl_creader_def_done,
+ sizeof(struct chunked_reader)
+};
+
+CURLcode Curl_httpchunk_add_reader(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = NULL;
+ CURLcode result;
+
+ result = Curl_creader_create(&reader, data, &Curl_httpchunk_encoder,
+ CURL_CR_TRANSFER_ENCODE);
+ if(!result)
+ result = Curl_creader_add(data, reader);
+
+ if(result && reader)
+ Curl_creader_free(data, reader);
+ return result;
+}
+
#endif /* CURL_DISABLE_HTTP */
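
The Curl_httpchunk_encoder added above wraps outgoing body data in HTTP/1.1 chunked framing: a hexadecimal length, CRLF, the payload, CRLF, and finally a zero-length chunk (optionally followed by trailer headers) that ends the stream. The following standalone illustration of that wire format uses only the C standard library and is not libcurl code:

#include <stdio.h>
#include <string.h>

/* Frame 'data' as one HTTP/1.1 chunk into 'out'; returns the number of
   bytes written, or 0 if 'out' is too small. */
static size_t frame_chunk(const char *data, size_t len,
                          char *out, size_t outlen)
{
  int hd = snprintf(out, outlen, "%zx\r\n", len);
  if(hd <= 0 || (size_t)hd + len + 2 > outlen)
    return 0;
  memcpy(out + hd, data, len);
  memcpy(out + hd + len, "\r\n", 2);
  return (size_t)hd + len + 2;
}

int main(void)
{
  char out[128];
  size_t n = frame_chunk("hello", 5, out, sizeof(out));
  fwrite(out, 1, n, stdout);   /* prints "5\r\nhello\r\n" */
  fputs("0\r\n\r\n", stdout);  /* terminating chunk, no trailers */
  return 0;
}
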
diff --git a/libs/libcurl/src/http_chunks.h b/libs/libcurl/src/http_chunks.h
index ea89949d7f..f40f57e723 100644
--- a/libs/libcurl/src/http_chunks.h
+++ b/libs/libcurl/src/http_chunks.h
@@ -133,6 +133,13 @@ bool Curl_httpchunk_is_done(struct Curl_easy *data, struct Curl_chunker *ch);
extern const struct Curl_cwtype Curl_httpchunk_unencoder;
+extern const struct Curl_crtype Curl_httpchunk_encoder;
+
+/**
+ * Add a transfer-encoding "chunked" reader to the transfer's reader stack
+ */
+CURLcode Curl_httpchunk_add_reader(struct Curl_easy *data);
+
#endif /* !CURL_DISABLE_HTTP */
#endif /* HEADER_CURL_HTTP_CHUNKS_H */
diff --git a/libs/libcurl/src/imap.c b/libs/libcurl/src/imap.c
index 0cec828c46..404b101591 100644
--- a/libs/libcurl/src/imap.c
+++ b/libs/libcurl/src/imap.c
@@ -770,6 +770,7 @@ static CURLcode imap_perform_append(struct Curl_easy *data)
return CURLE_URL_MALFORMAT;
}
+#ifndef CURL_DISABLE_MIME
/* Prepare the mime data if some. */
if(data->set.mimepost.kind != MIMEKIND_NONE) {
/* Use the whole structure as data. */
@@ -785,18 +786,18 @@ static CURLcode imap_perform_append(struct Curl_easy *data) result = Curl_mime_add_header(&data->set.mimepost.curlheaders,
"Mime-Version: 1.0");
- /* Make sure we will read the entire mime structure. */
if(!result)
- result = Curl_mime_rewind(&data->set.mimepost);
-
+ result = Curl_creader_set_mime(data, &data->set.mimepost);
+ if(result)
+ return result;
+ data->state.infilesize = Curl_creader_client_length(data);
+ }
+ else
+#endif
+ {
+ result = Curl_creader_set_fread(data, data->state.infilesize);
if(result)
return result;
-
- data->state.infilesize = Curl_mime_size(&data->set.mimepost);
-
- /* Read from mime structure. */
- data->state.fread_func = (curl_read_callback) Curl_mime_read;
- data->state.in = (void *) &data->set.mimepost;
}
/* Check we know the size of the upload */
@@ -1211,14 +1212,14 @@ static CURLcode imap_state_fetch_resp(struct Curl_easy *data, if(data->req.bytecount == size)
/* The entire data is already transferred! */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
else {
/* IMAP download */
data->req.maxdownload = size;
/* force a recv/send check of this connection, as the data might've been
read off the socket already */
data->state.select_bits = CURL_CSELECT_IN;
- Curl_setup_transfer(data, FIRSTSOCKET, size, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, size, FALSE, -1);
}
}
else {
@@ -1266,7 +1267,7 @@ static CURLcode imap_state_append_resp(struct Curl_easy *data, int imapcode, Curl_pgrsSetUploadSize(data, data->state.infilesize);
/* IMAP upload */
- Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
/* End of DO phase */
imap_state(data, IMAP_STOP);
@@ -1297,7 +1298,6 @@ static CURLcode imap_statemachine(struct Curl_easy *data, struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- curl_socket_t sock = conn->sock[FIRSTSOCKET];
int imapcode;
struct imap_conn *imapc = &conn->proto.imapc;
struct pingpong *pp = &imapc->pp;
@@ -1314,7 +1314,7 @@ static CURLcode imap_statemachine(struct Curl_easy *data, do {
/* Read the response from the server */
- result = Curl_pp_readresp(data, sock, pp, &imapcode, &nread);
+ result = Curl_pp_readresp(data, FIRSTSOCKET, pp, &imapcode, &nread);
if(result)
return result;
@@ -1513,10 +1513,10 @@ static CURLcode imap_done(struct Curl_easy *data, CURLcode status, }
else if(!data->set.connect_only && !imap->custom &&
(imap->uid || imap->mindex || data->state.upload ||
- data->set.mimepost.kind != MIMEKIND_NONE)) {
+ IS_MIME_POST(data))) {
/* Handle responses after FETCH or APPEND transfer has finished */
- if(!data->state.upload && data->set.mimepost.kind == MIMEKIND_NONE)
+ if(!data->state.upload && !IS_MIME_POST(data))
imap_state(data, IMAP_FETCH_FINAL);
else {
/* End the APPEND command first by sending an empty line */
@@ -1582,7 +1582,7 @@ static CURLcode imap_perform(struct Curl_easy *data, bool *connected, selected = TRUE;
/* Start the first command in the DO phase */
- if(data->state.upload || data->set.mimepost.kind != MIMEKIND_NONE)
+ if(data->state.upload || IS_MIME_POST(data))
/* APPEND can be executed directly */
result = imap_perform_append(data);
else if(imap->custom && (selected || !imap->mailbox))
@@ -1692,7 +1692,7 @@ static CURLcode imap_dophase_done(struct Curl_easy *data, bool connected) if(imap->transfer != PPTRANSFER_BODY)
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
return CURLE_OK;
}
diff --git a/libs/libcurl/src/krb5.c b/libs/libcurl/src/krb5.c
index 6c245c575d..e04c2042f0 100644
--- a/libs/libcurl/src/krb5.c
+++ b/libs/libcurl/src/krb5.c
@@ -52,6 +52,7 @@
#include "ftp.h"
#include "curl_gssapi.h"
#include "sendf.h"
+#include "transfer.h"
#include "curl_krb5.h"
#include "warnless.h"
#include "strcase.h"
@@ -65,7 +66,7 @@ static CURLcode ftpsend(struct Curl_easy *data, struct connectdata *conn,
const char *cmd)
{
- ssize_t bytes_written;
+ size_t bytes_written;
#define SBUF_SIZE 1024
char s[SBUF_SIZE];
size_t write_len;
@@ -90,8 +91,7 @@ static CURLcode ftpsend(struct Curl_easy *data, struct connectdata *conn, #ifdef HAVE_GSSAPI
conn->data_prot = PROT_CMD;
#endif
- result = Curl_nwrite(data, FIRSTSOCKET, sptr, write_len,
- &bytes_written);
+ result = Curl_xfer_send(data, sptr, write_len, &bytes_written);
#ifdef HAVE_GSSAPI
DEBUGASSERT(data_sec > PROT_NONE && data_sec < PROT_LAST);
conn->data_prot = data_sec;
@@ -100,9 +100,9 @@ static CURLcode ftpsend(struct Curl_easy *data, struct connectdata *conn, if(result)
break;
- Curl_debug(data, CURLINFO_HEADER_OUT, sptr, (size_t)bytes_written);
+ Curl_debug(data, CURLINFO_HEADER_OUT, sptr, bytes_written);
- if(bytes_written != (ssize_t)write_len) {
+ if(bytes_written != write_len) {
write_len -= bytes_written;
sptr += bytes_written;
}
@@ -470,7 +470,7 @@ socket_read(struct Curl_easy *data, int sockindex, void *to, size_t len) ssize_t nread = 0;
while(len > 0) {
- nread = Curl_conn_recv(data, sockindex, to_p, len, &result);
+ result = Curl_conn_recv(data, sockindex, to_p, len, &nread);
if(nread > 0) {
len -= nread;
to_p += nread;
@@ -494,11 +494,11 @@ socket_write(struct Curl_easy *data, int sockindex, const void *to, {
const char *to_p = to;
CURLcode result;
- ssize_t written;
+ size_t written;
while(len > 0) {
- written = Curl_conn_send(data, sockindex, to_p, len, &result);
- if(written > 0) {
+ result = Curl_conn_send(data, sockindex, to_p, len, &written);
+ if(!result && written > 0) {
len -= written;
to_p += written;
}
@@ -567,8 +567,11 @@ static ssize_t sec_recv(struct Curl_easy *data, int sockindex, *err = CURLE_OK;
/* Handle clear text response. */
- if(conn->sec_complete == 0 || conn->data_prot == PROT_CLEAR)
- return Curl_conn_recv(data, sockindex, buffer, len, err);
+ if(conn->sec_complete == 0 || conn->data_prot == PROT_CLEAR) {
+ ssize_t nread;
+ *err = Curl_conn_recv(data, sockindex, buffer, len, &nread);
+ return nread;
+ }
if(conn->in_buffer.eof_flag) {
conn->in_buffer.eof_flag = 0;
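
The krb5 helpers above now loop until the whole buffer has been handled, because a single Curl_conn_send()/Curl_conn_recv() call may transfer only part of it. The same partial-write pattern expressed with plain POSIX write(), as a self-contained illustration rather than curl code:

#include <unistd.h>
#include <errno.h>
#include <stddef.h>

/* Keep writing until all 'len' bytes are out or a real error occurs. */
static int write_all(int fd, const void *buf, size_t len)
{
  const char *p = buf;
  while(len > 0) {
    ssize_t n = write(fd, p, len);
    if(n > 0) {
      p += n;
      len -= (size_t)n;
    }
    else if(n < 0 && errno == EINTR)
      continue;      /* interrupted by a signal, just retry */
    else
      return -1;     /* error (or zero bytes written) */
  }
  return 0;
}
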
diff --git a/libs/libcurl/src/ldap.c b/libs/libcurl/src/ldap.c
index 9d8bee81a0..4d1cfadb36 100644
--- a/libs/libcurl/src/ldap.c
+++ b/libs/libcurl/src/ldap.c
@@ -371,7 +371,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done)
#ifdef HAVE_LDAP_SSL
#ifdef USE_WIN32_LDAP
/* Win32 LDAP SDK doesn't support insecure mode without CA! */
- server = ldap_sslinit(host, conn->port, 1);
+ server = ldap_sslinit(host, conn->primary.remote_port, 1);
ldap_set_option(server, LDAP_OPT_SSL, LDAP_OPT_ON);
#else
int ldap_option;
@@ -417,10 +417,10 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) result = CURLE_SSL_CERTPROBLEM;
goto quit;
}
- server = ldapssl_init(host, conn->port, 1);
+ server = ldapssl_init(host, conn->primary.remote_port, 1);
if(!server) {
failf(data, "LDAP local: Cannot connect to %s:%u",
- conn->host.dispname, conn->port);
+ conn->host.dispname, conn->primary.remote_port);
result = CURLE_COULDNT_CONNECT;
goto quit;
}
@@ -458,10 +458,10 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) result = CURLE_SSL_CERTPROBLEM;
goto quit;
}
- server = ldap_init(host, conn->port);
+ server = ldap_init(host, conn->primary.remote_port);
if(!server) {
failf(data, "LDAP local: Cannot connect to %s:%u",
- conn->host.dispname, conn->port);
+ conn->host.dispname, conn->primary.remote_port);
result = CURLE_COULDNT_CONNECT;
goto quit;
}
@@ -499,10 +499,10 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) goto quit;
}
else {
- server = ldap_init(host, conn->port);
+ server = ldap_init(host, conn->primary.remote_port);
if(!server) {
failf(data, "LDAP local: Cannot connect to %s:%u",
- conn->host.dispname, conn->port);
+ conn->host.dispname, conn->primary.remote_port);
result = CURLE_COULDNT_CONNECT;
goto quit;
}
@@ -749,7 +749,7 @@ quit: FREE_ON_WINLDAP(host);
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
connclose(conn, "LDAP connection always disable reuse");
return result;
diff --git a/libs/libcurl/src/libcurl.plist b/libs/libcurl/src/libcurl.plist
index f9df74ffe0..13b1785006 100644
--- a/libs/libcurl/src/libcurl.plist
+++ b/libs/libcurl/src/libcurl.plist
@@ -15,7 +15,7 @@
<string>se.curl.libcurl</string>
<key>CFBundleVersion</key>
- <string>8.6.0</string>
+ <string>8.7.1</string>
<key>CFBundleName</key>
<string>libcurl</string>
@@ -27,9 +27,9 @@ <string>????</string>
<key>CFBundleShortVersionString</key>
- <string>libcurl 8.6.0</string>
+ <string>libcurl 8.7.1</string>
<key>CFBundleGetInfoString</key>
- <string>libcurl.plist 8.6.0</string>
+ <string>libcurl.plist 8.7.1</string>
</dict>
</plist>
diff --git a/libs/libcurl/src/md4.c b/libs/libcurl/src/md4.c
index f48ea9987c..e13577088c 100644
--- a/libs/libcurl/src/md4.c
+++ b/libs/libcurl/src/md4.c
@@ -28,6 +28,7 @@
#include <string.h>
+#include "strdup.h"
#include "curl_md4.h"
#include "warnless.h"
diff --git a/libs/libcurl/src/mime.c b/libs/libcurl/src/mime.c
index 167c8bab2d..94930eba73 100644
--- a/libs/libcurl/src/mime.c
+++ b/libs/libcurl/src/mime.c
@@ -74,6 +74,7 @@ static curl_off_t encoder_base64_size(curl_mimepart *part);
static size_t encoder_qp_read(char *buffer, size_t size, bool ateof,
curl_mimepart *part);
static curl_off_t encoder_qp_size(curl_mimepart *part);
+static curl_off_t mime_size(curl_mimepart *part);
static const struct mime_encoder encoders[] = {
{"binary", encoder_nop_read, encoder_nop_size},
@@ -1602,7 +1603,7 @@ size_t Curl_mime_read(char *buffer, size_t size, size_t nitems, void *instream) }
/* Rewind mime stream. */
-CURLcode Curl_mime_rewind(curl_mimepart *part)
+static CURLcode mime_rewind(curl_mimepart *part)
{
return mime_part_rewind(part) == CURL_SEEKFUNC_OK?
CURLE_OK: CURLE_SEND_FAIL_REWIND;
@@ -1634,7 +1635,7 @@ static curl_off_t multipart_size(curl_mime *mime) size = boundarysize; /* Final boundary - CRLF after headers. */
for(part = mime->firstpart; part; part = part->nextpart) {
- curl_off_t sz = Curl_mime_size(part);
+ curl_off_t sz = mime_size(part);
if(sz < 0)
size = sz;
@@ -1647,7 +1648,7 @@ static curl_off_t multipart_size(curl_mime *mime) }
/* Get/compute mime size. */
-curl_off_t Curl_mime_size(curl_mimepart *part)
+static curl_off_t mime_size(curl_mimepart *part)
{
curl_off_t size;
@@ -1896,7 +1897,7 @@ CURLcode Curl_mime_prepare_headers(struct Curl_easy *data, }
/* Recursively reset paused status in the given part. */
-void Curl_mime_unpause(curl_mimepart *part)
+static void mime_unpause(curl_mimepart *part)
{
if(part) {
if(part->lastreadstatus == CURL_READFUNC_PAUSE)
@@ -1908,12 +1909,228 @@ void Curl_mime_unpause(curl_mimepart *part) curl_mimepart *subpart;
for(subpart = mime->firstpart; subpart; subpart = subpart->nextpart)
- Curl_mime_unpause(subpart);
+ mime_unpause(subpart);
}
}
}
}
+struct cr_mime_ctx {
+ struct Curl_creader super;
+ curl_mimepart *part;
+ curl_off_t total_len;
+ curl_off_t read_len;
+ CURLcode error_result;
+ BIT(seen_eos);
+ BIT(errored);
+};
+
+static CURLcode cr_mime_init(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+ (void)data;
+ ctx->total_len = -1;
+ ctx->read_len = 0;
+ return CURLE_OK;
+}
+
+/* Real client reader to installed client callbacks. */
+static CURLcode cr_mime_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *pnread, bool *peos)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+ size_t nread;
+
+ /* Once we have errored, we will return the same error forever */
+ if(ctx->errored) {
+ *pnread = 0;
+ *peos = FALSE;
+ return ctx->error_result;
+ }
+ if(ctx->seen_eos) {
+ *pnread = 0;
+ *peos = TRUE;
+ return CURLE_OK;
+ }
+ /* respect length limitations */
+ if(ctx->total_len >= 0) {
+ curl_off_t remain = ctx->total_len - ctx->read_len;
+ if(remain <= 0)
+ blen = 0;
+ else if(remain < (curl_off_t)blen)
+ blen = (size_t)remain;
+ }
+ nread = 0;
+ if(blen) {
+ nread = Curl_mime_read(buf, 1, blen, ctx->part);
+ }
+
+ switch(nread) {
+ case 0:
+ if((ctx->total_len >= 0) && (ctx->read_len < ctx->total_len)) {
+ failf(data, "client mime read EOF fail, only "
+ "only %"CURL_FORMAT_CURL_OFF_T"/%"CURL_FORMAT_CURL_OFF_T
+ " of needed bytes read", ctx->read_len, ctx->total_len);
+ return CURLE_READ_ERROR;
+ }
+ *pnread = 0;
+ *peos = TRUE;
+ ctx->seen_eos = TRUE;
+ break;
+
+ case CURL_READFUNC_ABORT:
+ failf(data, "operation aborted by callback");
+ *pnread = 0;
+ *peos = FALSE;
+ ctx->errored = TRUE;
+ ctx->error_result = CURLE_ABORTED_BY_CALLBACK;
+ return CURLE_ABORTED_BY_CALLBACK;
+
+ case CURL_READFUNC_PAUSE:
+ /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
+ data->req.keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
+ *pnread = 0;
+ *peos = FALSE;
+ break; /* nothing was read */
+
+ default:
+ if(nread > blen) {
+ /* the read function returned a too large value */
+ failf(data, "read function returned funny value");
+ *pnread = 0;
+ *peos = FALSE;
+ ctx->errored = TRUE;
+ ctx->error_result = CURLE_READ_ERROR;
+ return CURLE_READ_ERROR;
+ }
+ ctx->read_len += nread;
+ if(ctx->total_len >= 0)
+ ctx->seen_eos = (ctx->read_len >= ctx->total_len);
+ *pnread = nread;
+ *peos = ctx->seen_eos;
+ break;
+ }
+ DEBUGF(infof(data, "cr_mime_read(len=%zu, total=%"CURL_FORMAT_CURL_OFF_T
+ ", read=%"CURL_FORMAT_CURL_OFF_T") -> %d, %zu, %d",
+ blen, ctx->total_len, ctx->read_len, CURLE_OK, *pnread, *peos));
+ return CURLE_OK;
+}
+
+static bool cr_mime_needs_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+ (void)data;
+ return ctx->read_len > 0;
+}
+
+static curl_off_t cr_mime_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+ (void)data;
+ return ctx->total_len;
+}
+
+static CURLcode cr_mime_resume_from(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ curl_off_t offset)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+
+ if(offset > 0) {
+ curl_off_t passed = 0;
+
+ do {
+ char scratch[4*1024];
+ size_t readthisamountnow =
+ (offset - passed > (curl_off_t)sizeof(scratch)) ?
+ sizeof(scratch) :
+ curlx_sotouz(offset - passed);
+ size_t nread;
+
+ nread = Curl_mime_read(scratch, 1, readthisamountnow, ctx->part);
+ passed += (curl_off_t)nread;
+ if((nread == 0) || (nread > readthisamountnow)) {
+ /* this checks for greater-than only to make sure that the
+ CURL_READFUNC_ABORT return code still aborts */
+ failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
+ " bytes from the mime post", passed);
+ return CURLE_READ_ERROR;
+ }
+ } while(passed < offset);
+
+ /* now, decrease the size of the read */
+ if(ctx->total_len > 0) {
+ ctx->total_len -= offset;
+
+ if(ctx->total_len <= 0) {
+ failf(data, "Mime post already completely uploaded");
+ return CURLE_PARTIAL_FILE;
+ }
+ }
+ /* we've passed, proceed as normal */
+ }
+ return CURLE_OK;
+}
+
+static CURLcode cr_mime_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+ CURLcode result = mime_rewind(ctx->part);
+ if(result)
+ failf(data, "Cannot rewind mime/post data");
+ return result;
+}
+
+static CURLcode cr_mime_unpause(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+ (void)data;
+ mime_unpause(ctx->part);
+ return CURLE_OK;
+}
+
+static const struct Curl_crtype cr_mime = {
+ "cr-mime",
+ cr_mime_init,
+ cr_mime_read,
+ Curl_creader_def_close,
+ cr_mime_needs_rewind,
+ cr_mime_total_length,
+ cr_mime_resume_from,
+ cr_mime_rewind,
+ cr_mime_unpause,
+ Curl_creader_def_done,
+ sizeof(struct cr_mime_ctx)
+};
+
+CURLcode Curl_creader_set_mime(struct Curl_easy *data, curl_mimepart *part)
+{
+ struct Curl_creader *r;
+ struct cr_mime_ctx *ctx;
+ CURLcode result;
+
+ result = Curl_creader_create(&r, data, &cr_mime, CURL_CR_CLIENT);
+ if(result)
+ return result;
+ ctx = r->ctx;
+ ctx->part = part;
+ /* Make sure we will read the entire mime structure. */
+ result = mime_rewind(ctx->part);
+ if(result) {
+ Curl_creader_free(data, r);
+ return result;
+ }
+ ctx->total_len = mime_size(ctx->part);
+
+ return Curl_creader_set(data, r);
+}
#else /* !CURL_DISABLE_MIME && (!CURL_DISABLE_HTTP ||
!CURL_DISABLE_SMTP || !CURL_DISABLE_IMAP) */
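
The internal rework above replaces the old fread-style mime hookup with a cr_mime client reader, but the application-facing side is unchanged: a mime body is still built with the public mime API and attached with CURLOPT_MIMEPOST. As a reminder of that usage (placeholder URL and field values, error handling omitted):

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_mime *mime = curl_mime_init(curl);
    curl_mimepart *part = curl_mime_addpart(mime);

    curl_mime_name(part, "field");                        /* form field name */
    curl_mime_data(part, "value", CURL_ZERO_TERMINATED);  /* field contents */

    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/upload");
    curl_easy_setopt(curl, CURLOPT_MIMEPOST, mime); /* mime tree becomes the
                                                       request body */
    curl_easy_perform(curl);

    curl_mime_free(mime);
    curl_easy_cleanup(curl);
  }
  return 0;
}
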
diff --git a/libs/libcurl/src/mime.h b/libs/libcurl/src/mime.h
index 627689178b..edb2ee350d 100644
--- a/libs/libcurl/src/mime.h
+++ b/libs/libcurl/src/mime.h
@@ -151,12 +151,15 @@ CURLcode Curl_mime_prepare_headers(struct Curl_easy *data,
const char *contenttype,
const char *disposition,
enum mimestrategy strategy);
-curl_off_t Curl_mime_size(struct curl_mimepart *part);
size_t Curl_mime_read(char *buffer, size_t size, size_t nitems,
void *instream);
-CURLcode Curl_mime_rewind(struct curl_mimepart *part);
const char *Curl_mime_contenttype(const char *filename);
-void Curl_mime_unpause(struct curl_mimepart *part);
+
+/**
+ * Install a client reader as upload source that reads the given
+ * mime part.
+ */
+CURLcode Curl_creader_set_mime(struct Curl_easy *data, curl_mimepart *part);
#else
/* if disabled */
@@ -165,10 +168,8 @@ void Curl_mime_unpause(struct curl_mimepart *part); #define Curl_mime_duppart(x,y,z) CURLE_OK /* Nothing to duplicate. Succeed */
#define Curl_mime_set_subparts(a,b,c) CURLE_NOT_BUILT_IN
#define Curl_mime_prepare_headers(a,b,c,d,e) CURLE_NOT_BUILT_IN
-#define Curl_mime_size(x) (curl_off_t) -1
#define Curl_mime_read NULL
-#define Curl_mime_rewind(x) ((void)x, CURLE_NOT_BUILT_IN)
-#define Curl_mime_unpause(x)
+#define Curl_creader_set_mime(x,y) ((void)x, CURLE_NOT_BUILT_IN)
#endif
diff --git a/libs/libcurl/src/mprintf.c b/libs/libcurl/src/mprintf.c
index 72cf27edd3..9b9e6ed56a 100644
--- a/libs/libcurl/src/mprintf.c
+++ b/libs/libcurl/src/mprintf.c
@@ -49,16 +49,6 @@
#endif
/*
- * Non-ANSI integer extensions
- */
-
-#if (defined(_WIN32_WCE)) || \
- (defined(__MINGW32__)) || \
- (defined(_MSC_VER) && (_MSC_VER >= 900) && (_INTEGRAL_MAX_BITS >= 64))
-# define MP_HAVE_INT_EXTENSIONS
-#endif
-
-/*
* Max integer data types that mprintf.c is capable
*/
@@ -349,8 +339,9 @@ static int parsefmt(const char *format, case 'h':
flags |= FLAGS_SHORT;
break;
-#if defined(MP_HAVE_INT_EXTENSIONS)
+#if defined(_WIN32) || defined(_WIN32_WCE)
case 'I':
+ /* Non-ANSI integer extensions I32 I64 */
if((fmt[0] == '3') && (fmt[1] == '2')) {
flags |= FLAGS_LONG;
fmt += 2;
@@ -367,7 +358,7 @@ static int parsefmt(const char *format, #endif
}
break;
-#endif
+#endif /* _WIN32 || _WIN32_WCE */
case 'l':
if(flags & FLAGS_LONG)
flags |= FLAGS_LONGLONG;
@@ -651,7 +642,7 @@ static int parsefmt(const char *format, * On success, the input array describes the type of all arguments and their
* values.
*
- * The function then iterates over the output sengments and outputs them one
+ * The function then iterates over the output segments and outputs them one
* by one until done. Using the appropriate input arguments (if any).
*
* All output is sent to the 'stream()' callback, one byte at a time.
diff --git a/libs/libcurl/src/mqtt.c b/libs/libcurl/src/mqtt.c
index 20b839159b..86817bf9bd 100644
--- a/libs/libcurl/src/mqtt.c
+++ b/libs/libcurl/src/mqtt.c
@@ -119,12 +119,12 @@ static CURLcode mqtt_send(struct Curl_easy *data,
{
CURLcode result = CURLE_OK;
struct MQTT *mq = data->req.p.mqtt;
- ssize_t n;
- result = Curl_nwrite(data, FIRSTSOCKET, buf, len, &n);
+ size_t n;
+ result = Curl_xfer_send(data, buf, len, &n);
if(result)
return result;
Curl_debug(data, CURLINFO_HEADER_OUT, buf, (size_t)n);
- if(len != (size_t)n) {
+ if(len != n) {
size_t nsend = len - n;
char *sendleftovers = Curl_memdup(&buf[n], nsend);
if(!sendleftovers)
@@ -366,8 +366,7 @@ static CURLcode mqtt_recv_atleast(struct Curl_easy *data, size_t nbytes) ssize_t nread;
DEBUGASSERT(nbytes - rlen < sizeof(readbuf));
- result = Curl_read(data, data->conn->sock[FIRSTSOCKET],
- (char *)readbuf, nbytes - rlen, &nread);
+ result = Curl_xfer_recv(data, (char *)readbuf, nbytes - rlen, &nread);
if(result)
return result;
DEBUGASSERT(nread >= 0);
@@ -622,7 +621,6 @@ static CURLcode mqtt_read_publish(struct Curl_easy *data, bool *done) {
CURLcode result = CURLE_OK;
struct connectdata *conn = data->conn;
- curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
ssize_t nread;
size_t remlen;
struct mqtt_conn *mqtt = &conn->proto.mqtt;
@@ -679,7 +677,7 @@ MQTT_SUBACK_COMING: size_t rest = mq->npacket;
if(rest > sizeof(buffer))
rest = sizeof(buffer);
- result = Curl_read(data, sockfd, buffer, rest, &nread);
+ result = Curl_xfer_recv(data, buffer, rest, &nread);
if(result) {
if(CURLE_AGAIN == result) {
infof(data, "EEEE AAAAGAIN");
@@ -744,7 +742,6 @@ static CURLcode mqtt_doing(struct Curl_easy *data, bool *done) struct mqtt_conn *mqtt = &conn->proto.mqtt;
struct MQTT *mq = data->req.p.mqtt;
ssize_t nread;
- curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
unsigned char byte;
*done = FALSE;
@@ -762,7 +759,7 @@ static CURLcode mqtt_doing(struct Curl_easy *data, bool *done) switch(mqtt->state) {
case MQTT_FIRST:
/* Read the initial byte only */
- result = Curl_read(data, sockfd, (char *)&mq->firstbyte, 1, &nread);
+ result = Curl_xfer_recv(data, (char *)&mq->firstbyte, 1, &nread);
if(result)
break;
else if(!nread) {
@@ -778,7 +775,7 @@ static CURLcode mqtt_doing(struct Curl_easy *data, bool *done) FALLTHROUGH();
case MQTT_REMAINING_LENGTH:
do {
- result = Curl_read(data, sockfd, (char *)&byte, 1, &nread);
+ result = Curl_xfer_recv(data, (char *)&byte, 1, &nread);
if(!nread)
break;
Curl_debug(data, CURLINFO_HEADER_IN, (char *)&byte, 1);
diff --git a/libs/libcurl/src/multi.c b/libs/libcurl/src/multi.c
index 1ff096b625..78980ba626 100644
--- a/libs/libcurl/src/multi.c
+++ b/libs/libcurl/src/multi.c
@@ -94,6 +94,7 @@ static CURLMcode add_next_timeout(struct curltime now,
static CURLMcode multi_timeout(struct Curl_multi *multi,
long *timeout_ms);
static void process_pending_handles(struct Curl_multi *multi);
+static void multi_xfer_bufs_free(struct Curl_multi *multi);
#ifdef DEBUGBUILD
static const char * const multi_statename[]={
@@ -189,6 +190,10 @@ static void mstate(struct Curl_easy *data, CURLMstate state /* changing to COMPLETED means there's one less easy handle 'alive' */
DEBUGASSERT(data->multi->num_alive > 0);
data->multi->num_alive--;
+ if(!data->multi->num_alive) {
+ /* free the transfer buffer when we have no more active transfers */
+ multi_xfer_bufs_free(data->multi);
+ }
}
/* if this state has an init-function, run it */
@@ -525,6 +530,13 @@ CURLMcode curl_multi_add_handle(struct Curl_multi *multi, multi->dead = FALSE;
}
+ if(data->multi_easy) {
+ /* if this easy handle was previously used for curl_easy_perform(), there
+ is a private multi handle here that we can kill */
+ curl_multi_cleanup(data->multi_easy);
+ data->multi_easy = NULL;
+ }
+
/* Initialize timeout list for this handle */
Curl_llist_init(&data->state.timeoutlist, NULL);
@@ -640,7 +652,7 @@ static CURLcode multi_done(struct Curl_easy *data, after an error was detected */
bool premature)
{
- CURLcode result;
+ CURLcode result, r2;
struct connectdata *conn = data->conn;
#if defined(DEBUGBUILD) && !defined(CURL_DISABLE_VERBOSE_STRINGS)
@@ -691,14 +703,18 @@ static CURLcode multi_done(struct Curl_easy *data, result = CURLE_ABORTED_BY_CALLBACK;
}
+ /* Make sure that transfer client writes are really done now. */
+ r2 = Curl_xfer_write_done(data, premature);
+ if(r2 && !result)
+ result = r2;
+
/* Inform connection filters that this transfer is done */
Curl_conn_ev_data_done(data, premature);
process_pending_handles(data->multi); /* connection / multiplex */
- Curl_safefree(data->state.ulbuf);
-
- Curl_client_cleanup(data);
+ if(!result)
+ result = Curl_req_done(&data->req, data, premature);
CONNCACHE_LOCK(data);
Curl_detach_connection(data);
@@ -784,7 +800,6 @@ static CURLcode multi_done(struct Curl_easy *data, data->state.lastconnect_id = -1;
}
- Curl_safefree(data->state.buffer);
return result;
}
@@ -998,7 +1013,7 @@ static int connecting_getsock(struct Curl_easy *data, curl_socket_t *socks) {
struct connectdata *conn = data->conn;
(void)socks;
- /* Not using `conn->sockfd` as `Curl_setup_transfer()` initializes
+ /* Not using `conn->sockfd` as `Curl_xfer_setup()` initializes
* that *after* the connect. */
if(conn && conn->sock[FIRSTSOCKET] != CURL_SOCKET_BAD) {
/* Default is to wait to something from the server */
@@ -1802,101 +1817,15 @@ static CURLcode protocol_connect(struct Curl_easy *data, }
/*
- * readrewind() rewinds the read stream. This is typically used for HTTP
- * POST/PUT with multi-pass authentication when a sending was denied and a
- * resend is necessary.
- */
-static CURLcode readrewind(struct Curl_easy *data)
-{
- curl_mimepart *mimepart = &data->set.mimepost;
- DEBUGASSERT(data->conn);
-
- data->state.rewindbeforesend = FALSE; /* we rewind now */
-
- /* explicitly switch off sending data on this connection now since we are
- about to restart a new transfer and thus we want to avoid inadvertently
- sending more data on the existing connection until the next transfer
- starts */
- data->req.keepon &= ~KEEP_SEND;
-
- /* We have sent away data. If not using CURLOPT_POSTFIELDS or
- CURLOPT_HTTPPOST, call app to rewind
- */
-#ifndef CURL_DISABLE_HTTP
- if(data->conn->handler->protocol & PROTO_FAMILY_HTTP) {
- if(data->state.mimepost)
- mimepart = data->state.mimepost;
- }
-#endif
- if(data->set.postfields ||
- (data->state.httpreq == HTTPREQ_GET) ||
- (data->state.httpreq == HTTPREQ_HEAD))
- ; /* no need to rewind */
- else if(data->state.httpreq == HTTPREQ_POST_MIME ||
- data->state.httpreq == HTTPREQ_POST_FORM) {
- CURLcode result = Curl_mime_rewind(mimepart);
- if(result) {
- failf(data, "Cannot rewind mime/post data");
- return result;
- }
- }
- else {
- if(data->set.seek_func) {
- int err;
-
- Curl_set_in_callback(data, true);
- err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
- Curl_set_in_callback(data, false);
- if(err) {
- failf(data, "seek callback returned error %d", (int)err);
- return CURLE_SEND_FAIL_REWIND;
- }
- }
- else if(data->set.ioctl_func) {
- curlioerr err;
-
- Curl_set_in_callback(data, true);
- err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
- data->set.ioctl_client);
- Curl_set_in_callback(data, false);
- infof(data, "the ioctl callback returned %d", (int)err);
-
- if(err) {
- failf(data, "ioctl callback returned error %d", (int)err);
- return CURLE_SEND_FAIL_REWIND;
- }
- }
- else {
- /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
- given FILE * stream and we can actually attempt to rewind that
- ourselves with fseek() */
- if(data->state.fread_func == (curl_read_callback)fread) {
- if(-1 != fseek(data->state.in, 0, SEEK_SET))
- /* successful rewind */
- return CURLE_OK;
- }
-
- /* no callback set or failure above, makes us fail at once */
- failf(data, "necessary data rewind wasn't possible");
- return CURLE_SEND_FAIL_REWIND;
- }
- }
- return CURLE_OK;
-}
-
-/*
* Curl_preconnect() is called immediately before a connect starts. When a
* redirect is followed, this is then called multiple times during a single
* transfer.
*/
CURLcode Curl_preconnect(struct Curl_easy *data)
{
- if(!data->state.buffer) {
- data->state.buffer = malloc(data->set.buffer_size + 1);
- if(!data->state.buffer)
- return CURLE_OUT_OF_MEMORY;
- }
-
+ /* this used to do data->state.buffer allocation,
+ maybe remove completely now? */
+ (void)data;
return CURLE_OK;
}
@@ -1914,7 +1843,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, bool async;
bool protocol_connected = FALSE;
bool dophase_done = FALSE;
- bool done = FALSE;
CURLMcode rc;
CURLcode result = CURLE_OK;
timediff_t recv_timeout_ms;
@@ -2058,7 +1986,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, hostname = conn->host.name;
/* check if we have the name resolved by now */
- dns = Curl_fetch_addr(data, hostname, (int)conn->port);
+ dns = Curl_fetch_addr(data, hostname, conn->primary.remote_port);
if(dns) {
#ifdef CURLRES_ASYNCH
@@ -2153,9 +2081,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, break;
case MSTATE_PROTOCONNECT:
- if(data->state.rewindbeforesend)
- result = readrewind(data);
-
if(!result && data->conn->bits.reuse) {
/* ftp seems to hang when protoconnect on reused connection
* since we handle PROTOCONNECT in general inside the filers, it
@@ -2207,10 +2132,10 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, /* call the prerequest callback function */
Curl_set_in_callback(data, true);
prereq_rc = data->set.fprereq(data->set.prereq_userp,
- data->info.conn_primary_ip,
- data->info.conn_local_ip,
- data->info.conn_primary_port,
- data->info.conn_local_port);
+ data->info.primary.remote_ip,
+ data->info.primary.local_ip,
+ data->info.primary.remote_port,
+ data->info.primary.local_port);
Curl_set_in_callback(data, false);
if(prereq_rc != CURL_PREREQFUNC_OK) {
failf(data, "operation aborted by pre-request callback");
@@ -2450,7 +2375,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, {
char *newurl = NULL;
bool retry = FALSE;
- DEBUGASSERT(data->state.buffer);
/* check if over send speed */
send_timeout_ms = 0;
if(data->set.max_send_speed)
@@ -2480,9 +2404,9 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, }
/* read/write data if it is ready to do so */
- result = Curl_readwrite(data, &done);
+ result = Curl_readwrite(data);
- if(done || (result == CURLE_RECV_ERROR)) {
+ if(data->req.done || (result == CURLE_RECV_ERROR)) {
/* If CURLE_RECV_ERROR happens early enough, we assume it was a race
* condition and the server closed the reused connection exactly when
* we wanted to use it, so figure out if that is indeed the case.
@@ -2497,7 +2421,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, /* if we are to retry, set the result to OK and consider the
request as done */
result = CURLE_OK;
- done = TRUE;
+ data->req.done = TRUE;
}
}
else if((CURLE_HTTP2_STREAM == result) &&
@@ -2517,7 +2441,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, as done */
retry = TRUE;
result = CURLE_OK;
- done = TRUE;
+ data->req.done = TRUE;
}
else
result = ret;
@@ -2539,7 +2463,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, Curl_posttransfer(data);
multi_done(data, result, TRUE);
}
- else if(done) {
+ else if(data->req.done) {
/* call this even if the readwrite function returned error */
Curl_posttransfer(data);
@@ -2883,6 +2807,7 @@ CURLMcode curl_multi_cleanup(struct Curl_multi *multi) Curl_free_multi_ssl_backend_data(multi->ssl_backend_data);
#endif
+ multi_xfer_bufs_free(multi);
free(multi);
return CURLM_OK;
@@ -3242,7 +3167,7 @@ static CURLMcode multi_socket(struct Curl_multi *multi, if(data->conn && !(data->conn->handler->flags & PROTOPT_DIRLOCK))
/* set socket event bitmask if they're not locked */
- data->state.select_bits = (unsigned char)ev_bitmask;
+ data->state.select_bits |= (unsigned char)ev_bitmask;
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
@@ -3819,3 +3744,120 @@ struct Curl_easy **curl_multi_get_handles(struct Curl_multi *multi) }
return a;
}
+
+CURLcode Curl_multi_xfer_buf_borrow(struct Curl_easy *data,
+ char **pbuf, size_t *pbuflen)
+{
+ DEBUGASSERT(data);
+ DEBUGASSERT(data->multi);
+ *pbuf = NULL;
+ *pbuflen = 0;
+ if(!data->multi) {
+ failf(data, "transfer has no multi handle");
+ return CURLE_FAILED_INIT;
+ }
+ if(!data->set.buffer_size) {
+ failf(data, "transfer buffer size is 0");
+ return CURLE_FAILED_INIT;
+ }
+ if(data->multi->xfer_buf_borrowed) {
+ failf(data, "attempt to borrow xfer_buf when already borrowed");
+ return CURLE_AGAIN;
+ }
+
+ if(data->multi->xfer_buf &&
+ data->set.buffer_size > data->multi->xfer_buf_len) {
+ /* not large enough, get a new one */
+ free(data->multi->xfer_buf);
+ data->multi->xfer_buf = NULL;
+ data->multi->xfer_buf_len = 0;
+ }
+
+ if(!data->multi->xfer_buf) {
+ data->multi->xfer_buf = malloc((size_t)data->set.buffer_size);
+ if(!data->multi->xfer_buf) {
+ failf(data, "could not allocate xfer_buf of %zu bytes",
+ (size_t)data->set.buffer_size);
+ return CURLE_OUT_OF_MEMORY;
+ }
+ data->multi->xfer_buf_len = data->set.buffer_size;
+ }
+
+ data->multi->xfer_buf_borrowed = TRUE;
+ *pbuf = data->multi->xfer_buf;
+ *pbuflen = data->multi->xfer_buf_len;
+ return CURLE_OK;
+}
+
+void Curl_multi_xfer_buf_release(struct Curl_easy *data, char *buf)
+{
+ (void)buf;
+ DEBUGASSERT(data);
+ DEBUGASSERT(data->multi);
+ DEBUGASSERT(!buf || data->multi->xfer_buf == buf);
+ data->multi->xfer_buf_borrowed = FALSE;
+}
+
+CURLcode Curl_multi_xfer_ulbuf_borrow(struct Curl_easy *data,
+ char **pbuf, size_t *pbuflen)
+{
+ DEBUGASSERT(data);
+ DEBUGASSERT(data->multi);
+ *pbuf = NULL;
+ *pbuflen = 0;
+ if(!data->multi) {
+ failf(data, "transfer has no multi handle");
+ return CURLE_FAILED_INIT;
+ }
+ if(!data->set.upload_buffer_size) {
+ failf(data, "transfer upload buffer size is 0");
+ return CURLE_FAILED_INIT;
+ }
+ if(data->multi->xfer_ulbuf_borrowed) {
+ failf(data, "attempt to borrow xfer_ulbuf when already borrowed");
+ return CURLE_AGAIN;
+ }
+
+ if(data->multi->xfer_ulbuf &&
+ data->set.upload_buffer_size > data->multi->xfer_ulbuf_len) {
+ /* not large enough, get a new one */
+ free(data->multi->xfer_ulbuf);
+ data->multi->xfer_ulbuf = NULL;
+ data->multi->xfer_ulbuf_len = 0;
+ }
+
+ if(!data->multi->xfer_ulbuf) {
+ data->multi->xfer_ulbuf = malloc((size_t)data->set.upload_buffer_size);
+ if(!data->multi->xfer_ulbuf) {
+ failf(data, "could not allocate xfer_ulbuf of %zu bytes",
+ (size_t)data->set.upload_buffer_size);
+ return CURLE_OUT_OF_MEMORY;
+ }
+ data->multi->xfer_ulbuf_len = data->set.upload_buffer_size;
+ }
+
+ data->multi->xfer_ulbuf_borrowed = TRUE;
+ *pbuf = data->multi->xfer_ulbuf;
+ *pbuflen = data->multi->xfer_ulbuf_len;
+ return CURLE_OK;
+}
+
+void Curl_multi_xfer_ulbuf_release(struct Curl_easy *data, char *buf)
+{
+ (void)buf;
+ DEBUGASSERT(data);
+ DEBUGASSERT(data->multi);
+ DEBUGASSERT(!buf || data->multi->xfer_ulbuf == buf);
+ data->multi->xfer_ulbuf_borrowed = FALSE;
+}
+
+static void multi_xfer_bufs_free(struct Curl_multi *multi)
+{
+ DEBUGASSERT(multi);
+ Curl_safefree(multi->xfer_buf);
+ multi->xfer_buf_len = 0;
+ multi->xfer_buf_borrowed = FALSE;
+ Curl_safefree(multi->xfer_ulbuf);
+ multi->xfer_ulbuf_len = 0;
+ multi->xfer_ulbuf_borrowed = FALSE;
+}
diff --git a/libs/libcurl/src/multihandle.h b/libs/libcurl/src/multihandle.h
index ec57304a38..6a7a3c62db 100644
--- a/libs/libcurl/src/multihandle.h
+++ b/libs/libcurl/src/multihandle.h
@@ -124,6 +124,13 @@ struct Curl_multi {
times of all currently set timers */
struct Curl_tree *timetree;
+ /* buffer used for transfer data, lazy initialized */
+ char *xfer_buf; /* the actual buffer */
+ size_t xfer_buf_len; /* the allocated length */
+ /* buffer used for upload data, lazy initialized */
+ char *xfer_ulbuf; /* the actual buffer */
+ size_t xfer_ulbuf_len; /* the allocated length */
+
#if defined(USE_SSL)
struct multi_ssl_backend_data *ssl_backend_data;
#endif
@@ -171,6 +178,8 @@ struct Curl_multi { #endif
BIT(dead); /* a callback returned error, everything needs to crash and
burn */
+ BIT(xfer_buf_borrowed); /* xfer_buf is currently being borrowed */
+ BIT(xfer_ulbuf_borrowed); /* xfer_ulbuf is currently being borrowed */
#ifdef DEBUGBUILD
BIT(warned); /* true after user warned of DEBUGBUILD */
#endif
diff --git a/libs/libcurl/src/multiif.h b/libs/libcurl/src/multiif.h
index fac822c863..bedec92d1d 100644
--- a/libs/libcurl/src/multiif.h
+++ b/libs/libcurl/src/multiif.h
@@ -94,4 +94,53 @@ CURLMcode Curl_multi_add_perform(struct Curl_multi *multi,
/* Return the value of the CURLMOPT_MAX_CONCURRENT_STREAMS option */
unsigned int Curl_multi_max_concurrent_streams(struct Curl_multi *multi);
+/**
+ * Borrow the transfer buffer from the multi, suitable
+ * for the given transfer `data`. The buffer may only be used in one
+ * multi processing of the easy handle. It MUST be returned to the
+ * multi before it can be borrowed again.
+ * Pointers into the buffer remain only valid as long as it is borrowed.
+ *
+ * @param data the easy handle
+ * @param pbuf on return, the buffer to use or NULL on error
+ * @param pbuflen on return, the size of *pbuf or 0 on error
+ * @return CURLE_OK when buffer is available and is returned.
+ * CURLE_OUT_OF_MEMORY on failure to allocate the buffer,
+ * CURLE_FAILED_INIT if the easy handle is without multi.
+ * CURLE_AGAIN if the buffer is borrowed already.
+ */
+CURLcode Curl_multi_xfer_buf_borrow(struct Curl_easy *data,
+ char **pbuf, size_t *pbuflen);
+/**
+ * Release the borrowed buffer. All references into the buffer become
+ * invalid after this.
+ * @param buf the buffer pointer borrowed for coding error checks.
+ */
+void Curl_multi_xfer_buf_release(struct Curl_easy *data, char *buf);
+
+/**
+ * Borrow the upload buffer from the multi, suitable
+ * for the given transfer `data`. The buffer may only be used in one
+ * multi processing of the easy handle. It MUST be returned to the
+ * multi before it can be borrowed again.
+ * Pointers into the buffer remain only valid as long as it is borrowed.
+ *
+ * @param data the easy handle
+ * @param pbuf on return, the buffer to use or NULL on error
+ * @param pbuflen on return, the size of *pbuf or 0 on error
+ * @return CURLE_OK when buffer is available and is returned.
+ * CURLE_OUT_OF_MEMORY on failure to allocate the buffer,
+ * CURLE_FAILED_INIT if the easy handle is without multi.
+ * CURLE_AGAIN if the buffer is borrowed already.
+ */
+CURLcode Curl_multi_xfer_ulbuf_borrow(struct Curl_easy *data,
+ char **pbuf, size_t *pbuflen);
+
+/**
+ * Release the borrowed upload buffer. All references into the buffer become
+ * invalid after this.
+ * @param buf the upload buffer pointer borrowed for coding error checks.
+ */
+void Curl_multi_xfer_ulbuf_release(struct Curl_easy *data, char *buf);
+
#endif /* HEADER_CURL_MULTIIF_H */
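
Going by the documentation comments above, callers inside libcurl are expected to follow a strict borrow/use/release discipline for the shared transfer buffer. A sketch of that call sequence (internal API, so it only compiles inside the curl source tree; error handling trimmed, and the function name below is made up for illustration):

#include "urldata.h"
#include "multiif.h"

/* Borrow the shared download buffer, fill it, then release it again.
   The buffer must be released before it can be borrowed again. */
static CURLcode fill_from_connection(struct Curl_easy *data)
{
  char *buf;
  size_t buflen;
  CURLcode result = Curl_multi_xfer_buf_borrow(data, &buf, &buflen);
  if(result)
    return result;   /* no multi, zero buffer size, or already borrowed */

  /* ... receive into buf[0..buflen-1] and pass it to the client writer ... */

  Curl_multi_xfer_buf_release(data, buf); /* pointers into buf are now invalid */
  return CURLE_OK;
}
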
diff --git a/libs/libcurl/src/netrc.c b/libs/libcurl/src/netrc.c
index 72680af665..700e085cf0 100644
--- a/libs/libcurl/src/netrc.c
+++ b/libs/libcurl/src/netrc.c
@@ -53,6 +53,8 @@ enum host_lookup_state {
#define NETRC_FAILED -1
#define NETRC_SUCCESS 0
+#define MAX_NETRC_LINE 4096
+
/*
* Returns zero on success.
*/
@@ -80,13 +82,14 @@ static int parsenetrc(const char *host, file = fopen(netrcfile, FOPEN_READTEXT);
if(file) {
bool done = FALSE;
- char netrcbuffer[4096];
- int netrcbuffsize = (int)sizeof(netrcbuffer);
+ struct dynbuf buf;
+ Curl_dyn_init(&buf, MAX_NETRC_LINE);
- while(!done && Curl_get_line(netrcbuffer, netrcbuffsize, file)) {
+ while(!done && Curl_get_line(&buf, file)) {
char *tok;
char *tok_end;
bool quoted;
+ char *netrcbuffer = Curl_dyn_ptr(&buf);
if(state == MACDEF) {
if((netrcbuffer[0] == '\n') || (netrcbuffer[0] == '\r'))
state = NOTHING;
@@ -245,6 +248,7 @@ static int parsenetrc(const char *host, } /* while Curl_get_line() */
out:
+ Curl_dyn_free(&buf);
if(!retcode) {
/* success */
if(login_alloc) {
diff --git a/libs/libcurl/src/openldap.c b/libs/libcurl/src/openldap.c
index 7452981562..2a21094b7d 100644
--- a/libs/libcurl/src/openldap.c
+++ b/libs/libcurl/src/openldap.c
@@ -916,7 +916,7 @@ static CURLcode oldap_do(struct Curl_easy *data, bool *done)
else {
lr->msgid = msgid;
data->req.p.ldap = lr;
- Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
*done = TRUE;
}
}
diff --git a/libs/libcurl/src/pingpong.c b/libs/libcurl/src/pingpong.c
index 71116c8a9c..b39a7c75cf 100644
--- a/libs/libcurl/src/pingpong.c
+++ b/libs/libcurl/src/pingpong.c
@@ -164,7 +164,7 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data,
const char *fmt,
va_list args)
{
- ssize_t bytes_written = 0;
+ size_t bytes_written = 0;
size_t write_len;
char *s;
CURLcode result;
@@ -199,8 +199,11 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data, #ifdef HAVE_GSSAPI
conn->data_prot = PROT_CMD;
#endif
- result = Curl_nwrite(data, FIRSTSOCKET, s, write_len, &bytes_written);
- if(result)
+ result = Curl_conn_send(data, FIRSTSOCKET, s, write_len, &bytes_written);
+ if(result == CURLE_AGAIN) {
+ bytes_written = 0;
+ }
+ else if(result)
return result;
#ifdef HAVE_GSSAPI
data_sec = conn->data_prot;
@@ -208,9 +211,9 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data, conn->data_prot = (unsigned char)data_sec;
#endif
- Curl_debug(data, CURLINFO_HEADER_OUT, s, (size_t)bytes_written);
+ Curl_debug(data, CURLINFO_HEADER_OUT, s, bytes_written);
- if(bytes_written != (ssize_t)write_len) {
+ if(bytes_written != write_len) {
/* the whole chunk was not sent, keep it around and adjust sizes */
pp->sendthis = s;
pp->sendsize = write_len;
@@ -251,7 +254,7 @@ CURLcode Curl_pp_sendf(struct Curl_easy *data, struct pingpong *pp, }
static CURLcode pingpong_read(struct Curl_easy *data,
- curl_socket_t sockfd,
+ int sockindex,
char *buffer,
size_t buflen,
ssize_t *nread)
@@ -261,7 +264,7 @@ static CURLcode pingpong_read(struct Curl_easy *data, enum protection_level prot = data->conn->data_prot;
data->conn->data_prot = PROT_CLEAR;
#endif
- result = Curl_read(data, sockfd, buffer, buflen, nread);
+ result = Curl_conn_recv(data, sockindex, buffer, buflen, nread);
#ifdef HAVE_GSSAPI
DEBUGASSERT(prot > PROT_NONE && prot < PROT_LAST);
data->conn->data_prot = (unsigned char)prot;
@@ -275,7 +278,7 @@ static CURLcode pingpong_read(struct Curl_easy *data, * Reads a piece of a server response.
*/
CURLcode Curl_pp_readresp(struct Curl_easy *data,
- curl_socket_t sockfd,
+ int sockindex,
struct pingpong *pp,
int *code, /* return the server code if done */
size_t *size) /* size of the response */
@@ -300,7 +303,7 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data, ssize_t gotbytes = 0;
char buffer[900];
- result = pingpong_read(data, sockfd, buffer, sizeof(buffer), &gotbytes);
+ result = pingpong_read(data, sockindex, buffer, sizeof(buffer), &gotbytes);
if(result == CURLE_AGAIN)
return CURLE_OK;
@@ -395,14 +398,20 @@ CURLcode Curl_pp_flushsend(struct Curl_easy *data, struct pingpong *pp)
{
/* we have a piece of a command still left to send */
- ssize_t written;
- CURLcode result = Curl_nwrite(data, FIRSTSOCKET,
- pp->sendthis + pp->sendsize - pp->sendleft,
- pp->sendleft, &written);
+ size_t written;
+ CURLcode result;
+
+ result = Curl_conn_send(data, FIRSTSOCKET,
+ pp->sendthis + pp->sendsize - pp->sendleft,
+ pp->sendleft, &written);
+ if(result == CURLE_AGAIN) {
+ result = CURLE_OK;
+ written = 0;
+ }
if(result)
return result;
- if(written != (ssize_t)pp->sendleft) {
+ if(written != pp->sendleft) {
/* only a fraction was sent */
pp->sendleft -= written;
}
@@ -423,7 +432,7 @@ CURLcode Curl_pp_disconnect(struct pingpong *pp) bool Curl_pp_moredata(struct pingpong *pp)
{
- return (!pp->sendleft && Curl_dyn_len(&pp->recvbuf));
+ return (!pp->sendleft && Curl_dyn_len(&pp->recvbuf) > pp->nfinal);
}
#endif
diff --git a/libs/libcurl/src/pingpong.h b/libs/libcurl/src/pingpong.h
index a74071675c..887d4e8ed9 100644
--- a/libs/libcurl/src/pingpong.h
+++ b/libs/libcurl/src/pingpong.h
@@ -132,7 +132,7 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data,
* Reads a piece of a server response.
*/
CURLcode Curl_pp_readresp(struct Curl_easy *data,
- curl_socket_t sockfd,
+ int sockindex,
struct pingpong *pp,
int *code, /* return the server code if done */
size_t *size); /* size of the response */
diff --git a/libs/libcurl/src/pop3.c b/libs/libcurl/src/pop3.c
index f31e86fe31..8281b6bcc0 100644
--- a/libs/libcurl/src/pop3.c
+++ b/libs/libcurl/src/pop3.c
@@ -934,7 +934,7 @@ static CURLcode pop3_state_command_resp(struct Curl_easy *data,
if(pop3->transfer == PPTRANSFER_BODY) {
/* POP3 download */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
if(pp->overflow) {
/* The recv buffer contains data that is actually body content so send
@@ -970,7 +970,6 @@ static CURLcode pop3_statemachine(struct Curl_easy *data, struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- curl_socket_t sock = conn->sock[FIRSTSOCKET];
int pop3code;
struct pop3_conn *pop3c = &conn->proto.pop3c;
struct pingpong *pp = &pop3c->pp;
@@ -987,7 +986,7 @@ static CURLcode pop3_statemachine(struct Curl_easy *data, do {
/* Read the response from the server */
- result = Curl_pp_readresp(data, sock, pp, &pop3code, &nread);
+ result = Curl_pp_readresp(data, FIRSTSOCKET, pp, &pop3code, &nread);
if(result)
return result;
diff --git a/libs/libcurl/src/request.c b/libs/libcurl/src/request.c
new file mode 100644
index 0000000000..a6c69a3dda
--- /dev/null
+++ b/libs/libcurl/src/request.c
@@ -0,0 +1,409 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#include "urldata.h"
+#include "cfilters.h"
+#include "dynbuf.h"
+#include "doh.h"
+#include "multiif.h"
+#include "progress.h"
+#include "request.h"
+#include "sendf.h"
+#include "transfer.h"
+#include "url.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+CURLcode Curl_req_init(struct SingleRequest *req)
+{
+ memset(req, 0, sizeof(*req));
+ return CURLE_OK;
+}
+
+CURLcode Curl_req_soft_reset(struct SingleRequest *req,
+ struct Curl_easy *data)
+{
+ CURLcode result;
+
+ req->done = FALSE;
+ req->upload_done = FALSE;
+ req->download_done = FALSE;
+ req->ignorebody = FALSE;
+ req->bytecount = 0;
+ req->writebytecount = 0;
+ req->header = TRUE; /* assume header */
+ req->headerline = 0;
+ req->headerbytecount = 0;
+ req->allheadercount = 0;
+ req->deductheadercount = 0;
+
+ result = Curl_client_start(data);
+ if(result)
+ return result;
+
+ if(!req->sendbuf_init) {
+ Curl_bufq_init2(&req->sendbuf, data->set.upload_buffer_size, 1,
+ BUFQ_OPT_SOFT_LIMIT);
+ req->sendbuf_init = TRUE;
+ }
+ else {
+ Curl_bufq_reset(&req->sendbuf);
+ if(data->set.upload_buffer_size != req->sendbuf.chunk_size) {
+ Curl_bufq_free(&req->sendbuf);
+ Curl_bufq_init2(&req->sendbuf, data->set.upload_buffer_size, 1,
+ BUFQ_OPT_SOFT_LIMIT);
+ }
+ }
+
+ return CURLE_OK;
+}
+
+CURLcode Curl_req_start(struct SingleRequest *req,
+ struct Curl_easy *data)
+{
+ req->start = Curl_now();
+ return Curl_req_soft_reset(req, data);
+}
+
+static CURLcode req_flush(struct Curl_easy *data);
+
+CURLcode Curl_req_done(struct SingleRequest *req,
+ struct Curl_easy *data, bool aborted)
+{
+ (void)req;
+ if(!aborted)
+ (void)req_flush(data);
+ Curl_client_reset(data);
+ return CURLE_OK;
+}
+
+void Curl_req_hard_reset(struct SingleRequest *req, struct Curl_easy *data)
+{
+ struct curltime t0 = {0, 0};
+
+ /* This is a bit ugly. `req->p` is a union and we assume we can
+ * free this safely without leaks. */
+ Curl_safefree(req->p.http);
+ Curl_safefree(req->newurl);
+ Curl_client_reset(data);
+ if(req->sendbuf_init)
+ Curl_bufq_reset(&req->sendbuf);
+
+#ifndef CURL_DISABLE_DOH
+ if(req->doh) {
+ Curl_close(&req->doh->probe[0].easy);
+ Curl_close(&req->doh->probe[1].easy);
+ }
+#endif
+ /* Can no longer memset() this struct as we need to keep some state */
+ req->size = -1;
+ req->maxdownload = -1;
+ req->bytecount = 0;
+ req->writebytecount = 0;
+ req->start = t0;
+ req->headerbytecount = 0;
+ req->allheadercount = 0;
+ req->deductheadercount = 0;
+ req->headerline = 0;
+ req->offset = 0;
+ req->httpcode = 0;
+ req->keepon = 0;
+ req->upgr101 = UPGR101_INIT;
+ req->timeofdoc = 0;
+ req->bodywrites = 0;
+ req->location = NULL;
+ req->newurl = NULL;
+#ifndef CURL_DISABLE_COOKIES
+ req->setcookies = 0;
+#endif
+ req->header = FALSE;
+ req->content_range = FALSE;
+ req->download_done = FALSE;
+ req->eos_written = FALSE;
+ req->eos_read = FALSE;
+ req->upload_done = FALSE;
+ req->upload_aborted = FALSE;
+ req->ignorebody = FALSE;
+ req->http_bodyless = FALSE;
+ req->chunk = FALSE;
+ req->ignore_cl = FALSE;
+ req->upload_chunky = FALSE;
+ req->getheader = FALSE;
+ req->no_body = data->set.opt_no_body;
+ req->authneg = FALSE;
+}
+
+void Curl_req_free(struct SingleRequest *req, struct Curl_easy *data)
+{
+ /* This is a bit ugly. `req->p` is a union and we assume we can
+ * free this safely without leaks. */
+ Curl_safefree(req->p.http);
+ Curl_safefree(req->newurl);
+ if(req->sendbuf_init)
+ Curl_bufq_free(&req->sendbuf);
+ Curl_client_cleanup(data);
+
+#ifndef CURL_DISABLE_DOH
+ if(req->doh) {
+ Curl_close(&req->doh->probe[0].easy);
+ Curl_close(&req->doh->probe[1].easy);
+ Curl_dyn_free(&req->doh->probe[0].serverdoh);
+ Curl_dyn_free(&req->doh->probe[1].serverdoh);
+ curl_slist_free_all(req->doh->headers);
+ Curl_safefree(req->doh);
+ }
+#endif
+}
+
+static CURLcode xfer_send(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ size_t hds_len, size_t *pnwritten)
+{
+ CURLcode result = CURLE_OK;
+
+ *pnwritten = 0;
+#ifdef CURLDEBUG
+ {
+ /* Allow debug builds to override this logic to force short initial
+ sends
+ */
+ char *p = getenv("CURL_SMALLREQSEND");
+ if(p) {
+ size_t altsize = (size_t)strtoul(p, NULL, 10);
+ if(altsize && altsize < blen)
+ blen = altsize;
+ }
+ }
+#endif
+  /* Make sure this does not send more body bytes than the max send
+     speed allows. The headers do not count toward the max speed. */
+ if(data->set.max_send_speed) {
+ size_t body_bytes = blen - hds_len;
+ if((curl_off_t)body_bytes > data->set.max_send_speed)
+ blen = hds_len + (size_t)data->set.max_send_speed;
+ }
+
+ result = Curl_xfer_send(data, buf, blen, pnwritten);
+ if(!result && *pnwritten) {
+ if(hds_len)
+ Curl_debug(data, CURLINFO_HEADER_OUT, (char *)buf,
+ CURLMIN(hds_len, *pnwritten));
+ if(*pnwritten > hds_len) {
+ size_t body_len = *pnwritten - hds_len;
+ Curl_debug(data, CURLINFO_DATA_OUT, (char *)buf + hds_len, body_len);
+ data->req.writebytecount += body_len;
+ Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
+ }
+ }
+ return result;
+}
+
+static CURLcode req_send_buffer_flush(struct Curl_easy *data)
+{
+ CURLcode result = CURLE_OK;
+ const unsigned char *buf;
+ size_t blen;
+
+ while(Curl_bufq_peek(&data->req.sendbuf, &buf, &blen)) {
+ size_t nwritten, hds_len = CURLMIN(data->req.sendbuf_hds_len, blen);
+ result = xfer_send(data, (const char *)buf, blen, hds_len, &nwritten);
+ if(result)
+ break;
+
+ Curl_bufq_skip(&data->req.sendbuf, nwritten);
+ if(hds_len) {
+ data->req.sendbuf_hds_len -= CURLMIN(hds_len, nwritten);
+ }
+ /* leave if we could not send all. Maybe network blocking or
+ * speed limits on transfer */
+ if(nwritten < blen)
+ break;
+ }
+ return result;
+}
+
+static CURLcode req_set_upload_done(struct Curl_easy *data)
+{
+ DEBUGASSERT(!data->req.upload_done);
+ data->req.upload_done = TRUE;
+ data->req.keepon &= ~(KEEP_SEND|KEEP_SEND_TIMED); /* we're done sending */
+
+ Curl_creader_done(data, data->req.upload_aborted);
+
+ if(data->req.upload_aborted) {
+ if(data->req.writebytecount)
+ infof(data, "abort upload after having sent %" CURL_FORMAT_CURL_OFF_T
+ " bytes", data->req.writebytecount);
+ else
+ infof(data, "abort upload");
+ }
+ else if(data->req.writebytecount)
+ infof(data, "upload completely sent off: %" CURL_FORMAT_CURL_OFF_T
+ " bytes", data->req.writebytecount);
+ else
+ infof(data, Curl_creader_total_length(data)?
+ "We are completely uploaded and fine" :
+ "Request completely sent off");
+
+ return Curl_xfer_send_close(data);
+}
+
+static CURLcode req_flush(struct Curl_easy *data)
+{
+ CURLcode result;
+
+ if(!data || !data->conn)
+ return CURLE_FAILED_INIT;
+
+ if(!Curl_bufq_is_empty(&data->req.sendbuf)) {
+ result = req_send_buffer_flush(data);
+ if(result)
+ return result;
+ if(!Curl_bufq_is_empty(&data->req.sendbuf)) {
+ return CURLE_AGAIN;
+ }
+ }
+
+ if(!data->req.upload_done && data->req.eos_read &&
+ Curl_bufq_is_empty(&data->req.sendbuf)) {
+ return req_set_upload_done(data);
+ }
+ return CURLE_OK;
+}
+
+static ssize_t add_from_client(void *reader_ctx,
+ unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct Curl_easy *data = reader_ctx;
+ size_t nread;
+ bool eos;
+
+ *err = Curl_client_read(data, (char *)buf, buflen, &nread, &eos);
+ if(*err)
+ return -1;
+ if(eos)
+ data->req.eos_read = TRUE;
+ return (ssize_t)nread;
+}
+
+#ifndef USE_HYPER
+
+static CURLcode req_send_buffer_add(struct Curl_easy *data,
+ const char *buf, size_t blen,
+ size_t hds_len)
+{
+ CURLcode result = CURLE_OK;
+ ssize_t n;
+ n = Curl_bufq_write(&data->req.sendbuf,
+ (const unsigned char *)buf, blen, &result);
+ if(n < 0)
+ return result;
+ /* We rely on a SOFTLIMIT on sendbuf, so it can take all data in */
+ DEBUGASSERT((size_t)n == blen);
+ data->req.sendbuf_hds_len += hds_len;
+ return CURLE_OK;
+}
+
+CURLcode Curl_req_send(struct Curl_easy *data, struct dynbuf *req)
+{
+ CURLcode result;
+ const char *buf;
+ size_t blen, nwritten;
+
+ if(!data || !data->conn)
+ return CURLE_FAILED_INIT;
+
+ buf = Curl_dyn_ptr(req);
+ blen = Curl_dyn_len(req);
+ if(!Curl_creader_total_length(data)) {
+ /* Request without body. Try to send directly from the buf given. */
+ data->req.eos_read = TRUE;
+ result = xfer_send(data, buf, blen, blen, &nwritten);
+ if(result)
+ return result;
+ buf += nwritten;
+ blen -= nwritten;
+ }
+
+ if(blen) {
+ /* Either we have a request body, or we could not send the complete
+     * request in one go. Buffer the remainder and try to add as many
+     * body bytes as there is room left in the buffer. Then flush. */
+ result = req_send_buffer_add(data, buf, blen, blen);
+ if(result)
+ return result;
+
+ return Curl_req_send_more(data);
+ }
+ return CURLE_OK;
+}
+#endif /* !USE_HYPER */
+
+bool Curl_req_want_send(struct Curl_easy *data)
+{
+ return data->req.sendbuf_init && !Curl_bufq_is_empty(&data->req.sendbuf);
+}
+
+bool Curl_req_done_sending(struct Curl_easy *data)
+{
+ if(data->req.upload_done) {
+ DEBUGASSERT(Curl_bufq_is_empty(&data->req.sendbuf));
+ return TRUE;
+ }
+ return FALSE;
+}
+
+CURLcode Curl_req_send_more(struct Curl_easy *data)
+{
+ CURLcode result;
+
+ /* Fill our send buffer if more from client can be read. */
+ if(!data->req.eos_read && !Curl_bufq_is_full(&data->req.sendbuf)) {
+ ssize_t nread = Curl_bufq_sipn(&data->req.sendbuf, 0,
+ add_from_client, data, &result);
+ if(nread < 0 && result != CURLE_AGAIN)
+ return result;
+ }
+
+ result = req_flush(data);
+ if(result == CURLE_AGAIN)
+ result = CURLE_OK;
+ return result;
+}
+
+CURLcode Curl_req_abort_sending(struct Curl_easy *data)
+{
+ if(!data->req.upload_done) {
+ Curl_bufq_reset(&data->req.sendbuf);
+ data->req.upload_aborted = TRUE;
+ return req_set_upload_done(data);
+ }
+ return CURLE_OK;
+}
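
One detail worth pulling out of xfer_send() above: when a max send speed is set, only body bytes are capped while header bytes always go out. A small standalone restatement of that clamp (plain C; the helper name is invented for illustration and the real code works on curl_off_t):

#include <stddef.h>

/* clamp the chunk handed to the connection: headers are always allowed,
   the body part is limited to max_send_speed bytes per call */
static size_t cap_send_len(size_t blen, size_t hds_len, long max_send_speed)
{
  if(max_send_speed > 0) {
    size_t body_bytes = blen - hds_len;
    if(body_bytes > (size_t)max_send_speed)
      blen = hds_len + (size_t)max_send_speed;
  }
  return blen;
}
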
diff --git a/libs/libcurl/src/request.h b/libs/libcurl/src/request.h
new file mode 100644
index 0000000000..1f87b191c3
--- /dev/null
+++ b/libs/libcurl/src/request.h
@@ -0,0 +1,227 @@
+#ifndef HEADER_CURL_REQUEST_H
+#define HEADER_CURL_REQUEST_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+/* This file is for lib internal stuff */
+
+#include "curl_setup.h"
+
+#include "bufq.h"
+
+/* forward declarations */
+struct UserDefined;
+
+enum expect100 {
+ EXP100_SEND_DATA, /* enough waiting, just send the body now */
+ EXP100_AWAITING_CONTINUE, /* waiting for the 100 Continue header */
+ EXP100_SENDING_REQUEST, /* still sending the request but will wait for
+ the 100 header once done with the request */
+ EXP100_FAILED /* used on 417 Expectation Failed */
+};
+
+enum upgrade101 {
+ UPGR101_INIT, /* default state */
+ UPGR101_WS, /* upgrade to WebSockets requested */
+ UPGR101_H2, /* upgrade to HTTP/2 requested */
+ UPGR101_RECEIVED, /* 101 response received */
+ UPGR101_WORKING /* talking upgraded protocol */
+};
+
+
+/*
+ * Request specific data in the easy handle (Curl_easy). Previously,
+ * these members were on the connectdata struct but since a conn struct may
+ * now be shared between different Curl_easys, we store connection-specific
+ * data here. This struct only keeps stuff that's interesting for *this*
+ * request, as it will be cleared between multiple ones
+ */
+struct SingleRequest {
+ curl_off_t size; /* -1 if unknown at this point */
+ curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch,
+ -1 means unlimited */
+ curl_off_t bytecount; /* total number of bytes read */
+ curl_off_t writebytecount; /* number of bytes written */
+
+ struct curltime start; /* transfer started at this time */
+ unsigned int headerbytecount; /* received server headers (not CONNECT
+ headers) */
+ unsigned int allheadercount; /* all received headers (server + CONNECT) */
+ unsigned int deductheadercount; /* this amount of bytes doesn't count when
+ we check if anything has been transferred
+ at the end of a connection. We use this
+ counter to make only a 100 reply (without
+ a following second response code) result
+ in a CURLE_GOT_NOTHING error code */
+ int headerline; /* counts header lines to better track the
+ first one */
+ curl_off_t offset; /* possible resume offset read from the
+ Content-Range: header */
+ int httpversion; /* Version in response (09, 10, 11, etc.) */
+ int httpcode; /* error code from the 'HTTP/1.? XXX' or
+ 'RTSP/1.? XXX' line */
+ int keepon;
+ enum upgrade101 upgr101; /* 101 upgrade state */
+
+ /* Client Writer stack, handles transfer- and content-encodings, protocol
+ * checks, pausing by client callbacks. */
+ struct Curl_cwriter *writer_stack;
+ /* Client Reader stack, handles transfer- and content-encodings, protocol
+ * checks, pausing by client callbacks. */
+ struct Curl_creader *reader_stack;
+  struct bufq sendbuf; /* data which needs to be sent to the server */
+ size_t sendbuf_hds_len; /* amount of header bytes in sendbuf */
+ time_t timeofdoc;
+ long bodywrites;
+ char *location; /* This points to an allocated version of the Location:
+ header data */
+ char *newurl; /* Set to the new URL to use when a redirect or a retry is
+ wanted */
+
+ /* Allocated protocol-specific data. Each protocol handler makes sure this
+ points to data it needs. */
+ union {
+ struct FILEPROTO *file;
+ struct FTP *ftp;
+ struct HTTP *http;
+ struct IMAP *imap;
+ struct ldapreqinfo *ldap;
+ struct MQTT *mqtt;
+ struct POP3 *pop3;
+ struct RTSP *rtsp;
+ struct smb_request *smb;
+ struct SMTP *smtp;
+ struct SSHPROTO *ssh;
+ struct TELNET *telnet;
+ } p;
+#ifndef CURL_DISABLE_DOH
+ struct dohdata *doh; /* DoH specific data for this request */
+#endif
+#ifndef CURL_DISABLE_COOKIES
+ unsigned char setcookies;
+#endif
+ BIT(header); /* incoming data has HTTP header */
+ BIT(done); /* request is done, e.g. no more send/recv should
+ * happen. This can be TRUE before `upload_done` or
+ * `download_done` is TRUE. */
+ BIT(content_range); /* set TRUE if Content-Range: was found */
+ BIT(download_done); /* set to TRUE when download is complete */
+ BIT(eos_written); /* iff EOS has been written to client */
+ BIT(eos_read); /* iff EOS has been read from the client */
+ BIT(rewind_read); /* iff reader needs rewind at next start */
+ BIT(upload_done); /* set to TRUE when all request data has been sent */
+ BIT(upload_aborted); /* set to TRUE when upload was aborted. Will also
+ * show `upload_done` as TRUE. */
+ BIT(ignorebody); /* we read a response-body but we ignore it! */
+ BIT(http_bodyless); /* HTTP response status code is between 100 and 199,
+ 204 or 304 */
+ BIT(chunk); /* if set, this is a chunked transfer-encoding */
+ BIT(ignore_cl); /* ignore content-length */
+ BIT(upload_chunky); /* set TRUE if we are doing chunked transfer-encoding
+ on upload */
+ BIT(getheader); /* TRUE if header parsing is wanted */
+ BIT(no_body); /* the response has no body */
+ BIT(authneg); /* TRUE when the auth phase has started, which means
+ that we are creating a request with an auth header,
+ but it is not the final request in the auth
+ negotiation. */
+ BIT(sendbuf_init); /* sendbuf is initialized */
+};
+
+/**
+ * Initialize the state of the request for first use.
+ */
+CURLcode Curl_req_init(struct SingleRequest *req);
+
+/**
+ * The request is about to start. Record time and do a soft reset.
+ */
+CURLcode Curl_req_start(struct SingleRequest *req,
+ struct Curl_easy *data);
+
+/**
+ * The request may continue with a follow up. Reset
+ * members, but keep start time for overall duration calc.
+ */
+CURLcode Curl_req_soft_reset(struct SingleRequest *req,
+ struct Curl_easy *data);
+
+/**
+ * The request is done. If not aborted, make sure that buffers are
+ * flushed to the client.
+ * @param req the request
+ * @param data the transfer
+ * @param aborted TRUE iff the request was aborted/errored
+ */
+CURLcode Curl_req_done(struct SingleRequest *req,
+ struct Curl_easy *data, bool aborted);
+
+/**
+ * Free the state of the request, not usable afterwards.
+ */
+void Curl_req_free(struct SingleRequest *req, struct Curl_easy *data);
+
+/**
+ * Hard reset the state of the request to virgin state based on
+ * transfer settings.
+ */
+void Curl_req_hard_reset(struct SingleRequest *req, struct Curl_easy *data);
+
+#ifndef USE_HYPER
+/**
+ * Send request headers. If not all could be sent
+ * they will be buffered. Use `Curl_req_flush()` to make sure
+ * bytes are really sent.
+ * @param data the transfer making the request
+ * @param buf the complete header bytes, no body
+ * @return CURLE_OK (on blocking with *pnwritten == 0) or error.
+ */
+CURLcode Curl_req_send(struct Curl_easy *data, struct dynbuf *buf);
+
+#endif /* !USE_HYPER */
+
+/**
+ * TRUE iff the request has sent all request headers and data.
+ */
+bool Curl_req_done_sending(struct Curl_easy *data);
+
+/*
+ * Read more from client and flush all buffered request bytes.
+ * @return CURLE_OK on success or the error on the sending.
+ * Never returns CURLE_AGAIN.
+ */
+CURLcode Curl_req_send_more(struct Curl_easy *data);
+
+/**
+ * TRUE iff the request wants to send, e.g. has buffered bytes.
+ */
+bool Curl_req_want_send(struct Curl_easy *data);
+
+/**
+ * Stop sending any more request data to the server.
+ * Will clear the send buffer and mark request sending as done.
+ */
+CURLcode Curl_req_abort_sending(struct Curl_easy *data);
+
+#endif /* HEADER_CURL_REQUEST_H */
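
Taken together, the declarations above suggest the following calling order. This is only an illustrative sketch: in libcurl the later calls are driven from the transfer loop when the socket becomes writable, and `data`, `req` and `hdrs` come from library internals, so this is not standalone runnable code.

static CURLcode send_request_sketch(struct Curl_easy *data,
                                    struct SingleRequest *req,
                                    struct dynbuf *hdrs)
{
  CURLcode result = Curl_req_start(req, data);  /* record start, soft reset */
  if(!result)
    result = Curl_req_send(data, hdrs);         /* headers, buffered if short */
  if(!result && Curl_req_want_send(data))       /* buffered bytes remain */
    result = Curl_req_send_more(data);          /* normally re-invoked from the
                                                   transfer loop until
                                                   Curl_req_done_sending() */
  if(!result)
    result = Curl_req_done(req, data, FALSE);   /* flush and reset client */
  return result;
}
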
diff --git a/libs/libcurl/src/rtsp.c b/libs/libcurl/src/rtsp.c
index 17fff38511..9e8120373b 100644
--- a/libs/libcurl/src/rtsp.c
+++ b/libs/libcurl/src/rtsp.c
@@ -70,8 +70,7 @@ static int rtsp_getsock_do(struct Curl_easy *data,
static CURLcode rtsp_rtp_write_resp(struct Curl_easy *data,
const char *buf,
size_t blen,
- bool is_eos,
- bool *done);
+ bool is_eos);
static CURLcode rtsp_setup_connection(struct Curl_easy *data,
struct connectdata *conn);
@@ -225,8 +224,6 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) Curl_RtspReq rtspreq = data->set.rtspreq;
struct RTSP *rtsp = data->req.p.rtsp;
struct dynbuf req_buffer;
- curl_off_t postsize = 0; /* for ANNOUNCE and SET_PARAMETER */
- curl_off_t putsize = 0; /* for ANNOUNCE and SET_PARAMETER */
const char *p_request = NULL;
const char *p_session_id = NULL;
@@ -241,6 +238,8 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) const char *p_userpwd = NULL;
*done = TRUE;
+ /* Initialize a dynamic send buffer */
+ Curl_dyn_init(&req_buffer, DYN_RTSP_REQ_HEADER);
rtsp->CSeq_sent = data->state.rtsp_next_client_CSeq;
rtsp->CSeq_recv = 0;
@@ -310,9 +309,8 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) }
if(rtspreq == RTSPREQ_RECEIVE) {
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
-
- return result;
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, -1);
+ goto out;
}
p_session_id = data->set.str[STRING_RTSP_SESSION_ID];
@@ -320,7 +318,8 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) (rtspreq & ~(RTSPREQ_OPTIONS | RTSPREQ_DESCRIBE | RTSPREQ_SETUP))) {
failf(data, "Refusing to issue an RTSP request [%s] without a session ID.",
p_request);
- return CURLE_BAD_FUNCTION_ARGUMENT;
+ result = CURLE_BAD_FUNCTION_ARGUMENT;
+ goto out;
}
/* Stream URI. Default to server '*' if not specified */
@@ -347,7 +346,8 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) else {
failf(data,
"Refusing to issue an RTSP SETUP without a Transport: header.");
- return CURLE_BAD_FUNCTION_ARGUMENT;
+ result = CURLE_BAD_FUNCTION_ARGUMENT;
+ goto out;
}
p_transport = data->state.aptr.rtsp_transport;
@@ -366,9 +366,10 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) data->state.aptr.accept_encoding =
aprintf("Accept-Encoding: %s\r\n", data->set.str[STRING_ENCODING]);
- if(!data->state.aptr.accept_encoding)
- return CURLE_OUT_OF_MEMORY;
-
+ if(!data->state.aptr.accept_encoding) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
+ }
p_accept_encoding = data->state.aptr.accept_encoding;
}
}
@@ -390,7 +391,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) result = Curl_http_output_auth(data, conn, p_request, HTTPREQ_GET,
p_stream_uri, FALSE);
if(result)
- return result;
+ goto out;
p_proxyuserpwd = data->state.aptr.proxyuserpwd;
p_userpwd = data->state.aptr.userpwd;
@@ -424,23 +425,22 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) */
if(Curl_checkheaders(data, STRCONST("CSeq"))) {
failf(data, "CSeq cannot be set as a custom header.");
- return CURLE_RTSP_CSEQ_ERROR;
+ result = CURLE_RTSP_CSEQ_ERROR;
+ goto out;
}
if(Curl_checkheaders(data, STRCONST("Session"))) {
failf(data, "Session ID cannot be set as a custom header.");
- return CURLE_BAD_FUNCTION_ARGUMENT;
+ result = CURLE_BAD_FUNCTION_ARGUMENT;
+ goto out;
}
- /* Initialize a dynamic send buffer */
- Curl_dyn_init(&req_buffer, DYN_RTSP_REQ_HEADER);
-
result =
Curl_dyn_addf(&req_buffer,
"%s %s RTSP/1.0\r\n" /* Request Stream-URI RTSP/1.0 */
"CSeq: %ld\r\n", /* CSeq */
p_request, p_stream_uri, rtsp->CSeq_sent);
if(result)
- return result;
+ goto out;
/*
* Rather than do a normal alloc line, keep the session_id unformatted
@@ -449,7 +449,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) if(p_session_id) {
result = Curl_dyn_addf(&req_buffer, "Session: %s\r\n", p_session_id);
if(result)
- return result;
+ goto out;
}
/*
@@ -481,44 +481,58 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) Curl_safefree(data->state.aptr.userpwd);
if(result)
- return result;
+ goto out;
if((rtspreq == RTSPREQ_SETUP) || (rtspreq == RTSPREQ_DESCRIBE)) {
result = Curl_add_timecondition(data, &req_buffer);
if(result)
- return result;
+ goto out;
}
result = Curl_add_custom_headers(data, FALSE, &req_buffer);
if(result)
- return result;
+ goto out;
if(rtspreq == RTSPREQ_ANNOUNCE ||
rtspreq == RTSPREQ_SET_PARAMETER ||
rtspreq == RTSPREQ_GET_PARAMETER) {
+ curl_off_t req_clen; /* request content length */
if(data->state.upload) {
- putsize = data->state.infilesize;
+ req_clen = data->state.infilesize;
data->state.httpreq = HTTPREQ_PUT;
-
+ result = Curl_creader_set_fread(data, req_clen);
+ if(result)
+ goto out;
}
else {
- postsize = (data->state.infilesize != -1)?
- data->state.infilesize:
- (data->set.postfields? (curl_off_t)strlen(data->set.postfields):0);
- data->state.httpreq = HTTPREQ_POST;
+ if(data->set.postfields) {
+ size_t plen = strlen(data->set.postfields);
+ req_clen = (curl_off_t)plen;
+ result = Curl_creader_set_buf(data, data->set.postfields, plen);
+ }
+ else if(data->state.infilesize >= 0) {
+ req_clen = data->state.infilesize;
+ result = Curl_creader_set_fread(data, req_clen);
+ }
+ else {
+ req_clen = 0;
+ result = Curl_creader_set_null(data);
+ }
+ if(result)
+ goto out;
}
- if(putsize > 0 || postsize > 0) {
+ if(req_clen > 0) {
/* As stated in the http comments, it is probably not wise to
* actually set a custom Content-Length in the headers */
if(!Curl_checkheaders(data, STRCONST("Content-Length"))) {
result =
Curl_dyn_addf(&req_buffer,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T"\r\n",
- (data->state.upload ? putsize : postsize));
+ req_clen);
if(result)
- return result;
+ goto out;
}
if(rtspreq == RTSPREQ_SET_PARAMETER ||
@@ -528,7 +542,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) STRCONST("Content-Type: "
"text/parameters\r\n"));
if(result)
- return result;
+ goto out;
}
}
@@ -538,11 +552,9 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) STRCONST("Content-Type: "
"application/sdp\r\n"));
if(result)
- return result;
+ goto out;
}
}
-
- data->state.expect100header = FALSE; /* RTSP posts are simple/small */
}
else if(rtspreq == RTSPREQ_GET_PARAMETER) {
/* Check for an empty GET_PARAMETER (heartbeat) request */
@@ -550,31 +562,26 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) data->req.no_body = TRUE;
}
}
+ else {
+ result = Curl_creader_set_null(data);
+ if(result)
+ goto out;
+ }
- /* RTSP never allows chunked transfer */
- data->req.forbidchunk = TRUE;
/* Finish the request buffer */
result = Curl_dyn_addn(&req_buffer, STRCONST("\r\n"));
if(result)
- return result;
+ goto out;
- if(postsize > 0) {
- result = Curl_dyn_addn(&req_buffer, data->set.postfields,
- (size_t)postsize);
- if(result)
- return result;
- }
+ Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
/* issue the request */
- result = Curl_buffer_send(&req_buffer, data, data->req.p.http,
- &data->info.request_size, 0, FIRSTSOCKET);
+ result = Curl_req_send(data, &req_buffer);
if(result) {
failf(data, "Failed sending RTSP request");
- return result;
+ goto out;
}
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, putsize?FIRSTSOCKET:-1);
-
/* Increment the CSeq on success */
data->state.rtsp_next_client_CSeq++;
@@ -585,7 +592,8 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) if(Curl_pgrsUpdate(data))
result = CURLE_ABORTED_BY_CALLBACK;
}
-
+out:
+ Curl_dyn_free(&req_buffer);
return result;
}
@@ -779,8 +787,7 @@ out: static CURLcode rtsp_rtp_write_resp(struct Curl_easy *data,
const char *buf,
size_t blen,
- bool is_eos,
- bool *done)
+ bool is_eos)
{
struct rtsp_conn *rtspc = &(data->conn->proto.rtspc);
CURLcode result = CURLE_OK;
@@ -788,7 +795,6 @@ static CURLcode rtsp_rtp_write_resp(struct Curl_easy *data, if(!data->req.header)
rtspc->in_header = FALSE;
- *done = FALSE;
if(!blen) {
goto out;
}
@@ -812,7 +818,7 @@ static CURLcode rtsp_rtp_write_resp(struct Curl_easy *data, /* we want to parse headers, do so */
if(data->req.header && blen) {
rtspc->in_header = TRUE;
- result = Curl_http_write_resp_hds(data, buf, blen, &consumed, done);
+ result = Curl_http_write_resp_hds(data, buf, blen, &consumed);
if(result)
goto out;
@@ -838,13 +844,14 @@ static CURLcode rtsp_rtp_write_resp(struct Curl_easy *data, }
if(rtspc->state != RTP_PARSE_SKIP)
- *done = FALSE;
+ data->req.done = FALSE;
/* we SHOULD have consumed all bytes, unless the response is borked.
* In which case we write out the left over bytes, letting the client
* writer deal with it (it will report EXCESS and fail the transfer). */
DEBUGF(infof(data, "rtsp_rtp_write_resp(len=%zu, in_header=%d, done=%d "
" rtspc->state=%d, req.size=%" CURL_FORMAT_CURL_OFF_T ")",
- blen, rtspc->in_header, *done, rtspc->state, data->req.size));
+ blen, rtspc->in_header, data->req.done, rtspc->state,
+ data->req.size));
if(!result && (is_eos || blen)) {
result = Curl_client_write(data, CLIENTWRITE_BODY|
(is_eos? CLIENTWRITE_EOS:0),
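
The reworked rtsp_do() above no longer appends the body to the request buffer; it installs a client reader that matches where the body comes from. A condensed restatement of that selection (error paths trimmed, the helper name is invented for illustration; the real logic is in the hunk above):

#include <string.h>

static CURLcode pick_rtsp_body_reader(struct Curl_easy *data,
                                      curl_off_t *req_clen)
{
  if(data->set.postfields) {              /* body held in memory */
    size_t plen = strlen(data->set.postfields);
    *req_clen = (curl_off_t)plen;
    return Curl_creader_set_buf(data, data->set.postfields, plen);
  }
  if(data->state.infilesize >= 0) {       /* body via the read callback */
    *req_clen = data->state.infilesize;
    return Curl_creader_set_fread(data, *req_clen);
  }
  *req_clen = 0;                          /* no request body at all */
  return Curl_creader_set_null(data);
}
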
diff --git a/libs/libcurl/src/sendf.c b/libs/libcurl/src/sendf.c
index 7686e3c5ad..ea598b35df 100644
--- a/libs/libcurl/src/sendf.c
+++ b/libs/libcurl/src/sendf.c
@@ -41,6 +41,7 @@
#include "cfilters.h"
#include "connect.h"
#include "content_encoding.h"
+#include "cw-out.h"
#include "vtls/vtls.h"
#include "vssh/ssh.h"
#include "easyif.h"
@@ -49,8 +50,8 @@ #include "select.h"
#include "strdup.h"
#include "http2.h"
-#include "headers.h"
#include "progress.h"
+#include "warnless.h"
#include "ws.h"
/* The last 3 #include files should be in this order */
@@ -59,342 +60,18 @@ #include "memdebug.h"
-static CURLcode do_init_stack(struct Curl_easy *data);
-
-#if defined(CURL_DO_LINEEND_CONV) && !defined(CURL_DISABLE_FTP)
-/*
- * convert_lineends() changes CRLF (\r\n) end-of-line markers to a single LF
- * (\n), with special processing for CRLF sequences that are split between two
- * blocks of data. Remaining, bare CRs are changed to LFs. The possibly new
- * size of the data is returned.
- */
-static size_t convert_lineends(struct Curl_easy *data,
- char *startPtr, size_t size)
-{
- char *inPtr, *outPtr;
-
- /* sanity check */
- if(!startPtr || (size < 1)) {
- return size;
- }
-
- if(data->state.prev_block_had_trailing_cr) {
- /* The previous block of incoming data
- had a trailing CR, which was turned into a LF. */
- if(*startPtr == '\n') {
- /* This block of incoming data starts with the
- previous block's LF so get rid of it */
- memmove(startPtr, startPtr + 1, size-1);
- size--;
- /* and it wasn't a bare CR but a CRLF conversion instead */
- data->state.crlf_conversions++;
- }
- data->state.prev_block_had_trailing_cr = FALSE; /* reset the flag */
- }
-
- /* find 1st CR, if any */
- inPtr = outPtr = memchr(startPtr, '\r', size);
- if(inPtr) {
- /* at least one CR, now look for CRLF */
- while(inPtr < (startPtr + size-1)) {
- /* note that it's size-1, so we'll never look past the last byte */
- if(memcmp(inPtr, "\r\n", 2) == 0) {
- /* CRLF found, bump past the CR and copy the NL */
- inPtr++;
- *outPtr = *inPtr;
- /* keep track of how many CRLFs we converted */
- data->state.crlf_conversions++;
- }
- else {
- if(*inPtr == '\r') {
- /* lone CR, move LF instead */
- *outPtr = '\n';
- }
- else {
- /* not a CRLF nor a CR, just copy whatever it is */
- *outPtr = *inPtr;
- }
- }
- outPtr++;
- inPtr++;
- } /* end of while loop */
-
- if(inPtr < startPtr + size) {
- /* handle last byte */
- if(*inPtr == '\r') {
- /* deal with a CR at the end of the buffer */
- *outPtr = '\n'; /* copy a NL instead */
- /* note that a CRLF might be split across two blocks */
- data->state.prev_block_had_trailing_cr = TRUE;
- }
- else {
- /* copy last byte */
- *outPtr = *inPtr;
- }
- outPtr++;
- }
- if(outPtr < startPtr + size)
- /* tidy up by null terminating the now shorter data */
- *outPtr = '\0';
-
- return (outPtr - startPtr);
- }
- return size;
-}
-#endif /* CURL_DO_LINEEND_CONV && !CURL_DISABLE_FTP */
-
-/*
- * Curl_nwrite() is an internal write function that sends data to the
- * server. Works with a socket index for the connection.
- *
- * If the write would block (CURLE_AGAIN), it returns CURLE_OK and
- * (*nwritten == 0). Otherwise we return regular CURLcode value.
- */
-CURLcode Curl_nwrite(struct Curl_easy *data,
- int sockindex,
- const void *buf,
- size_t blen,
- ssize_t *pnwritten)
-{
- ssize_t nwritten;
- CURLcode result = CURLE_OK;
- struct connectdata *conn;
-
- DEBUGASSERT(sockindex >= 0 && sockindex < 2);
- DEBUGASSERT(pnwritten);
- DEBUGASSERT(data);
- DEBUGASSERT(data->conn);
- conn = data->conn;
-#ifdef CURLDEBUG
- {
- /* Allow debug builds to override this logic to force short sends
- */
- char *p = getenv("CURL_SMALLSENDS");
- if(p) {
- size_t altsize = (size_t)strtoul(p, NULL, 10);
- if(altsize)
- blen = CURLMIN(blen, altsize);
- }
- }
-#endif
- nwritten = conn->send[sockindex](data, sockindex, buf, blen, &result);
- if(result == CURLE_AGAIN) {
- nwritten = 0;
- result = CURLE_OK;
- }
- else if(result) {
- nwritten = -1; /* make sure */
- }
- else {
- DEBUGASSERT(nwritten >= 0);
- }
-
- *pnwritten = nwritten;
- return result;
-}
-
-/*
- * Curl_write() is an internal write function that sends data to the
- * server. Works with plain sockets, SCP, SSL or kerberos.
- *
- * If the write would block (CURLE_AGAIN), we return CURLE_OK and
- * (*written == 0). Otherwise we return regular CURLcode value.
- */
-CURLcode Curl_write(struct Curl_easy *data,
- curl_socket_t sockfd,
- const void *mem,
- size_t len,
- ssize_t *written)
-{
- struct connectdata *conn;
- int num;
-
- DEBUGASSERT(data);
- DEBUGASSERT(data->conn);
- conn = data->conn;
- num = (sockfd != CURL_SOCKET_BAD && sockfd == conn->sock[SECONDARYSOCKET]);
- return Curl_nwrite(data, num, mem, len, written);
-}
-
-static CURLcode pausewrite(struct Curl_easy *data,
- int type, /* what type of data */
- bool paused_body,
- const char *ptr,
- size_t len)
-{
- /* signalled to pause sending on this connection, but since we have data
- we want to send we need to dup it to save a copy for when the sending
- is again enabled */
- struct SingleRequest *k = &data->req;
- struct UrlState *s = &data->state;
- unsigned int i;
- bool newtype = TRUE;
-
- Curl_conn_ev_data_pause(data, TRUE);
-
- if(s->tempcount) {
- for(i = 0; i< s->tempcount; i++) {
- if(s->tempwrite[i].type == type &&
- !!s->tempwrite[i].paused_body == !!paused_body) {
- /* data for this type exists */
- newtype = FALSE;
- break;
- }
- }
- DEBUGASSERT(i < 3);
- if(i >= 3)
- /* There are more types to store than what fits: very bad */
- return CURLE_OUT_OF_MEMORY;
- }
- else
- i = 0;
-
- if(newtype) {
- /* store this information in the state struct for later use */
- Curl_dyn_init(&s->tempwrite[i].b, DYN_PAUSE_BUFFER);
- s->tempwrite[i].type = type;
- s->tempwrite[i].paused_body = paused_body;
- s->tempcount++;
- }
-
- if(Curl_dyn_addn(&s->tempwrite[i].b, (unsigned char *)ptr, len))
- return CURLE_OUT_OF_MEMORY;
-
- /* mark the connection as RECV paused */
- k->keepon |= KEEP_RECV_PAUSE;
-
- return CURLE_OK;
-}
-
-
-/* chop_write() writes chunks of data not larger than CURL_MAX_WRITE_SIZE via
- * client write callback(s) and takes care of pause requests from the
- * callbacks.
- */
-static CURLcode chop_write(struct Curl_easy *data,
- int type,
- bool skip_body_write,
- char *optr,
- size_t olen)
-{
- struct connectdata *conn = data->conn;
- curl_write_callback writeheader = NULL;
- curl_write_callback writebody = NULL;
- char *ptr = optr;
- size_t len = olen;
- void *writebody_ptr = data->set.out;
-
- if(!len)
- return CURLE_OK;
-
- /* If reading is paused, append this data to the already held data for this
- type. */
- if(data->req.keepon & KEEP_RECV_PAUSE)
- return pausewrite(data, type, !skip_body_write, ptr, len);
-
- /* Determine the callback(s) to use. */
- if(!skip_body_write &&
- ((type & CLIENTWRITE_BODY) ||
- ((type & CLIENTWRITE_HEADER) && data->set.include_header))) {
- writebody = data->set.fwrite_func;
- }
- if((type & (CLIENTWRITE_HEADER|CLIENTWRITE_INFO)) &&
- (data->set.fwrite_header || data->set.writeheader)) {
- /*
- * Write headers to the same callback or to the especially setup
- * header callback function (added after version 7.7.1).
- */
- writeheader =
- data->set.fwrite_header? data->set.fwrite_header: data->set.fwrite_func;
- }
-
- /* Chop data, write chunks. */
- while(len) {
- size_t chunklen = len <= CURL_MAX_WRITE_SIZE? len: CURL_MAX_WRITE_SIZE;
-
- if(writebody) {
- size_t wrote;
- Curl_set_in_callback(data, true);
- wrote = writebody(ptr, 1, chunklen, writebody_ptr);
- Curl_set_in_callback(data, false);
-
- if(CURL_WRITEFUNC_PAUSE == wrote) {
- if(conn->handler->flags & PROTOPT_NONETWORK) {
- /* Protocols that work without network cannot be paused. This is
- actually only FILE:// just now, and it can't pause since the
- transfer isn't done using the "normal" procedure. */
- failf(data, "Write callback asked for PAUSE when not supported");
- return CURLE_WRITE_ERROR;
- }
- return pausewrite(data, type, TRUE, ptr, len);
- }
- if(wrote != chunklen) {
- failf(data, "Failure writing output to destination");
- return CURLE_WRITE_ERROR;
- }
- }
-
- ptr += chunklen;
- len -= chunklen;
- }
-
-#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_HEADERS_API)
- /* HTTP header, but not status-line */
- if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
- (type & CLIENTWRITE_HEADER) && !(type & CLIENTWRITE_STATUS) ) {
- unsigned char htype = (unsigned char)
- (type & CLIENTWRITE_CONNECT ? CURLH_CONNECT :
- (type & CLIENTWRITE_1XX ? CURLH_1XX :
- (type & CLIENTWRITE_TRAILER ? CURLH_TRAILER :
- CURLH_HEADER)));
- CURLcode result = Curl_headers_push(data, optr, htype);
- if(result)
- return result;
- }
-#endif
-
- if(writeheader) {
- size_t wrote;
-
- Curl_set_in_callback(data, true);
- wrote = writeheader(optr, 1, olen, data->set.writeheader);
- Curl_set_in_callback(data, false);
-
- if(CURL_WRITEFUNC_PAUSE == wrote)
- return pausewrite(data, type, FALSE, optr, olen);
- if(wrote != olen) {
- failf(data, "Failed writing header");
- return CURLE_WRITE_ERROR;
- }
- }
-
- return CURLE_OK;
-}
-
+static CURLcode do_init_writer_stack(struct Curl_easy *data);
/* Curl_client_write() sends data to the write callback(s)
The bit pattern defines to what "streams" to write to. Body and/or header.
The defines are in sendf.h of course.
-
- If CURL_DO_LINEEND_CONV is enabled, data is converted IN PLACE to the
- local character encoding. This is a problem and should be changed in
- the future to leave the original data alone.
*/
CURLcode Curl_client_write(struct Curl_easy *data,
- int type, char *buf, size_t blen)
+ int type, const char *buf, size_t blen)
{
CURLcode result;
-#if !defined(CURL_DISABLE_FTP) && defined(CURL_DO_LINEEND_CONV)
- /* FTP data may need conversion. */
- if((type & CLIENTWRITE_BODY) &&
- (data->conn->handler->protocol & PROTO_FAMILY_FTP) &&
- data->conn->proto.ftpc.transfertype == 'A') {
- /* convert end-of-line markers */
- blen = convert_lineends(data, buf, blen);
- }
-#endif
/* it is one of those, at least */
DEBUGASSERT(type & (CLIENTWRITE_BODY|CLIENTWRITE_HEADER|CLIENTWRITE_INFO));
/* BODY is only BODY (with optional EOS) */
@@ -405,7 +82,7 @@ CURLcode Curl_client_write(struct Curl_easy *data, ((type & ~(CLIENTWRITE_INFO|CLIENTWRITE_EOS)) == 0));
if(!data->req.writer_stack) {
- result = do_init_stack(data);
+ result = do_init_writer_stack(data);
if(result)
return result;
DEBUGASSERT(data->req.writer_stack);
@@ -414,58 +91,86 @@ CURLcode Curl_client_write(struct Curl_easy *data, return Curl_cwriter_write(data, data->req.writer_stack, type, buf, blen);
}
-CURLcode Curl_client_unpause(struct Curl_easy *data)
-{
- CURLcode result = CURLE_OK;
-
- if(data->state.tempcount) {
- /* there are buffers for sending that can be delivered as the receive
- pausing is lifted! */
- unsigned int i;
- unsigned int count = data->state.tempcount;
- struct tempbuf writebuf[3]; /* there can only be three */
-
- /* copy the structs to allow for immediate re-pausing */
- for(i = 0; i < data->state.tempcount; i++) {
- writebuf[i] = data->state.tempwrite[i];
- Curl_dyn_init(&data->state.tempwrite[i].b, DYN_PAUSE_BUFFER);
- }
- data->state.tempcount = 0;
-
- for(i = 0; i < count; i++) {
- /* even if one function returns error, this loops through and frees
- all buffers */
- if(!result)
- result = chop_write(data, writebuf[i].type,
- !writebuf[i].paused_body,
- Curl_dyn_ptr(&writebuf[i].b),
- Curl_dyn_len(&writebuf[i].b));
- Curl_dyn_free(&writebuf[i].b);
- }
- }
- return result;
-}
-
-void Curl_client_cleanup(struct Curl_easy *data)
+static void cl_reset_writer(struct Curl_easy *data)
{
struct Curl_cwriter *writer = data->req.writer_stack;
- size_t i;
-
while(writer) {
data->req.writer_stack = writer->next;
writer->cwt->do_close(data, writer);
free(writer);
writer = data->req.writer_stack;
}
+}
- for(i = 0; i < data->state.tempcount; i++) {
- Curl_dyn_free(&data->state.tempwrite[i].b);
+static void cl_reset_reader(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = data->req.reader_stack;
+ while(reader) {
+ data->req.reader_stack = reader->next;
+ reader->crt->do_close(data, reader);
+ free(reader);
+ reader = data->req.reader_stack;
}
- data->state.tempcount = 0;
+}
+
+void Curl_client_cleanup(struct Curl_easy *data)
+{
+ DEBUGF(infof(data, "Curl_client_cleanup()"));
+ cl_reset_reader(data);
+ cl_reset_writer(data);
+
data->req.bytecount = 0;
data->req.headerline = 0;
}
+void Curl_client_reset(struct Curl_easy *data)
+{
+ if(data->req.rewind_read) {
+ /* already requested */
+ DEBUGF(infof(data, "Curl_client_reset(), will rewind_read"));
+ }
+ else {
+ DEBUGF(infof(data, "Curl_client_reset(), clear readers"));
+ cl_reset_reader(data);
+ }
+ cl_reset_writer(data);
+
+ data->req.bytecount = 0;
+ data->req.headerline = 0;
+}
+
+CURLcode Curl_client_start(struct Curl_easy *data)
+{
+ if(data->req.rewind_read) {
+ struct Curl_creader *r = data->req.reader_stack;
+ CURLcode result = CURLE_OK;
+
+ DEBUGF(infof(data, "client start, rewind readers"));
+ while(r) {
+ result = r->crt->rewind(data, r);
+ if(result) {
+ failf(data, "rewind of client reader '%s' failed: %d",
+ r->crt->name, result);
+ return result;
+ }
+ r = r->next;
+ }
+ data->req.rewind_read = FALSE;
+ cl_reset_reader(data);
+ }
+ return CURLE_OK;
+}
+
+bool Curl_creader_will_rewind(struct Curl_easy *data)
+{
+ return data->req.rewind_read;
+}
+
+void Curl_creader_set_rewind(struct Curl_easy *data, bool enable)
+{
+ data->req.rewind_read = !!enable;
+}
+
/* Write data using an unencoding writer stack. "nbytes" is not
allowed to be 0. */
CURLcode Curl_cwriter_write(struct Curl_easy *data,
@@ -499,26 +204,6 @@ void Curl_cwriter_def_close(struct Curl_easy *data, (void) writer;
}
-/* Real client writer to installed callbacks. */
-static CURLcode cw_client_write(struct Curl_easy *data,
- struct Curl_cwriter *writer, int type,
- const char *buf, size_t nbytes)
-{
- (void)writer;
- if(!nbytes)
- return CURLE_OK;
- return chop_write(data, type, FALSE, (char *)buf, nbytes);
-}
-
-static const struct Curl_cwtype cw_client = {
- "client",
- NULL,
- Curl_cwriter_def_init,
- cw_client_write,
- Curl_cwriter_def_close,
- sizeof(struct Curl_cwriter)
-};
-
static size_t get_max_body_write_len(struct Curl_easy *data, curl_off_t limit)
{
if(limit != -1) {
@@ -541,28 +226,32 @@ static size_t get_max_body_write_len(struct Curl_easy *data, curl_off_t limit) return SIZE_T_MAX;
}
+struct cw_download_ctx {
+ struct Curl_cwriter super;
+ BIT(started_response);
+};
/* Download client writer in phase CURL_CW_PROTOCOL that
* sees the "real" download body data. */
static CURLcode cw_download_write(struct Curl_easy *data,
struct Curl_cwriter *writer, int type,
const char *buf, size_t nbytes)
{
+ struct cw_download_ctx *ctx = writer->ctx;
CURLcode result;
size_t nwrite, excess_len = 0;
+ bool is_connect = !!(type & CLIENTWRITE_CONNECT);
+
+ if(!is_connect && !ctx->started_response) {
+ Curl_pgrsTime(data, TIMER_STARTTRANSFER);
+ ctx->started_response = TRUE;
+ }
if(!(type & CLIENTWRITE_BODY)) {
- if((type & CLIENTWRITE_CONNECT) && data->set.suppress_connect_headers)
+ if(is_connect && data->set.suppress_connect_headers)
return CURLE_OK;
return Curl_cwriter_write(data, writer->next, type, buf, nbytes);
}
- if(!data->req.bytecount) {
- Curl_pgrsTime(data, TIMER_STARTTRANSFER);
- if(data->req.exp100 > EXP100_SEND_DATA)
- /* set time stamp to compare with when waiting for the 100 */
- data->req.start100 = Curl_now();
- }
-
/* Here, we deal with REAL BODY bytes. All filtering and transfer
* encodings have been applied and only the true content, e.g. BODY,
* bytes are passed here.
@@ -575,6 +264,9 @@ static CURLcode cw_download_write(struct Curl_easy *data, DEBUGF(infof(data, "did not want a BODY, but seeing %zu bytes",
nbytes));
data->req.download_done = TRUE;
+ if(data->info.header_size)
+ /* if headers have been received, this is fine */
+ return CURLE_OK;
return CURLE_WEIRD_SERVER_REPLY;
}
@@ -604,14 +296,14 @@ static CURLcode cw_download_write(struct Curl_easy *data, }
}
- /* Update stats, write and report progress */
- data->req.bytecount += nwrite;
- ++data->req.bodywrites;
- if(!data->req.ignorebody && nwrite) {
+ if(!data->req.ignorebody && (nwrite || (type & CLIENTWRITE_EOS))) {
result = Curl_cwriter_write(data, writer->next, type, buf, nwrite);
if(result)
return result;
}
+ /* Update stats, write and report progress */
+ data->req.bytecount += nwrite;
+ ++data->req.bodywrites;
result = Curl_pgrsSetDownloadCounter(data, data->req.bytecount);
if(result)
return result;
@@ -646,7 +338,7 @@ static const struct Curl_cwtype cw_download = { Curl_cwriter_def_init,
cw_download_write,
Curl_cwriter_def_close,
- sizeof(struct Curl_cwriter)
+ sizeof(struct cw_download_ctx)
};
/* RAW client writer in phase CURL_CW_RAW that
@@ -676,15 +368,18 @@ CURLcode Curl_cwriter_create(struct Curl_cwriter **pwriter, const struct Curl_cwtype *cwt,
Curl_cwriter_phase phase)
{
- struct Curl_cwriter *writer;
+ struct Curl_cwriter *writer = NULL;
CURLcode result = CURLE_OUT_OF_MEMORY;
+ void *p;
DEBUGASSERT(cwt->cwriter_size >= sizeof(struct Curl_cwriter));
- writer = (struct Curl_cwriter *) calloc(1, cwt->cwriter_size);
- if(!writer)
+ p = calloc(1, cwt->cwriter_size);
+ if(!p)
goto out;
+ writer = (struct Curl_cwriter *)p;
writer->cwt = cwt;
+ writer->ctx = p;
writer->phase = phase;
result = cwt->do_init(data, writer);
@@ -716,14 +411,14 @@ size_t Curl_cwriter_count(struct Curl_easy *data, Curl_cwriter_phase phase) return n;
}
-static CURLcode do_init_stack(struct Curl_easy *data)
+static CURLcode do_init_writer_stack(struct Curl_easy *data)
{
struct Curl_cwriter *writer;
CURLcode result;
DEBUGASSERT(!data->req.writer_stack);
result = Curl_cwriter_create(&data->req.writer_stack,
- data, &cw_client, CURL_CW_CLIENT);
+ data, &Curl_cwt_out, CURL_CW_CLIENT);
if(result)
return result;
@@ -752,7 +447,7 @@ CURLcode Curl_cwriter_add(struct Curl_easy *data, struct Curl_cwriter **anchor = &data->req.writer_stack;
if(!*anchor) {
- result = do_init_stack(data);
+ result = do_init_writer_stack(data);
if(result)
return result;
}
@@ -766,6 +461,28 @@ CURLcode Curl_cwriter_add(struct Curl_easy *data, return CURLE_OK;
}
+struct Curl_cwriter *Curl_cwriter_get_by_name(struct Curl_easy *data,
+ const char *name)
+{
+ struct Curl_cwriter *writer;
+ for(writer = data->req.writer_stack; writer; writer = writer->next) {
+ if(!strcmp(name, writer->cwt->name))
+ return writer;
+ }
+ return NULL;
+}
+
+struct Curl_cwriter *Curl_cwriter_get_by_type(struct Curl_easy *data,
+ const struct Curl_cwtype *cwt)
+{
+ struct Curl_cwriter *writer;
+ for(writer = data->req.writer_stack; writer; writer = writer->next) {
+ if(writer->cwt == cwt)
+ return writer;
+ }
+ return NULL;
+}
+
void Curl_cwriter_remove_by_name(struct Curl_easy *data,
const char *name)
{
@@ -782,40 +499,844 @@ void Curl_cwriter_remove_by_name(struct Curl_easy *data, }
}
-/*
- * Internal read-from-socket function. This is meant to deal with plain
- * sockets, SSL sockets and kerberos sockets.
- *
- * Returns a regular CURLcode value.
- */
-CURLcode Curl_read(struct Curl_easy *data, /* transfer */
- curl_socket_t sockfd, /* read from this socket */
- char *buf, /* store read data here */
- size_t sizerequested, /* max amount to read */
- ssize_t *n) /* amount bytes read */
-{
- CURLcode result = CURLE_RECV_ERROR;
- ssize_t nread = 0;
- size_t bytesfromsocket = 0;
- char *buffertofill = NULL;
- struct connectdata *conn = data->conn;
-
- /* Set 'num' to 0 or 1, depending on which socket that has been sent here.
- If it is the second socket, we set num to 1. Otherwise to 0. This lets
- us use the correct ssl handle. */
- int num = (sockfd == conn->sock[SECONDARYSOCKET]);
-
- *n = 0; /* reset amount to zero */
-
- bytesfromsocket = CURLMIN(sizerequested, (size_t)data->set.buffer_size);
- buffertofill = buf;
-
- nread = conn->recv[num](data, num, buffertofill, bytesfromsocket, &result);
- if(nread < 0)
+CURLcode Curl_creader_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen, size_t *nread, bool *eos)
+{
+ if(!reader)
+ return CURLE_READ_ERROR;
+ return reader->crt->do_read(data, reader, buf, blen, nread, eos);
+}
+
+CURLcode Curl_creader_def_init(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ (void)data;
+ (void)reader;
+ return CURLE_OK;
+}
+
+void Curl_creader_def_close(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ (void)data;
+ (void)reader;
+}
+
+CURLcode Curl_creader_def_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *nread, bool *eos)
+{
+ if(reader->next)
+ return reader->next->crt->do_read(data, reader->next, buf, blen,
+ nread, eos);
+ else {
+ *nread = 0;
+ *eos = FALSE;
+ return CURLE_READ_ERROR;
+ }
+}
+
+bool Curl_creader_def_needs_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ (void)data;
+ (void)reader;
+ return FALSE;
+}
+
+curl_off_t Curl_creader_def_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ return reader->next?
+ reader->next->crt->total_length(data, reader->next) : -1;
+}
+
+CURLcode Curl_creader_def_resume_from(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ curl_off_t offset)
+{
+ (void)data;
+ (void)reader;
+ (void)offset;
+ return CURLE_READ_ERROR;
+}
+
+CURLcode Curl_creader_def_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ (void)data;
+ (void)reader;
+ return CURLE_OK;
+}
+
+CURLcode Curl_creader_def_unpause(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ (void)data;
+ (void)reader;
+ return CURLE_OK;
+}
+
+void Curl_creader_def_done(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature)
+{
+ (void)data;
+ (void)reader;
+ (void)premature;
+}
+
+struct cr_in_ctx {
+ struct Curl_creader super;
+ curl_read_callback read_cb;
+ void *cb_user_data;
+ curl_off_t total_len;
+ curl_off_t read_len;
+ CURLcode error_result;
+ BIT(seen_eos);
+ BIT(errored);
+ BIT(has_used_cb);
+};
+
+static CURLcode cr_in_init(struct Curl_easy *data, struct Curl_creader *reader)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+ (void)data;
+ ctx->read_cb = data->state.fread_func;
+ ctx->cb_user_data = data->state.in;
+ ctx->total_len = -1;
+ ctx->read_len = 0;
+ return CURLE_OK;
+}
+
+/* Real client reader to installed client callbacks. */
+static CURLcode cr_in_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *pnread, bool *peos)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+ size_t nread;
+
+ /* Once we have errored, we will return the same error forever */
+ if(ctx->errored) {
+ *pnread = 0;
+ *peos = FALSE;
+ return ctx->error_result;
+ }
+ if(ctx->seen_eos) {
+ *pnread = 0;
+ *peos = TRUE;
+ return CURLE_OK;
+ }
+ /* respect length limitations */
+ if(ctx->total_len >= 0) {
+ curl_off_t remain = ctx->total_len - ctx->read_len;
+ if(remain <= 0)
+ blen = 0;
+ else if(remain < (curl_off_t)blen)
+ blen = (size_t)remain;
+ }
+ nread = 0;
+ if(ctx->read_cb && blen) {
+ Curl_set_in_callback(data, true);
+ nread = ctx->read_cb(buf, 1, blen, ctx->cb_user_data);
+ Curl_set_in_callback(data, false);
+ ctx->has_used_cb = TRUE;
+ }
+
+ switch(nread) {
+ case 0:
+ if((ctx->total_len >= 0) && (ctx->read_len < ctx->total_len)) {
+ failf(data, "client read function EOF fail, only "
+ "only %"CURL_FORMAT_CURL_OFF_T"/%"CURL_FORMAT_CURL_OFF_T
+ " of needed bytes read", ctx->read_len, ctx->total_len);
+ return CURLE_READ_ERROR;
+ }
+ *pnread = 0;
+ *peos = TRUE;
+ ctx->seen_eos = TRUE;
+ break;
+
+ case CURL_READFUNC_ABORT:
+ failf(data, "operation aborted by callback");
+ *pnread = 0;
+ *peos = FALSE;
+ ctx->errored = TRUE;
+ ctx->error_result = CURLE_ABORTED_BY_CALLBACK;
+ return CURLE_ABORTED_BY_CALLBACK;
+
+ case CURL_READFUNC_PAUSE:
+ if(data->conn->handler->flags & PROTOPT_NONETWORK) {
+ /* protocols that work without network cannot be paused. This is
+ actually only FILE:// just now, and it can't pause since the transfer
+ isn't done using the "normal" procedure. */
+ failf(data, "Read callback asked for PAUSE when not supported");
+ return CURLE_READ_ERROR;
+ }
+ /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
+ data->req.keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
+ *pnread = 0;
+ *peos = FALSE;
+ break; /* nothing was read */
+
+ default:
+ if(nread > blen) {
+ /* the read function returned a too large value */
+ failf(data, "read function returned funny value");
+ *pnread = 0;
+ *peos = FALSE;
+ ctx->errored = TRUE;
+ ctx->error_result = CURLE_READ_ERROR;
+ return CURLE_READ_ERROR;
+ }
+ ctx->read_len += nread;
+ if(ctx->total_len >= 0)
+ ctx->seen_eos = (ctx->read_len >= ctx->total_len);
+ *pnread = nread;
+ *peos = ctx->seen_eos;
+ break;
+ }
+ DEBUGF(infof(data, "cr_in_read(len=%zu, total=%"CURL_FORMAT_CURL_OFF_T
+ ", read=%"CURL_FORMAT_CURL_OFF_T") -> %d, %zu, %d",
+ blen, ctx->total_len, ctx->read_len, CURLE_OK, *pnread, *peos));
+ return CURLE_OK;
+}
+
+static bool cr_in_needs_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+ (void)data;
+ return ctx->has_used_cb;
+}
+
+static curl_off_t cr_in_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+ (void)data;
+ return ctx->total_len;
+}
+
+static CURLcode cr_in_resume_from(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ curl_off_t offset)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+ int seekerr = CURL_SEEKFUNC_CANTSEEK;
+
+ DEBUGASSERT(data->conn);
+ /* already started reading? */
+ if(ctx->read_len)
+ return CURLE_READ_ERROR;
+
+ if(data->set.seek_func) {
+ Curl_set_in_callback(data, true);
+ seekerr = data->set.seek_func(data->set.seek_client, offset, SEEK_SET);
+ Curl_set_in_callback(data, false);
+ }
+
+ if(seekerr != CURL_SEEKFUNC_OK) {
+ curl_off_t passed = 0;
+
+ if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
+ failf(data, "Could not seek stream");
+ return CURLE_READ_ERROR;
+ }
+ /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
+ do {
+ char scratch[4*1024];
+ size_t readthisamountnow =
+ (offset - passed > (curl_off_t)sizeof(scratch)) ?
+ sizeof(scratch) :
+ curlx_sotouz(offset - passed);
+ size_t actuallyread;
+
+ Curl_set_in_callback(data, true);
+ actuallyread = ctx->read_cb(scratch, 1, readthisamountnow,
+ ctx->cb_user_data);
+ Curl_set_in_callback(data, false);
+
+ passed += actuallyread;
+ if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
+ /* this checks for greater-than only to make sure that the
+ CURL_READFUNC_ABORT return code still aborts */
+ failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
+ " bytes from the input", passed);
+ return CURLE_READ_ERROR;
+ }
+ } while(passed < offset);
+ }
+
+ /* now, decrease the size of the read */
+ if(ctx->total_len > 0) {
+ ctx->total_len -= offset;
+
+ if(ctx->total_len <= 0) {
+ failf(data, "File already completely uploaded");
+ return CURLE_PARTIAL_FILE;
+ }
+ }
+ /* we've passed, proceed as normal */
+ return CURLE_OK;
+}
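
When the seek callback cannot honour a resume offset, cr_in_resume_from() above falls back to reading and throwing away data until the offset is reached. The same idea as a standalone snippet (plain C stdio; curl uses the configured read callback instead of fread):

#include <stdio.h>

/* read and discard until `offset` bytes have been consumed from fp */
static int skip_bytes(FILE *fp, long offset)
{
  char scratch[4 * 1024];
  long passed = 0;

  while(passed < offset) {
    size_t want = (size_t)(offset - passed);
    size_t got;
    if(want > sizeof(scratch))
      want = sizeof(scratch);
    got = fread(scratch, 1, want, fp);
    if(!got)
      return -1;       /* the input ended before the resume offset */
    passed += (long)got;
  }
  return 0;
}
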
+
+static CURLcode cr_in_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+
+  /* If we never invoked the callback, there is nothing to rewind */
+ if(!ctx->has_used_cb)
+ return CURLE_OK;
+
+ if(data->set.seek_func) {
+ int err;
+
+ Curl_set_in_callback(data, true);
+ err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
+ Curl_set_in_callback(data, false);
+ DEBUGF(infof(data, "cr_in, rewind via set.seek_func -> %d", err));
+ if(err) {
+ failf(data, "seek callback returned error %d", (int)err);
+ return CURLE_SEND_FAIL_REWIND;
+ }
+ }
+ else if(data->set.ioctl_func) {
+ curlioerr err;
+
+ Curl_set_in_callback(data, true);
+ err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
+ data->set.ioctl_client);
+ Curl_set_in_callback(data, false);
+ DEBUGF(infof(data, "cr_in, rewind via set.ioctl_func -> %d", (int)err));
+ if(err) {
+ failf(data, "ioctl callback returned error %d", (int)err);
+ return CURLE_SEND_FAIL_REWIND;
+ }
+ }
+ else {
+ /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
+ given FILE * stream and we can actually attempt to rewind that
+ ourselves with fseek() */
+ if(data->state.fread_func == (curl_read_callback)fread) {
+ int err = fseek(data->state.in, 0, SEEK_SET);
+ DEBUGF(infof(data, "cr_in, rewind via fseek -> %d(%d)",
+ (int)err, (int)errno));
+ if(-1 != err)
+ /* successful rewind */
+ return CURLE_OK;
+ }
+
+ /* no callback set or failure above, makes us fail at once */
+ failf(data, "necessary data rewind wasn't possible");
+ return CURLE_SEND_FAIL_REWIND;
+ }
+ return CURLE_OK;
+}
+
+
+static const struct Curl_crtype cr_in = {
+ "cr-in",
+ cr_in_init,
+ cr_in_read,
+ Curl_creader_def_close,
+ cr_in_needs_rewind,
+ cr_in_total_length,
+ cr_in_resume_from,
+ cr_in_rewind,
+ Curl_creader_def_unpause,
+ Curl_creader_def_done,
+ sizeof(struct cr_in_ctx)
+};
+
+CURLcode Curl_creader_create(struct Curl_creader **preader,
+ struct Curl_easy *data,
+ const struct Curl_crtype *crt,
+ Curl_creader_phase phase)
+{
+ struct Curl_creader *reader = NULL;
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+ void *p;
+
+ DEBUGASSERT(crt->creader_size >= sizeof(struct Curl_creader));
+ p = calloc(1, crt->creader_size);
+ if(!p)
goto out;
- *n += nread;
- result = CURLE_OK;
+ reader = (struct Curl_creader *)p;
+ reader->crt = crt;
+ reader->ctx = p;
+ reader->phase = phase;
+ result = crt->do_init(data, reader);
+
out:
+ *preader = result? NULL : reader;
+ if(result)
+ free(reader);
+ return result;
+}
+
+void Curl_creader_free(struct Curl_easy *data, struct Curl_creader *reader)
+{
+ if(reader) {
+ reader->crt->do_close(data, reader);
+ free(reader);
+ }
+}
+
+struct cr_lc_ctx {
+ struct Curl_creader super;
+ struct bufq buf;
+ BIT(read_eos); /* we read an EOS from the next reader */
+ BIT(eos); /* we have returned an EOS */
+};
+
+static CURLcode cr_lc_init(struct Curl_easy *data, struct Curl_creader *reader)
+{
+ struct cr_lc_ctx *ctx = reader->ctx;
+ (void)data;
+ Curl_bufq_init2(&ctx->buf, (16 * 1024), 1, BUFQ_OPT_SOFT_LIMIT);
+ return CURLE_OK;
+}
+
+static void cr_lc_close(struct Curl_easy *data, struct Curl_creader *reader)
+{
+ struct cr_lc_ctx *ctx = reader->ctx;
+ (void)data;
+ Curl_bufq_free(&ctx->buf);
+}
+
+/* client reader doing line end conversions. */
+static CURLcode cr_lc_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *pnread, bool *peos)
+{
+ struct cr_lc_ctx *ctx = reader->ctx;
+ CURLcode result;
+ size_t nread, i, start, n;
+ bool eos;
+
+ if(ctx->eos) {
+ *pnread = 0;
+ *peos = TRUE;
+ return CURLE_OK;
+ }
+
+ if(Curl_bufq_is_empty(&ctx->buf)) {
+ if(ctx->read_eos) {
+ ctx->eos = TRUE;
+ *pnread = 0;
+ *peos = TRUE;
+ return CURLE_OK;
+ }
+ /* Still getting data from the next reader, ctx->buf is empty */
+ result = Curl_creader_read(data, reader->next, buf, blen, &nread, &eos);
+ if(result)
+ return result;
+ ctx->read_eos = eos;
+
+ if(!nread || !memchr(buf, '\n', nread)) {
+ /* nothing to convert, return this right away */
+ if(ctx->read_eos)
+ ctx->eos = TRUE;
+ *pnread = nread;
+ *peos = ctx->eos;
+ return CURLE_OK;
+ }
+
+ /* at least one \n needs conversion to '\r\n', place into ctx->buf */
+ for(i = start = 0; i < nread; ++i) {
+ if(buf[i] != '\n')
+ continue;
+ /* on a soft limit bufq, we do not need to check length */
+ result = Curl_bufq_cwrite(&ctx->buf, buf + start, i - start, &n);
+ if(!result)
+ result = Curl_bufq_cwrite(&ctx->buf, STRCONST("\r\n"), &n);
+ if(result)
+ return result;
+ start = i + 1;
+ if(!data->set.crlf && (data->state.infilesize != -1)) {
+ /* we're here only because FTP is in ASCII mode...
+ bump infilesize for the LF we just added */
+ data->state.infilesize++;
+ /* comment: this might work for FTP, but in HTTP we could not change
+ * the content length after having started the request... */
+ }
+ }
+ }
+
+ DEBUGASSERT(!Curl_bufq_is_empty(&ctx->buf));
+ *peos = FALSE;
+ result = Curl_bufq_cread(&ctx->buf, buf, blen, pnread);
+ if(!result && ctx->read_eos && Curl_bufq_is_empty(&ctx->buf)) {
+ /* no more data, read all, done. */
+ ctx->eos = TRUE;
+ *peos = TRUE;
+ }
+ return result;
+}
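
For reference, the conversion this reader performs expands each '\n' into "\r\n" and bumps the announced upload size accordingly. A standalone sketch of the same byte-level transform (illustrative only, not part of the patch):

  #include <stddef.h>

  /* expand '\n' to "\r\n"; returns bytes written, e.g. "a\nb" -> "a\r\nb" */
  static size_t lf_to_crlf(const char *in, size_t inlen,
                           char *out, size_t outcap)
  {
    size_t o = 0, i;
    for(i = 0; i < inlen; i++) {
      size_t need = (in[i] == '\n') ? 2 : 1;
      if(o + need > outcap)
        break;                  /* the real reader buffers the remainder */
      if(in[i] == '\n')
        out[o++] = '\r';
      out[o++] = in[i];
    }
    return o;
  }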
+
+static curl_off_t cr_lc_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ /* this reader changes length depending on input */
+ (void)data;
+ (void)reader;
+ return -1;
+}
+
+static const struct Curl_crtype cr_lc = {
+ "cr-lineconv",
+ cr_lc_init,
+ cr_lc_read,
+ cr_lc_close,
+ Curl_creader_def_needs_rewind,
+ cr_lc_total_length,
+ Curl_creader_def_resume_from,
+ Curl_creader_def_rewind,
+ Curl_creader_def_unpause,
+ Curl_creader_def_done,
+ sizeof(struct cr_lc_ctx)
+};
+
+static CURLcode cr_lc_add(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = NULL;
+ CURLcode result;
+
+ result = Curl_creader_create(&reader, data, &cr_lc,
+ CURL_CR_CONTENT_ENCODE);
+ if(!result)
+ result = Curl_creader_add(data, reader);
+
+ if(result && reader)
+ Curl_creader_free(data, reader);
+ return result;
+}
+
+static CURLcode do_init_reader_stack(struct Curl_easy *data,
+ struct Curl_creader *r)
+{
+ CURLcode result = CURLE_OK;
+ curl_off_t clen;
+
+ DEBUGASSERT(r);
+ DEBUGASSERT(r->crt);
+ DEBUGASSERT(r->phase == CURL_CR_CLIENT);
+ DEBUGASSERT(!data->req.reader_stack);
+
+ data->req.reader_stack = r;
+ clen = r->crt->total_length(data, r);
+ /* if we do not have 0 length init, and crlf conversion is wanted,
+ * add the reader for it */
+ if(clen && (data->set.crlf
+#ifdef CURL_DO_LINEEND_CONV
+ || data->state.prefer_ascii
+#endif
+ )) {
+ result = cr_lc_add(data);
+ if(result)
+ return result;
+ }
+
+ return result;
+}
+
+CURLcode Curl_creader_set_fread(struct Curl_easy *data, curl_off_t len)
+{
+ CURLcode result;
+ struct Curl_creader *r;
+ struct cr_in_ctx *ctx;
+
+ result = Curl_creader_create(&r, data, &cr_in, CURL_CR_CLIENT);
+ if(result)
+ return result;
+ ctx = r->ctx;
+ ctx->total_len = len;
+
+ cl_reset_reader(data);
+ return do_init_reader_stack(data, r);
+}
+
+CURLcode Curl_creader_add(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ CURLcode result;
+ struct Curl_creader **anchor = &data->req.reader_stack;
+
+ if(!*anchor) {
+ result = Curl_creader_set_fread(data, data->state.infilesize);
+ if(result)
+ return result;
+ }
+
+ /* Insert the reader as first in its phase.
+ * Skip existing readers of lower phases. */
+ while(*anchor && (*anchor)->phase < reader->phase)
+ anchor = &((*anchor)->next);
+ reader->next = *anchor;
+ *anchor = reader;
+ return CURLE_OK;
+}
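
The insertion keeps the chain sorted by phase, so readers closer to the network sit in front of the client reader that produces the original data. The same rule on a simplified node type (sketch only):

  struct node { int phase; struct node *next; };

  /* insert n before the first node whose phase is not lower */
  static void insert_by_phase(struct node **anchor, struct node *n)
  {
    while(*anchor && (*anchor)->phase < n->phase)
      anchor = &(*anchor)->next;
    n->next = *anchor;
    *anchor = n;
  }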
+
+CURLcode Curl_creader_set(struct Curl_easy *data, struct Curl_creader *r)
+{
+ CURLcode result;
+
+ DEBUGASSERT(r);
+ DEBUGASSERT(r->crt);
+ DEBUGASSERT(r->phase == CURL_CR_CLIENT);
+
+ cl_reset_reader(data);
+ result = do_init_reader_stack(data, r);
+ if(result)
+ Curl_creader_free(data, r);
+ return result;
+}
+
+CURLcode Curl_client_read(struct Curl_easy *data, char *buf, size_t blen,
+ size_t *nread, bool *eos)
+{
+ CURLcode result;
+
+ DEBUGASSERT(buf);
+ DEBUGASSERT(blen);
+ DEBUGASSERT(nread);
+ DEBUGASSERT(eos);
+
+ if(!data->req.reader_stack) {
+ result = Curl_creader_set_fread(data, data->state.infilesize);
+ if(result)
+ return result;
+ DEBUGASSERT(data->req.reader_stack);
+ }
+
+ result = Curl_creader_read(data, data->req.reader_stack, buf, blen,
+ nread, eos);
+ return result;
+}
+
+bool Curl_creader_needs_rewind(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = data->req.reader_stack;
+ while(reader) {
+ if(reader->crt->needs_rewind(data, reader))
+ return TRUE;
+ reader = reader->next;
+ }
+ return FALSE;
+}
+
+static CURLcode cr_null_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *pnread, bool *peos)
+{
+ (void)data;
+ (void)reader;
+ (void)buf;
+ (void)blen;
+ *pnread = 0;
+ *peos = TRUE;
+ return CURLE_OK;
+}
+
+static curl_off_t cr_null_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ /* this reader provides no data, its total length is always 0 */
+ (void)data;
+ (void)reader;
+ return 0;
+}
+
+static const struct Curl_crtype cr_null = {
+ "cr-null",
+ Curl_creader_def_init,
+ cr_null_read,
+ Curl_creader_def_close,
+ Curl_creader_def_needs_rewind,
+ cr_null_total_length,
+ Curl_creader_def_resume_from,
+ Curl_creader_def_rewind,
+ Curl_creader_def_unpause,
+ Curl_creader_def_done,
+ sizeof(struct Curl_creader)
+};
+
+CURLcode Curl_creader_set_null(struct Curl_easy *data)
+{
+ struct Curl_creader *r;
+ CURLcode result;
+
+ result = Curl_creader_create(&r, data, &cr_null, CURL_CR_CLIENT);
+ if(result)
+ return result;
+
+ cl_reset_reader(data);
+ return do_init_reader_stack(data, r);
+}
+
+struct cr_buf_ctx {
+ struct Curl_creader super;
+ const char *buf;
+ size_t blen;
+ size_t index;
+};
+
+static CURLcode cr_buf_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *pnread, bool *peos)
+{
+ struct cr_buf_ctx *ctx = reader->ctx;
+ size_t nread = ctx->blen - ctx->index;
+
+ (void)data;
+ if(!nread || !ctx->buf) {
+ *pnread = 0;
+ *peos = TRUE;
+ }
+ else {
+ if(nread > blen)
+ nread = blen;
+ memcpy(buf, ctx->buf + ctx->index, nread);
+ *pnread = nread;
+ ctx->index += nread;
+ *peos = (ctx->index == ctx->blen);
+ }
+ return CURLE_OK;
+}
+
+static bool cr_buf_needs_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_buf_ctx *ctx = reader->ctx;
+ (void)data;
+ return ctx->index > 0;
+}
+
+static curl_off_t cr_buf_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_buf_ctx *ctx = reader->ctx;
+ (void)data;
+ return (curl_off_t)ctx->blen;
+}
+
+static CURLcode cr_buf_resume_from(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ curl_off_t offset)
+{
+ struct cr_buf_ctx *ctx = reader->ctx;
+ size_t boffset;
+
+ (void)data;
+ DEBUGASSERT(data->conn);
+ /* already started reading? */
+ if(ctx->index)
+ return CURLE_READ_ERROR;
+ if(offset <= 0)
+ return CURLE_OK;
+ boffset = (size_t)offset;
+ if(boffset > ctx->blen)
+ return CURLE_READ_ERROR;
+
+ ctx->buf += boffset;
+ ctx->blen -= boffset;
+ return CURLE_OK;
+}
+
+static const struct Curl_crtype cr_buf = {
+ "cr-buf",
+ Curl_creader_def_init,
+ cr_buf_read,
+ Curl_creader_def_close,
+ cr_buf_needs_rewind,
+ cr_buf_total_length,
+ cr_buf_resume_from,
+ Curl_creader_def_rewind,
+ Curl_creader_def_unpause,
+ Curl_creader_def_done,
+ sizeof(struct cr_buf_ctx)
+};
+
+CURLcode Curl_creader_set_buf(struct Curl_easy *data,
+ const char *buf, size_t blen)
+{
+ CURLcode result;
+ struct Curl_creader *r;
+ struct cr_buf_ctx *ctx;
+
+ result = Curl_creader_create(&r, data, &cr_buf, CURL_CR_CLIENT);
+ if(result)
+ return result;
+ ctx = r->ctx;
+ ctx->buf = buf;
+ ctx->blen = blen;
+ ctx->index = 0;
+
+ cl_reset_reader(data);
+ return do_init_reader_stack(data, r);
+}
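
This reader serves uploads backed by a caller-owned buffer; the buffer is referenced, not copied. From the application side this presumably corresponds to buffer-backed options such as CURLOPT_POSTFIELDS (a hedged sketch, not taken from this patch):

  #include <curl/curl.h>

  /* post from a static buffer; CURLOPT_POSTFIELDS keeps the pointer,
     CURLOPT_COPYPOSTFIELDS would copy the data instead */
  static CURLcode post_static_buffer(CURL *curl)
  {
    static const char payload[] = "{\"hello\":\"world\"}";
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/api");
    curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, (long)(sizeof(payload) - 1));
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload);
    return curl_easy_perform(curl);
  }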
+
+curl_off_t Curl_creader_total_length(struct Curl_easy *data)
+{
+ struct Curl_creader *r = data->req.reader_stack;
+ return r? r->crt->total_length(data, r) : -1;
+}
+
+curl_off_t Curl_creader_client_length(struct Curl_easy *data)
+{
+ struct Curl_creader *r = data->req.reader_stack;
+ while(r && r->phase != CURL_CR_CLIENT)
+ r = r->next;
+ return r? r->crt->total_length(data, r) : -1;
+}
+
+CURLcode Curl_creader_resume_from(struct Curl_easy *data, curl_off_t offset)
+{
+ struct Curl_creader *r = data->req.reader_stack;
+ while(r && r->phase != CURL_CR_CLIENT)
+ r = r->next;
+ return r? r->crt->resume_from(data, r, offset) : CURLE_READ_ERROR;
+}
+
+CURLcode Curl_creader_unpause(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = data->req.reader_stack;
+ CURLcode result = CURLE_OK;
+
+ while(reader) {
+ result = reader->crt->unpause(data, reader);
+ if(result)
+ break;
+ reader = reader->next;
+ }
return result;
}
+
+void Curl_creader_done(struct Curl_easy *data, int premature)
+{
+ struct Curl_creader *reader = data->req.reader_stack;
+ while(reader) {
+ reader->crt->done(data, reader, premature);
+ reader = reader->next;
+ }
+}
+
+struct Curl_creader *Curl_creader_get_by_type(struct Curl_easy *data,
+ const struct Curl_crtype *crt)
+{
+ struct Curl_creader *r;
+ for(r = data->req.reader_stack; r; r = r->next) {
+ if(r->crt == crt)
+ return r;
+ }
+ return NULL;
+
+}
diff --git a/libs/libcurl/src/sendf.h b/libs/libcurl/src/sendf.h index abe91eabe7..7225bb4292 100644 --- a/libs/libcurl/src/sendf.h +++ b/libs/libcurl/src/sendf.h @@ -55,20 +55,25 @@ * Write `len` bytes at `prt` to the client. `type` indicates what
* kind of data is being written.
*/
-CURLcode Curl_client_write(struct Curl_easy *data, int type, char *ptr,
+CURLcode Curl_client_write(struct Curl_easy *data, int type, const char *ptr,
size_t len) WARN_UNUSED_RESULT;
/**
- * For a paused transfer, there might be buffered data held back.
- * Attempt to flush this data to the client. This *may* trigger
- * another pause of the transfer.
+ * Free all resources related to client writing.
*/
-CURLcode Curl_client_unpause(struct Curl_easy *data);
+void Curl_client_cleanup(struct Curl_easy *data);
/**
- * Free all resources related to client writing.
+ * Reset readers and writer chains, keep rewind information
+ * when necessary.
*/
-void Curl_client_cleanup(struct Curl_easy *data);
+void Curl_client_reset(struct Curl_easy *data);
+
+/**
+ * A new request is starting, perform any ops like rewinding
+ * previous readers when needed.
+ */
+CURLcode Curl_client_start(struct Curl_easy *data);
/**
* Client Writers - a chain passing transfer BODY data to the client.
@@ -112,10 +117,16 @@ struct Curl_cwtype { size_t cwriter_size; /* sizeof() allocated struct Curl_cwriter */
};
-/* Client writer instance */
+/* Client writer instance, allocated on creation.
+ * `void *ctx` is the pointer from the allocation of
+ * the `struct Curl_cwriter` itself. This is suitable for "downcasting"
+ * by the writers implementation. See https://github.com/curl/curl/pull/13054
+ * for the alignment problems that arise otherwise.
+ */
struct Curl_cwriter {
const struct Curl_cwtype *cwt; /* type implementation */
struct Curl_cwriter *next; /* Downstream writer. */
+ void *ctx; /* allocated instance pointer */
Curl_cwriter_phase phase; /* phase at which it operates */
};
@@ -148,9 +159,19 @@ size_t Curl_cwriter_count(struct Curl_easy *data, Curl_cwriter_phase phase); CURLcode Curl_cwriter_add(struct Curl_easy *data,
struct Curl_cwriter *writer);
+/**
+ * Look up an installed client writer on `data` by its type.
+ * @return first writer with that type or NULL
+ */
+struct Curl_cwriter *Curl_cwriter_get_by_type(struct Curl_easy *data,
+ const struct Curl_cwtype *cwt);
+
void Curl_cwriter_remove_by_name(struct Curl_easy *data,
const char *name);
+struct Curl_cwriter *Curl_cwriter_get_by_name(struct Curl_easy *data,
+ const char *name);
+
/**
* Convenience method for calling `writer->do_write()` that
* checks for NULL writer.
@@ -172,22 +193,205 @@ void Curl_cwriter_def_close(struct Curl_easy *data, struct Curl_cwriter *writer);
-/* internal read-function, does plain socket, SSL and krb4 */
-CURLcode Curl_read(struct Curl_easy *data, curl_socket_t sockfd,
- char *buf, size_t buffersize,
- ssize_t *n);
-/* internal write-function, does plain socket, SSL, SCP, SFTP and krb4 */
-CURLcode Curl_write(struct Curl_easy *data,
- curl_socket_t sockfd,
- const void *mem, size_t len,
- ssize_t *written);
+/* Client Reader Type, provides the implementation */
+struct Curl_crtype {
+ const char *name; /* reader name. */
+ CURLcode (*do_init)(struct Curl_easy *data, struct Curl_creader *reader);
+ CURLcode (*do_read)(struct Curl_easy *data, struct Curl_creader *reader,
+ char *buf, size_t blen, size_t *nread, bool *eos);
+ void (*do_close)(struct Curl_easy *data, struct Curl_creader *reader);
+ bool (*needs_rewind)(struct Curl_easy *data, struct Curl_creader *reader);
+ curl_off_t (*total_length)(struct Curl_easy *data,
+ struct Curl_creader *reader);
+ CURLcode (*resume_from)(struct Curl_easy *data,
+ struct Curl_creader *reader, curl_off_t offset);
+ CURLcode (*rewind)(struct Curl_easy *data, struct Curl_creader *reader);
+ CURLcode (*unpause)(struct Curl_easy *data, struct Curl_creader *reader);
+ void (*done)(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature);
+ size_t creader_size; /* sizeof() allocated struct Curl_creader */
+};
+
+/* Phase a reader operates at. */
+typedef enum {
+ CURL_CR_NET, /* data send to the network (connection filters) */
+ CURL_CR_TRANSFER_ENCODE, /* add transfer-encodings */
+ CURL_CR_PROTOCOL, /* before transfer, but after content decoding */
+ CURL_CR_CONTENT_ENCODE, /* add content-encodings */
+ CURL_CR_CLIENT /* data read from client */
+} Curl_creader_phase;
+
+/* Client reader instance, allocated on creation.
+ * `void *ctx` is the pointer from the allocation of
+ * the `struct Curl_creader` itself. This is suitable for "downcasting"
+ * by the reader's implementation. See https://github.com/curl/curl/pull/13054
+ * for the alignment problems that arise otherwise.
+ */
+struct Curl_creader {
+ const struct Curl_crtype *crt; /* type implementation */
+ struct Curl_creader *next; /* Downstream reader. */
+ void *ctx;
+ Curl_creader_phase phase; /* phase at which it operates */
+};
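
Because the `struct Curl_creader` and its context come from one allocation, a reader type lays out its context with the creader as the first member and downcasts `reader->ctx`, as the cr_in/cr_lc contexts above do. A minimal sketch of a hypothetical reader type following that pattern (names are illustrative):

  /* hypothetical reader context; the creader must be the first member so
     reader->ctx (the same allocation) can be downcast safely */
  struct cr_example_ctx {
    struct Curl_creader super;
    int my_state;
  };

  static CURLcode cr_example_init(struct Curl_easy *data,
                                  struct Curl_creader *reader)
  {
    struct cr_example_ctx *ctx = reader->ctx;
    (void)data;
    ctx->my_state = 0;
    return CURLE_OK;
  }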
+
+/**
+ * Default implementations for do_init, do_read, do_close that
+ * do nothing and pass the data through.
+ */
+CURLcode Curl_creader_def_init(struct Curl_easy *data,
+ struct Curl_creader *reader);
+void Curl_creader_def_close(struct Curl_easy *data,
+ struct Curl_creader *reader);
+CURLcode Curl_creader_def_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *nread, bool *eos);
+bool Curl_creader_def_needs_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader);
+curl_off_t Curl_creader_def_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader);
+CURLcode Curl_creader_def_resume_from(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ curl_off_t offset);
+CURLcode Curl_creader_def_rewind(struct Curl_easy *data,
+ struct Curl_creader *reader);
+CURLcode Curl_creader_def_unpause(struct Curl_easy *data,
+ struct Curl_creader *reader);
+void Curl_creader_def_done(struct Curl_easy *data,
+ struct Curl_creader *reader, int premature);
+
+/**
+ * Convenience method for calling `reader->do_read()` that
+ * checks for NULL reader.
+ */
+CURLcode Curl_creader_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen, size_t *nread, bool *eos);
+
+/**
+ * Create a new creader instance with given type and phase. Is not
+ * inserted into the reader chain by this call.
+ * Invokes `reader->do_init()`.
+ */
+CURLcode Curl_creader_create(struct Curl_creader **preader,
+ struct Curl_easy *data,
+ const struct Curl_crtype *cr_handler,
+ Curl_creader_phase phase);
+
+/**
+ * Free a creader instance.
+ * Invokes `reader->do_close()`.
+ */
+void Curl_creader_free(struct Curl_easy *data, struct Curl_creader *reader);
+
+/**
+ * Adds a reader to the transfer's reader chain.
+ * The reader's `phase` determines where in the chain it is inserted.
+ */
+CURLcode Curl_creader_add(struct Curl_easy *data,
+ struct Curl_creader *reader);
+
+/**
+ * Set the given reader, which needs to be of type CURL_CR_CLIENT,
+ * as the new first reader. Discard any installed readers and init
+ * the reader chain anew.
+ * The function takes ownership of `r`.
+ */
+CURLcode Curl_creader_set(struct Curl_easy *data, struct Curl_creader *r);
+
+/**
+ * Read at most `blen` bytes at `buf` from the client.
+ * @param data the transfer to read client bytes for
+ * @param buf the memory location to read to
+ * @param blen the amount of memory at `buf`
+ * @param nread on return the number of bytes read into `buf`
+ * @param eos TRUE iff bytes are the end of data from client
+ * @return CURLE_OK on successful read (even 0 length) or error
+ */
+CURLcode Curl_client_read(struct Curl_easy *data, char *buf, size_t blen,
+ size_t *nread, bool *eos) WARN_UNUSED_RESULT;
+
+/**
+ * TRUE iff the client reader needs rewind before it can be used for
+ * a retry request.
+ */
+bool Curl_creader_needs_rewind(struct Curl_easy *data);
+
+/**
+ * TRUE iff client reader will rewind at next start
+ */
+bool Curl_creader_will_rewind(struct Curl_easy *data);
+
+/**
+ * En-/disable rewind of client reader at next start.
+ */
+void Curl_creader_set_rewind(struct Curl_easy *data, bool enable);
+
+/**
+ * Get the total length of bytes provided by the installed readers.
+ * This is independent of the amount already delivered and is calculated
+ * by all readers in the stack. If a reader like "chunked" or
+ * "crlf conversion" is installed, the returned length will be -1.
+ * @return -1 if length is indeterminate
+ */
+curl_off_t Curl_creader_total_length(struct Curl_easy *data);
+
+/**
+ * Get the total length of bytes provided by the reader at phase
+ * CURL_CR_CLIENT. This may not match the amount of bytes read
+ * for a request, depending on whether other, encoding readers are installed.
+ * However, it allows a rough estimation of the overall length.
+ * @return -1 if length is indeterminate
+ */
+curl_off_t Curl_creader_client_length(struct Curl_easy *data);
+
+/**
+ * Ask the installed reader at phase CURL_CR_CLIENT to start
+ * reading from the given offset. On success, this will reduce
+ * the `total_length()` by the amount.
+ * @param data the transfer to read client bytes for
+ * @param offset the offset where to start reads from, negative
+ * values will be ignored.
+ * @return CURLE_OK if offset could be set
+ * CURLE_READ_ERROR if not supported by reader or seek/read failed
+ * or the offset is larger than the total length
+ * CURLE_PARTIAL_FILE if offset led to 0 total length
+ */
+CURLcode Curl_creader_resume_from(struct Curl_easy *data, curl_off_t offset);
+
+/**
+ * Unpause all installed readers.
+ */
+CURLcode Curl_creader_unpause(struct Curl_easy *data);
+
+/**
+ * Tell all client readers that they are done.
+ */
+void Curl_creader_done(struct Curl_easy *data, int premature);
+
+/**
+ * Look up an installed client reader on `data` by its type.
+ * @return first reader with that type or NULL
+ */
+struct Curl_creader *Curl_creader_get_by_type(struct Curl_easy *data,
+ const struct Curl_crtype *crt);
+
+
+/**
+ * Set the client reader to provide 0 bytes, immediate EOS.
+ */
+CURLcode Curl_creader_set_null(struct Curl_easy *data);
+
+/**
+ * Set the client reader that reads from the fread callback.
+ */
+CURLcode Curl_creader_set_fread(struct Curl_easy *data, curl_off_t len);
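
The fread-style reader is what ultimately drives the application's CURLOPT_READFUNCTION. An application-side sketch (illustrative only, not part of this patch):

  #include <stdio.h>
  #include <curl/curl.h>

  static size_t app_read_cb(char *buffer, size_t size, size_t nitems,
                            void *userp)
  {
    return fread(buffer, size, nitems, (FILE *)userp);
  }

  /* usage (error checking omitted):
       curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
       curl_easy_setopt(curl, CURLOPT_READFUNCTION, app_read_cb);
       curl_easy_setopt(curl, CURLOPT_READDATA, fp);
       curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)fsize);  */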
-/* internal write-function, using sockindex for connection destination */
-CURLcode Curl_nwrite(struct Curl_easy *data,
- int sockindex,
- const void *buf,
- size_t blen,
- ssize_t *pnwritten);
+/**
+ * Set the client reader that reads from the supplied buf (NOT COPIED).
+ */
+CURLcode Curl_creader_set_buf(struct Curl_easy *data,
+ const char *buf, size_t blen);
#endif /* HEADER_CURL_SENDF_H */
diff --git a/libs/libcurl/src/setopt.c b/libs/libcurl/src/setopt.c index 3bbca8be06..5b8108fb4f 100644 --- a/libs/libcurl/src/setopt.c +++ b/libs/libcurl/src/setopt.c @@ -155,6 +155,12 @@ static CURLcode setstropt_userpwd(char *option, char **userp, char **passwdp) static CURLcode protocol2num(const char *str, curl_prot_t *val)
{
+ /*
+ * We are asked to cherry-pick protocols, so play it safe and disallow all
+ * protocols to start with, and re-add the wanted ones back in.
+ */
+ *val = 0;
+
if(!str)
return CURLE_BAD_FUNCTION_ARGUMENT;
@@ -163,8 +169,6 @@ static CURLcode protocol2num(const char *str, curl_prot_t *val) return CURLE_OK;
}
- *val = 0;
-
do {
const char *token = str;
size_t tlen;
@@ -2210,9 +2214,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) * The application kindly asks for a differently sized receive buffer.
* If it seems reasonable, we'll use it.
*/
- if(data->state.buffer)
- return CURLE_BAD_FUNCTION_ARGUMENT;
-
arg = va_arg(param, long);
if(arg > READBUFFER_MAX)
@@ -2238,7 +2239,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) arg = UPLOADBUFFER_MIN;
data->set.upload_buffer_size = (unsigned int)arg;
- Curl_safefree(data->state.ulbuf); /* force a realloc next opportunity */
break;
case CURLOPT_NOSIGNAL:
@@ -2657,22 +2657,18 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) break;
case CURLOPT_PROTOCOLS_STR: {
- curl_prot_t prot;
argptr = va_arg(param, char *);
- result = protocol2num(argptr, &prot);
+ result = protocol2num(argptr, &data->set.allowed_protocols);
if(result)
return result;
- data->set.allowed_protocols = prot;
break;
}
case CURLOPT_REDIR_PROTOCOLS_STR: {
- curl_prot_t prot;
argptr = va_arg(param, char *);
- result = protocol2num(argptr, &prot);
+ result = protocol2num(argptr, &data->set.redir_protocols);
if(result)
return result;
- data->set.redir_protocols = prot;
break;
}
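
With the mask now cleared up front, a failed parse can no longer leave a previously configured protocol set in effect. Typical application-side usage of these options (illustrative sketch, not from this patch):

  #include <curl/curl.h>

  /* restrict a handle to HTTPS for the request and for any redirects */
  static void restrict_to_https(CURL *curl)
  {
    curl_easy_setopt(curl, CURLOPT_PROTOCOLS_STR, "https");
    curl_easy_setopt(curl, CURLOPT_REDIR_PROTOCOLS_STR, "https");
  }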
@@ -2867,13 +2863,13 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) #endif
case CURLOPT_TLSAUTH_TYPE:
argptr = va_arg(param, char *);
- if(argptr && !strncasecompare(argptr, "SRP", strlen("SRP")))
+ if(argptr && !strcasecompare(argptr, "SRP"))
return CURLE_BAD_FUNCTION_ARGUMENT;
break;
#ifndef CURL_DISABLE_PROXY
case CURLOPT_PROXY_TLSAUTH_TYPE:
argptr = va_arg(param, char *);
- if(argptr || !strncasecompare(argptr, "SRP", strlen("SRP")))
+ if(argptr && !strcasecompare(argptr, "SRP"))
return CURLE_BAD_FUNCTION_ARGUMENT;
break;
#endif
diff --git a/libs/libcurl/src/smb.c b/libs/libcurl/src/smb.c index b9a9f5702c..53d457cd76 100644 --- a/libs/libcurl/src/smb.c +++ b/libs/libcurl/src/smb.c @@ -456,6 +456,9 @@ static CURLcode smb_connect(struct Curl_easy *data, bool *done) smbc->recv_buf = malloc(MAX_MESSAGE_SIZE);
if(!smbc->recv_buf)
return CURLE_OUT_OF_MEMORY;
+ smbc->send_buf = malloc(MAX_MESSAGE_SIZE);
+ if(!smbc->send_buf)
+ return CURLE_OUT_OF_MEMORY;
/* Multiple requests are allowed with this connection */
connkeep(conn, "SMB default");
@@ -485,7 +488,6 @@ static CURLcode smb_connect(struct Curl_easy *data, bool *done) static CURLcode smb_recv_message(struct Curl_easy *data, void **msg)
{
struct connectdata *conn = data->conn;
- curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
struct smb_conn *smbc = &conn->proto.smbc;
char *buf = smbc->recv_buf;
ssize_t bytes_read;
@@ -494,7 +496,7 @@ static CURLcode smb_recv_message(struct Curl_easy *data, void **msg) size_t len = MAX_MESSAGE_SIZE - smbc->got;
CURLcode result;
- result = Curl_read(data, sockfd, buf + smbc->got, len, &bytes_read);
+ result = Curl_xfer_recv(data, buf + smbc->got, len, &bytes_read);
if(result)
return result;
@@ -560,16 +562,15 @@ static void smb_format_message(struct Curl_easy *data, struct smb_header *h, h->pid = smb_swap16((unsigned short) pid);
}
-static CURLcode smb_send(struct Curl_easy *data, ssize_t len,
+static CURLcode smb_send(struct Curl_easy *data, size_t len,
size_t upload_size)
{
struct connectdata *conn = data->conn;
struct smb_conn *smbc = &conn->proto.smbc;
- ssize_t bytes_written;
+ size_t bytes_written;
CURLcode result;
- result = Curl_nwrite(data, FIRSTSOCKET, data->state.ulbuf,
- len, &bytes_written);
+ result = Curl_xfer_send(data, smbc->send_buf, len, &bytes_written);
if(result)
return result;
@@ -587,16 +588,15 @@ static CURLcode smb_flush(struct Curl_easy *data) {
struct connectdata *conn = data->conn;
struct smb_conn *smbc = &conn->proto.smbc;
- ssize_t bytes_written;
- ssize_t len = smbc->send_size - smbc->sent;
+ size_t bytes_written;
+ size_t len = smbc->send_size - smbc->sent;
CURLcode result;
if(!smbc->send_size)
return CURLE_OK;
- result = Curl_nwrite(data, FIRSTSOCKET,
- data->state.ulbuf + smbc->sent,
- len, &bytes_written);
+ result = Curl_xfer_send(data, smbc->send_buf + smbc->sent, len,
+ &bytes_written);
if(result)
return result;
@@ -611,13 +611,13 @@ static CURLcode smb_flush(struct Curl_easy *data) static CURLcode smb_send_message(struct Curl_easy *data, unsigned char cmd,
const void *msg, size_t msg_len)
{
- CURLcode result = Curl_get_upload_buffer(data);
- if(result)
- return result;
- smb_format_message(data, (struct smb_header *)data->state.ulbuf,
+ struct connectdata *conn = data->conn;
+ struct smb_conn *smbc = &conn->proto.smbc;
+
+ smb_format_message(data, (struct smb_header *)smbc->send_buf,
cmd, msg_len);
- memcpy(data->state.ulbuf + sizeof(struct smb_header),
- msg, msg_len);
+ DEBUGASSERT((sizeof(struct smb_header) + msg_len) <= MAX_MESSAGE_SIZE);
+ memcpy(smbc->send_buf + sizeof(struct smb_header), msg, msg_len);
return smb_send(data, sizeof(struct smb_header) + msg_len, 0);
}
@@ -775,15 +775,14 @@ static CURLcode smb_send_read(struct Curl_easy *data) static CURLcode smb_send_write(struct Curl_easy *data)
{
+ struct connectdata *conn = data->conn;
+ struct smb_conn *smbc = &conn->proto.smbc;
struct smb_write *msg;
struct smb_request *req = data->req.p.smb;
curl_off_t offset = data->req.offset;
curl_off_t upload_size = data->req.size - data->req.bytecount;
- CURLcode result = Curl_get_upload_buffer(data);
- if(result)
- return result;
- msg = (struct smb_write *)data->state.ulbuf;
+ msg = (struct smb_write *)smbc->send_buf;
if(upload_size >= MAX_PAYLOAD_SIZE - 1) /* There is one byte of padding */
upload_size = MAX_PAYLOAD_SIZE - 1;
@@ -812,10 +811,11 @@ static CURLcode smb_send_and_recv(struct Curl_easy *data, void **msg) /* Check if there is data in the transfer buffer */
if(!smbc->send_size && smbc->upload_size) {
- size_t nread = smbc->upload_size > (size_t)data->set.upload_buffer_size ?
- (size_t)data->set.upload_buffer_size : smbc->upload_size;
- data->req.upload_fromhere = data->state.ulbuf;
- result = Curl_fillreadbuffer(data, nread, &nread);
+ size_t nread = smbc->upload_size > (size_t)MAX_MESSAGE_SIZE ?
+ (size_t)MAX_MESSAGE_SIZE : smbc->upload_size;
+ bool eos;
+
+ result = Curl_client_read(data, smbc->send_buf, nread, &nread, &eos);
if(result && result != CURLE_AGAIN)
return result;
if(!nread)
@@ -1133,6 +1133,7 @@ static CURLcode smb_disconnect(struct Curl_easy *data, Curl_safefree(smbc->share);
Curl_safefree(smbc->domain);
Curl_safefree(smbc->recv_buf);
+ Curl_safefree(smbc->send_buf);
return CURLE_OK;
}
diff --git a/libs/libcurl/src/smb.h b/libs/libcurl/src/smb.h index 3947b43382..09281a3adb 100644 --- a/libs/libcurl/src/smb.h +++ b/libs/libcurl/src/smb.h @@ -42,6 +42,7 @@ struct smb_conn { unsigned int session_key;
unsigned short uid;
char *recv_buf;
+ char *send_buf;
size_t upload_size;
size_t send_size;
size_t sent;
diff --git a/libs/libcurl/src/smtp.c b/libs/libcurl/src/smtp.c index aac4e83c67..abb8855cd9 100644 --- a/libs/libcurl/src/smtp.c +++ b/libs/libcurl/src/smtp.c @@ -111,6 +111,7 @@ static CURLcode smtp_continue_auth(struct Curl_easy *data, const char *mech, const struct bufref *resp);
static CURLcode smtp_cancel_auth(struct Curl_easy *data, const char *mech);
static CURLcode smtp_get_message(struct Curl_easy *data, struct bufref *out);
+static CURLcode cr_eob_add(struct Curl_easy *data);
/*
* SMTP protocol handler.
@@ -618,7 +619,7 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) result = smtp_parse_address(data->set.str[STRING_MAIL_FROM],
&address, &host);
if(result)
- return result;
+ goto out;
/* Establish whether we should report SMTPUTF8 to the server for this
mailbox as per RFC-6531 sect. 3.1 point 4 and sect. 3.4 */
@@ -642,8 +643,10 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) /* Null reverse-path, RFC-5321, sect. 3.6.3 */
from = strdup("<>");
- if(!from)
- return CURLE_OUT_OF_MEMORY;
+ if(!from) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
+ }
/* Calculate the optional AUTH parameter */
if(data->set.str[STRING_MAIL_AUTH] && conn->proto.smtpc.sasl.authused) {
@@ -655,10 +658,8 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) converting the host name to an IDN A-label if necessary */
result = smtp_parse_address(data->set.str[STRING_MAIL_AUTH],
&address, &host);
- if(result) {
- free(from);
- return result;
- }
+ if(result)
+ goto out;
/* Establish whether we should report SMTPUTF8 to the server for this
mailbox as per RFC-6531 sect. 3.1 point 4 and sect. 3.4 */
@@ -676,7 +677,6 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) /* An invalid mailbox was provided but we'll simply let the server
worry about it */
auth = aprintf("<%s>", address);
-
free(address);
}
else
@@ -684,12 +684,12 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) auth = strdup("<>");
if(!auth) {
- free(from);
-
- return CURLE_OUT_OF_MEMORY;
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
}
}
+#ifndef CURL_DISABLE_MIME
/* Prepare the mime data if some. */
if(data->set.mimepost.kind != MIMEKIND_NONE) {
/* Use the whole structure as data. */
@@ -705,22 +705,18 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) result = Curl_mime_add_header(&data->set.mimepost.curlheaders,
"Mime-Version: 1.0");
- /* Make sure we will read the entire mime structure. */
if(!result)
- result = Curl_mime_rewind(&data->set.mimepost);
-
- if(result) {
- free(from);
- free(auth);
-
- return result;
- }
-
- data->state.infilesize = Curl_mime_size(&data->set.mimepost);
-
- /* Read from mime structure. */
- data->state.fread_func = (curl_read_callback) Curl_mime_read;
- data->state.in = (void *) &data->set.mimepost;
+ result = Curl_creader_set_mime(data, &data->set.mimepost);
+ if(result)
+ goto out;
+ data->state.infilesize = Curl_creader_total_length(data);
+ }
+ else
+#endif
+ {
+ result = Curl_creader_set_fread(data, data->state.infilesize);
+ if(result)
+ goto out;
}
/* Calculate the optional SIZE parameter */
@@ -728,10 +724,8 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) size = aprintf("%" CURL_FORMAT_CURL_OFF_T, data->state.infilesize);
if(!size) {
- free(from);
- free(auth);
-
- return CURLE_OUT_OF_MEMORY;
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
}
}
@@ -752,6 +746,11 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) }
}
+ /* Add the client reader doing SMTP EOB escaping */
+ result = cr_eob_add(data);
+ if(result)
+ goto out;
+
/* Send the MAIL command */
result = Curl_pp_sendf(data, &conn->proto.smtpc.pp,
"MAIL FROM:%s%s%s%s%s%s",
@@ -763,6 +762,7 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data) utf8 ? " SMTPUTF8" /* Internationalised mailbox */
: ""); /* included in our envelope */
+out:
free(from);
free(auth);
free(size);
@@ -1162,7 +1162,7 @@ static CURLcode smtp_state_data_resp(struct Curl_easy *data, int smtpcode, Curl_pgrsSetUploadSize(data, data->state.infilesize);
/* SMTP upload */
- Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
/* End of DO phase */
smtp_state(data, SMTP_STOP);
@@ -1194,7 +1194,6 @@ static CURLcode smtp_statemachine(struct Curl_easy *data, struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- curl_socket_t sock = conn->sock[FIRSTSOCKET];
int smtpcode;
struct smtp_conn *smtpc = &conn->proto.smtpc;
struct pingpong *pp = &smtpc->pp;
@@ -1210,7 +1209,7 @@ static CURLcode smtp_statemachine(struct Curl_easy *data, do {
/* Read the response from the server */
- result = Curl_pp_readresp(data, sock, pp, &smtpcode, &nread);
+ result = Curl_pp_readresp(data, FIRSTSOCKET, pp, &smtpcode, &nread);
if(result)
return result;
@@ -1392,10 +1391,6 @@ static CURLcode smtp_done(struct Curl_easy *data, CURLcode status, CURLcode result = CURLE_OK;
struct connectdata *conn = data->conn;
struct SMTP *smtp = data->req.p.smtp;
- struct pingpong *pp = &conn->proto.smtpc.pp;
- char *eob;
- ssize_t len;
- ssize_t bytes_written;
(void)premature;
@@ -1410,47 +1405,7 @@ static CURLcode smtp_done(struct Curl_easy *data, CURLcode status, result = status; /* use the already set error code */
}
else if(!data->set.connect_only && data->set.mail_rcpt &&
- (data->state.upload || data->set.mimepost.kind)) {
- /* Calculate the EOB taking into account any terminating CRLF from the
- previous line of the email or the CRLF of the DATA command when there
- is "no mail data". RFC-5321, sect. 4.1.1.4.
-
- Note: As some SSL backends, such as OpenSSL, will cause Curl_write() to
- fail when using a different pointer following a previous write, that
- returned CURLE_AGAIN, we duplicate the EOB now rather than when the
- bytes written doesn't equal len. */
- if(smtp->trailing_crlf || !data->state.infilesize) {
- eob = strdup(&SMTP_EOB[2]);
- len = SMTP_EOB_LEN - 2;
- }
- else {
- eob = strdup(SMTP_EOB);
- len = SMTP_EOB_LEN;
- }
-
- if(!eob)
- return CURLE_OUT_OF_MEMORY;
-
- /* Send the end of block data */
- result = Curl_write(data, conn->writesockfd, eob, len, &bytes_written);
- if(result) {
- free(eob);
- return result;
- }
-
- if(bytes_written != len) {
- /* The whole chunk was not sent so keep it around and adjust the
- pingpong structure accordingly */
- pp->sendthis = eob;
- pp->sendsize = len;
- pp->sendleft = len - bytes_written;
- }
- else {
- /* Successfully sent so adjust the response timeout relative to now */
- pp->response = Curl_now();
-
- free(eob);
- }
+ (data->state.upload || IS_MIME_POST(data))) {
smtp_state(data, SMTP_POSTDATA);
@@ -1502,7 +1457,7 @@ static CURLcode smtp_perform(struct Curl_easy *data, bool *connected, smtp->eob = 2;
/* Start the first command in the DO phase */
- if((data->state.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
+ if((data->state.upload || IS_MIME_POST(data)) && data->set.mail_rcpt)
/* MAIL transfer */
result = smtp_perform_mail(data);
else
@@ -1593,7 +1548,7 @@ static CURLcode smtp_dophase_done(struct Curl_easy *data, bool connected) if(smtp->transfer != PPTRANSFER_BODY)
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
return CURLE_OK;
}
@@ -1818,108 +1773,173 @@ static CURLcode smtp_parse_address(const char *fqma, char **address, return result;
}
-CURLcode Curl_smtp_escape_eob(struct Curl_easy *data,
- const ssize_t nread,
- const ssize_t offset)
+struct cr_eob_ctx {
+ struct Curl_creader super;
+ struct bufq buf;
+ size_t n_eob; /* how many EOB bytes we matched so far */
+ size_t eob; /* Number of bytes of the EOB (End Of Body) that
+ have been received so far */
+ BIT(read_eos); /* we read an EOS from the next reader */
+ BIT(eos); /* we have returned an EOS */
+};
+
+static CURLcode cr_eob_init(struct Curl_easy *data,
+ struct Curl_creader *reader)
{
- /* When sending a SMTP payload we must detect CRLF. sequences making sure
- they are sent as CRLF.. instead, as a . on the beginning of a line will
- be deleted by the server when not part of an EOB terminator and a
- genuine CRLF.CRLF which isn't escaped will wrongly be detected as end of
- data by the server
- */
- ssize_t i;
- ssize_t si;
- struct SMTP *smtp = data->req.p.smtp;
- char *scratch = data->state.scratch;
- char *newscratch = NULL;
- char *oldscratch = NULL;
- size_t eob_sent;
+ struct cr_eob_ctx *ctx = reader->ctx;
+ (void)data;
+ /* The first char we read is the first on a line, as if we had
+ * read CRLF just before */
+ ctx->n_eob = 2;
+ Curl_bufq_init2(&ctx->buf, (16 * 1024), 1, BUFQ_OPT_SOFT_LIMIT);
+ return CURLE_OK;
+}
- /* Do we need to allocate a scratch buffer? */
- if(!scratch || data->set.crlf) {
- oldscratch = scratch;
+static void cr_eob_close(struct Curl_easy *data, struct Curl_creader *reader)
+{
+ struct cr_eob_ctx *ctx = reader->ctx;
+ (void)data;
+ Curl_bufq_free(&ctx->buf);
+}
- scratch = newscratch = malloc(2 * data->set.upload_buffer_size);
- if(!newscratch) {
- failf(data, "Failed to alloc scratch buffer");
+/* this is the 5-bytes End-Of-Body marker for SMTP */
+#define SMTP_EOB "\r\n.\r\n"
+#define SMTP_EOB_FIND_LEN 3
- return CURLE_OUT_OF_MEMORY;
- }
- }
- DEBUGASSERT((size_t)data->set.upload_buffer_size >= (size_t)nread);
-
- /* Have we already sent part of the EOB? */
- eob_sent = smtp->eob;
-
- /* This loop can be improved by some kind of Boyer-Moore style of
- approach but that is saved for later... */
- if(offset)
- memcpy(scratch, data->req.upload_fromhere, offset);
- for(i = offset, si = offset; i < nread; i++) {
- if(SMTP_EOB[smtp->eob] == data->req.upload_fromhere[i]) {
- smtp->eob++;
-
- /* Is the EOB potentially the terminating CRLF? */
- if(2 == smtp->eob || SMTP_EOB_LEN == smtp->eob)
- smtp->trailing_crlf = TRUE;
- else
- smtp->trailing_crlf = FALSE;
- }
- else if(smtp->eob) {
- /* A previous substring matched so output that first */
- memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent);
- si += smtp->eob - eob_sent;
-
- /* Then compare the first byte */
- if(SMTP_EOB[0] == data->req.upload_fromhere[i])
- smtp->eob = 1;
- else
- smtp->eob = 0;
+/* client reader doing SMTP End-Of-Body escaping. */
+static CURLcode cr_eob_read(struct Curl_easy *data,
+ struct Curl_creader *reader,
+ char *buf, size_t blen,
+ size_t *pnread, bool *peos)
+{
+ struct cr_eob_ctx *ctx = reader->ctx;
+ CURLcode result = CURLE_OK;
+ size_t nread, i, start, n;
+ bool eos;
+
+ if(!ctx->read_eos && Curl_bufq_is_empty(&ctx->buf)) {
+ /* Get more and convert it when needed */
+ result = Curl_creader_read(data, reader->next, buf, blen, &nread, &eos);
+ if(result)
+ return result;
- eob_sent = 0;
+ ctx->read_eos = eos;
+ if(nread) {
+ if(!ctx->n_eob && !memchr(buf, SMTP_EOB[0], nread)) {
+ /* not in the middle of a match, no EOB start found, just pass */
+ *pnread = nread;
+ *peos = FALSE;
+ return CURLE_OK;
+ }
+ /* scan for EOB (continuation) and convert */
+ for(i = start = 0; i < nread; ++i) {
+ if(ctx->n_eob >= SMTP_EOB_FIND_LEN) {
+ /* matched the EOB prefix and seeing additional char, add '.' */
+ result = Curl_bufq_cwrite(&ctx->buf, buf + start, i - start, &n);
+ if(result)
+ return result;
+ result = Curl_bufq_cwrite(&ctx->buf, ".", 1, &n);
+ if(result)
+ return result;
+ ctx->n_eob = 0;
+ start = i;
+ if(data->state.infilesize > 0)
+ data->state.infilesize++;
+ }
- /* Reset the trailing CRLF flag as there was more data */
- smtp->trailing_crlf = FALSE;
+ if(buf[i] != SMTP_EOB[ctx->n_eob])
+ ctx->n_eob = 0;
+
+ if(buf[i] == SMTP_EOB[ctx->n_eob]) {
+ /* matching another char of the EOB */
+ ++ctx->n_eob;
+ }
+ }
+
+ /* add any remainder to buf */
+ if(start < nread) {
+ result = Curl_bufq_cwrite(&ctx->buf, buf + start, nread - start, &n);
+ if(result)
+ return result;
+ }
}
- /* Do we have a match for CRLF. as per RFC-5321, sect. 4.5.2 */
- if(SMTP_EOB_FIND_LEN == smtp->eob) {
- /* Copy the replacement data to the target buffer */
- memcpy(&scratch[si], &SMTP_EOB_REPL[eob_sent],
- SMTP_EOB_REPL_LEN - eob_sent);
- si += SMTP_EOB_REPL_LEN - eob_sent;
- smtp->eob = 0;
- eob_sent = 0;
+ if(ctx->read_eos) {
+ /* if we last matched a CRLF or if the data was empty, add ".\r\n"
+ * to end the body. If we sent something and it did not end with "\r\n",
+ * add "\r\n.\r\n" to end the body */
+ const char *eob = SMTP_EOB;
+ switch(ctx->n_eob) {
+ case 2:
+ /* seen a CRLF at the end, just add the remainder */
+ eob = &SMTP_EOB[2];
+ break;
+ case 3:
+ /* ended with '\r\n.', we should escape the last '.' */
+ eob = "." SMTP_EOB;
+ break;
+ default:
+ break;
+ }
+ result = Curl_bufq_cwrite(&ctx->buf, eob, strlen(eob), &n);
+ if(result)
+ return result;
}
- else if(!smtp->eob)
- scratch[si++] = data->req.upload_fromhere[i];
}
- if(smtp->eob - eob_sent) {
- /* A substring matched before processing ended so output that now */
- memcpy(&scratch[si], &SMTP_EOB[eob_sent], smtp->eob - eob_sent);
- si += smtp->eob - eob_sent;
+ *peos = FALSE;
+ if(!Curl_bufq_is_empty(&ctx->buf)) {
+ result = Curl_bufq_cread(&ctx->buf, buf, blen, pnread);
}
+ else
+ *pnread = 0;
+
+ if(ctx->read_eos && Curl_bufq_is_empty(&ctx->buf)) {
+ /* no more data, read all, done. */
+ ctx->eos = TRUE;
+ }
+ *peos = ctx->eos;
+ DEBUGF(infof(data, "cr_eob_read(%zu) -> %d, %zd, %d",
+ blen, result, *pnread, *peos));
+ return CURLE_OK;
+}
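
The net effect of the reader: a '.' that starts a line is doubled per RFC 5321 section 4.5.2, and the body is terminated with CRLF "." CRLF. For example (illustrative data only):

  /* before: body handed to the reader; after: bytes put on the wire */
  static const char before[] = "line one\r\n.hidden line\r\n";
  static const char after[]  = "line one\r\n..hidden line\r\n.\r\n";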
- /* Only use the new buffer if we replaced something */
- if(si != nread) {
- /* Upload from the new (replaced) buffer instead */
- data->req.upload_fromhere = scratch;
+static curl_off_t cr_eob_total_length(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ /* this reader changes length depending on input */
+ (void)data;
+ (void)reader;
+ return -1;
+}
- /* Save the buffer so it can be freed later */
- data->state.scratch = scratch;
+static const struct Curl_crtype cr_eob = {
+ "cr-smtp-eob",
+ cr_eob_init,
+ cr_eob_read,
+ cr_eob_close,
+ Curl_creader_def_needs_rewind,
+ cr_eob_total_length,
+ Curl_creader_def_resume_from,
+ Curl_creader_def_rewind,
+ Curl_creader_def_unpause,
+ Curl_creader_def_done,
+ sizeof(struct cr_eob_ctx)
+};
- /* Free the old scratch buffer */
- free(oldscratch);
+static CURLcode cr_eob_add(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = NULL;
+ CURLcode result;
- /* Set the new amount too */
- data->req.upload_present = si;
- }
- else
- free(newscratch);
+ result = Curl_creader_create(&reader, data, &cr_eob,
+ CURL_CR_CONTENT_ENCODE);
+ if(!result)
+ result = Curl_creader_add(data, reader);
- return CURLE_OK;
+ if(result && reader)
+ Curl_creader_free(data, reader);
+ return result;
}
#endif /* CURL_DISABLE_SMTP */
diff --git a/libs/libcurl/src/smtp.h b/libs/libcurl/src/smtp.h index a2a86c1bfb..c6b33515b7 100644 --- a/libs/libcurl/src/smtp.h +++ b/libs/libcurl/src/smtp.h @@ -84,17 +84,4 @@ struct smtp_conn { extern const struct Curl_handler Curl_handler_smtp;
extern const struct Curl_handler Curl_handler_smtps;
-/* this is the 5-bytes End-Of-Body marker for SMTP */
-#define SMTP_EOB "\x0d\x0a\x2e\x0d\x0a"
-#define SMTP_EOB_LEN 5
-#define SMTP_EOB_FIND_LEN 3
-
-/* if found in data, replace it with this string instead */
-#define SMTP_EOB_REPL "\x0d\x0a\x2e\x2e"
-#define SMTP_EOB_REPL_LEN 4
-
-CURLcode Curl_smtp_escape_eob(struct Curl_easy *data,
- const ssize_t nread,
- const ssize_t offset);
-
#endif /* HEADER_CURL_SMTP_H */
diff --git a/libs/libcurl/src/socks.c b/libs/libcurl/src/socks.c index ea733d9e98..89f85666ff 100644 --- a/libs/libcurl/src/socks.c +++ b/libs/libcurl/src/socks.c @@ -341,7 +341,7 @@ static CURLproxycode do_SOCKS4(struct Curl_cfilter *cf, case CONNECT_RESOLVING:
/* check if we have the name resolved by now */
- dns = Curl_fetch_addr(data, sx->hostname, (int)conn->port);
+ dns = Curl_fetch_addr(data, sx->hostname, conn->primary.remote_port);
if(dns) {
#ifdef CURLRES_ASYNCH
@@ -1175,7 +1175,7 @@ static CURLcode socks_proxy_cf_connect(struct Curl_cfilter *cf, result = connect_SOCKS(cf, sx, data);
if(!result && sx->state == CONNECT_DONE) {
cf->connected = TRUE;
- Curl_verboseconnect(data, conn);
+ Curl_verboseconnect(data, conn, cf->sockindex);
socks_proxy_cf_free(cf);
}
diff --git a/libs/libcurl/src/socks_gssapi.c b/libs/libcurl/src/socks_gssapi.c index d9dda91aa0..a592caad5b 100644 --- a/libs/libcurl/src/socks_gssapi.c +++ b/libs/libcurl/src/socks_gssapi.c @@ -475,7 +475,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf, gss_recv_token.length, &actualread);
if(result || (actualread != us_length)) {
- failf(data, "Failed to receive GSS-API encryptrion type.");
+ failf(data, "Failed to receive GSS-API encryption type.");
gss_release_buffer(&gss_status, &gss_recv_token);
gss_delete_sec_context(&gss_status, &gss_context, NULL);
return CURLE_COULDNT_CONNECT;
diff --git a/libs/libcurl/src/strtoofft.c b/libs/libcurl/src/strtoofft.c index fbc399361c..fd5864f372 100644 --- a/libs/libcurl/src/strtoofft.c +++ b/libs/libcurl/src/strtoofft.c @@ -79,11 +79,10 @@ static int get_char(char c, int base); static curl_off_t strtooff(const char *nptr, char **endptr, int base)
{
char *end;
- int is_negative = 0;
- int overflow;
+ bool is_negative = FALSE;
+ bool overflow = FALSE;
int i;
curl_off_t value = 0;
- curl_off_t newval;
/* Skip leading whitespace. */
end = (char *)nptr;
@@ -93,7 +92,7 @@ static curl_off_t strtooff(const char *nptr, char **endptr, int base) /* Handle the sign, if any. */
if(end[0] == '-') {
- is_negative = 1;
+ is_negative = TRUE;
end++;
}
else if(end[0] == '+') {
@@ -129,19 +128,15 @@ static curl_off_t strtooff(const char *nptr, char **endptr, int base) }
/* Loop handling digits. */
- value = 0;
- overflow = 0;
for(i = get_char(end[0], base);
i != -1;
end++, i = get_char(end[0], base)) {
- newval = base * value + i;
- if(newval < value) {
- /* We've overflowed. */
- overflow = 1;
+
+ if(value > (CURL_OFF_T_MAX - i) / base) {
+ overflow = TRUE;
break;
}
- else
- value = newval;
+ value = base * value + i;
}
if(!overflow) {
@@ -217,7 +212,7 @@ static int get_char(char c, int base) CURLofft curlx_strtoofft(const char *str, char **endp, int base,
curl_off_t *num)
{
- char *end;
+ char *end = NULL;
curl_off_t number;
errno = 0;
*num = 0; /* clear by default */
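
The rewritten digit loop above rejects overflow before multiplying: a digit is only accumulated while value <= (CURL_OFF_T_MAX - i) / base, so base * value + i can never wrap. The same guard in isolation (sketch only):

  #include <limits.h>
  #include <stdbool.h>

  /* accumulate one digit without risking signed overflow */
  static bool accum_digit(long long *value, int digit, int base)
  {
    if(*value > (LLONG_MAX - digit) / base)
      return false;              /* base * value + digit would overflow */
    *value = *value * base + digit;
    return true;
  }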
diff --git a/libs/libcurl/src/telnet.c b/libs/libcurl/src/telnet.c index f80326b913..359cd09b0a 100644 --- a/libs/libcurl/src/telnet.c +++ b/libs/libcurl/src/telnet.c @@ -1231,20 +1231,24 @@ process_iac: static CURLcode send_telnet_data(struct Curl_easy *data,
char *buffer, ssize_t nread)
{
- ssize_t i, outlen;
+ size_t i, outlen;
unsigned char *outbuf;
CURLcode result = CURLE_OK;
- ssize_t bytes_written, total_written = 0;
+ size_t bytes_written;
+ size_t total_written = 0;
struct connectdata *conn = data->conn;
struct TELNET *tn = data->req.p.telnet;
DEBUGASSERT(tn);
+ DEBUGASSERT(nread > 0);
+ if(nread < 0)
+ return CURLE_TOO_LARGE;
if(memchr(buffer, CURL_IAC, nread)) {
/* only use the escape buffer when necessary */
Curl_dyn_reset(&tn->out);
- for(i = 0; i < nread && !result; i++) {
+ for(i = 0; i < (size_t)nread && !result; i++) {
result = Curl_dyn_addn(&tn->out, &buffer[i], 1);
if(!result && ((unsigned char)buffer[i] == CURL_IAC))
/* IAC is FF in hex */
@@ -1255,7 +1259,7 @@ static CURLcode send_telnet_data(struct Curl_easy *data, outbuf = Curl_dyn_uptr(&tn->out);
}
else {
- outlen = nread;
+ outlen = (size_t)nread;
outbuf = (unsigned char *)buffer;
}
while(!result && total_written < outlen) {
@@ -1270,8 +1274,8 @@ static CURLcode send_telnet_data(struct Curl_easy *data, break;
default: /* write! */
bytes_written = 0;
- result = Curl_nwrite(data, FIRSTSOCKET, outbuf + total_written,
- outlen - total_written, &bytes_written);
+ result = Curl_xfer_send(data, outbuf + total_written,
+ outlen - total_written, &bytes_written);
total_written += bytes_written;
break;
}
@@ -1464,7 +1468,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done) }
if(events.lNetworkEvents & FD_READ) {
/* read data from network */
- result = Curl_read(data, sockfd, buffer, sizeof(buffer), &nread);
+ result = Curl_xfer_recv(data, buffer, sizeof(buffer), &nread);
/* read would've blocked. Loop again */
if(result == CURLE_AGAIN)
break;
@@ -1545,7 +1549,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done) default: /* read! */
if(pfd[0].revents & POLLIN) {
/* read data from network */
- result = Curl_read(data, sockfd, buffer, sizeof(buffer), &nread);
+ result = Curl_xfer_recv(data, buffer, sizeof(buffer), &nread);
/* read would've blocked. Loop again */
if(result == CURLE_AGAIN)
break;
@@ -1635,7 +1639,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done) }
#endif
/* mark this as "no further transfer wanted" */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
return result;
}
diff --git a/libs/libcurl/src/tftp.c b/libs/libcurl/src/tftp.c index d655a16de1..b31b2c287e 100644 --- a/libs/libcurl/src/tftp.c +++ b/libs/libcurl/src/tftp.c @@ -452,8 +452,6 @@ static CURLcode tftp_send_first(struct tftp_state_data *state, if(data->state.upload) {
/* If we are uploading, send an WRQ */
setpacketevent(&state->spacket, TFTP_EVENT_WRQ);
- state->data->req.upload_fromhere =
- (char *)state->spacket.data + 4;
if(data->state.infilesize != -1)
Curl_pgrsSetUploadSize(data, data->state.infilesize);
}
@@ -708,6 +706,8 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event) struct SingleRequest *k = &data->req;
size_t cb; /* Bytes currently read */
char buffer[STRERROR_LEN];
+ char *bufptr;
+ bool eos;
switch(event) {
@@ -771,13 +771,14 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event) * data block.
* */
state->sbytes = 0;
- state->data->req.upload_fromhere = (char *)state->spacket.data + 4;
+ bufptr = (char *)state->spacket.data + 4;
do {
- result = Curl_fillreadbuffer(data, state->blksize - state->sbytes, &cb);
+ result = Curl_client_read(data, bufptr, state->blksize - state->sbytes,
+ &cb, &eos);
if(result)
return result;
state->sbytes += (int)cb;
- state->data->req.upload_fromhere += cb;
+ bufptr += cb;
} while(state->sbytes < state->blksize && cb);
sbytes = sendto(state->sockfd, (void *) state->spacket.data,
@@ -1240,7 +1241,7 @@ static CURLcode tftp_multi_statemach(struct Curl_easy *data, bool *done) *done = (state->state == TFTP_STATE_FIN) ? TRUE : FALSE;
if(*done)
/* Tell curl we're done */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
}
else {
/* no timeouts to handle, check our socket */
@@ -1263,7 +1264,7 @@ static CURLcode tftp_multi_statemach(struct Curl_easy *data, bool *done) *done = (state->state == TFTP_STATE_FIN) ? TRUE : FALSE;
if(*done)
/* Tell curl we're done */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
}
/* if rc == 0, then select() timed out */
}
diff --git a/libs/libcurl/src/transfer.c b/libs/libcurl/src/transfer.c index bdfab6459e..566f3da5e8 100644 --- a/libs/libcurl/src/transfer.c +++ b/libs/libcurl/src/transfer.c @@ -63,6 +63,7 @@ #include "content_encoding.h"
#include "hostip.h"
#include "cfilters.h"
+#include "cw-out.h"
#include "transfer.h"
#include "sendf.h"
#include "speedcheck.h"
@@ -114,260 +115,6 @@ char *Curl_checkheaders(const struct Curl_easy *data, }
#endif
-CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
-{
- if(!data->state.ulbuf) {
- data->state.ulbuf = malloc(data->set.upload_buffer_size);
- if(!data->state.ulbuf)
- return CURLE_OUT_OF_MEMORY;
- }
- return CURLE_OK;
-}
-
-#ifndef CURL_DISABLE_HTTP
-/*
- * This function will be called to loop through the trailers buffer
- * until no more data is available for sending.
- */
-static size_t trailers_read(char *buffer, size_t size, size_t nitems,
- void *raw)
-{
- struct Curl_easy *data = (struct Curl_easy *)raw;
- struct dynbuf *trailers_buf = &data->state.trailers_buf;
- size_t bytes_left = Curl_dyn_len(trailers_buf) -
- data->state.trailers_bytes_sent;
- size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
- if(to_copy) {
- memcpy(buffer,
- Curl_dyn_ptr(trailers_buf) + data->state.trailers_bytes_sent,
- to_copy);
- data->state.trailers_bytes_sent += to_copy;
- }
- return to_copy;
-}
-
-static size_t trailers_left(void *raw)
-{
- struct Curl_easy *data = (struct Curl_easy *)raw;
- struct dynbuf *trailers_buf = &data->state.trailers_buf;
- return Curl_dyn_len(trailers_buf) - data->state.trailers_bytes_sent;
-}
-#endif
-
-/*
- * This function will call the read callback to fill our buffer with data
- * to upload.
- */
-CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
- size_t *nreadp)
-{
- size_t buffersize = bytes;
- size_t nread;
- curl_read_callback readfunc = NULL;
- void *extra_data = NULL;
- int eof_index = 0;
-
-#ifndef CURL_DISABLE_HTTP
- if(data->state.trailers_state == TRAILERS_INITIALIZED) {
- struct curl_slist *trailers = NULL;
- CURLcode result;
- int trailers_ret_code;
-
- /* at this point we already verified that the callback exists
- so we compile and store the trailers buffer, then proceed */
- infof(data,
- "Moving trailers state machine from initialized to sending.");
- data->state.trailers_state = TRAILERS_SENDING;
- Curl_dyn_init(&data->state.trailers_buf, DYN_TRAILERS);
-
- data->state.trailers_bytes_sent = 0;
- Curl_set_in_callback(data, true);
- trailers_ret_code = data->set.trailer_callback(&trailers,
- data->set.trailer_data);
- Curl_set_in_callback(data, false);
- if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
- result = Curl_http_compile_trailers(trailers, &data->state.trailers_buf,
- data);
- }
- else {
- failf(data, "operation aborted by trailing headers callback");
- *nreadp = 0;
- result = CURLE_ABORTED_BY_CALLBACK;
- }
- if(result) {
- Curl_dyn_free(&data->state.trailers_buf);
- curl_slist_free_all(trailers);
- return result;
- }
- infof(data, "Successfully compiled trailers.");
- curl_slist_free_all(trailers);
- }
-#endif
-
-#ifndef CURL_DISABLE_HTTP
- /* if we are transmitting trailing data, we don't need to write
- a chunk size so we skip this */
- if(data->req.upload_chunky &&
- data->state.trailers_state == TRAILERS_NONE) {
- /* if chunked Transfer-Encoding */
- buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
- data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
- }
-
- if(data->state.trailers_state == TRAILERS_SENDING) {
- /* if we're here then that means that we already sent the last empty chunk
- but we didn't send a final CR LF, so we sent 0 CR LF. We then start
- pulling trailing data until we have no more at which point we
- simply return to the previous point in the state machine as if
- nothing happened.
- */
- readfunc = trailers_read;
- extra_data = (void *)data;
- eof_index = 1;
- }
- else
-#endif
- {
- readfunc = data->state.fread_func;
- extra_data = data->state.in;
- }
-
- if(!data->req.fread_eof[eof_index]) {
- Curl_set_in_callback(data, true);
- nread = readfunc(data->req.upload_fromhere, 1, buffersize, extra_data);
- Curl_set_in_callback(data, false);
- /* make sure the callback is not called again after EOF */
- data->req.fread_eof[eof_index] = !nread;
- }
- else
- nread = 0;
-
- if(nread == CURL_READFUNC_ABORT) {
- failf(data, "operation aborted by callback");
- *nreadp = 0;
- return CURLE_ABORTED_BY_CALLBACK;
- }
- if(nread == CURL_READFUNC_PAUSE) {
- struct SingleRequest *k = &data->req;
-
- if(data->conn->handler->flags & PROTOPT_NONETWORK) {
- /* protocols that work without network cannot be paused. This is
- actually only FILE:// just now, and it can't pause since the transfer
- isn't done using the "normal" procedure. */
- failf(data, "Read callback asked for PAUSE when not supported");
- return CURLE_READ_ERROR;
- }
-
- /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
- k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
- if(data->req.upload_chunky) {
- /* Back out the preallocation done above */
- data->req.upload_fromhere -= (8 + 2);
- }
- *nreadp = 0;
-
- return CURLE_OK; /* nothing was read */
- }
- else if(nread > buffersize) {
- /* the read function returned a too large value */
- *nreadp = 0;
- failf(data, "read function returned funny value");
- return CURLE_READ_ERROR;
- }
-
-#ifndef CURL_DISABLE_HTTP
- if(!data->req.forbidchunk && data->req.upload_chunky) {
- /* if chunked Transfer-Encoding
- * build chunk:
- *
- * <HEX SIZE> CRLF
- * <DATA> CRLF
- */
- /* On non-ASCII platforms the <DATA> may or may not be
- translated based on state.prefer_ascii while the protocol
- portion must always be translated to the network encoding.
- To further complicate matters, line end conversion might be
- done later on, so we need to prevent CRLFs from becoming
- CRCRLFs if that's the case. To do this we use bare LFs
- here, knowing they'll become CRLFs later on.
- */
-
- bool added_crlf = FALSE;
- int hexlen = 0;
- const char *endofline_native;
- const char *endofline_network;
-
- if(
-#ifdef CURL_DO_LINEEND_CONV
- (data->state.prefer_ascii) ||
-#endif
- (data->set.crlf)) {
- /* \n will become \r\n later on */
- endofline_native = "\n";
- endofline_network = "\x0a";
- }
- else {
- endofline_native = "\r\n";
- endofline_network = "\x0d\x0a";
- }
-
- /* if we're not handling trailing data, proceed as usual */
- if(data->state.trailers_state != TRAILERS_SENDING) {
- char hexbuffer[11] = "";
- hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
- "%zx%s", nread, endofline_native);
-
- /* move buffer pointer */
- data->req.upload_fromhere -= hexlen;
- nread += hexlen;
-
- /* copy the prefix to the buffer, leaving out the NUL */
- memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
-
- /* always append ASCII CRLF to the data unless
- we have a valid trailer callback */
- if((nread-hexlen) == 0 &&
- data->set.trailer_callback != NULL &&
- data->state.trailers_state == TRAILERS_NONE) {
- data->state.trailers_state = TRAILERS_INITIALIZED;
- }
- else {
- memcpy(data->req.upload_fromhere + nread,
- endofline_network,
- strlen(endofline_network));
- added_crlf = TRUE;
- }
- }
-
- if(data->state.trailers_state == TRAILERS_SENDING &&
- !trailers_left(data)) {
- Curl_dyn_free(&data->state.trailers_buf);
- data->state.trailers_state = TRAILERS_DONE;
- data->set.trailer_data = NULL;
- data->set.trailer_callback = NULL;
- /* mark the transfer as done */
- data->req.upload_done = TRUE;
- infof(data, "Signaling end of chunked upload after trailers.");
- }
- else
- if((nread - hexlen) == 0 &&
- data->state.trailers_state != TRAILERS_INITIALIZED) {
- /* mark this as done once this chunk is transferred */
- data->req.upload_done = TRUE;
- infof(data,
- "Signaling end of chunked upload via terminating chunk.");
- }
-
- if(added_crlf)
- nread += strlen(endofline_network); /* for the added end of line */
- }
-#endif
-
- *nreadp = nread;
-
- return CURLE_OK;
-}
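
The framing this removed code assembled in place is ordinary HTTP/1.1 chunked encoding: a hexadecimal length line, the payload, a CRLF, and finally a zero-length chunk. A self-contained sketch of that framing, using plain snprintf instead of libcurl's internal msnprintf (the helper name is made up):

    #include <stdio.h>
    #include <string.h>

    /* Frame one chunk as "<HEX SIZE> CRLF <DATA> CRLF".
       'out' must have room for datalen plus ~20 bytes of framing. */
    static size_t frame_chunk(char *out, const char *data, size_t datalen)
    {
      size_t off = (size_t)snprintf(out, 20, "%zx\r\n", datalen);
      memcpy(out + off, data, datalen);
      off += datalen;
      memcpy(out + off, "\r\n", 2);
      return off + 2;
    }

    /* frame_chunk(buf, "hello", 5) writes "5\r\nhello\r\n"; the upload ends
       with the terminating chunk "0\r\n\r\n", or with "0\r\n" followed by the
       trailer lines and a final CRLF when a trailer callback supplied
       trailing headers. */
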
-
static int data_pending(struct Curl_easy *data)
{
struct connectdata *conn = data->conn;
@@ -447,7 +194,7 @@ static ssize_t Curl_xfer_recv_resp(struct Curl_easy *data, return 0;
}
- *err = Curl_read(data, data->conn->sockfd, buf, blen, &nread);
+ *err = Curl_xfer_recv(data, buf, blen, &nread);
if(*err)
return -1;
DEBUGASSERT(nread >= 0);
@@ -462,18 +209,19 @@ static ssize_t Curl_xfer_recv_resp(struct Curl_easy *data, */
static CURLcode readwrite_data(struct Curl_easy *data,
struct SingleRequest *k,
- int *didwhat, bool *done)
+ int *didwhat)
{
struct connectdata *conn = data->conn;
CURLcode result = CURLE_OK;
- char *buf;
- size_t blen;
+ char *buf, *xfer_buf;
+ size_t blen, xfer_blen;
int maxloops = 10;
curl_off_t total_received = 0;
bool is_multiplex = FALSE;
- DEBUGASSERT(data->state.buffer);
- *done = FALSE;
+ result = Curl_multi_xfer_buf_borrow(data, &xfer_buf, &xfer_blen);
+ if(result)
+ goto out;
/* This is where we loop until we have read everything there is to
read or we get a CURLE_AGAIN */
@@ -489,16 +237,17 @@ static CURLcode readwrite_data(struct Curl_easy *data, is_multiplex = Curl_conn_is_multiplex(conn, FIRSTSOCKET);
}
- buf = data->state.buffer;
- bytestoread = data->set.buffer_size;
+ buf = xfer_buf;
+ bytestoread = xfer_blen;
- /* Observe any imposed speed limit */
if(bytestoread && data->set.max_recv_speed) {
- curl_off_t net_limit = data->set.max_recv_speed - total_received;
- if(net_limit <= 0)
+ /* In case of speed limit on receiving: if this loop already got
+ * data, break out. If not, limit the amount of bytes to receive.
+ * The overall, timed, speed limiting is done in multi.c */
+ if(total_received)
break;
- if((size_t)net_limit < bytestoread)
- bytestoread = (size_t)net_limit;
+ if((size_t)data->set.max_recv_speed < bytestoread)
+ bytestoread = (size_t)data->set.max_recv_speed;
}
nread = Curl_xfer_recv_resp(data, buf, bytestoread,
@@ -530,8 +279,8 @@ static CURLcode readwrite_data(struct Curl_easy *data, }
total_received += blen;
- result = Curl_xfer_write_resp(data, buf, blen, is_eos, done);
- if(result || *done)
+ result = Curl_xfer_write_resp(data, buf, blen, is_eos);
+ if(result || data->req.done)
goto out;
/* if we are done, we stop receiving. On multiplexed connections,
@@ -564,22 +313,12 @@ static CURLcode readwrite_data(struct Curl_easy *data, }
out:
+ Curl_multi_xfer_buf_release(data, xfer_buf);
if(result)
DEBUGF(infof(data, "readwrite_data() -> %d", result));
return result;
}
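
readwrite_data() now borrows the shared transfer buffer from the multi handle instead of using the removed per-handle state.buffer, and has to give it back on every exit path, hence the goto out pattern above. A sketch of that discipline, with the borrow/release calls taken from their use in this hunk (the wrapper function itself is made up):

    static CURLcode recv_into_borrowed_buf(struct Curl_easy *data)
    {
      char *xfer_buf;
      size_t xfer_blen;
      CURLcode result = Curl_multi_xfer_buf_borrow(data, &xfer_buf, &xfer_blen);
      if(result)
        return result;            /* nothing borrowed, nothing to release */

      /* ... read into xfer_buf (at most xfer_blen bytes) and pass the data
         on via Curl_xfer_write_resp() ... */

      Curl_multi_xfer_buf_release(data, xfer_buf); /* always give it back */
      return result;
    }
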
-CURLcode Curl_done_sending(struct Curl_easy *data,
- struct SingleRequest *k)
-{
- k->keepon &= ~KEEP_SEND; /* we're done writing */
-
- /* These functions should be moved into the handler struct! */
- Curl_conn_ev_data_done_send(data);
-
- return CURLE_OK;
-}
-
#if defined(_WIN32) && defined(USE_WINSOCK)
#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
@@ -602,245 +341,42 @@ static void win_update_buffer_size(curl_socket_t sockfd) #endif
#define curl_upload_refill_watermark(data) \
- ((ssize_t)((data)->set.upload_buffer_size >> 5))
+ ((size_t)((data)->set.upload_buffer_size >> 5))
/*
* Send data to upload to the server, when the socket is writable.
*/
-static CURLcode readwrite_upload(struct Curl_easy *data,
- struct connectdata *conn,
- int *didwhat)
+static CURLcode readwrite_upload(struct Curl_easy *data, int *didwhat)
{
- ssize_t i, si;
- ssize_t bytes_written;
- CURLcode result;
- ssize_t nread; /* number of bytes read */
- bool sending_http_headers = FALSE;
- struct SingleRequest *k = &data->req;
-
- *didwhat |= KEEP_SEND;
-
- do {
- curl_off_t nbody;
- ssize_t offset = 0;
-
- if(0 != k->upload_present &&
- k->upload_present < curl_upload_refill_watermark(data) &&
- !k->upload_chunky &&/*(variable sized chunked header; append not safe)*/
- !k->upload_done && /*!(k->upload_done once k->upload_present sent)*/
- !(k->writebytecount + k->upload_present - k->pendingheader ==
- data->state.infilesize)) {
- offset = k->upload_present;
- }
-
- /* only read more data if there's no upload data already
- present in the upload buffer, or if appending to upload buffer */
- if(0 == k->upload_present || offset) {
- result = Curl_get_upload_buffer(data);
- if(result)
- return result;
- if(offset && k->upload_fromhere != data->state.ulbuf)
- memmove(data->state.ulbuf, k->upload_fromhere, offset);
- /* init the "upload from here" pointer */
- k->upload_fromhere = data->state.ulbuf;
-
- if(!k->upload_done) {
- /* HTTP pollution, this should be written nicer to become more
- protocol agnostic. */
- size_t fillcount;
- struct HTTP *http = k->p.http;
-
- if((k->exp100 == EXP100_SENDING_REQUEST) &&
- (http->sending == HTTPSEND_BODY)) {
- /* If this call is to send body data, we must take some action:
- We have sent off the full HTTP 1.1 request, and we shall now
- go into the Expect: 100 state and await such a header */
- k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
- k->keepon &= ~KEEP_SEND; /* disable writing */
- k->start100 = Curl_now(); /* timeout count starts now */
- *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
- /* set a timeout for the multi interface */
- Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
- break;
- }
-
- if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
- if(http->sending == HTTPSEND_REQUEST)
- /* We're sending the HTTP request headers, not the data.
- Remember that so we don't change the line endings. */
- sending_http_headers = TRUE;
- else
- sending_http_headers = FALSE;
- }
-
- k->upload_fromhere += offset;
- result = Curl_fillreadbuffer(data, data->set.upload_buffer_size-offset,
- &fillcount);
- k->upload_fromhere -= offset;
- if(result)
- return result;
-
- nread = offset + fillcount;
- }
- else
- nread = 0; /* we're done uploading/reading */
-
- if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
- /* this is a paused transfer */
- break;
- }
- if(nread <= 0) {
- result = Curl_done_sending(data, k);
- if(result)
- return result;
- break;
- }
-
- /* store number of bytes available for upload */
- k->upload_present = nread;
-
- /* convert LF to CRLF if so asked */
- if((!sending_http_headers) && (
-#ifdef CURL_DO_LINEEND_CONV
- /* always convert if we're FTPing in ASCII mode */
- (data->state.prefer_ascii) ||
-#endif
- (data->set.crlf))) {
- /* Do we need to allocate a scratch buffer? */
- if(!data->state.scratch) {
- data->state.scratch = malloc(2 * data->set.upload_buffer_size);
- if(!data->state.scratch) {
- failf(data, "Failed to alloc scratch buffer");
-
- return CURLE_OUT_OF_MEMORY;
- }
- }
-
- /*
- * ASCII/EBCDIC Note: This is presumably a text (not binary)
- * transfer so the data should already be in ASCII.
- * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
- * must be used instead of the escape sequences \r & \n.
- */
- if(offset)
- memcpy(data->state.scratch, k->upload_fromhere, offset);
- for(i = offset, si = offset; i < nread; i++, si++) {
- if(k->upload_fromhere[i] == 0x0a) {
- data->state.scratch[si++] = 0x0d;
- data->state.scratch[si] = 0x0a;
- if(!data->set.crlf) {
- /* we're here only because FTP is in ASCII mode...
- bump infilesize for the LF we just added */
- if(data->state.infilesize != -1)
- data->state.infilesize++;
- }
- }
- else
- data->state.scratch[si] = k->upload_fromhere[i];
- }
-
- if(si != nread) {
- /* only perform the special operation if we really did replace
- anything */
- nread = si;
-
- /* upload from the new (replaced) buffer instead */
- k->upload_fromhere = data->state.scratch;
+ CURLcode result = CURLE_OK;
- /* set the new amount too */
- k->upload_present = nread;
- }
- }
+ if((data->req.keepon & KEEP_SEND_PAUSE))
+ return CURLE_OK;
-#ifndef CURL_DISABLE_SMTP
- if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
- result = Curl_smtp_escape_eob(data, nread, offset);
- if(result)
- return result;
- }
-#endif /* CURL_DISABLE_SMTP */
- } /* if 0 == k->upload_present or appended to upload buffer */
- else {
- /* We have a partial buffer left from a previous "round". Use
- that instead of reading more data */
- }
+ /* We should not get here when the sending is already done. It
+ * probably means that someone set `data-req.keepon |= KEEP_SEND`
+ * when it should not. */
+ DEBUGASSERT(!Curl_req_done_sending(data));
- /* write to socket (send away data) */
- result = Curl_write(data,
- conn->writesockfd, /* socket to send to */
- k->upload_fromhere, /* buffer pointer */
- k->upload_present, /* buffer size */
- &bytes_written); /* actually sent */
+ if(!Curl_req_done_sending(data)) {
+ *didwhat |= KEEP_SEND;
+ result = Curl_req_send_more(data);
if(result)
return result;
#if defined(_WIN32) && defined(USE_WINSOCK)
+ /* FIXME: this looks like it would fit better into cf-socket.c
+ * but then I do not know enough Windows to say... */
{
struct curltime n = Curl_now();
- if(Curl_timediff(n, k->last_sndbuf_update) > 1000) {
- win_update_buffer_size(conn->writesockfd);
- k->last_sndbuf_update = n;
+ if(Curl_timediff(n, data->conn->last_sndbuf_update) > 1000) {
+ win_update_buffer_size(data->conn->writesockfd);
+ data->conn->last_sndbuf_update = n;
}
}
#endif
-
- if(k->pendingheader) {
- /* parts of what was sent was header */
- curl_off_t n = CURLMIN(k->pendingheader, bytes_written);
- /* show the data before we change the pointer upload_fromhere */
- Curl_debug(data, CURLINFO_HEADER_OUT, k->upload_fromhere, (size_t)n);
- k->pendingheader -= n;
- nbody = bytes_written - n; /* size of the written body part */
- }
- else
- nbody = bytes_written;
-
- if(nbody) {
- /* show the data before we change the pointer upload_fromhere */
- Curl_debug(data, CURLINFO_DATA_OUT,
- &k->upload_fromhere[bytes_written - nbody],
- (size_t)nbody);
-
- k->writebytecount += nbody;
- Curl_pgrsSetUploadCounter(data, k->writebytecount);
- }
-
- if((!k->upload_chunky || k->forbidchunk) &&
- (k->writebytecount == data->state.infilesize)) {
- /* we have sent all data we were supposed to */
- k->upload_done = TRUE;
- infof(data, "We are completely uploaded and fine");
- }
-
- if(k->upload_present != bytes_written) {
- /* we only wrote a part of the buffer (if anything), deal with it! */
-
- /* store the amount of bytes left in the buffer to write */
- k->upload_present -= bytes_written;
-
- /* advance the pointer where to find the buffer when the next send
- is to happen */
- k->upload_fromhere += bytes_written;
- }
- else {
- /* we've uploaded that buffer now */
- result = Curl_get_upload_buffer(data);
- if(result)
- return result;
- k->upload_fromhere = data->state.ulbuf;
- k->upload_present = 0; /* no more bytes left */
-
- if(k->upload_done) {
- result = Curl_done_sending(data, k);
- if(result)
- return result;
- }
- }
-
-
- } while(0); /* just to break out from! */
-
- return CURLE_OK;
+ }
+ return result;
}
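
The deleted body above also did the LF-to-CRLF conversion into a scratch buffer sized at twice the upload buffer (the worst case is every byte being an LF), bumping the expected file size for FTP ASCII transfers. The same transformation in isolation, independent of libcurl's internals:

    /* Expand LF to CRLF; 'dst' must hold at least 2*srclen bytes.
       Returns the number of bytes written to dst. */
    static size_t lf_to_crlf(char *dst, const char *src, size_t srclen)
    {
      size_t i, si = 0;
      for(i = 0; i < srclen; i++) {
        if(src[i] == '\n') {
          dst[si++] = '\r';
          dst[si++] = '\n';
        }
        else
          dst[si++] = src[i];
      }
      /* if si != srclen a replacement happened: upload from dst instead
         and, for FTP ASCII mode, grow the known upload size by the number
         of added CRs (si - srclen) */
      return si;
    }
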
static int select_bits_paused(struct Curl_easy *data, int select_bits)
@@ -865,8 +401,7 @@ static int select_bits_paused(struct Curl_easy *data, int select_bits) * Curl_readwrite() is the low-level function to be called when data is to
* be read and written to/from the connection.
*/
-CURLcode Curl_readwrite(struct Curl_easy *data,
- bool *done)
+CURLcode Curl_readwrite(struct Curl_easy *data)
{
struct connectdata *conn = data->conn;
struct SingleRequest *k = &data->req;
@@ -912,8 +447,8 @@ CURLcode Curl_readwrite(struct Curl_easy *data, #ifdef USE_HYPER
if(conn->datastream) {
- result = conn->datastream(data, conn, &didwhat, done, select_bits);
- if(result || *done)
+ result = conn->datastream(data, conn, &didwhat, select_bits);
+ if(result || data->req.done)
goto out;
}
else {
@@ -922,16 +457,17 @@ CURLcode Curl_readwrite(struct Curl_easy *data, the stream was rewound (in which case we have data in a
buffer) */
if((k->keepon & KEEP_RECV) && (select_bits & CURL_CSELECT_IN)) {
- result = readwrite_data(data, k, &didwhat, done);
- if(result || *done)
+ result = readwrite_data(data, k, &didwhat);
+ if(result || data->req.done)
goto out;
}
/* If we still have writing to do, we check if we have a writable socket. */
- if((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) {
+ if(((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) ||
+ (k->keepon & KEEP_SEND_TIMED)) {
/* write */
- result = readwrite_upload(data, conn, &didwhat);
+ result = readwrite_upload(data, &didwhat);
if(result)
goto out;
}
@@ -941,31 +477,6 @@ CURLcode Curl_readwrite(struct Curl_easy *data, now = Curl_now();
if(!didwhat) {
- /* no read no write, this is a timeout? */
- if(k->exp100 == EXP100_AWAITING_CONTINUE) {
- /* This should allow some time for the header to arrive, but only a
- very short time as otherwise it'll be too much wasted time too
- often. */
-
- /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
-
- Therefore, when a client sends this header field to an origin server
- (possibly via a proxy) from which it has never seen a 100 (Continue)
- status, the client SHOULD NOT wait for an indefinite period before
- sending the request body.
-
- */
-
- timediff_t ms = Curl_timediff(now, k->start100);
- if(ms >= data->set.expect_100_timeout) {
- /* we've waited long enough, continue anyway */
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- Curl_expire_done(data, EXPIRE_100_TIMEOUT);
- infof(data, "Done waiting for 100-continue");
- }
- }
-
result = Curl_conn_ev_data_idle(data);
if(result)
goto out;
@@ -1002,7 +513,6 @@ CURLcode Curl_readwrite(struct Curl_easy *data, * The transfer has been performed. Just make some general checks before
* returning.
*/
-
if(!(data->req.no_body) && (k->size != -1) &&
(k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
@@ -1024,8 +534,10 @@ CURLcode Curl_readwrite(struct Curl_easy *data, }
}
- /* Now update the "done" boolean we return */
- *done = (0 == (k->keepon&(KEEP_RECVBITS|KEEP_SENDBITS))) ? TRUE : FALSE;
+ /* If there is nothing more to send/recv, the request is done */
+ if(0 == (k->keepon&(KEEP_RECVBITS|KEEP_SENDBITS)))
+ data->req.done = TRUE;
+
out:
if(result)
DEBUGF(infof(data, "Curl_readwrite() -> %d", result));
@@ -1400,7 +912,7 @@ CURLcode Curl_follow(struct Curl_easy *data, data->state.url = newurl;
data->state.url_alloc = TRUE;
-
+ Curl_req_soft_reset(&data->req, data);
infof(data, "Issue another request to this URL: '%s'", data->state.url);
/*
@@ -1446,6 +958,7 @@ CURLcode Curl_follow(struct Curl_easy *data, && !(data->set.keep_post & CURL_REDIR_POST_301)) {
infof(data, "Switch from POST to GET");
data->state.httpreq = HTTPREQ_GET;
+ Curl_creader_set_rewind(data, FALSE);
}
break;
case 302: /* Found */
@@ -1471,6 +984,7 @@ CURLcode Curl_follow(struct Curl_easy *data, && !(data->set.keep_post & CURL_REDIR_POST_302)) {
infof(data, "Switch from POST to GET");
data->state.httpreq = HTTPREQ_GET;
+ Curl_creader_set_rewind(data, FALSE);
}
break;
@@ -1573,23 +1087,16 @@ CURLcode Curl_retry_request(struct Curl_easy *data, char **url) prevent i.e HTTP transfers to return
error just because nothing has been
transferred! */
-
-
- if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
- data->req.writebytecount) {
- data->state.rewindbeforesend = TRUE;
- infof(data, "state.rewindbeforesend = TRUE");
- }
+ Curl_creader_set_rewind(data, TRUE);
}
return CURLE_OK;
}
/*
- * Curl_setup_transfer() is called to setup some basic properties for the
+ * Curl_xfer_setup() is called to setup some basic properties for the
* upcoming transfer.
*/
-void
-Curl_setup_transfer(
+void Curl_xfer_setup(
struct Curl_easy *data, /* transfer */
int sockindex, /* socket index to read from or -1 */
curl_off_t size, /* -1 if unknown at this point */
@@ -1600,22 +1107,19 @@ Curl_setup_transfer( {
struct SingleRequest *k = &data->req;
struct connectdata *conn = data->conn;
- struct HTTP *http = data->req.p.http;
- bool httpsending;
+ bool want_send = Curl_req_want_send(data);
DEBUGASSERT(conn != NULL);
DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
+ DEBUGASSERT((writesockindex <= 1) && (writesockindex >= -1));
- httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
- (http->sending == HTTPSEND_REQUEST));
-
- if(conn->bits.multiplex || conn->httpversion >= 20 || httpsending) {
+ if(conn->bits.multiplex || conn->httpversion >= 20 || want_send) {
/* when multiplexing, the read/write sockets need to be the same! */
conn->sockfd = sockindex == -1 ?
((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
conn->sock[sockindex];
conn->writesockfd = conn->sockfd;
- if(httpsending)
+ if(want_send)
/* special and very HTTP-specific */
writesockindex = FIRSTSOCKET;
}
@@ -1644,51 +1148,22 @@ Curl_setup_transfer( if(sockindex != -1)
k->keepon |= KEEP_RECV;
- if(writesockindex != -1) {
- /* HTTP 1.1 magic:
-
- Even if we require a 100-return code before uploading data, we might
- need to write data before that since the REQUEST may not have been
- finished sent off just yet.
-
- Thus, we must check if the request has been sent before we set the
- state info where we wait for the 100-return code
- */
- if((data->state.expect100header) &&
- (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
- (http->sending == HTTPSEND_BODY)) {
- /* wait with write until we either got 100-continue or a timeout */
- k->exp100 = EXP100_AWAITING_CONTINUE;
- k->start100 = Curl_now();
-
- /* Set a timeout for the multi interface. Add the inaccuracy margin so
- that we don't fire slightly too early and get denied to run. */
- Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
- }
- else {
- if(data->state.expect100header)
- /* when we've sent off the rest of the headers, we must await a
- 100-continue but first finish sending the request */
- k->exp100 = EXP100_SENDING_REQUEST;
-
- /* enable the write bit when we're not waiting for continue */
- k->keepon |= KEEP_SEND;
- }
- } /* if(writesockindex != -1) */
+ if(writesockindex != -1)
+ k->keepon |= KEEP_SEND;
} /* if(k->getheader || !data->req.no_body) */
}
CURLcode Curl_xfer_write_resp(struct Curl_easy *data,
char *buf, size_t blen,
- bool is_eos, bool *done)
+ bool is_eos)
{
CURLcode result = CURLE_OK;
if(data->conn->handler->write_resp) {
/* protocol handlers offering this function take full responsibility
* for writing all received download data to the client. */
- result = data->conn->handler->write_resp(data, buf, blen, is_eos, done);
+ result = data->conn->handler->write_resp(data, buf, blen, is_eos);
}
else {
/* No special handling by protocol handler, write all received data
@@ -1716,3 +1191,63 @@ CURLcode Curl_xfer_write_resp(struct Curl_easy *data, }
return result;
}
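
Protocol handlers that provide ->write_resp now take over delivery of received bytes without the extra done flag; everything else falls through to the client writers below. A hedged sketch of what such a hook can look like — the handler name is hypothetical and the Curl_client_write() prototype is assumed from its long-standing use in sendf.h:

    /* Hypothetical handler hook: hand everything to the client writers as
       body data; a real handler would parse protocol framing here. */
    static CURLcode myproto_write_resp(struct Curl_easy *data,
                                       const char *buf, size_t blen,
                                       bool is_eos)
    {
      (void)is_eos; /* a real handler may flush state when the stream ends */
      return Curl_client_write(data, CLIENTWRITE_BODY, (char *)buf, blen);
    }
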
+
+CURLcode Curl_xfer_write_done(struct Curl_easy *data, bool premature)
+{
+ (void)premature;
+ return Curl_cw_out_done(data);
+}
+
+CURLcode Curl_xfer_send(struct Curl_easy *data,
+ const void *buf, size_t blen,
+ size_t *pnwritten)
+{
+ CURLcode result;
+ int sockindex;
+
+ if(!data || !data->conn)
+ return CURLE_FAILED_INIT;
+ /* FIXME: would like to enable this, but some protocols (MQTT) do not
+ * setup the transfer correctly, it seems
+ if(data->conn->writesockfd == CURL_SOCKET_BAD) {
+ failf(data, "transfer not setup for sending");
+ DEBUGASSERT(0);
+ return CURLE_SEND_ERROR;
+ } */
+ sockindex = ((data->conn->writesockfd != CURL_SOCKET_BAD) &&
+ (data->conn->writesockfd == data->conn->sock[SECONDARYSOCKET]));
+ result = Curl_conn_send(data, sockindex, buf, blen, pnwritten);
+ if(result == CURLE_AGAIN) {
+ result = CURLE_OK;
+ *pnwritten = 0;
+ }
+ return result;
+}
+
+CURLcode Curl_xfer_recv(struct Curl_easy *data,
+ char *buf, size_t blen,
+ ssize_t *pnrcvd)
+{
+ int sockindex;
+
+ if(!data || !data->conn)
+ return CURLE_FAILED_INIT;
+ /* FIXME: would like to enable this, but some protocols (MQTT) do not
+ * setup the transfer correctly, it seems
+ if(data->conn->sockfd == CURL_SOCKET_BAD) {
+ failf(data, "transfer not setup for receiving");
+ DEBUGASSERT(0);
+ return CURLE_RECV_ERROR;
+ } */
+ sockindex = ((data->conn->sockfd != CURL_SOCKET_BAD) &&
+ (data->conn->sockfd == data->conn->sock[SECONDARYSOCKET]));
+ if(data->set.buffer_size > 0 && (size_t)data->set.buffer_size < blen)
+ blen = (size_t)data->set.buffer_size;
+ return Curl_conn_recv(data, sockindex, buf, blen, pnrcvd);
+}
+
+CURLcode Curl_xfer_send_close(struct Curl_easy *data)
+{
+ Curl_conn_ev_data_done_send(data);
+ return CURLE_OK;
+}
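
Note the deliberate asymmetry: Curl_xfer_send() maps a would-block CURLE_AGAIN from the connection filter to CURLE_OK with *pnwritten == 0, so callers only need to track how much was accepted. A sketch of a send loop honouring that contract (the wrapper and its offset bookkeeping are illustrative):

    static CURLcode send_all(struct Curl_easy *data,
                             const char *buf, size_t blen, size_t *poffset)
    {
      /* push the remainder; *poffset tracks what was already accepted */
      while(*poffset < blen) {
        size_t nwritten = 0;
        CURLcode result = Curl_xfer_send(data, buf + *poffset,
                                         blen - *poffset, &nwritten);
        if(result)
          return result;
        if(!nwritten)
          return CURLE_OK;  /* would block: keep the offset, retry later */
        *poffset += nwritten;
      }
      return CURLE_OK;
    }
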
diff --git a/libs/libcurl/src/transfer.h b/libs/libcurl/src/transfer.h
index b269f7a44f..eaae63519c 100644
--- a/libs/libcurl/src/transfer.h
+++ b/libs/libcurl/src/transfer.h
@@ -45,17 +45,11 @@ typedef enum { CURLcode Curl_follow(struct Curl_easy *data, char *newurl,
followtype type);
-CURLcode Curl_readwrite(struct Curl_easy *data, bool *done);
+CURLcode Curl_readwrite(struct Curl_easy *data);
int Curl_single_getsock(struct Curl_easy *data,
struct connectdata *conn, curl_socket_t *socks);
-CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
- size_t *nreadp);
CURLcode Curl_retry_request(struct Curl_easy *data, char **url);
bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc);
-CURLcode Curl_get_upload_buffer(struct Curl_easy *data);
-
-CURLcode Curl_done_sending(struct Curl_easy *data,
- struct SingleRequest *k);
/**
* Write the transfer raw response bytes, as received from the connection.
@@ -72,11 +66,10 @@ CURLcode Curl_done_sending(struct Curl_easy *data, */
CURLcode Curl_xfer_write_resp(struct Curl_easy *data,
char *buf, size_t blen,
- bool is_eos, bool *done);
+ bool is_eos);
/* This sets up a forthcoming transfer */
-void
-Curl_setup_transfer (struct Curl_easy *data,
+void Curl_xfer_setup(struct Curl_easy *data,
int sockindex, /* socket index to read from or -1 */
curl_off_t size, /* -1 if unknown at this point */
bool getheader, /* TRUE if header parsing is wanted */
@@ -85,4 +78,30 @@ Curl_setup_transfer (struct Curl_easy *data, disables */
);
+/**
+ * Multi has set transfer to DONE. Last chance to trigger
+ * missing response things like writing an EOS to the client.
+ */
+CURLcode Curl_xfer_write_done(struct Curl_easy *data, bool premature);
+
+/**
+ * Send data on the socket/connection filter designated
+ * for transfer's outgoing data.
+ * Will return CURLE_OK on blocking with (*pnwritten == 0).
+ */
+CURLcode Curl_xfer_send(struct Curl_easy *data,
+ const void *buf, size_t blen,
+ size_t *pnwritten);
+
+/**
+ * Receive data on the socket/connection filter designated
+ * for transfer's incoming data.
+ * Will return CURLE_AGAIN on blocking with (*pnrcvd == 0).
+ */
+CURLcode Curl_xfer_recv(struct Curl_easy *data,
+ char *buf, size_t blen,
+ ssize_t *pnrcvd);
+
+CURLcode Curl_xfer_send_close(struct Curl_easy *data);
+
#endif /* HEADER_CURL_TRANSFER_H */
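
The receive side, by contrast, keeps CURLE_AGAIN visible to the caller and merely caps the read at the configured buffer size, as documented above. A sketch of a caller handling both outcomes (the helper is illustrative):

    static CURLcode recv_step(struct Curl_easy *data, char *buf, size_t blen,
                              bool *got_data)
    {
      ssize_t nrcvd = 0;
      CURLcode result = Curl_xfer_recv(data, buf, blen, &nrcvd);

      *got_data = FALSE;
      if(result == CURLE_AGAIN)
        return CURLE_OK;        /* nothing there yet, wait for the socket */
      if(result)
        return result;
      *got_data = (nrcvd > 0);  /* 0 bytes without an error typically means
                                   the peer closed the connection */
      return CURLE_OK;
    }
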
diff --git a/libs/libcurl/src/url.c b/libs/libcurl/src/url.c
index eab74ab2c0..444129578c 100644
--- a/libs/libcurl/src/url.c
+++ b/libs/libcurl/src/url.c
@@ -261,7 +261,7 @@ CURLcode Curl_close(struct Curl_easy **datap) free(data->state.range);
/* freed here just in case DONE wasn't called */
- Curl_free_request_state(data);
+ Curl_req_free(&data->req, data);
/* Close down all open SSL info and sessions */
Curl_ssl_close_all(data);
@@ -269,10 +269,6 @@ CURLcode Curl_close(struct Curl_easy **datap) Curl_safefree(data->state.scratch);
Curl_ssl_free_certinfo(data);
- /* Cleanup possible redirect junk */
- free(data->req.newurl);
- data->req.newurl = NULL;
-
if(data->state.referer_alloc) {
Curl_safefree(data->state.referer);
data->state.referer_alloc = FALSE;
@@ -280,9 +276,7 @@ CURLcode Curl_close(struct Curl_easy **datap) data->state.referer = NULL;
up_free(data);
- Curl_safefree(data->state.buffer);
Curl_dyn_free(&data->state.headerb);
- Curl_safefree(data->state.ulbuf);
Curl_flush_cookies(data, TRUE);
Curl_altsvc_save(data, data->asi, data->set.str[STRING_ALTSVC]);
Curl_altsvc_cleanup(&data->asi);
@@ -326,16 +320,7 @@ CURLcode Curl_close(struct Curl_easy **datap) Curl_safefree(data->state.aptr.proxyuser);
Curl_safefree(data->state.aptr.proxypasswd);
-#ifndef CURL_DISABLE_DOH
- if(data->req.doh) {
- Curl_dyn_free(&data->req.doh->probe[0].serverdoh);
- Curl_dyn_free(&data->req.doh->probe[1].serverdoh);
- curl_slist_free_all(data->req.doh->headers);
- Curl_safefree(data->req.doh);
- }
-#endif
-
-#ifndef CURL_DISABLE_HTTP
+#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_FORM_API)
Curl_mime_cleanpart(data->state.formp);
Curl_safefree(data->state.formp);
#endif
@@ -368,7 +353,6 @@ CURLcode Curl_init_userdefined(struct Curl_easy *data) set->fread_func_set = (curl_read_callback)fread;
set->is_fread_set = 0;
- set->seek_func = ZERO_NULL;
set->seek_client = ZERO_NULL;
set->filesize = -1; /* we don't know the size */
@@ -520,9 +504,17 @@ CURLcode Curl_open(struct Curl_easy **curl) data->magic = CURLEASY_MAGIC_NUMBER;
+ result = Curl_req_init(&data->req);
+ if(result) {
+ DEBUGF(fprintf(stderr, "Error: request init failed\n"));
+ free(data);
+ return result;
+ }
+
result = Curl_resolver_init(data, &data->state.async.resolver);
if(result) {
DEBUGF(fprintf(stderr, "Error: resolver_init failed\n"));
+ Curl_req_free(&data->req, data);
free(data);
return result;
}
@@ -546,6 +538,7 @@ CURLcode Curl_open(struct Curl_easy **curl) Curl_resolver_cleanup(data->state.async.resolver);
Curl_dyn_free(&data->state.headerb);
Curl_freeset(data);
+ Curl_req_free(&data->req, data);
free(data);
data = NULL;
}
@@ -1009,9 +1002,9 @@ ConnectionExists(struct Curl_easy *data, if(!canmultiplex) {
if(Curl_resolver_asynch() &&
- /* primary_ip[0] is NUL only if the resolving of the name hasn't
+ /* remote_ip[0] is NUL only if the resolving of the name hasn't
completed yet and until then we don't reuse this connection */
- !check->primary_ip[0])
+ !check->primary.remote_ip[0])
continue;
}
@@ -1334,11 +1327,15 @@ ConnectionExists(struct Curl_easy *data, */
#ifndef CURL_DISABLE_VERBOSE_STRINGS
void Curl_verboseconnect(struct Curl_easy *data,
- struct connectdata *conn)
+ struct connectdata *conn, int sockindex)
{
- if(data->set.verbose)
+ if(data->set.verbose && sockindex == SECONDARYSOCKET)
+ infof(data, "Connected 2nd connection to %s port %u",
+ conn->secondary.remote_ip, conn->secondary.remote_port);
+ else
infof(data, "Connected to %s (%s) port %u",
- CURL_CONN_HOST_DISPNAME(conn), conn->primary_ip, conn->port);
+ CURL_CONN_HOST_DISPNAME(conn), conn->primary.remote_ip,
+ conn->primary.remote_port);
}
#endif
@@ -1358,7 +1355,7 @@ static struct connectdata *allocate_conn(struct Curl_easy *data) conn->sockfd = CURL_SOCKET_BAD;
conn->writesockfd = CURL_SOCKET_BAD;
conn->connection_id = -1; /* no ID */
- conn->port = -1; /* unknown at this point */
+ conn->primary.remote_port = -1; /* unknown at this point */
conn->remote_port = -1; /* unknown at this point */
/* Default protocol-independent behavior doesn't support persistent
@@ -1971,7 +1968,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data, }
else {
unsigned long port = strtoul(data->state.up.port, NULL, 10);
- conn->port = conn->remote_port =
+ conn->primary.remote_port = conn->remote_port =
(data->set.use_port && data->state.allow_port) ?
data->set.use_port : curlx_ultous(port);
}
@@ -2047,32 +2044,14 @@ static CURLcode setup_connection_internals(struct Curl_easy *data, p = conn->handler; /* May have changed. */
}
- if(conn->port < 0)
+ if(conn->primary.remote_port < 0)
/* we check for -1 here since if proxy was detected already, this
was very likely already set to the proxy port */
- conn->port = p->defport;
+ conn->primary.remote_port = p->defport;
return CURLE_OK;
}
-/*
- * Curl_free_request_state() should free temp data that was allocated in the
- * Curl_easy for this single request.
- */
-
-void Curl_free_request_state(struct Curl_easy *data)
-{
- Curl_safefree(data->req.p.http);
- Curl_safefree(data->req.newurl);
-#ifndef CURL_DISABLE_DOH
- if(data->req.doh) {
- Curl_close(&data->req.doh->probe[0].easy);
- Curl_close(&data->req.doh->probe[1].easy);
- }
-#endif
- Curl_client_cleanup(data);
-}
-
#ifndef CURL_DISABLE_PROXY
@@ -2314,8 +2293,9 @@ static CURLcode parse_proxy(struct Curl_easy *data, }
if(port >= 0) {
proxyinfo->port = port;
- if(conn->port < 0 || sockstype || !conn->socks_proxy.host.rawalloc)
- conn->port = port;
+ if(conn->primary.remote_port < 0 || sockstype ||
+ !conn->socks_proxy.host.rawalloc)
+ conn->primary.remote_port = port;
}
/* now, clone the proxy host name */
@@ -3213,8 +3193,8 @@ static CURLcode resolve_proxy(struct Curl_easy *data, if(!conn->hostname_resolve)
return CURLE_OUT_OF_MEMORY;
- rc = Curl_resolv_timeout(data, conn->hostname_resolve, (int)conn->port,
- &hostaddr, timeout_ms);
+ rc = Curl_resolv_timeout(data, conn->hostname_resolve,
+ conn->primary.remote_port, &hostaddr, timeout_ms);
conn->dns_entry = hostaddr;
if(rc == CURLRESOLV_PENDING)
*async = TRUE;
@@ -3244,7 +3224,7 @@ static CURLcode resolve_host(struct Curl_easy *data, /* If not connecting via a proxy, extract the port from the URL, if it is
* there, thus overriding any defaults that might have been set above. */
- conn->port = conn->bits.conn_to_port ? conn->conn_to_port :
+ conn->primary.remote_port = conn->bits.conn_to_port ? conn->conn_to_port :
conn->remote_port;
/* Resolve target host right on */
@@ -3252,8 +3232,8 @@ static CURLcode resolve_host(struct Curl_easy *data, if(!conn->hostname_resolve)
return CURLE_OUT_OF_MEMORY;
- rc = Curl_resolv_timeout(data, conn->hostname_resolve, (int)conn->port,
- &hostaddr, timeout_ms);
+ rc = Curl_resolv_timeout(data, conn->hostname_resolve,
+ conn->primary.remote_port, &hostaddr, timeout_ms);
conn->dns_entry = hostaddr;
if(rc == CURLRESOLV_PENDING)
*async = TRUE;
@@ -3590,7 +3570,7 @@ static CURLcode create_conn(struct Curl_easy *data, /* this is supposed to be the connect function so we better at least check
that the file is present here! */
DEBUGASSERT(conn->handler->connect_it);
- Curl_persistconninfo(data, conn, NULL, -1);
+ Curl_persistconninfo(data, conn, NULL);
result = conn->handler->connect_it(data, &done);
/* Setup a "faked" transfer that'll do nothing */
@@ -3610,7 +3590,7 @@ static CURLcode create_conn(struct Curl_easy *data, (void)conn->handler->done(data, result, FALSE);
goto out;
}
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
}
/* since we skip do_init() */
@@ -3621,10 +3601,10 @@ static CURLcode create_conn(struct Curl_easy *data, #endif
/* Setup filter for network connections */
- conn->recv[FIRSTSOCKET] = Curl_conn_recv;
- conn->send[FIRSTSOCKET] = Curl_conn_send;
- conn->recv[SECONDARYSOCKET] = Curl_conn_recv;
- conn->send[SECONDARYSOCKET] = Curl_conn_send;
+ conn->recv[FIRSTSOCKET] = Curl_cf_recv;
+ conn->send[FIRSTSOCKET] = Curl_cf_send;
+ conn->recv[SECONDARYSOCKET] = Curl_cf_recv;
+ conn->send[SECONDARYSOCKET] = Curl_cf_send;
conn->bits.tcp_fastopen = data->set.tcp_fastopen;
/* Complete the easy's SSL configuration for connection cache matching */
@@ -3789,13 +3769,6 @@ static CURLcode create_conn(struct Curl_easy *data, /* Continue connectdata initialization here. */
- /*
- * Inherit the proper values from the urldata struct AFTER we have arranged
- * the persistent connection stuff
- */
- conn->seek_func = data->set.seek_func;
- conn->seek_client = data->set.seek_client;
-
/*************************************************************
* Resolve the address of the server or proxy
*************************************************************/
@@ -3849,6 +3822,9 @@ CURLcode Curl_setup_conn(struct Curl_easy *data, if(!conn->bits.reuse)
result = Curl_conn_setup(data, conn, FIRSTSOCKET, conn->dns_entry,
CURL_CF_SSL_DEFAULT);
+ if(!result)
+ result = Curl_headers_init(data);
+
/* not sure we need this flag to be passed around any more */
*protocol_done = FALSE;
return result;
@@ -3863,11 +3839,8 @@ CURLcode Curl_connect(struct Curl_easy *data, *asyncp = FALSE; /* assume synchronous resolves by default */
- /* init the single-transfer specific data */
- Curl_free_request_state(data);
- memset(&data->req, 0, sizeof(struct SingleRequest));
- data->req.size = data->req.maxdownload = -1;
- data->req.no_body = data->set.opt_no_body;
+ /* Set the request to virgin state based on transfer settings */
+ Curl_req_hard_reset(&data->req, data);
/* call the stuff that needs to be called */
result = create_conn(data, &conn, asyncp);
@@ -3910,8 +3883,6 @@ CURLcode Curl_connect(struct Curl_easy *data, CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn)
{
- struct SingleRequest *k = &data->req;
-
/* if this is a pushed stream, we need this: */
CURLcode result = Curl_preconnect(data);
if(result)
@@ -3927,18 +3898,15 @@ CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn) }
data->state.done = FALSE; /* *_done() is not called yet */
- data->state.expect100header = FALSE;
if(data->req.no_body)
/* in HTTP lingo, no body means using the HEAD request... */
data->state.httpreq = HTTPREQ_HEAD;
- k->start = Curl_now(); /* start time */
- k->header = TRUE; /* assume header */
- k->bytecount = 0;
- k->ignorebody = FALSE;
+ result = Curl_req_start(&data->req, data);
+ if(result)
+ return result;
- Curl_client_cleanup(data);
Curl_speedinit(data);
Curl_pgrsSetUploadCounter(data, 0);
Curl_pgrsSetDownloadCounter(data, 0);
diff --git a/libs/libcurl/src/url.h b/libs/libcurl/src/url.h
index 65b04db079..b77f4d24f2 100644
--- a/libs/libcurl/src/url.h
+++ b/libs/libcurl/src/url.h
@@ -41,7 +41,6 @@ void Curl_disconnect(struct Curl_easy *data, struct connectdata *, bool dead_connection);
CURLcode Curl_setup_conn(struct Curl_easy *data,
bool *protocol_done);
-void Curl_free_request_state(struct Curl_easy *data);
CURLcode Curl_parse_login_details(const char *login, const size_t len,
char **userptr, char **passwdptr,
char **optionsptr);
@@ -59,9 +58,10 @@ const struct Curl_handler *Curl_getn_scheme_handler(const char *scheme, specified */
#ifdef CURL_DISABLE_VERBOSE_STRINGS
-#define Curl_verboseconnect(x,y) Curl_nop_stmt
+#define Curl_verboseconnect(x,y,z) Curl_nop_stmt
#else
-void Curl_verboseconnect(struct Curl_easy *data, struct connectdata *conn);
+void Curl_verboseconnect(struct Curl_easy *data, struct connectdata *conn,
+ int sockindex);
#endif
#if defined(USE_HTTP2) || defined(USE_HTTP3)
diff --git a/libs/libcurl/src/urlapi.c b/libs/libcurl/src/urlapi.c
index 70b63983f7..abce1abacf 100644
--- a/libs/libcurl/src/urlapi.c
+++ b/libs/libcurl/src/urlapi.c
@@ -531,7 +531,7 @@ UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host, portptr = strchr(hostname, ':');
if(portptr) {
- char *rest;
+ char *rest = NULL;
long port;
size_t keep = portptr - hostname;
@@ -681,7 +681,7 @@ static int ipv4_normalize(struct dynbuf *host) return HOST_IPV6;
while(!done) {
- char *endp;
+ char *endp = NULL;
unsigned long l;
if(!ISDIGIT(*c))
/* most importantly this doesn't allow a leading plus or minus */
diff --git a/libs/libcurl/src/urldata.h b/libs/libcurl/src/urldata.h
index b49e87bdd5..d8c8d07d48 100644
--- a/libs/libcurl/src/urldata.h
+++ b/libs/libcurl/src/urldata.h
@@ -53,6 +53,8 @@ #define PORT_GOPHER 70
#define PORT_MQTT 1883
+struct curl_trc_featt;
+
#ifdef USE_WEBSOCKETS
/* CURLPROTO_GOPHERS (29) is the highest publicly used protocol bit number,
* the rest are internal information. If we use higher bits we only do this on
@@ -141,6 +143,7 @@ typedef unsigned int curl_prot_t; #include "splay.h"
#include "dynbuf.h"
#include "dynhds.h"
+#include "request.h"
/* return the count of bytes sent, or -1 on error */
typedef ssize_t (Curl_send)(struct Curl_easy *data, /* transfer */
@@ -160,7 +163,6 @@ typedef ssize_t (Curl_recv)(struct Curl_easy *data, /* transfer */ typedef CURLcode (*Curl_datastream)(struct Curl_easy *data,
struct connectdata *conn,
int *didwhat,
- bool *done,
int select_res);
#endif
@@ -266,11 +268,17 @@ typedef enum { /* SSL backend-specific data; declared differently by each SSL backend */
struct ssl_backend_data;
+typedef enum {
+ CURL_SSL_PEER_DNS,
+ CURL_SSL_PEER_IPV4,
+ CURL_SSL_PEER_IPV6
+} ssl_peer_type;
+
struct ssl_peer {
char *hostname; /* hostname for verification */
char *dispname; /* display version of hostname */
char *sni; /* SNI version of hostname or NULL if not usable */
- BIT(is_ip_address); /* if hostname is an IPv4|6 address */
+ ssl_peer_type type; /* type of the peer information */
};
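
The peer struct now distinguishes DNS names from IPv4 and IPv6 literals instead of a single is_ip_address bit, which the TLS backends use to decide on SNI and name matching. A hedged sketch of how such a classification can be derived with POSIX inet_pton(); this is illustrative, not the code libcurl uses:

    #include <arpa/inet.h>

    static ssl_peer_type classify_peer_name(const char *hostname)
    {
      unsigned char buf[16];
      if(inet_pton(AF_INET, hostname, buf) == 1)
        return CURL_SSL_PEER_IPV4;   /* numeric IPv4 literal */
      if(inet_pton(AF_INET6, hostname, buf) == 1)
        return CURL_SSL_PEER_IPV6;   /* numeric IPv6 literal */
      return CURL_SSL_PEER_DNS;      /* a DNS name, usable for SNI and
                                        subject-name matching */
    }
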
struct ssl_primary_config {
@@ -519,10 +527,6 @@ struct ConnectBits { the TCP layer connect */
BIT(retry); /* this connection is about to get closed and then
re-attempted at another connection. */
- BIT(authneg); /* TRUE when the auth phase has started, which means
- that we are creating a request with an auth header,
- but it is not the final request in the auth
- negotiation. */
#ifndef CURL_DISABLE_FTP
BIT(ftp_use_epsv); /* As set with CURLOPT_FTP_USE_EPSV, but if we find out
EPSV doesn't work we disable it for the forthcoming
@@ -575,6 +579,14 @@ struct hostname { #define KEEP_RECV_PAUSE (1<<4) /* reading is paused */
#define KEEP_SEND_PAUSE (1<<5) /* writing is paused */
+/* KEEP_SEND_TIMED is set when the transfer should attempt sending
+ * at timer (or other) events. A transfer waiting on a timer will
+ * remove KEEP_SEND to suppress POLLOUTs of the connection.
+ * Adding KEEP_SEND_TIMED will then attempt to send whenever the transfer
+ * enters the "readwrite" loop, e.g. when a timer fires.
+ * This is used in HTTP for 'Expect: 100-continue' waiting. */
+#define KEEP_SEND_TIMED (1<<6)
+
#define KEEP_RECVBITS (KEEP_RECV | KEEP_RECV_HOLD | KEEP_RECV_PAUSE)
#define KEEP_SENDBITS (KEEP_SEND | KEEP_SEND_HOLD | KEEP_SEND_PAUSE)
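
KEEP_SEND_TIMED replaces the dedicated EXP100 state machine removed further up: a transfer waiting on a timer drops KEEP_SEND so the connection is not polled for writability, yet stays eligible for send attempts whenever the readwrite loop runs, e.g. because a timer fired. A hedged sketch of the intended pattern (the wrapper function is illustrative; Curl_expire() and EXPIRE_100_TIMEOUT are the existing timer API):

    /* Park the transfer: no POLLOUT wakeups, but still try to send when a
       timer (or any other event) drives the transfer loop. */
    static void wait_for_100_continue(struct Curl_easy *data)
    {
      data->req.keepon &= ~KEEP_SEND;
      data->req.keepon |= KEEP_SEND_TIMED;
      Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
    }

    /* When the 100 response (or the timeout) arrives, the flags are flipped
       back: keepon |= KEEP_SEND; keepon &= ~KEEP_SEND_TIMED; */
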
@@ -612,22 +624,6 @@ struct easy_pollset { unsigned char actions[MAX_SOCKSPEREASYHANDLE];
};
-enum expect100 {
- EXP100_SEND_DATA, /* enough waiting, just send the body now */
- EXP100_AWAITING_CONTINUE, /* waiting for the 100 Continue header */
- EXP100_SENDING_REQUEST, /* still sending the request but will wait for
- the 100 header once done with the request */
- EXP100_FAILED /* used on 417 Expectation Failed */
-};
-
-enum upgrade101 {
- UPGR101_INIT, /* default state */
- UPGR101_WS, /* upgrade to WebSockets requested */
- UPGR101_H2, /* upgrade to HTTP/2 requested */
- UPGR101_RECEIVED, /* 101 response received */
- UPGR101_WORKING /* talking upgraded protocol */
-};
-
enum doh_slots {
/* Explicit values for first two symbols so as to match hard-coded
* constants in existing code
@@ -647,111 +643,6 @@ enum doh_slots { };
/*
- * Request specific data in the easy handle (Curl_easy). Previously,
- * these members were on the connectdata struct but since a conn struct may
- * now be shared between different Curl_easys, we store connection-specific
- * data here. This struct only keeps stuff that's interesting for *this*
- * request, as it will be cleared between multiple ones
- */
-struct SingleRequest {
- curl_off_t size; /* -1 if unknown at this point */
- curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch,
- -1 means unlimited */
- curl_off_t bytecount; /* total number of bytes read */
- curl_off_t writebytecount; /* number of bytes written */
-
- curl_off_t pendingheader; /* this many bytes left to send is actually
- header and not body */
- struct curltime start; /* transfer started at this time */
- unsigned int headerbytecount; /* received server headers (not CONNECT
- headers) */
- unsigned int allheadercount; /* all received headers (server + CONNECT) */
- unsigned int deductheadercount; /* this amount of bytes doesn't count when
- we check if anything has been transferred
- at the end of a connection. We use this
- counter to make only a 100 reply (without
- a following second response code) result
- in a CURLE_GOT_NOTHING error code */
- int headerline; /* counts header lines to better track the
- first one */
- curl_off_t offset; /* possible resume offset read from the
- Content-Range: header */
- int httpcode; /* error code from the 'HTTP/1.? XXX' or
- 'RTSP/1.? XXX' line */
- int keepon;
- struct curltime start100; /* time stamp to wait for the 100 code from */
- enum expect100 exp100; /* expect 100 continue state */
- enum upgrade101 upgr101; /* 101 upgrade state */
-
- /* Client Writer stack, handles trasnfer- and content-encodings, protocol
- * checks, pausing by client callbacks. */
- struct Curl_cwriter *writer_stack;
- time_t timeofdoc;
- long bodywrites;
- char *location; /* This points to an allocated version of the Location:
- header data */
- char *newurl; /* Set to the new URL to use when a redirect or a retry is
- wanted */
-
- /* 'upload_present' is used to keep a byte counter of how much data there is
- still left in the buffer, aimed for upload. */
- ssize_t upload_present;
-
- /* 'upload_fromhere' is used as a read-pointer when we uploaded parts of a
- buffer, so the next read should read from where this pointer points to,
- and the 'upload_present' contains the number of bytes available at this
- position */
- char *upload_fromhere;
-
- /* Allocated protocol-specific data. Each protocol handler makes sure this
- points to data it needs. */
- union {
- struct FILEPROTO *file;
- struct FTP *ftp;
- struct HTTP *http;
- struct IMAP *imap;
- struct ldapreqinfo *ldap;
- struct MQTT *mqtt;
- struct POP3 *pop3;
- struct RTSP *rtsp;
- struct smb_request *smb;
- struct SMTP *smtp;
- struct SSHPROTO *ssh;
- struct TELNET *telnet;
- } p;
-#ifndef CURL_DISABLE_DOH
- struct dohdata *doh; /* DoH specific data for this request */
-#endif
-#if defined(_WIN32) && defined(USE_WINSOCK)
- struct curltime last_sndbuf_update; /* last time readwrite_upload called
- win_update_buffer_size */
-#endif
- char fread_eof[2]; /* the body read callback (index 0) returned EOF or
- the trailer read callback (index 1) returned EOF */
-#ifndef CURL_DISABLE_COOKIES
- unsigned char setcookies;
-#endif
- BIT(header); /* incoming data has HTTP header */
- BIT(content_range); /* set TRUE if Content-Range: was found */
- BIT(download_done); /* set to TRUE when download is complete */
- BIT(eos_written); /* iff EOS has been written to client */
- BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
- upload and we're uploading the last chunk */
- BIT(ignorebody); /* we read a response-body but we ignore it! */
- BIT(http_bodyless); /* HTTP response status code is between 100 and 199,
- 204 or 304 */
- BIT(chunk); /* if set, this is a chunked transfer-encoding */
- BIT(ignore_cl); /* ignore content-length */
- BIT(upload_chunky); /* set TRUE if we are doing chunked transfer-encoding
- on upload */
- BIT(getheader); /* TRUE if header parsing is wanted */
- BIT(forbidchunk); /* used only to explicitly forbid chunk-upload for
- specific upload buffers. See readmoredata() in http.c
- for details. */
- BIT(no_body); /* the response has no body */
-};
-
-/*
* Specific protocol handler.
*/
@@ -819,7 +710,7 @@ struct Curl_handler { allow the protocol to do extra handling in writing response to
the client. */
CURLcode (*write_resp)(struct Curl_easy *data, const char *buf, size_t blen,
- bool is_eos, bool *done);
+ bool is_eos);
/* This function can perform various checks on the connection. See
CONNCHECK_* for more information about the checks that can be performed,
@@ -875,6 +766,13 @@ struct Curl_handler { #define CONNRESULT_NONE 0 /* No extra information. */
#define CONNRESULT_DEAD (1<<0) /* The connection is dead. */
+struct ip_quadruple {
+ char remote_ip[MAX_IPADR_LEN];
+ char local_ip[MAX_IPADR_LEN];
+ int remote_port;
+ int local_port;
+};
+
struct proxy_info {
struct hostname host;
int port;
@@ -930,14 +828,13 @@ struct connectdata { struct proxy_info socks_proxy;
struct proxy_info http_proxy;
#endif
- /* 'primary_ip' and 'primary_port' get filled with peer's numerical
- ip address and port number whenever an outgoing connection is
- *attempted* from the primary socket to a remote address. When more
- than one address is tried for a connection these will hold data
+ /* 'primary' and 'secondary' get filled with IP quadruple
+ (local/remote numerical ip address and port) whenever a connect is *attempted*.
+ When more than one address is tried for a connection these will hold data
for the last attempt. When the connection is actually established
these are updated with data which comes directly from the socket. */
-
- char primary_ip[MAX_IPADR_LEN];
+ struct ip_quadruple primary;
+ struct ip_quadruple secondary;
char *user; /* user name string, allocated */
char *passwd; /* password string, allocated */
char *options; /* options string, allocated */
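
An ip_quadruple captures both ends of a socket in printable form. A hedged sketch of filling one from a connected socket with plain POSIX calls, IPv4 only for brevity — libcurl has its own address helpers, so this is purely illustrative:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    static int fill_ip_quadruple(int sockfd, struct ip_quadruple *q)
    {
      struct sockaddr_storage ss;
      socklen_t sslen = sizeof(ss);
      struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

      if(getpeername(sockfd, (struct sockaddr *)&ss, &sslen))
        return -1;
      inet_ntop(AF_INET, &sin->sin_addr, q->remote_ip, sizeof(q->remote_ip));
      q->remote_port = ntohs(sin->sin_port);

      sslen = sizeof(ss);
      if(getsockname(sockfd, (struct sockaddr *)&ss, &sslen))
        return -1;
      inet_ntop(AF_INET, &sin->sin_addr, q->local_ip, sizeof(q->local_ip));
      q->local_port = ntohs(sin->sin_port);
      return 0;
    }
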
@@ -990,14 +887,17 @@ struct connectdata { #endif /* however, some of them are ftp specific. */
struct Curl_llist easyq; /* List of easy handles using this connection */
- curl_seek_callback seek_func; /* function that seeks the input */
- void *seek_client; /* pointer to pass to the seek() above */
/*************** Request - specific items ************/
#if defined(USE_WINDOWS_SSPI) && defined(SECPKG_ATTR_ENDPOINT_BINDINGS)
CtxtHandle *sslContext;
#endif
+#if defined(_WIN32) && defined(USE_WINSOCK)
+ struct curltime last_sndbuf_update; /* last time readwrite_upload called
+ win_update_buffer_size */
+#endif
+
#ifdef USE_GSASL
struct gsasldata gsasl;
#endif
@@ -1080,7 +980,6 @@ struct connectdata { int socks5_gssapi_enctype;
#endif
/* The field below gets set in connect.c:connecthost() */
- int port; /* which port to use locally - to connect to */
int remote_port; /* the remote port, not the proxy port! */
int conn_to_port; /* the remote port to connect to. valid only if
bits.conn_to_port is set */
@@ -1135,22 +1034,16 @@ struct PureInfo { curl_off_t retry_after; /* info from Retry-After: header */
unsigned int header_size; /* size of read header(s) in bytes */
- /* PureInfo members 'conn_primary_ip', 'conn_primary_port', 'conn_local_ip'
- and, 'conn_local_port' are copied over from the connectdata struct in
- order to allow curl_easy_getinfo() to return this information even when
- the session handle is no longer associated with a connection, and also
- allow curl_easy_reset() to clear this information from the session handle
- without disturbing information which is still alive, and that might be
- reused, in the connection cache. */
-
- char conn_primary_ip[MAX_IPADR_LEN];
- int conn_primary_port; /* this is the destination port to the connection,
- which might have been a proxy */
+ /* PureInfo primary ip_quadruple is copied over from the connectdata
+ struct in order to allow curl_easy_getinfo() to return this information
+ even when the session handle is no longer associated with a connection,
+ and also allow curl_easy_reset() to clear this information from the
+ session handle without disturbing information which is still alive, and
+ that might be reused, in the connection cache. */
+ struct ip_quadruple primary;
int conn_remote_port; /* this is the "remote port", which is the port
number of the used URL, independent of proxy or
not */
- char conn_local_ip[MAX_IPADR_LEN];
- int conn_local_port;
const char *conn_scheme;
unsigned int conn_protocol;
struct curl_certinfo certs; /* info about the certs. Asked for with
@@ -1158,6 +1051,7 @@ struct PureInfo { CURLproxycode pxcode;
BIT(timecond); /* set to TRUE if the time condition didn't match, which
thus made the document NOT get fetched */
+ BIT(used_proxy); /* the transfer used a proxy */
};
@@ -1263,18 +1157,6 @@ struct Curl_data_priority { #endif
};
-/*
- * This struct is for holding data that was attempted to get sent to the user's
- * callback but is held due to pausing. One instance per type (BOTH, HEADER,
- * BODY).
- */
-struct tempbuf {
- struct dynbuf b;
- int type; /* type of the 'tempwrite' buffer as a bitmask that is used with
- Curl_client_write() */
- BIT(paused_body); /* if PAUSE happened before/during BODY write */
-};
-
/* Timers */
typedef enum {
EXPIRE_100_TIMEOUT,
@@ -1337,8 +1219,6 @@ struct UrlState { struct dynbuf headerb; /* buffer to store headers in */
struct curl_slist *hstslist; /* list of HSTS files set by
curl_easy_setopt(HSTS) calls */
- char *buffer; /* download buffer */
- char *ulbuf; /* allocated upload buffer or NULL */
curl_off_t current_speed; /* the ProgressShow() function sets this,
bytes / second */
@@ -1353,8 +1233,6 @@ struct UrlState { int retrycount; /* number of retries on a new connection */
struct Curl_ssl_session *session; /* array of 'max_ssl_sessions' size */
long sessionage; /* number of the most recent session */
- struct tempbuf tempwrite[3]; /* BOTH, HEADER, BODY */
- unsigned int tempcount; /* number of entries in use in tempwrite, 0 - 3 */
int os_errno; /* filled in with errno whenever an error occurs */
char *scratch; /* huge buffer[set.buffer_size*2] for upload CRLF replacing */
long followlocation; /* redirect counter */
@@ -1387,8 +1265,6 @@ struct UrlState { #if !defined(_WIN32) && !defined(MSDOS) && !defined(__EMX__)
/* do FTP line-end conversions on most platforms */
#define CURL_DO_LINEEND_CONV
- /* for FTP downloads: track CRLF sequences that span blocks */
- BIT(prev_block_had_trailing_cr);
/* for FTP downloads: how many CRLFs did we converted to LFs? */
curl_off_t crlf_conversions;
#endif
@@ -1422,8 +1298,10 @@ struct UrlState { this should be dealt with in pretransfer */
#ifndef CURL_DISABLE_HTTP
curl_mimepart *mimepost;
+#ifndef CURL_DISABLE_FORM_API
curl_mimepart *formp; /* storage for old API form-posting, allocated on
demand */
+#endif
size_t trailers_bytes_sent;
struct dynbuf trailers_buf; /* a buffer containing the compiled trailing
headers */
@@ -1442,6 +1320,10 @@ struct UrlState { CURLcode hresult; /* used to pass return codes back from hyper callbacks */
#endif
+#ifndef CURL_DISABLE_VERBOSE_STRINGS
+ struct curl_trc_feat *feat; /* opt. trace feature transfer is part of */
+#endif
+
/* Dynamically allocated strings, MUST be freed before this struct is
killed. */
struct dynamically_allocated_data {
@@ -1490,7 +1372,6 @@ struct UrlState { BIT(authproblem); /* TRUE if there's some problem authenticating */
/* set after initial USER failure, to prevent an authentication loop */
BIT(wildcardmatch); /* enable wildcard matching */
- BIT(expect100header); /* TRUE if we added Expect: 100-continue */
BIT(disableexpect); /* TRUE if Expect: is disabled due to a previous
417 response */
BIT(use_range);
@@ -1509,9 +1390,6 @@ struct UrlState { BIT(url_alloc); /* URL string is malloc()'ed */
BIT(referer_alloc); /* referer string is malloc()ed */
BIT(wildcard_resolve); /* Set to true if any resolve change is a wildcard */
- BIT(rewindbeforesend);/* TRUE when the sending couldn't be stopped even
- though it will be discarded. We must call the data
- rewind callback before trying to send again. */
BIT(upload); /* upload request */
BIT(internal); /* internal: true if this easy handle was created for
internal use and the user does not have ownership of the
@@ -1720,7 +1598,9 @@ struct UserDefined { curl_off_t set_resume_from; /* continue [ftp] transfer from here */
struct curl_slist *headers; /* linked list of extra headers */
struct curl_httppost *httppost; /* linked list of old POST data */
+#if !defined(CURL_DISABLE_MIME) || !defined(CURL_DISABLE_FORM_API)
curl_mimepart mimepost; /* MIME/POST data. */
+#endif
#ifndef CURL_DISABLE_TELNET
struct curl_slist *telnet_options; /* linked list of telnet options */
#endif
@@ -1933,6 +1813,12 @@ struct UserDefined { #endif
};
+#ifndef CURL_DISABLE_MIME
+#define IS_MIME_POST(a) ((a)->set.mimepost.kind != MIMEKIND_NONE)
+#else
+#define IS_MIME_POST(a) FALSE
+#endif
+
struct Names {
struct Curl_hash *hostcache;
enum {
diff --git a/libs/libcurl/src/vauth/digest.c b/libs/libcurl/src/vauth/digest.c
index 166b66e0fb..8a0e7335bf 100644
--- a/libs/libcurl/src/vauth/digest.c
+++ b/libs/libcurl/src/vauth/digest.c
@@ -38,6 +38,7 @@ #include "curl_hmac.h"
#include "curl_md5.h"
#include "curl_sha256.h"
+#include "curl_sha512_256.h"
#include "vtls/vtls.h"
#include "warnless.h"
#include "strtok.h"
@@ -150,7 +151,7 @@ static void auth_digest_md5_to_ascii(unsigned char *source, /* 16 bytes */ msnprintf((char *) &dest[i * 2], 3, "%02x", source[i]);
}
-/* Convert sha256 chunk to RFC7616 -suitable ascii string */
+/* Convert sha256 or SHA-512/256 chunk to RFC7616 -suitable ascii string */
static void auth_digest_sha256_to_ascii(unsigned char *source, /* 32 bytes */
unsigned char *dest) /* 65 bytes */
{
@@ -601,10 +602,20 @@ CURLcode Curl_auth_decode_digest_http_message(const char *chlg, digest->algo = ALGO_SHA256;
else if(strcasecompare(content, "SHA-256-SESS"))
digest->algo = ALGO_SHA256SESS;
- else if(strcasecompare(content, "SHA-512-256"))
+ else if(strcasecompare(content, "SHA-512-256")) {
+#ifdef CURL_HAVE_SHA512_256
digest->algo = ALGO_SHA512_256;
- else if(strcasecompare(content, "SHA-512-256-SESS"))
+#else /* ! CURL_HAVE_SHA512_256 */
+ return CURLE_NOT_BUILT_IN;
+#endif /* ! CURL_HAVE_SHA512_256 */
+ }
+ else if(strcasecompare(content, "SHA-512-256-SESS")) {
+#ifdef CURL_HAVE_SHA512_256
digest->algo = ALGO_SHA512_256SESS;
+#else /* ! CURL_HAVE_SHA512_256 */
+ return CURLE_NOT_BUILT_IN;
+#endif /* ! CURL_HAVE_SHA512_256 */
+ }
else
return CURLE_BAD_CONTENT_ENCODING;
}
@@ -717,8 +728,10 @@ static CURLcode auth_create_digest_http_message( if(!hashthis)
return CURLE_OUT_OF_MEMORY;
- hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
+ result = hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
free(hashthis);
+ if(result)
+ return result;
convert_to_ascii(hashbuf, (unsigned char *)userh);
}
@@ -738,8 +751,10 @@ static CURLcode auth_create_digest_http_message( if(!hashthis)
return CURLE_OUT_OF_MEMORY;
- hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
+ result = hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
free(hashthis);
+ if(result)
+ return result;
convert_to_ascii(hashbuf, ha1);
if(digest->algo & SESSION_ALGO) {
@@ -748,8 +763,10 @@ static CURLcode auth_create_digest_http_message( if(!tmp)
return CURLE_OUT_OF_MEMORY;
- hash(hashbuf, (unsigned char *) tmp, strlen(tmp));
+ result = hash(hashbuf, (unsigned char *) tmp, strlen(tmp));
free(tmp);
+ if(result)
+ return result;
convert_to_ascii(hashbuf, ha1);
}
@@ -775,7 +792,11 @@ static CURLcode auth_create_digest_http_message( char hashed[65];
char *hashthis2;
- hash(hashbuf, (const unsigned char *)"", 0);
+ result = hash(hashbuf, (const unsigned char *)"", 0);
+ if(result) {
+ free(hashthis);
+ return result;
+ }
convert_to_ascii(hashbuf, (unsigned char *)hashed);
hashthis2 = aprintf("%s:%s", hashthis, hashed);
@@ -786,8 +807,10 @@ static CURLcode auth_create_digest_http_message( if(!hashthis)
return CURLE_OUT_OF_MEMORY;
- hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
+ result = hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
free(hashthis);
+ if(result)
+ return result;
convert_to_ascii(hashbuf, ha2);
if(digest->qop) {
@@ -801,8 +824,10 @@ static CURLcode auth_create_digest_http_message( if(!hashthis)
return CURLE_OUT_OF_MEMORY;
- hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
+ result = hash(hashbuf, (unsigned char *) hashthis, strlen(hashthis));
free(hashthis);
+ if(result)
+ return result;
convert_to_ascii(hashbuf, request_digest);
/* For test case 64 (snooped from a Mozilla 1.3a request)
@@ -957,12 +982,24 @@ CURLcode Curl_auth_create_digest_http_message(struct Curl_easy *data, outptr, outlen,
auth_digest_md5_to_ascii,
Curl_md5it);
- DEBUGASSERT(digest->algo <= ALGO_SHA512_256SESS);
- return auth_create_digest_http_message(data, userp, passwdp,
- request, uripath, digest,
- outptr, outlen,
- auth_digest_sha256_to_ascii,
- Curl_sha256it);
+
+ if(digest->algo <= ALGO_SHA256SESS)
+ return auth_create_digest_http_message(data, userp, passwdp,
+ request, uripath, digest,
+ outptr, outlen,
+ auth_digest_sha256_to_ascii,
+ Curl_sha256it);
+#ifdef CURL_HAVE_SHA512_256
+ if(digest->algo <= ALGO_SHA512_256SESS)
+ return auth_create_digest_http_message(data, userp, passwdp,
+ request, uripath, digest,
+ outptr, outlen,
+ auth_digest_sha256_to_ascii,
+ Curl_sha512_256it);
+#endif /* CURL_HAVE_SHA512_256 */
+
+ /* Should be unreachable */
+ return CURLE_BAD_CONTENT_ENCODING;
}
/*
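
The rewritten dispatch above now selects SHA-256 or SHA-512/256 explicitly and reports CURLE_NOT_BUILT_IN when the SHA-512/256 backend was not compiled in. A rough sketch of the same selection shape, with stub hash functions standing in for Curl_md5it(), Curl_sha256it() and Curl_sha512_256it():

  #include <stddef.h>
  #include <string.h>

  typedef int (*hash_fn)(unsigned char *out, const unsigned char *in, size_t len);

  enum algo { ALGO_MD5, ALGO_SHA256, ALGO_SHA512_256 };

  /* stubs; real implementations would compute the digest into `out` */
  static int my_md5(unsigned char *out, const unsigned char *in, size_t len)
  { (void)in; (void)len; memset(out, 0, 16); return 0; }
  static int my_sha256(unsigned char *out, const unsigned char *in, size_t len)
  { (void)in; (void)len; memset(out, 0, 32); return 0; }
  #ifdef HAVE_SHA512_256
  static int my_sha512_256(unsigned char *out, const unsigned char *in, size_t len)
  { (void)in; (void)len; memset(out, 0, 32); return 0; }
  #endif

  /* pick the hash for the negotiated algorithm; NULL means "not built in",
     which the caller maps to an error such as CURLE_NOT_BUILT_IN */
  static hash_fn pick_hash(enum algo a)
  {
    if(a == ALGO_MD5)
      return my_md5;
    if(a == ALGO_SHA256)
      return my_sha256;
  #ifdef HAVE_SHA512_256
    if(a == ALGO_SHA512_256)
      return my_sha512_256;
  #endif
    return NULL;
  }
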
diff --git a/libs/libcurl/src/version.c b/libs/libcurl/src/version.c
index b98c831177..d21e4a194b 100644
--- a/libs/libcurl/src/version.c
+++ b/libs/libcurl/src/version.c
@@ -212,9 +212,15 @@ char *curl_version(void)
 #ifdef USE_LIBPSL
{
+#if defined(PSL_VERSION_MAJOR) && (PSL_VERSION_MAJOR > 0 || \
+ PSL_VERSION_MINOR >= 11)
int num = psl_check_version_number(0);
msnprintf(psl_version, sizeof(psl_version), "libpsl/%d.%d.%d",
num >> 16, (num >> 8) & 0xff, num & 0xff);
+#else
+ msnprintf(psl_version, sizeof(psl_version), "libpsl/%s",
+ psl_get_version());
+#endif
src[i++] = psl_version;
}
#endif
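
psl_check_version_number() returns the library version packed into a single integer, which the new code unpacks as major/minor/patch bytes. A tiny standalone example of that decoding (the sample value is made up):

  #include <stdio.h>

  /* decode a version packed as 0xMMmmpp, the layout used by
     psl_check_version_number() in libpsl 0.11 and newer */
  static void print_packed_version(int num)
  {
    printf("libpsl/%d.%d.%d\n", num >> 16, (num >> 8) & 0xff, num & 0xff);
  }

  int main(void)
  {
    print_packed_version(0x000b02);   /* prints "libpsl/0.11.2" */
    return 0;
  }
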
diff --git a/libs/libcurl/src/vquic/curl_msh3.c b/libs/libcurl/src/vquic/curl_msh3.c
index 9835ccc5e1..a5b3e54c3b 100644
--- a/libs/libcurl/src/vquic/curl_msh3.c
+++ b/libs/libcurl/src/vquic/curl_msh3.c
@@ -722,23 +722,6 @@ static bool cf_msh3_data_pending(struct Curl_cfilter *cf,
 return pending;
}
-static void cf_msh3_active(struct Curl_cfilter *cf, struct Curl_easy *data)
-{
- struct cf_msh3_ctx *ctx = cf->ctx;
-
- /* use this socket from now on */
- cf->conn->sock[cf->sockindex] = ctx->sock[SP_LOCAL];
- /* the first socket info gets set at conn and data */
- if(cf->sockindex == FIRSTSOCKET) {
- cf->conn->remote_addr = &ctx->addr;
- #ifdef ENABLE_IPV6
- cf->conn->bits.ipv6 = (ctx->addr.family == AF_INET6)? TRUE : FALSE;
- #endif
- Curl_persistconninfo(data, cf->conn, ctx->l_ip, ctx->l_port);
- }
- ctx->active = TRUE;
-}
-
static CURLcode h3_data_pause(struct Curl_cfilter *cf,
struct Curl_easy *data,
bool pause)
@@ -785,10 +768,6 @@ static CURLcode cf_msh3_data_event(struct Curl_cfilter *cf, }
}
break;
- case CF_CTRL_CONN_INFO_UPDATE:
- CURL_TRC_CF(data, cf, "req: update info");
- cf_msh3_active(cf, data);
- break;
default:
break;
}
diff --git a/libs/libcurl/src/vquic/curl_ngtcp2.c b/libs/libcurl/src/vquic/curl_ngtcp2.c
index d004c9d27f..f4c66c71ac 100644
--- a/libs/libcurl/src/vquic/curl_ngtcp2.c
+++ b/libs/libcurl/src/vquic/curl_ngtcp2.c
@@ -58,6 +58,7 @@
 #include "http1.h"
#include "select.h"
#include "inet_pton.h"
+#include "transfer.h"
#include "vquic.h"
#include "vquic_int.h"
#include "vquic-tls.h"
@@ -145,11 +146,9 @@ struct cf_ngtcp2_ctx { struct h3_stream_ctx {
int64_t id; /* HTTP/3 protocol identifier */
struct bufq sendbuf; /* h3 request body */
- struct bufq recvbuf; /* h3 response body */
struct h1_req_parser h1; /* h1 request parsing */
size_t sendbuf_len_in_flight; /* sendbuf amount "in flight" */
size_t upload_blocked_len; /* the amount written last and EGAINed */
- size_t recv_buf_nonflow; /* buffered bytes, not counting for flow control */
uint64_t error3; /* HTTP/3 stream error code */
curl_off_t upload_left; /* number of request bytes left to upload */
int status_code; /* HTTP status code */
@@ -190,11 +189,6 @@ static CURLcode h3_data_setup(struct Curl_cfilter *cf, Curl_bufq_initp(&stream->sendbuf, &ctx->stream_bufcp,
H3_STREAM_SEND_CHUNKS, BUFQ_OPT_NONE);
stream->sendbuf_len_in_flight = 0;
- /* on recv, we need a flexible buffer limit since we also write
- * headers to it that are not counted against the nghttp3 flow limits. */
- Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
- H3_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
- stream->recv_buf_nonflow = 0;
Curl_h1_req_parse_init(&stream->h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
H3_STREAM_LCTX(data) = stream;
@@ -219,7 +213,6 @@ static void h3_data_done(struct Curl_cfilter *cf, struct Curl_easy *data) }
Curl_bufq_free(&stream->sendbuf);
- Curl_bufq_free(&stream->recvbuf);
Curl_h1_req_parse_free(&stream->h1);
free(stream);
H3_STREAM_LCTX(data) = NULL;
@@ -387,36 +380,6 @@ static int cb_handshake_completed(ngtcp2_conn *tconn, void *user_data) return 0;
}
-static void report_consumed_data(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- size_t consumed)
-{
- struct h3_stream_ctx *stream = H3_STREAM_CTX(data);
- struct cf_ngtcp2_ctx *ctx = cf->ctx;
-
- if(!stream)
- return;
- /* the HTTP/1.1 response headers are written to the buffer, but
- * consuming those does not count against flow control. */
- if(stream->recv_buf_nonflow) {
- if(consumed >= stream->recv_buf_nonflow) {
- consumed -= stream->recv_buf_nonflow;
- stream->recv_buf_nonflow = 0;
- }
- else {
- stream->recv_buf_nonflow -= consumed;
- consumed = 0;
- }
- }
- if(consumed > 0) {
- CURL_TRC_CF(data, cf, "[%" PRId64 "] ACK %zu bytes of DATA",
- stream->id, consumed);
- ngtcp2_conn_extend_max_stream_offset(ctx->qconn, stream->id,
- consumed);
- ngtcp2_conn_extend_max_offset(ctx->qconn, consumed);
- }
-}
-
static int cb_recv_stream_data(ngtcp2_conn *tconn, uint32_t flags,
int64_t stream_id, uint64_t offset,
const uint8_t *buf, size_t buflen,
@@ -796,46 +759,18 @@ static int cb_h3_stream_close(nghttp3_conn *conn, int64_t stream_id, return 0;
}
-/*
- * write_resp_raw() copies response data in raw format to the `data`'s
- * receive buffer. If not enough space is available, it appends to the
- * `data`'s overflow buffer.
- */
-static CURLcode write_resp_raw(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- const void *mem, size_t memlen,
- bool flow)
+static CURLcode write_resp_hds(struct Curl_easy *data,
+ const char *buf, size_t blen)
{
- struct h3_stream_ctx *stream = H3_STREAM_CTX(data);
- CURLcode result = CURLE_OK;
- ssize_t nwritten;
-
- (void)cf;
- if(!stream) {
- return CURLE_RECV_ERROR;
- }
- nwritten = Curl_bufq_write(&stream->recvbuf, mem, memlen, &result);
- if(nwritten < 0) {
- return result;
- }
-
- if(!flow)
- stream->recv_buf_nonflow += (size_t)nwritten;
-
- if((size_t)nwritten < memlen) {
- /* This MUST not happen. Our recbuf is dimensioned to hold the
- * full max_stream_window and then some for this very reason. */
- DEBUGASSERT(0);
- return CURLE_RECV_ERROR;
- }
- return result;
+ return Curl_xfer_write_resp(data, (char *)buf, blen, FALSE);
}
static int cb_h3_recv_data(nghttp3_conn *conn, int64_t stream3_id,
- const uint8_t *buf, size_t buflen,
+ const uint8_t *buf, size_t blen,
void *user_data, void *stream_user_data)
{
struct Curl_cfilter *cf = user_data;
+ struct cf_ngtcp2_ctx *ctx = cf->ctx;
struct Curl_easy *data = stream_user_data;
struct h3_stream_ctx *stream = H3_STREAM_CTX(data);
CURLcode result;
@@ -846,14 +781,19 @@ static int cb_h3_recv_data(nghttp3_conn *conn, int64_t stream3_id, if(!stream)
return NGHTTP3_ERR_CALLBACK_FAILURE;
- result = write_resp_raw(cf, data, buf, buflen, TRUE);
+ result = Curl_xfer_write_resp(data, (char *)buf, blen, FALSE);
if(result) {
CURL_TRC_CF(data, cf, "[%" PRId64 "] DATA len=%zu, ERROR receiving %d",
- stream->id, buflen, result);
+ stream->id, blen, result);
return NGHTTP3_ERR_CALLBACK_FAILURE;
}
- CURL_TRC_CF(data, cf, "[%" PRId64 "] DATA len=%zu", stream->id, buflen);
- h3_drain_stream(cf, data);
+ if(blen) {
+ CURL_TRC_CF(data, cf, "[%" PRId64 "] ACK %zu bytes of DATA",
+ stream->id, blen);
+ ngtcp2_conn_extend_max_stream_offset(ctx->qconn, stream->id, blen);
+ ngtcp2_conn_extend_max_offset(ctx->qconn, blen);
+ }
+ CURL_TRC_CF(data, cf, "[%" PRId64 "] DATA len=%zu", stream->id, blen);
return 0;
}
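
With the intermediate recvbuf gone, the callback above forwards the body bytes to the transfer layer and immediately returns the flow-control credit to the peer. A hedged sketch of that credit-return step, assuming an established ngtcp2_conn handle and the ngtcp2 calls used in the hunk:

  #include <stddef.h>
  #include <ngtcp2/ngtcp2.h>

  /* after `consumed` bytes of stream data have been handed to the
     application, give the peer that much credit back, both on the
     stream and on the whole connection */
  static void ack_consumed(ngtcp2_conn *qconn, int64_t stream_id,
                           size_t consumed)
  {
    if(consumed) {
      ngtcp2_conn_extend_max_stream_offset(qconn, stream_id, consumed);
      ngtcp2_conn_extend_max_offset(qconn, consumed);
    }
  }
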
@@ -888,7 +828,7 @@ static int cb_h3_end_headers(nghttp3_conn *conn, int64_t stream_id, if(!stream)
return 0;
/* add a CRLF only if we've received some headers */
- result = write_resp_raw(cf, data, "\r\n", 2, FALSE);
+ result = write_resp_hds(data, "\r\n", 2);
if(result) {
return -1;
}
@@ -934,7 +874,7 @@ static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id, ncopy = msnprintf(line, sizeof(line), "HTTP/3 %03d \r\n",
stream->status_code);
CURL_TRC_CF(data, cf, "[%" PRId64 "] status: %s", stream_id, line);
- result = write_resp_raw(cf, data, line, ncopy, FALSE);
+ result = write_resp_hds(data, line, ncopy);
if(result) {
return -1;
}
@@ -944,19 +884,19 @@ static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id, CURL_TRC_CF(data, cf, "[%" PRId64 "] header: %.*s: %.*s",
stream_id, (int)h3name.len, h3name.base,
(int)h3val.len, h3val.base);
- result = write_resp_raw(cf, data, h3name.base, h3name.len, FALSE);
+ result = write_resp_hds(data, (const char *)h3name.base, h3name.len);
if(result) {
return -1;
}
- result = write_resp_raw(cf, data, ": ", 2, FALSE);
+ result = write_resp_hds(data, ": ", 2);
if(result) {
return -1;
}
- result = write_resp_raw(cf, data, h3val.base, h3val.len, FALSE);
+ result = write_resp_hds(data, (const char *)h3val.base, h3val.len);
if(result) {
return -1;
}
- result = write_resp_raw(cf, data, "\r\n", 2, FALSE);
+ result = write_resp_hds(data, "\r\n", 2);
if(result) {
return -1;
}
@@ -1092,7 +1032,7 @@ static ssize_t recv_closed_stream(struct Curl_cfilter *cf, if(stream->reset) {
failf(data,
"HTTP/3 stream %" PRId64 " reset by server", stream->id);
- *err = stream->resp_hds_complete? CURLE_PARTIAL_FILE : CURLE_HTTP3;
+ *err = data->req.bytecount? CURLE_PARTIAL_FILE : CURLE_HTTP3;
goto out;
}
else if(!stream->resp_hds_complete) {
@@ -1112,7 +1052,7 @@ out: /* incoming data frames on the h3 stream */
static ssize_t cf_ngtcp2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
- char *buf, size_t len, CURLcode *err)
+ char *buf, size_t blen, CURLcode *err)
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
struct h3_stream_ctx *stream = H3_STREAM_CTX(data);
@@ -1121,6 +1061,7 @@ static ssize_t cf_ngtcp2_recv(struct Curl_cfilter *cf, struct Curl_easy *data, struct pkt_io_ctx pktx;
(void)ctx;
+ (void)buf;
CF_DATA_SAVE(save, cf, data);
DEBUGASSERT(cf->connected);
@@ -1136,46 +1077,18 @@ static ssize_t cf_ngtcp2_recv(struct Curl_cfilter *cf, struct Curl_easy *data, goto out;
}
- if(!Curl_bufq_is_empty(&stream->recvbuf)) {
- nread = Curl_bufq_read(&stream->recvbuf,
- (unsigned char *)buf, len, err);
- if(nread < 0) {
- CURL_TRC_CF(data, cf, "[%" PRId64 "] read recvbuf(len=%zu) "
- "-> %zd, %d", stream->id, len, nread, *err);
- goto out;
- }
- report_consumed_data(cf, data, nread);
- }
-
if(cf_progress_ingress(cf, data, &pktx)) {
*err = CURLE_RECV_ERROR;
nread = -1;
goto out;
}
- /* recvbuf had nothing before, maybe after progressing ingress? */
- if(nread < 0 && !Curl_bufq_is_empty(&stream->recvbuf)) {
- nread = Curl_bufq_read(&stream->recvbuf,
- (unsigned char *)buf, len, err);
- if(nread < 0) {
- CURL_TRC_CF(data, cf, "[%" PRId64 "] read recvbuf(len=%zu) "
- "-> %zd, %d", stream->id, len, nread, *err);
- goto out;
- }
- report_consumed_data(cf, data, nread);
- }
-
- if(nread > 0) {
- h3_drain_stream(cf, data);
- }
- else {
- if(stream->closed) {
- nread = recv_closed_stream(cf, data, stream, err);
- goto out;
- }
- *err = CURLE_AGAIN;
- nread = -1;
+ if(stream->closed) {
+ nread = recv_closed_stream(cf, data, stream, err);
+ goto out;
}
+ *err = CURLE_AGAIN;
+ nread = -1;
out:
if(cf_progress_egress(cf, data, &pktx)) {
@@ -1189,8 +1102,8 @@ out: nread = -1;
}
}
- CURL_TRC_CF(data, cf, "[%" PRId64 "] cf_recv(len=%zu) -> %zd, %d",
- stream? stream->id : -1, len, nread, *err);
+ CURL_TRC_CF(data, cf, "[%" PRId64 "] cf_recv(blen=%zu) -> %zd, %d",
+ stream? stream->id : -1, blen, nread, *err);
CF_DATA_RESTORE(cf, save);
return nread;
}
@@ -1593,7 +1506,6 @@ static CURLcode cf_progress_ingress(struct Curl_cfilter *cf, struct cf_ngtcp2_ctx *ctx = cf->ctx;
struct pkt_io_ctx local_pktx;
size_t pkts_chunk = 128, i;
- size_t pkts_max = 10 * pkts_chunk;
CURLcode result = CURLE_OK;
if(!pktx) {
@@ -1608,17 +1520,13 @@ static CURLcode cf_progress_ingress(struct Curl_cfilter *cf, if(result)
return result;
- for(i = 0; i < pkts_max; i += pkts_chunk) {
+ for(i = 0; i < 4; ++i) {
+ if(i)
+ pktx_update_time(pktx, cf);
pktx->pkt_count = 0;
result = vquic_recv_packets(cf, data, &ctx->q, pkts_chunk,
recv_pkt, pktx);
- if(result) /* error */
- break;
- if(pktx->pkt_count < pkts_chunk) /* got less than we could */
- break;
- /* give egress a chance before we receive more */
- result = cf_progress_egress(cf, data, pktx);
- if(result) /* error */
+ if(result || !pktx->pkt_count) /* error or got nothing */
break;
}
return result;
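
The ingress loop is now bounded: at most four rounds, refreshing the packet timestamp between rounds, and stopping as soon as a round errors or reads nothing. A generic sketch of that loop shape with hypothetical helpers (recv_some_packets() and refresh_timestamp() are not libcurl functions):

  #include <stddef.h>

  /* hypothetical receive helper: fills *pkt_count with the number of
     packets actually read and returns 0 on success */
  extern int recv_some_packets(void *quic, size_t max_pkts, size_t *pkt_count);
  extern void refresh_timestamp(void *quic);

  /* bounded ingress: at most four rounds of up to `chunk` packets each,
     stopping early on error or when a round produces nothing */
  static int drain_ingress(void *quic, size_t chunk)
  {
    size_t i, pkt_count;
    int rc = 0;
    for(i = 0; i < 4; ++i) {
      if(i)
        refresh_timestamp(quic);
      pkt_count = 0;
      rc = recv_some_packets(quic, chunk, &pkt_count);
      if(rc || !pkt_count)
        break;
    }
    return rc;
  }
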
@@ -1769,7 +1677,7 @@ static CURLcode cf_progress_egress(struct Curl_cfilter *cf, }
/* In UDP, there is a maximum theoretical packet paload length and
- * a minimum payload length that is "guarantueed" to work.
+ * a minimum payload length that is "guaranteed" to work.
* To detect if this minimum payload can be increased, ngtcp2 sends
* now and then a packet payload larger than the minimum. It that
* is ACKed by the peer, both parties know that it works and
@@ -1857,9 +1765,9 @@ out: static bool cf_ngtcp2_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
- const struct h3_stream_ctx *stream = H3_STREAM_CTX(data);
(void)cf;
- return stream && !Curl_bufq_is_empty(&stream->recvbuf);
+ (void)data;
+ return FALSE;
}
static CURLcode h3_data_pause(struct Curl_cfilter *cf,
@@ -2070,8 +1978,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf, if(result)
return result;
- Curl_cf_socket_peek(cf->next, data, &ctx->q.sockfd,
- &sockaddr, NULL, NULL, NULL, NULL);
+ Curl_cf_socket_peek(cf->next, data, &ctx->q.sockfd, &sockaddr, NULL);
if(!sockaddr)
return CURLE_QUIC_CONNECT_ERROR;
ctx->q.local_addrlen = sizeof(ctx->q.local_addr);
@@ -2186,13 +2093,11 @@ out: #ifndef CURL_DISABLE_VERBOSE_STRINGS
if(result) {
- const char *r_ip = NULL;
- int r_port = 0;
+ struct ip_quadruple ip;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL, &ip);
infof(data, "QUIC connect to %s port %u failed: %s",
- r_ip, r_port, curl_easy_strerror(result));
+ ip.remote_ip, ip.remote_port, curl_easy_strerror(result));
}
#endif
if(!result && ctx->qconn) {
diff --git a/libs/libcurl/src/vquic/curl_osslq.c b/libs/libcurl/src/vquic/curl_osslq.c
index 82fda51971..caa78935f1 100644
--- a/libs/libcurl/src/vquic/curl_osslq.c
+++ b/libs/libcurl/src/vquic/curl_osslq.c
@@ -67,7 +67,7 @@
 * Chunk size is large enough to take a full DATA frame */
#define H3_STREAM_WINDOW_SIZE (128 * 1024)
#define H3_STREAM_CHUNK_SIZE (16 * 1024)
-/* The pool keeps spares around and half of a full stream windows
+/* The pool keeps spares around and half of a full stream window
* seems good. More does not seem to improve performance.
* The benefit of the pool is that stream buffer to not keep
* spares. So memory consumption goes down when streams run empty,
@@ -100,7 +100,7 @@ typedef unsigned long sslerr_t; static CURLcode cf_progress_ingress(struct Curl_cfilter *cf,
struct Curl_easy *data);
-static const char *SSL_ERROR_to_str(int err)
+static const char *osslq_SSL_ERROR_to_str(int err)
{
switch(err) {
case SSL_ERROR_NONE:
@@ -139,7 +139,7 @@ static const char *SSL_ERROR_to_str(int err) }
/* Return error string for last OpenSSL error */
-static char *ossl_strerror(unsigned long error, char *buf, size_t size)
+static char *osslq_strerror(unsigned long error, char *buf, size_t size)
{
DEBUGASSERT(size);
*buf = '\0';
@@ -381,8 +381,8 @@ static CURLcode cf_osslq_h3conn_add_stream(struct cf_osslq_h3conn *h3, }
static CURLcode cf_osslq_ssl_err(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- int detail, CURLcode def_result)
+ struct Curl_easy *data,
+ int detail, CURLcode def_result)
{
struct cf_osslq_ctx *ctx = cf->ctx;
CURLcode result = def_result;
@@ -421,17 +421,17 @@ static CURLcode cf_osslq_ssl_err(struct Curl_cfilter *cf, /* If client certificate is required, communicate the
error to client */
result = CURLE_SSL_CLIENTCERT;
- ossl_strerror(errdetail, ebuf, sizeof(ebuf));
+ osslq_strerror(errdetail, ebuf, sizeof(ebuf));
}
#endif
else if((lib == ERR_LIB_SSL) && (reason == SSL_R_PROTOCOL_IS_SHUTDOWN)) {
ctx->protocol_shutdown = TRUE;
- err_descr = "QUIC connectin has been shut down";
+ err_descr = "QUIC connection has been shut down";
result = def_result;
}
else {
result = def_result;
- ossl_strerror(errdetail, ebuf, sizeof(ebuf));
+ osslq_strerror(errdetail, ebuf, sizeof(ebuf));
}
/* detail is already set to the SSL error above */
@@ -443,16 +443,14 @@ static CURLcode cf_osslq_ssl_err(struct Curl_cfilter *cf, if(CURLE_SSL_CONNECT_ERROR == result && errdetail == 0) {
char extramsg[80]="";
int sockerr = SOCKERRNO;
- const char *r_ip = NULL;
- int r_port = 0;
+ struct ip_quadruple ip;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL, &ip);
if(sockerr && detail == SSL_ERROR_SYSCALL)
Curl_strerror(sockerr, extramsg, sizeof(extramsg));
failf(data, "QUIC connect: %s in connection to %s:%d (%s)",
- extramsg[0] ? extramsg : SSL_ERROR_to_str(detail),
- ctx->peer.dispname, r_port, r_ip);
+ extramsg[0] ? extramsg : osslq_SSL_ERROR_to_str(detail),
+ ctx->peer.dispname, ip.remote_port, ip.remote_ip);
}
else {
/* Could be a CERT problem */
@@ -976,7 +974,7 @@ static nghttp3_callbacks ngh3_callbacks = { };
static CURLcode cf_osslq_h3conn_init(struct cf_osslq_ctx *ctx, SSL *conn,
- void *user_data)
+ void *user_data)
{
struct cf_osslq_h3conn *h3 = &ctx->h3;
CURLcode result;
@@ -1039,7 +1037,6 @@ static CURLcode cf_osslq_ctx_start(struct Curl_cfilter *cf, CURLcode result;
int rv;
const struct Curl_sockaddr_ex *peer_addr = NULL;
- int peer_port;
BIO *bio = NULL;
BIO_ADDR *baddr = NULL;
@@ -1061,8 +1058,7 @@ static CURLcode cf_osslq_ctx_start(struct Curl_cfilter *cf, goto out;
result = CURLE_QUIC_CONNECT_ERROR;
- Curl_cf_socket_peek(cf->next, data, &ctx->q.sockfd,
- &peer_addr, NULL, &peer_port, NULL, NULL);
+ Curl_cf_socket_peek(cf->next, data, &ctx->q.sockfd, &peer_addr, NULL);
if(!peer_addr)
goto out;
@@ -1078,7 +1074,20 @@ static CURLcode cf_osslq_ctx_start(struct Curl_cfilter *cf, goto out;
}
+ /* Type conversions, see #12861: OpenSSL wants an `int`, but on 64-bit
+ * Win32 systems, Microsoft defines SOCKET as `unsigned long long`.
+ */
+#if defined(_WIN32) && !defined(__LWIP_OPT_H__) && !defined(LWIP_HDR_OPT_H)
+ if(ctx->q.sockfd > INT_MAX) {
+ failf(data, "Windows socket identifier larger than MAX_INT, "
+ "unable to set in OpenSSL dgram API.");
+ result = CURLE_QUIC_CONNECT_ERROR;
+ goto out;
+ }
+ bio = BIO_new_dgram((int)ctx->q.sockfd, BIO_NOCLOSE);
+#else
bio = BIO_new_dgram(ctx->q.sockfd, BIO_NOCLOSE);
+#endif
if(!bio) {
result = CURLE_OUT_OF_MEMORY;
goto out;
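
The guarded cast above exists because a 64-bit Windows SOCKET may not fit in the int that BIO_new_dgram() takes. A small standalone illustration of the same overflow check, with generic types:

  #include <limits.h>
  #include <stdio.h>

  /* refuse socket identifiers that do not fit in an int instead of
     truncating them silently */
  static int socket_as_int(unsigned long long sockfd, int *out)
  {
    if(sockfd > INT_MAX)
      return -1;              /* caller should fail the connect attempt */
    *out = (int)sockfd;
    return 0;
  }

  int main(void)
  {
    int fd;
    if(socket_as_int(7ULL, &fd))
      puts("socket id too large");
    else
      printf("usable descriptor: %d\n", fd);
    return 0;
  }
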
@@ -1095,6 +1104,16 @@ static CURLcode cf_osslq_ctx_start(struct Curl_cfilter *cf, goto out;
}
+#ifdef SSL_VALUE_QUIC_IDLE_TIMEOUT
+ /* Added in OpenSSL v3.3.x */
+ if(!SSL_set_feature_request_uint(ctx->tls.ssl, SSL_VALUE_QUIC_IDLE_TIMEOUT,
+ CURL_QUIC_MAX_IDLE_MS)) {
+ CURL_TRC_CF(data, cf, "error setting idle timeout, ");
+ result = CURLE_FAILED_INIT;
+ goto out;
+ }
+#endif
+
SSL_set_bio(ctx->tls.ssl, bio, bio);
bio = NULL;
SSL_set_connect_state(ctx->tls.ssl);
@@ -1146,7 +1165,7 @@ static ssize_t h3_quic_recv(void *reader_ctx, SSL_get_stream_read_error_code(x->s->ssl, &app_error_code);
CURL_TRC_CF(x->data, x->cf, "[%" PRId64 "] h3_quic_recv -> RESET, "
"rv=%d, app_err=%" PRIu64,
- x->s->id, rv, app_error_code);
+ x->s->id, rv, app_error_code);
if(app_error_code != NGHTTP3_H3_NO_ERROR) {
x->s->reset = TRUE;
}
@@ -1361,7 +1380,7 @@ static CURLcode h3_send_streams(struct Curl_cfilter *cf, size_t written;
int eos, ok, rv;
size_t total_len, acked_len = 0;
- bool blocked = FALSE;
+ bool blocked = FALSE, eos_written = FALSE;
n = nghttp3_conn_writev_stream(ctx->h3.conn, &stream_id, &eos,
vec, ARRAYSIZE(vec));
@@ -1392,9 +1411,19 @@ static CURLcode h3_send_streams(struct Curl_cfilter *cf, for(i = 0; (i < n) && !blocked; ++i) {
/* Without stream->s.ssl, we closed that already, so
* pretend the write did succeed. */
+#ifdef SSL_WRITE_FLAG_CONCLUDE
+ /* Since OpenSSL v3.3.x, on last chunk set EOS if needed */
+ uint64_t flags = (eos && ((i + 1) == n))? SSL_WRITE_FLAG_CONCLUDE : 0;
+ written = vec[i].len;
+ ok = !s->ssl || SSL_write_ex2(s->ssl, vec[i].base, vec[i].len, flags,
+ &written);
+ if(ok && flags & SSL_WRITE_FLAG_CONCLUDE)
+ eos_written = TRUE;
+#else
written = vec[i].len;
ok = !s->ssl || SSL_write_ex(s->ssl, vec[i].base, vec[i].len,
&written);
+#endif
if(ok) {
/* As OpenSSL buffers the data, we count this as acknowledged
* from nghttp3's point of view */
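
When built against OpenSSL 3.3, the last chunk can be written with SSL_WRITE_FLAG_CONCLUDE so the stream is closed in the same call; older builds keep using SSL_write_ex() and conclude the stream separately. A hedged sketch of that split, assuming an SSL object for an open QUIC stream (OpenSSL 3.2 or newer for the fallback branch):

  #include <stdint.h>
  #include <openssl/ssl.h>

  /* write one buffer to a QUIC stream; when `last` is set and the build
     has SSL_write_ex2(), conclude the stream in the same call */
  static int write_chunk(SSL *stream, const void *buf, size_t len,
                         int last, size_t *written)
  {
  #ifdef SSL_WRITE_FLAG_CONCLUDE
    uint64_t flags = last ? SSL_WRITE_FLAG_CONCLUDE : 0;
    return SSL_write_ex2(stream, buf, len, flags, written);
  #else
    int ok = SSL_write_ex(stream, buf, len, written);
    if(ok && last)
      ok = (SSL_stream_conclude(stream, 0) == 1);
    return ok;
  #endif
  }
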
@@ -1409,7 +1438,7 @@ static CURLcode h3_send_streams(struct Curl_cfilter *cf, case SSL_ERROR_WANT_READ:
/* QUIC blocked us from writing more */
CURL_TRC_CF(data, cf, "[%"PRId64"] send %zu bytes to QUIC blocked",
- s->id, vec[i].len);
+ s->id, vec[i].len);
written = 0;
nghttp3_conn_block_stream(ctx->h3.conn, s->id);
s->send_blocked = blocked = TRUE;
@@ -1426,6 +1455,7 @@ static CURLcode h3_send_streams(struct Curl_cfilter *cf, if(acked_len > 0 || (eos && !s->send_blocked)) {
/* Since QUIC buffers the data written internally, we can tell
* nghttp3 that it can move forward on it */
+ ctx->q.last_io = Curl_now();
rv = nghttp3_conn_add_write_offset(ctx->h3.conn, s->id, acked_len);
if(rv && rv != NGHTTP3_ERR_STREAM_NOT_FOUND) {
failf(data, "nghttp3_conn_add_write_offset returned error: %s\n",
@@ -1444,7 +1474,7 @@ static CURLcode h3_send_streams(struct Curl_cfilter *cf, "to QUIC, eos=%d", s->id, acked_len, total_len, eos);
}
- if(eos && !s->send_blocked) {
+ if(eos && !s->send_blocked && !eos_written) {
/* wrote everything and H3 indicates end of stream */
CURL_TRC_CF(data, cf, "[%" PRId64 "] closing QUIC stream", s->id);
SSL_stream_conclude(s->ssl, 0);
@@ -1569,6 +1599,7 @@ static CURLcode cf_osslq_connect(struct Curl_cfilter *cf, if(err == 1) {
/* connected */
ctx->handshake_at = now;
+ ctx->q.last_io = now;
CURL_TRC_CF(data, cf, "handshake complete after %dms",
(int)Curl_timediff(now, ctx->started_at));
result = cf_osslq_verify_peer(cf, data);
@@ -1584,15 +1615,18 @@ static CURLcode cf_osslq_connect(struct Curl_cfilter *cf, int detail = SSL_get_error(ctx->tls.ssl, err);
switch(detail) {
case SSL_ERROR_WANT_READ:
+ ctx->q.last_io = now;
CURL_TRC_CF(data, cf, "QUIC SSL_connect() -> WANT_RECV");
result = Curl_vquic_tls_before_recv(&ctx->tls, cf, data);
goto out;
case SSL_ERROR_WANT_WRITE:
+ ctx->q.last_io = now;
CURL_TRC_CF(data, cf, "QUIC SSL_connect() -> WANT_SEND");
result = CURLE_OK;
goto out;
#ifdef SSL_ERROR_WANT_ASYNC
case SSL_ERROR_WANT_ASYNC:
+ ctx->q.last_io = now;
CURL_TRC_CF(data, cf, "QUIC SSL_connect() -> WANT_ASYNC");
result = CURLE_OK;
goto out;
@@ -1619,13 +1653,11 @@ out: #ifndef CURL_DISABLE_VERBOSE_STRINGS
if(result) {
- const char *r_ip = NULL;
- int r_port = 0;
+ struct ip_quadruple ip;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL, &ip);
infof(data, "QUIC connect to %s port %u failed: %s",
- r_ip, r_port, curl_easy_strerror(result));
+ ip.remote_ip, ip.remote_port, curl_easy_strerror(result));
}
#endif
if(!result)
@@ -1885,7 +1917,7 @@ static ssize_t recv_closed_stream(struct Curl_cfilter *cf, if(stream->reset) {
failf(data,
"HTTP/3 stream %" PRId64 " reset by server", stream->s.id);
- *err = stream->resp_hds_complete? CURLE_PARTIAL_FILE : CURLE_HTTP3;
+ *err = data->req.bytecount? CURLE_PARTIAL_FILE : CURLE_HTTP3;
goto out;
}
else if(!stream->resp_hds_complete) {
@@ -2055,7 +2087,24 @@ static bool cf_osslq_conn_is_alive(struct Curl_cfilter *cf, if(!ctx->tls.ssl)
goto out;
- /* TODO: how to check negotiated connection idle time? */
+#ifdef SSL_VALUE_QUIC_IDLE_TIMEOUT
+ /* Added in OpenSSL v3.3.x */
+ {
+ timediff_t idletime;
+ uint64_t idle_ms = ctx->max_idle_ms;
+ if(!SSL_get_value_uint(ctx->tls.ssl, SSL_VALUE_CLASS_FEATURE_NEGOTIATED,
+ SSL_VALUE_QUIC_IDLE_TIMEOUT, &idle_ms)) {
+ CURL_TRC_CF(data, cf, "error getting negotiated idle timeout, "
+ "assume connection is dead.");
+ goto out;
+ }
+ CURL_TRC_CF(data, cf, "negotiated idle timeout: %zums", (size_t)idle_ms);
+ idletime = Curl_timediff(Curl_now(), ctx->q.last_io);
+ if(idletime > 0 && (uint64_t)idletime > idle_ms)
+ goto out;
+ }
+
+#endif
if(!cf->next || !cf->next->cft->is_alive(cf->next, data, input_pending))
goto out;
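
Liveness is now judged against the idle timeout negotiated during the QUIC handshake rather than left as a TODO. A sketch of that check, assuming an OpenSSL 3.3 build for SSL_get_value_uint() and a caller-supplied fallback limit otherwise:

  #include <stdint.h>
  #include <openssl/ssl.h>

  /* return 0 when the connection has been idle longer than the idle
     timeout both ends agreed on, 1 otherwise; `fallback_ms` is used
     when the build predates OpenSSL 3.3 */
  static int quic_conn_alive(SSL *ssl, int64_t idle_ms_elapsed,
                             uint64_t fallback_ms)
  {
    uint64_t limit_ms = fallback_ms;
  #ifdef SSL_VALUE_QUIC_IDLE_TIMEOUT
    if(!SSL_get_value_uint(ssl, SSL_VALUE_CLASS_FEATURE_NEGOTIATED,
                           SSL_VALUE_QUIC_IDLE_TIMEOUT, &limit_ms))
      return 0;   /* cannot even query the value, assume dead */
  #else
    (void)ssl;
  #endif
    if(idle_ms_elapsed > 0 && (uint64_t)idle_ms_elapsed > limit_ms)
      return 0;
    return 1;
  }
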
@@ -2111,15 +2160,24 @@ static CURLcode cf_osslq_query(struct Curl_cfilter *cf, int query, int *pres1, void *pres2)
{
struct cf_osslq_ctx *ctx = cf->ctx;
- struct cf_call_data save;
switch(query) {
case CF_QUERY_MAX_CONCURRENT: {
- /* TODO: how to get this? */
- CF_DATA_SAVE(save, cf, data);
+#ifdef SSL_VALUE_QUIC_STREAM_BIDI_LOCAL_AVAIL
+ /* Added in OpenSSL v3.3.x */
+ uint64_t v;
+ if(!SSL_get_value_uint(ctx->tls.ssl, SSL_VALUE_CLASS_GENERIC,
+ SSL_VALUE_QUIC_STREAM_BIDI_LOCAL_AVAIL, &v)) {
+ CURL_TRC_CF(data, cf, "error getting available local bidi streams");
+ return CURLE_HTTP3;
+ }
+ /* we report avail + in_use */
+ v += CONN_INUSE(cf->conn);
+ *pres1 = (v > INT_MAX)? INT_MAX : (int)v;
+#else
*pres1 = 100;
+#endif
CURL_TRC_CF(data, cf, "query max_conncurrent -> %d", *pres1);
- CF_DATA_RESTORE(cf, save);
return CURLE_OK;
}
case CF_QUERY_CONNECT_REPLY_MS:
diff --git a/libs/libcurl/src/vquic/curl_quiche.c b/libs/libcurl/src/vquic/curl_quiche.c
index 263b9180f7..e79137ebbd 100644
--- a/libs/libcurl/src/vquic/curl_quiche.c
+++ b/libs/libcurl/src/vquic/curl_quiche.c
@@ -732,7 +732,7 @@ static ssize_t recv_closed_stream(struct Curl_cfilter *cf,
 if(stream->reset) {
failf(data,
"HTTP/3 stream %" PRId64 " reset by server", stream->id);
- *err = stream->resp_got_header? CURLE_PARTIAL_FILE : CURLE_HTTP3;
+ *err = data->req.bytecount? CURLE_PARTIAL_FILE : CURLE_HTTP3;
CURL_TRC_CF(data, cf, "[%" PRId64 "] cf_recv, was reset -> %d",
stream->id, *err);
}
@@ -1243,8 +1243,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf, if(result)
return result;
- Curl_cf_socket_peek(cf->next, data, &ctx->q.sockfd,
- &sockaddr, NULL, NULL, NULL, NULL);
+ Curl_cf_socket_peek(cf->next, data, &ctx->q.sockfd, &sockaddr, NULL);
ctx->q.local_addrlen = sizeof(ctx->q.local_addr);
rv = getsockname(ctx->q.sockfd, (struct sockaddr *)&ctx->q.local_addr,
&ctx->q.local_addrlen);
@@ -1390,13 +1389,11 @@ static CURLcode cf_quiche_connect(struct Curl_cfilter *cf, out:
#ifndef CURL_DISABLE_VERBOSE_STRINGS
if(result && result != CURLE_AGAIN) {
- const char *r_ip;
- int r_port;
+ struct ip_quadruple ip;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL, &ip);
infof(data, "connect to %s port %u failed: %s",
- r_ip, r_port, curl_easy_strerror(result));
+ ip.remote_ip, ip.remote_port, curl_easy_strerror(result));
}
#endif
return result;
diff --git a/libs/libcurl/src/vquic/vquic-tls.c b/libs/libcurl/src/vquic/vquic-tls.c
index 29c23ef58c..2a5f243e5d 100644
--- a/libs/libcurl/src/vquic/vquic-tls.c
+++ b/libs/libcurl/src/vquic/vquic-tls.c
@@ -375,6 +375,7 @@ static CURLcode curl_wssl_init_ctx(struct quic_tls_ctx *ctx,
 char error_buffer[256];
ERR_error_string_n(ERR_get_error(), error_buffer, sizeof(error_buffer));
failf(data, "wolfSSL failed to set ciphers: %s", error_buffer);
+ result = CURLE_BAD_FUNCTION_ARGUMENT;
goto out;
}
@@ -382,6 +383,7 @@ static CURLcode curl_wssl_init_ctx(struct quic_tls_ctx *ctx, conn_config->curves :
(char *)QUIC_GROUPS) != 1) {
failf(data, "wolfSSL failed to set curves");
+ result = CURLE_BAD_FUNCTION_ARGUMENT;
goto out;
}
@@ -392,6 +394,7 @@ static CURLcode curl_wssl_init_ctx(struct quic_tls_ctx *ctx, wolfSSL_CTX_set_keylog_callback(ctx->ssl_ctx, keylog_callback);
#else
failf(data, "wolfSSL was built without keylog callback");
+ result = CURLE_NOT_BUILT_IN;
goto out;
#endif
}
@@ -414,6 +417,7 @@ static CURLcode curl_wssl_init_ctx(struct quic_tls_ctx *ctx, " CAfile: %s CApath: %s",
ssl_cafile ? ssl_cafile : "none",
ssl_capath ? ssl_capath : "none");
+ result = CURLE_SSL_CACERT_BADFILE;
goto out;
}
infof(data, " CAfile: %s", ssl_cafile ? ssl_cafile : "none");
diff --git a/libs/libcurl/src/vquic/vquic.c b/libs/libcurl/src/vquic/vquic.c
index 8a6bb99aac..422f7d18c0 100644
--- a/libs/libcurl/src/vquic/vquic.c
+++ b/libs/libcurl/src/vquic/vquic.c
@@ -370,12 +370,10 @@ static CURLcode recvmmsg_packets(struct Curl_cfilter *cf,
 goto out;
}
if(!cf->connected && SOCKERRNO == ECONNREFUSED) {
- const char *r_ip = NULL;
- int r_port = 0;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
+ struct ip_quadruple ip;
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL, &ip);
failf(data, "QUIC: connection to %s port %u refused",
- r_ip, r_port);
+ ip.remote_ip, ip.remote_port);
result = CURLE_COULDNT_CONNECT;
goto out;
}
@@ -440,12 +438,10 @@ static CURLcode recvmsg_packets(struct Curl_cfilter *cf, goto out;
}
if(!cf->connected && SOCKERRNO == ECONNREFUSED) {
- const char *r_ip = NULL;
- int r_port = 0;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
+ struct ip_quadruple ip;
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL, &ip);
failf(data, "QUIC: connection to %s port %u refused",
- r_ip, r_port);
+ ip.remote_ip, ip.remote_port);
result = CURLE_COULDNT_CONNECT;
goto out;
}
@@ -500,12 +496,10 @@ static CURLcode recvfrom_packets(struct Curl_cfilter *cf, goto out;
}
if(!cf->connected && SOCKERRNO == ECONNREFUSED) {
- const char *r_ip = NULL;
- int r_port = 0;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
+ struct ip_quadruple ip;
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL, &ip);
failf(data, "QUIC: connection to %s port %u refused",
- r_ip, r_port);
+ ip.remote_ip, ip.remote_port);
result = CURLE_COULDNT_CONNECT;
goto out;
}
diff --git a/libs/libcurl/src/vssh/libssh.c b/libs/libcurl/src/vssh/libssh.c
index 59b8bf872b..8d5cdcfbb9 100644
--- a/libs/libcurl/src/vssh/libssh.c
+++ b/libs/libcurl/src/vssh/libssh.c
@@ -1292,10 +1292,10 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
 position. */
if(data->state.resume_from > 0) {
/* Let's read off the proper amount of bytes from the input. */
- if(conn->seek_func) {
+ if(data->set.seek_func) {
Curl_set_in_callback(data, true);
- seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
- SEEK_SET);
+ seekerr = data->set.seek_func(data->set.seek_client,
+ data->state.resume_from, SEEK_SET);
Curl_set_in_callback(data, false);
}
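
Resume now goes through the seek callback stored on the easy handle rather than on the connection. A hedged sketch of the callback contract involved (CURLOPT_SEEKFUNCTION semantics), with a FILE*-backed example callback:

  #include <stdio.h>
  #include <curl/curl.h>

  /* a CURLOPT_SEEKFUNCTION-style callback over a plain FILE*;
     note: fseek() takes long, fseeko() would be safer for large files */
  static int file_seek_cb(void *clientp, curl_off_t offset, int origin)
  {
    FILE *fp = clientp;
    if(fseek(fp, (long)offset, origin))
      return CURL_SEEKFUNC_CANTSEEK;
    return CURL_SEEKFUNC_OK;
  }

  /* skip the already-transferred part before resuming an upload; mirrors
     how the transfer code invokes the user-supplied seek callback */
  static int resume_from(void *clientp,
                         int (*seek_cb)(void *, curl_off_t, int),
                         curl_off_t resume_offset)
  {
    if(!seek_cb)
      return -1;   /* caller falls back to reading and discarding input */
    return seek_cb(clientp, resume_offset, SEEK_SET) == CURL_SEEKFUNC_OK ? 0 : -1;
  }
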
@@ -1349,9 +1349,9 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) Curl_pgrsSetUploadSize(data, data->state.infilesize);
}
/* upload data */
- Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
/* store this original bitmask setup to use later on if we can't
@@ -1575,7 +1575,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) sshc->sftp_dir = NULL;
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
state(data, SSH_STOP);
break;
@@ -1665,6 +1665,8 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) size = 0;
}
else {
+ if((to - from) == CURL_OFF_T_MAX)
+ return CURLE_RANGE_ERROR;
size = to - from + 1;
}
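
The added range check matters because size = to - from + 1 overflows exactly when to - from equals CURL_OFF_T_MAX. A tiny standalone demonstration with int64_t in place of curl_off_t:

  #include <stdint.h>
  #include <stdio.h>

  /* size of an inclusive byte range [from, to], from <= to; returns -1
     when the "+ 1" would overflow, i.e. when to - from == INT64_MAX */
  static int64_t range_size(int64_t from, int64_t to)
  {
    if((to - from) == INT64_MAX)
      return -1;
    return to - from + 1;
  }

  int main(void)
  {
    printf("%lld\n", (long long)range_size(0, 99));          /* 100 */
    printf("%lld\n", (long long)range_size(0, INT64_MAX));   /* -1 (rejected) */
    return 0;
  }
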
@@ -1718,14 +1720,14 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) /* Setup the actual download */
if(data->req.size == 0) {
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
infof(data, "File already completely downloaded");
state(data, SSH_STOP);
break;
}
- Curl_setup_transfer(data, FIRSTSOCKET, data->req.size, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, data->req.size, FALSE, -1);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
/* we want to use the _receiving_ function even when the socket turns
@@ -1847,9 +1849,9 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) }
/* upload data */
- Curl_setup_transfer(data, -1, data->req.size, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, data->req.size, FALSE, FIRSTSOCKET);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
/* store this original bitmask setup to use later on if we can't
@@ -1891,9 +1893,9 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) /* download data */
bytecount = ssh_scp_request_get_size(sshc->scp_session);
data->req.maxdownload = (curl_off_t) bytecount;
- Curl_setup_transfer(data, FIRSTSOCKET, bytecount, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, bytecount, FALSE, -1);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
/* we want to use the _receiving_ function even when the socket turns
diff --git a/libs/libcurl/src/vssh/libssh2.c b/libs/libcurl/src/vssh/libssh2.c
index 32c6764be9..f1e1dfd0fc 100644
--- a/libs/libcurl/src/vssh/libssh2.c
+++ b/libs/libcurl/src/vssh/libssh2.c
@@ -2142,10 +2142,10 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
 position. */
if(data->state.resume_from > 0) {
/* Let's read off the proper amount of bytes from the input. */
- if(conn->seek_func) {
+ if(data->set.seek_func) {
Curl_set_in_callback(data, true);
- seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
- SEEK_SET);
+ seekerr = data->set.seek_func(data->set.seek_client,
+ data->state.resume_from, SEEK_SET);
Curl_set_in_callback(data, false);
}
@@ -2195,9 +2195,9 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) Curl_pgrsSetUploadSize(data, data->state.infilesize);
}
/* upload data */
- Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
if(result) {
@@ -2448,7 +2448,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) Curl_safefree(sshp->readdir_longentry);
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
state(data, SSH_STOP);
break;
@@ -2544,6 +2544,8 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) size = 0;
}
else {
+ if((to - from) == CURL_OFF_T_MAX)
+ return CURLE_RANGE_ERROR;
size = to - from + 1;
}
@@ -2588,14 +2590,14 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) /* Setup the actual download */
if(data->req.size == 0) {
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
infof(data, "File already completely downloaded");
state(data, SSH_STOP);
break;
}
- Curl_setup_transfer(data, FIRSTSOCKET, data->req.size, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, data->req.size, FALSE, -1);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
/* we want to use the _receiving_ function even when the socket turns
@@ -2739,9 +2741,9 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) /* upload data */
data->req.size = data->state.infilesize;
Curl_pgrsSetUploadSize(data, data->state.infilesize);
- Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
if(result) {
@@ -2810,9 +2812,9 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) /* download data */
bytecount = (curl_off_t)sb.st_size;
data->req.maxdownload = (curl_off_t)sb.st_size;
- Curl_setup_transfer(data, FIRSTSOCKET, bytecount, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, bytecount, FALSE, -1);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
/* we want to use the _receiving_ function even when the socket turns
@@ -3191,12 +3193,13 @@ static ssize_t ssh_tls_recv(libssh2_socket_t sock, void *buffer, struct connectdata *conn = data->conn;
Curl_recv *backup = conn->recv[0];
struct ssh_conn *ssh = &conn->proto.sshc;
+ int socknum = Curl_conn_sockindex(data, sock);
(void)flags;
/* swap in the TLS reader function for this call only, and then swap back
the SSH one again */
conn->recv[0] = ssh->tls_recv;
- result = Curl_read(data, sock, buffer, length, &nread);
+ result = Curl_conn_recv(data, socknum, buffer, length, &nread);
conn->recv[0] = backup;
if(result == CURLE_AGAIN)
return -EAGAIN; /* magic return code for libssh2 */
@@ -3210,24 +3213,25 @@ static ssize_t ssh_tls_send(libssh2_socket_t sock, const void *buffer, size_t length, int flags, void **abstract)
{
struct Curl_easy *data = (struct Curl_easy *)*abstract;
- ssize_t nwrite;
+ size_t nwrite;
CURLcode result;
struct connectdata *conn = data->conn;
Curl_send *backup = conn->send[0];
struct ssh_conn *ssh = &conn->proto.sshc;
+ int socknum = Curl_conn_sockindex(data, sock);
(void)flags;
/* swap in the TLS writer function for this call only, and then swap back
the SSH one again */
conn->send[0] = ssh->tls_send;
- result = Curl_write(data, sock, buffer, length, &nwrite);
+ result = Curl_conn_send(data, socknum, buffer, length, &nwrite);
conn->send[0] = backup;
if(result == CURLE_AGAIN)
return -EAGAIN; /* magic return code for libssh2 */
else if(result)
return -1; /* error */
- Curl_debug(data, CURLINFO_DATA_OUT, (char *)buffer, (size_t)nwrite);
- return nwrite;
+ Curl_debug(data, CURLINFO_DATA_OUT, (char *)buffer, nwrite);
+ return (ssize_t)nwrite;
}
#endif
@@ -3268,7 +3272,7 @@ static CURLcode ssh_connect(struct Curl_easy *data, bool *done) #endif /* CURL_LIBSSH2_DEBUG */
/* libcurl MUST to set custom memory functions so that the kbd_callback
- funciton's memory allocations can be properled freed */
+ function's memory allocations can be properly freed */
sshc->ssh_session = libssh2_session_init_ex(my_libssh2_malloc,
my_libssh2_free,
my_libssh2_realloc, data);
diff --git a/libs/libcurl/src/vssh/wolfssh.c b/libs/libcurl/src/vssh/wolfssh.c
index 5583398915..fcba60e8ee 100644
--- a/libs/libcurl/src/vssh/wolfssh.c
+++ b/libs/libcurl/src/vssh/wolfssh.c
@@ -625,10 +625,10 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
 if(data->state.resume_from > 0) {
/* Let's read off the proper amount of bytes from the input. */
int seekerr = CURL_SEEKFUNC_OK;
- if(conn->seek_func) {
+ if(data->set.seek_func) {
Curl_set_in_callback(data, true);
- seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
- SEEK_SET);
+ seekerr = data->set.seek_func(data->set.seek_client,
+ data->state.resume_from, SEEK_SET);
Curl_set_in_callback(data, false);
}
@@ -678,9 +678,9 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block) Curl_pgrsSetUploadSize(data, data->state.infilesize);
}
/* upload data */
- Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
if(result) {
@@ -778,14 +778,14 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block) /* Setup the actual download */
if(data->req.size == 0) {
/* no data to transfer */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup(data, -1, -1, FALSE, -1);
infof(data, "File already completely downloaded");
state(data, SSH_STOP);
break;
}
- Curl_setup_transfer(data, FIRSTSOCKET, data->req.size, FALSE, -1);
+ Curl_xfer_setup(data, FIRSTSOCKET, data->req.size, FALSE, -1);
- /* not set by Curl_setup_transfer to preserve keepon bits */
+ /* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
/* we want to use the _receiving_ function even when the socket turns
diff --git a/libs/libcurl/src/vtls/bearssl.c b/libs/libcurl/src/vtls/bearssl.c
index 924802c61f..9ccfd8f473 100644
--- a/libs/libcurl/src/vtls/bearssl.c
+++ b/libs/libcurl/src/vtls/bearssl.c
@@ -707,7 +707,7 @@ static CURLcode bearssl_connect_step1(struct Curl_cfilter *cf,
 infof(data, VTLS_INFOF_ALPN_OFFER_1STR, proto.data);
}
- if(connssl->peer.is_ip_address) {
+ if(connssl->peer.type != CURL_SSL_PEER_DNS) {
if(verifyhost) {
failf(data, "BearSSL: "
"host verification of IP address is not supported");
diff --git a/libs/libcurl/src/vtls/gtls.c b/libs/libcurl/src/vtls/gtls.c
index dd40380738..1184ceb5c6 100644
--- a/libs/libcurl/src/vtls/gtls.c
+++ b/libs/libcurl/src/vtls/gtls.c
@@ -117,6 +117,8 @@ static ssize_t gtls_pull(void *s, void *buf, size_t blen)
 (CURLE_AGAIN == result)? EAGAIN : EINVAL);
nread = -1;
}
+ else if(nread == 0)
+ connssl->peer_closed = TRUE;
return nread;
}
@@ -1489,7 +1491,7 @@ static int gtls_shutdown(struct Curl_cfilter *cf, bool done = FALSE;
char buf[120];
- while(!done) {
+ while(!done && !connssl->peer_closed) {
int what = SOCKET_READABLE(Curl_conn_cf_get_socket(cf, data),
SSL_SHUTDOWN_TIMEOUT);
if(what > 0) {
diff --git a/libs/libcurl/src/vtls/mbedtls.c b/libs/libcurl/src/vtls/mbedtls.c
index c008eace9b..22a6a8cba2 100644
--- a/libs/libcurl/src/vtls/mbedtls.c
+++ b/libs/libcurl/src/vtls/mbedtls.c
@@ -110,7 +110,8 @@ struct mbed_ssl_backend_data {
 };
/* apply threading? */
-#if defined(USE_THREADS_POSIX) || defined(USE_THREADS_WIN32)
+#if (defined(USE_THREADS_POSIX) && defined(HAVE_PTHREAD_H)) || \
+ defined(_WIN32)
#define THREADING_SUPPORT
#endif
@@ -123,7 +124,6 @@ static mbedtls_entropy_context ts_entropy; static int entropy_init_initialized = 0;
-/* start of entropy_init_mutex() */
static void entropy_init_mutex(mbedtls_entropy_context *ctx)
{
/* lock 0 = entropy_init_mutex() */
@@ -134,9 +134,18 @@ static void entropy_init_mutex(mbedtls_entropy_context *ctx) }
Curl_mbedtlsthreadlock_unlock_function(0);
}
-/* end of entropy_init_mutex() */
-/* start of entropy_func_mutex() */
+static void entropy_cleanup_mutex(mbedtls_entropy_context *ctx)
+{
+ /* lock 0 = use same lock as init */
+ Curl_mbedtlsthreadlock_lock_function(0);
+ if(entropy_init_initialized == 1) {
+ mbedtls_entropy_free(ctx);
+ entropy_init_initialized = 0;
+ }
+ Curl_mbedtlsthreadlock_unlock_function(0);
+}
+
static int entropy_func_mutex(void *data, unsigned char *output, size_t len)
{
int ret;
@@ -147,7 +156,6 @@ static int entropy_func_mutex(void *data, unsigned char *output, size_t len) return ret;
}
-/* end of entropy_func_mutex() */
#endif /* THREADING_SUPPORT */
@@ -237,6 +245,23 @@ static const mbedtls_x509_crt_profile mbedtls_x509_crt_profile_fr = #define PUB_DER_MAX_BYTES (RSA_PUB_DER_MAX_BYTES > ECP_PUB_DER_MAX_BYTES ? \
RSA_PUB_DER_MAX_BYTES : ECP_PUB_DER_MAX_BYTES)
+#if MBEDTLS_VERSION_NUMBER >= 0x03020000
+static CURLcode mbedtls_version_from_curl(
+ mbedtls_ssl_protocol_version* mbedver, long version)
+{
+ switch(version) {
+ case CURL_SSLVERSION_TLSv1_0:
+ case CURL_SSLVERSION_TLSv1_1:
+ case CURL_SSLVERSION_TLSv1_2:
+ *mbedver = MBEDTLS_SSL_VERSION_TLS1_2;
+ return CURLE_OK;
+ case CURL_SSLVERSION_TLSv1_3:
+ break;
+ }
+
+ return CURLE_SSL_CONNECT_ERROR;
+}
+#else
static CURLcode mbedtls_version_from_curl(int *mbedver, long version)
{
#if MBEDTLS_VERSION_NUMBER >= 0x03000000
@@ -267,6 +292,7 @@ static CURLcode mbedtls_version_from_curl(int *mbedver, long version) return CURLE_SSL_CONNECT_ERROR;
}
+#endif
static CURLcode
set_ssl_version_min_max(struct Curl_cfilter *cf, struct Curl_easy *data)
@@ -275,7 +301,10 @@ set_ssl_version_min_max(struct Curl_cfilter *cf, struct Curl_easy *data) struct mbed_ssl_backend_data *backend =
(struct mbed_ssl_backend_data *)connssl->backend;
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
-#if MBEDTLS_VERSION_NUMBER >= 0x03000000
+#if MBEDTLS_VERSION_NUMBER >= 0x03020000
+ mbedtls_ssl_protocol_version mbedtls_ver_min = MBEDTLS_SSL_VERSION_TLS1_2;
+ mbedtls_ssl_protocol_version mbedtls_ver_max = MBEDTLS_SSL_VERSION_TLS1_2;
+#elif MBEDTLS_VERSION_NUMBER >= 0x03000000
int mbedtls_ver_min = MBEDTLS_SSL_MINOR_VERSION_3;
int mbedtls_ver_max = MBEDTLS_SSL_MINOR_VERSION_3;
#else
@@ -313,10 +342,15 @@ set_ssl_version_min_max(struct Curl_cfilter *cf, struct Curl_easy *data) return result;
}
+#if MBEDTLS_VERSION_NUMBER >= 0x03020000
+ mbedtls_ssl_conf_min_tls_version(&backend->config, mbedtls_ver_min);
+ mbedtls_ssl_conf_max_tls_version(&backend->config, mbedtls_ver_max);
+#else
mbedtls_ssl_conf_min_version(&backend->config, MBEDTLS_SSL_MAJOR_VERSION_3,
mbedtls_ver_min);
mbedtls_ssl_conf_max_version(&backend->config, MBEDTLS_SSL_MAJOR_VERSION_3,
mbedtls_ver_max);
+#endif
return result;
}
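
mbedTLS 3.2 replaced the major/minor version pair with a single protocol-version enum, hence the new compile-time switch above. A minimal sketch that pins a config to TLS 1.2 across that API change (assuming TLS 1.2 is the only version wanted):

  #include <mbedtls/version.h>
  #include <mbedtls/ssl.h>

  /* pin a config to TLS 1.2 across the mbedTLS 2.x/3.x API change */
  static void pin_tls12(mbedtls_ssl_config *conf)
  {
  #if MBEDTLS_VERSION_NUMBER >= 0x03020000
    mbedtls_ssl_conf_min_tls_version(conf, MBEDTLS_SSL_VERSION_TLS1_2);
    mbedtls_ssl_conf_max_tls_version(conf, MBEDTLS_SSL_VERSION_TLS1_2);
  #else
    mbedtls_ssl_conf_min_version(conf, MBEDTLS_SSL_MAJOR_VERSION_3,
                                 MBEDTLS_SSL_MINOR_VERSION_3);
    mbedtls_ssl_conf_max_version(conf, MBEDTLS_SSL_MAJOR_VERSION_3,
                                 MBEDTLS_SSL_MINOR_VERSION_3);
  #endif
  }
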
@@ -351,7 +385,6 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data) }
#ifdef THREADING_SUPPORT
- entropy_init_mutex(&ts_entropy);
mbedtls_ctr_drbg_init(&backend->ctr_drbg);
ret = mbedtls_ctr_drbg_seed(&backend->ctr_drbg, entropy_func_mutex,
@@ -654,14 +687,13 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data) &backend->clicert, &backend->pk);
}
- if(connssl->peer.sni) {
- if(mbedtls_ssl_set_hostname(&backend->ssl, connssl->peer.sni)) {
- /* mbedtls_ssl_set_hostname() sets the name to use in CN/SAN checks and
- the name to set in the SNI extension. So even if curl connects to a
- host specified as an IP address, this function must be used. */
- failf(data, "Failed to set SNI");
- return CURLE_SSL_CONNECT_ERROR;
- }
+ if(mbedtls_ssl_set_hostname(&backend->ssl, connssl->peer.sni?
+ connssl->peer.sni : connssl->peer.hostname)) {
+ /* mbedtls_ssl_set_hostname() sets the name to use in CN/SAN checks and
+ the name to set in the SNI extension. So even if curl connects to a
+ host specified as an IP address, this function must be used. */
+ failf(data, "Failed to set SNI");
+ return CURLE_SSL_CONNECT_ERROR;
}
#ifdef HAS_ALPN
@@ -775,6 +807,7 @@ mbed_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data) peercert = mbedtls_ssl_get_peer_cert(&backend->ssl);
if(peercert && data->set.verbose) {
+#ifndef MBEDTLS_X509_REMOVE_INFO
const size_t bufsize = 16384;
char *buffer = malloc(bufsize);
@@ -787,6 +820,9 @@ mbed_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data) infof(data, "Unable to dump certificate information");
free(buffer);
+#else
+ infof(data, "Unable to dump certificate information");
+#endif
}
if(pinnedpubkey) {
@@ -1216,14 +1252,19 @@ static CURLcode mbedtls_connect(struct Curl_cfilter *cf, */
static int mbedtls_init(void)
{
- return Curl_mbedtlsthreadlock_thread_setup();
+ if(!Curl_mbedtlsthreadlock_thread_setup())
+ return 0;
+#ifdef THREADING_SUPPORT
+ entropy_init_mutex(&ts_entropy);
+#endif
+ return 1;
}
static void mbedtls_cleanup(void)
{
#ifdef THREADING_SUPPORT
- mbedtls_entropy_free(&ts_entropy);
-#endif /* THREADING_SUPPORT */
+ entropy_cleanup_mutex(&ts_entropy);
+#endif
(void)Curl_mbedtlsthreadlock_thread_cleanup();
}
diff --git a/libs/libcurl/src/vtls/mbedtls_threadlock.c b/libs/libcurl/src/vtls/mbedtls_threadlock.c
index 757d19f003..d6d20328a7 100644
--- a/libs/libcurl/src/vtls/mbedtls_threadlock.c
+++ b/libs/libcurl/src/vtls/mbedtls_threadlock.c
@@ -26,12 +26,12 @@
 #if defined(USE_MBEDTLS) && \
((defined(USE_THREADS_POSIX) && defined(HAVE_PTHREAD_H)) || \
- defined(USE_THREADS_WIN32))
+ defined(_WIN32))
#if defined(USE_THREADS_POSIX) && defined(HAVE_PTHREAD_H)
# include <pthread.h>
# define MBEDTLS_MUTEX_T pthread_mutex_t
-#elif defined(USE_THREADS_WIN32)
+#elif defined(_WIN32)
# define MBEDTLS_MUTEX_T HANDLE
#endif
@@ -59,7 +59,7 @@ int Curl_mbedtlsthreadlock_thread_setup(void) #if defined(USE_THREADS_POSIX) && defined(HAVE_PTHREAD_H)
if(pthread_mutex_init(&mutex_buf[i], NULL))
return 0; /* pthread_mutex_init failed */
-#elif defined(USE_THREADS_WIN32)
+#elif defined(_WIN32)
mutex_buf[i] = CreateMutex(0, FALSE, 0);
if(mutex_buf[i] == 0)
return 0; /* CreateMutex failed */
@@ -80,7 +80,7 @@ int Curl_mbedtlsthreadlock_thread_cleanup(void) #if defined(USE_THREADS_POSIX) && defined(HAVE_PTHREAD_H)
if(pthread_mutex_destroy(&mutex_buf[i]))
return 0; /* pthread_mutex_destroy failed */
-#elif defined(USE_THREADS_WIN32)
+#elif defined(_WIN32)
if(!CloseHandle(mutex_buf[i]))
return 0; /* CloseHandle failed */
#endif /* USE_THREADS_POSIX && HAVE_PTHREAD_H */
@@ -100,7 +100,7 @@ int Curl_mbedtlsthreadlock_lock_function(int n) "Error: mbedtlsthreadlock_lock_function failed\n"));
return 0; /* pthread_mutex_lock failed */
}
-#elif defined(USE_THREADS_WIN32)
+#elif defined(_WIN32)
if(WaitForSingleObject(mutex_buf[n], INFINITE) == WAIT_FAILED) {
DEBUGF(fprintf(stderr,
"Error: mbedtlsthreadlock_lock_function failed\n"));
@@ -120,7 +120,7 @@ int Curl_mbedtlsthreadlock_unlock_function(int n) "Error: mbedtlsthreadlock_unlock_function failed\n"));
return 0; /* pthread_mutex_unlock failed */
}
-#elif defined(USE_THREADS_WIN32)
+#elif defined(_WIN32)
if(!ReleaseMutex(mutex_buf[n])) {
DEBUGF(fprintf(stderr,
"Error: mbedtlsthreadlock_unlock_function failed\n"));
diff --git a/libs/libcurl/src/vtls/mbedtls_threadlock.h b/libs/libcurl/src/vtls/mbedtls_threadlock.h
index 96f4bb8d43..ae651c3866 100644
--- a/libs/libcurl/src/vtls/mbedtls_threadlock.h
+++ b/libs/libcurl/src/vtls/mbedtls_threadlock.h
@@ -29,7 +29,7 @@
 #ifdef USE_MBEDTLS
#if (defined(USE_THREADS_POSIX) && defined(HAVE_PTHREAD_H)) || \
- defined(USE_THREADS_WIN32)
+ defined(_WIN32)
int Curl_mbedtlsthreadlock_thread_setup(void);
int Curl_mbedtlsthreadlock_thread_cleanup(void);
@@ -43,7 +43,7 @@ int Curl_mbedtlsthreadlock_unlock_function(int n); #define Curl_mbedtlsthreadlock_lock_function(x) 1
#define Curl_mbedtlsthreadlock_unlock_function(x) 1
-#endif /* USE_THREADS_POSIX || USE_THREADS_WIN32 */
+#endif /* (USE_THREADS_POSIX && HAVE_PTHREAD_H) || _WIN32 */
#endif /* USE_MBEDTLS */
diff --git a/libs/libcurl/src/vtls/openssl.c b/libs/libcurl/src/vtls/openssl.c
index be6780b7ed..b22ec269ac 100644
--- a/libs/libcurl/src/vtls/openssl.c
+++ b/libs/libcurl/src/vtls/openssl.c
@@ -769,6 +769,9 @@ static int ossl_bio_cf_in_read(BIO *bio, char *buf, int blen)
 if(CURLE_AGAIN == result)
BIO_set_retry_read(bio);
}
+ else if(nread == 0) {
+ connssl->peer_closed = TRUE;
+ }
/* Before returning server replies to the SSL instance, we need
* to have setup the x509 store or verification will fail. */
@@ -1887,16 +1890,41 @@ static void ossl_close(struct Curl_cfilter *cf, struct Curl_easy *data) DEBUGASSERT(backend);
if(backend->handle) {
- if(cf->next && cf->next->connected) {
+ /* Send the TLS shutdown if we are still connected *and* if
+ * the peer did not already close the connection. */
+ if(cf->next && cf->next->connected && !connssl->peer_closed) {
char buf[1024];
int nread, err;
long sslerr;
/* Maybe the server has already sent a close notify alert.
Read it to avoid an RST on the TCP connection. */
- (void)SSL_read(backend->handle, buf, (int)sizeof(buf));
ERR_clear_error();
- if(SSL_shutdown(backend->handle) == 1) {
+ nread = SSL_read(backend->handle, buf, (int)sizeof(buf));
+ err = SSL_get_error(backend->handle, nread);
+ if(!nread && err == SSL_ERROR_ZERO_RETURN) {
+ CURLcode result;
+ ssize_t n;
+ size_t blen = sizeof(buf);
+ CURL_TRC_CF(data, cf, "peer has shutdown TLS");
+ /* SSL_read() will not longer touch the socket, let's receive
+ * directly from the next filter to see if the underlying
+ * connection has also been closed. */
+ n = Curl_conn_cf_recv(cf->next, data, buf, blen, &result);
+ if(!n) {
+ connssl->peer_closed = TRUE;
+ CURL_TRC_CF(data, cf, "peer closed connection");
+ }
+ }
+ ERR_clear_error();
+ if(connssl->peer_closed) {
+ /* As the peer closed, we do not expect it to read anything more we
+ * may send. It may be harmful, leading to TCP RST and delaying
+ * a lingering close. Just leave. */
+ CURL_TRC_CF(data, cf, "not from sending TLS shutdown on "
+ "connection closed by peer");
+ }
+ else if(SSL_shutdown(backend->handle) == 1) {
CURL_TRC_CF(data, cf, "SSL shutdown finished");
}
else {
@@ -2134,7 +2162,7 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn, struct ssl_peer *peer, X509 *server_cert)
{
bool matched = FALSE;
- int target = GEN_DNS; /* target type, GEN_DNS or GEN_IPADD */
+ int target; /* target type, GEN_DNS or GEN_IPADD */
size_t addrlen = 0;
STACK_OF(GENERAL_NAME) *altnames;
#ifdef ENABLE_IPV6
@@ -2149,19 +2177,28 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn, (void)conn;
hostlen = strlen(peer->hostname);
- if(peer->is_ip_address) {
+ switch(peer->type) {
+ case CURL_SSL_PEER_IPV4:
+ if(!Curl_inet_pton(AF_INET, peer->hostname, &addr))
+ return CURLE_PEER_FAILED_VERIFICATION;
+ target = GEN_IPADD;
+ addrlen = sizeof(struct in_addr);
+ break;
#ifdef ENABLE_IPV6
- if(conn->bits.ipv6_ip &&
- Curl_inet_pton(AF_INET6, peer->hostname, &addr)) {
- target = GEN_IPADD;
- addrlen = sizeof(struct in6_addr);
- }
- else
+ case CURL_SSL_PEER_IPV6:
+ if(!Curl_inet_pton(AF_INET6, peer->hostname, &addr))
+ return CURLE_PEER_FAILED_VERIFICATION;
+ target = GEN_IPADD;
+ addrlen = sizeof(struct in6_addr);
+ break;
#endif
- if(Curl_inet_pton(AF_INET, peer->hostname, &addr)) {
- target = GEN_IPADD;
- addrlen = sizeof(struct in_addr);
- }
+ case CURL_SSL_PEER_DNS:
+ target = GEN_DNS;
+ break;
+ default:
+ DEBUGASSERT(0);
+ failf(data, "unexpected ssl peer type: %d", peer->type);
+ return CURLE_PEER_FAILED_VERIFICATION;
}
/* get a "list" of alternative names */
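
The switch above matches literal IP addresses against iPAddress SANs and everything else against dNSName SANs. A small standalone sketch of the same classification step, using the POSIX inet_pton():

  #include <arpa/inet.h>
  #include <stdio.h>

  enum peer_type { PEER_DNS, PEER_IPV4, PEER_IPV6 };

  /* literal IPv4/IPv6 addresses are checked against iPAddress entries,
     anything else against dNSName entries */
  static enum peer_type classify_peer(const char *name)
  {
    struct in_addr a4;
    struct in6_addr a6;
    if(inet_pton(AF_INET, name, &a4) == 1)
      return PEER_IPV4;
    if(inet_pton(AF_INET6, name, &a6) == 1)
      return PEER_IPV6;
    return PEER_DNS;
  }

  int main(void)
  {
    printf("%d %d %d\n", classify_peer("example.com"),
           classify_peer("192.0.2.1"), classify_peer("2001:db8::1"));
    return 0;   /* prints "0 1 2" */
  }
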
@@ -2242,9 +2279,12 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn, /* an alternative name matched */
;
else if(dNSName || iPAddress) {
- infof(data, " subjectAltName does not match %s", peer->dispname);
+ const char *tname = (peer->type == CURL_SSL_PEER_DNS) ? "host name" :
+ (peer->type == CURL_SSL_PEER_IPV4) ?
+ "ipv4 address" : "ipv6 address";
+ infof(data, " subjectAltName does not match %s %s", tname, peer->dispname);
failf(data, "SSL: no alternative certificate subject name matches "
- "target host name '%s'", peer->dispname);
+ "target %s '%s'", tname, peer->dispname);
result = CURLE_PEER_FAILED_VERIFICATION;
}
else {
diff --git a/libs/libcurl/src/vtls/rustls.c b/libs/libcurl/src/vtls/rustls.c
index f0eb1443e6..0688df13ef 100644
--- a/libs/libcurl/src/vtls/rustls.c
+++ b/libs/libcurl/src/vtls/rustls.c
@@ -7,6 +7,7 @@
 *
* Copyright (C) Jacob Hoffman-Andrews,
* <github@hoffman-andrews.com>
+ * Copyright (C) kpcyrd, <kpcyrd@archlinux.org>
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -85,6 +86,7 @@ static int read_cb(void *userdata, uint8_t *buf, uintptr_t len, uintptr_t *out_n)
{
struct io_ctx *io_ctx = userdata;
+ struct ssl_connect_data *const connssl = io_ctx->cf->ctx;
CURLcode result;
int ret = 0;
ssize_t nread = Curl_conn_cf_recv(io_ctx->cf->next, io_ctx->data,
@@ -96,6 +98,8 @@ read_cb(void *userdata, uint8_t *buf, uintptr_t len, uintptr_t *out_n) else
ret = EINVAL;
}
+ else if(nread == 0)
+ connssl->peer_closed = TRUE;
*out_n = (int)nread;
return ret;
}
@@ -291,7 +295,7 @@ cr_send(struct Curl_cfilter *cf, struct Curl_easy *data, DEBUGASSERT(backend);
rconn = backend->conn;
- CURL_TRC_CF(data, cf, "cf_send: %ld plain bytes", plainlen);
+ CURL_TRC_CF(data, cf, "cf_send: %zu plain bytes", plainlen);
io_ctx.cf = cf;
io_ctx.data = data;
@@ -342,7 +346,7 @@ cr_send(struct Curl_cfilter *cf, struct Curl_easy *data, /* A server certificate verify callback for rustls that always returns
RUSTLS_RESULT_OK, or in other words disable certificate verification. */
-static enum rustls_result
+static uint32_t
cr_verify_none(void *userdata UNUSED_PARAM,
const rustls_verify_server_cert_params *params UNUSED_PARAM)
{
@@ -373,7 +377,10 @@ cr_init_backend(struct Curl_cfilter *cf, struct Curl_easy *data, struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
struct rustls_connection *rconn = NULL;
struct rustls_client_config_builder *config_builder = NULL;
- struct rustls_root_cert_store *roots = NULL;
+ const struct rustls_root_cert_store *roots = NULL;
+ struct rustls_root_cert_store_builder *roots_builder = NULL;
+ struct rustls_web_pki_server_cert_verifier_builder *verifier_builder = NULL;
+ struct rustls_server_cert_verifier *server_cert_verifier = NULL;
const struct curl_blob *ca_info_blob = conn_config->ca_info_blob;
const char * const ssl_cafile =
/* CURLOPT_CAINFO_BLOB overrides CURLOPT_CAINFO */
@@ -414,38 +421,60 @@ cr_init_backend(struct Curl_cfilter *cf, struct Curl_easy *data, hostname = "example.invalid";
}
}
- else if(ca_info_blob) {
- roots = rustls_root_cert_store_new();
-
- /* Enable strict parsing only if verification isn't disabled. */
- result = rustls_root_cert_store_add_pem(roots, ca_info_blob->data,
- ca_info_blob->len, verifypeer);
- if(result != RUSTLS_RESULT_OK) {
- failf(data, "rustls: failed to parse trusted certificates from blob");
- rustls_root_cert_store_free(roots);
- rustls_client_config_free(
- rustls_client_config_builder_build(config_builder));
- return CURLE_SSL_CACERT_BADFILE;
+ else if(ca_info_blob || ssl_cafile) {
+ roots_builder = rustls_root_cert_store_builder_new();
+
+ if(ca_info_blob) {
+ /* Enable strict parsing only if verification isn't disabled. */
+ result = rustls_root_cert_store_builder_add_pem(roots_builder,
+ ca_info_blob->data,
+ ca_info_blob->len,
+ verifypeer);
+ if(result != RUSTLS_RESULT_OK) {
+ failf(data, "rustls: failed to parse trusted certificates from blob");
+ rustls_root_cert_store_builder_free(roots_builder);
+ rustls_client_config_free(
+ rustls_client_config_builder_build(config_builder));
+ return CURLE_SSL_CACERT_BADFILE;
+ }
+ }
+ else if(ssl_cafile) {
+ /* Enable strict parsing only if verification isn't disabled. */
+ result = rustls_root_cert_store_builder_load_roots_from_file(
+ roots_builder, ssl_cafile, verifypeer);
+ if(result != RUSTLS_RESULT_OK) {
+ failf(data, "rustls: failed to load trusted certificates");
+ rustls_root_cert_store_builder_free(roots_builder);
+ rustls_client_config_free(
+ rustls_client_config_builder_build(config_builder));
+ return CURLE_SSL_CACERT_BADFILE;
+ }
}
- result = rustls_client_config_builder_use_roots(config_builder, roots);
- rustls_root_cert_store_free(roots);
+ result = rustls_root_cert_store_builder_build(roots_builder, &roots);
+ rustls_root_cert_store_builder_free(roots_builder);
if(result != RUSTLS_RESULT_OK) {
failf(data, "rustls: failed to load trusted certificates");
rustls_client_config_free(
rustls_client_config_builder_build(config_builder));
return CURLE_SSL_CACERT_BADFILE;
}
- }
- else if(ssl_cafile) {
- result = rustls_client_config_builder_load_roots_from_file(
- config_builder, ssl_cafile);
+
+ verifier_builder = rustls_web_pki_server_cert_verifier_builder_new(roots);
+
+ result = rustls_web_pki_server_cert_verifier_builder_build(
+ verifier_builder, &server_cert_verifier);
+ rustls_web_pki_server_cert_verifier_builder_free(verifier_builder);
if(result != RUSTLS_RESULT_OK) {
failf(data, "rustls: failed to load trusted certificates");
+ rustls_server_cert_verifier_free(server_cert_verifier);
rustls_client_config_free(
rustls_client_config_builder_build(config_builder));
return CURLE_SSL_CACERT_BADFILE;
}
+
+ rustls_client_config_builder_set_server_verifier(config_builder,
+ server_cert_verifier);
}
backend->config = rustls_client_config_builder_build(config_builder);
@@ -671,7 +700,7 @@ cr_close(struct Curl_cfilter *cf, struct Curl_easy *data) DEBUGASSERT(backend);
- if(backend->conn) {
+ if(backend->conn && !connssl->peer_closed) {
rustls_connection_send_close_notify(backend->conn);
n = cr_send(cf, data, NULL, 0, &tmperr);
if(n < 0) {
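The rustls.c changes above replace rustls_client_config_builder_use_roots() and rustls_client_config_builder_load_roots_from_file() with the newer rustls-ffi chain: build a root certificate store, wrap it in a Web PKI server certificate verifier, and hand that verifier to the client config builder. A condensed sketch of the chain, assuming the rustls.h header from rustls-ffi; the load_verifier() wrapper and its simplified cleanup are illustrative, not curl code:

  #include <stdbool.h>
  #include "rustls.h"

  static struct rustls_server_cert_verifier *
  load_verifier(const char *cafile, bool strict)
  {
    struct rustls_root_cert_store_builder *rb =
      rustls_root_cert_store_builder_new();
    const struct rustls_root_cert_store *roots = NULL;
    struct rustls_web_pki_server_cert_verifier_builder *vb = NULL;
    struct rustls_server_cert_verifier *verifier = NULL;

    /* 1. collect the trust anchors into a root cert store */
    if(rustls_root_cert_store_builder_load_roots_from_file(rb, cafile, strict)
       != RUSTLS_RESULT_OK)
      goto out;
    if(rustls_root_cert_store_builder_build(rb, &roots) != RUSTLS_RESULT_OK)
      goto out;
    /* 2. wrap the store in a Web PKI server certificate verifier */
    vb = rustls_web_pki_server_cert_verifier_builder_new(roots);
    if(rustls_web_pki_server_cert_verifier_builder_build(vb, &verifier)
       != RUSTLS_RESULT_OK)
      verifier = NULL;
  out:
    if(vb)
      rustls_web_pki_server_cert_verifier_builder_free(vb);
    rustls_root_cert_store_builder_free(rb);
    return verifier;
  }

The returned verifier would then be attached with rustls_client_config_builder_set_server_verifier(), just as the cr_init_backend() hunk does.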
diff --git a/libs/libcurl/src/vtls/schannel.c b/libs/libcurl/src/vtls/schannel.c
index 64a31f155d..d63d321bf3 100644
--- a/libs/libcurl/src/vtls/schannel.c
+++ b/libs/libcurl/src/vtls/schannel.c
@@ -1159,7 +1159,7 @@ schannel_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
}
/* Warn if SNI is disabled due to use of an IP address */
- if(connssl->peer.is_ip_address) {
+ if(connssl->peer.type != CURL_SSL_PEER_DNS) {
infof(data, "schannel: using IP address, SNI is not supported by OS.");
}
@@ -2133,7 +2133,6 @@ schannel_recv(struct Curl_cfilter *cf, struct Curl_easy *data, infof(data, "schannel: server indicated shutdown in a prior call");
goto cleanup;
}
-
/* It's debatable what to return when !len. Regardless we can't return
immediately because there may be data to decrypt (in the case we want to
decrypt all encrypted cached data) so handle !len later in cleanup.
@@ -2317,10 +2316,10 @@ schannel_recv(struct Curl_cfilter *cf, struct Curl_easy *data, /* In Windows 2000 SEC_I_CONTEXT_EXPIRED (close_notify) is not
returned so we have to work around that in cleanup. */
backend->recv_sspi_close_notify = true;
- if(!backend->recv_connection_closed) {
+ if(!backend->recv_connection_closed)
backend->recv_connection_closed = true;
- infof(data, "schannel: server closed the connection");
- }
+ infof(data,
+ "schannel: server close notification received (close_notify)");
goto cleanup;
}
}
@@ -2443,7 +2442,10 @@ static bool schannel_data_pending(struct Curl_cfilter *cf, if(backend->ctxt) /* SSL/TLS is in use */
return (backend->decdata_offset > 0 ||
- (backend->encdata_offset > 0 && !backend->encdata_is_incomplete));
+ (backend->encdata_offset > 0 && !backend->encdata_is_incomplete) ||
+ backend->recv_connection_closed ||
+ backend->recv_sspi_close_notify ||
+ backend->recv_unrecoverable_err);
else
return FALSE;
}
diff --git a/libs/libcurl/src/vtls/sectransp.c b/libs/libcurl/src/vtls/sectransp.c
index eb43d08c5b..f62c034e55 100644
--- a/libs/libcurl/src/vtls/sectransp.c
+++ b/libs/libcurl/src/vtls/sectransp.c
@@ -2008,7 +2008,7 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
return CURLE_SSL_CONNECT_ERROR;
}
- if(connssl->peer.is_ip_address) {
+ if(connssl->peer.type != CURL_SSL_PEER_DNS) {
infof(data, "WARNING: using IP address, SNI is being disabled by "
"the OS.");
}
diff --git a/libs/libcurl/src/vtls/vtls.c b/libs/libcurl/src/vtls/vtls.c
index 6deb512879..e4fcb0dd83 100644
--- a/libs/libcurl/src/vtls/vtls.c
+++ b/libs/libcurl/src/vtls/vtls.c
@@ -774,9 +774,13 @@ void Curl_ssl_adjust_pollset(struct Curl_cfilter *cf, struct Curl_easy *data,
if(sock != CURL_SOCKET_BAD) {
if(connssl->connecting_state == ssl_connect_2_writing) {
Curl_pollset_set_out_only(data, ps, sock);
+ CURL_TRC_CF(data, cf, "adjust_pollset, POLLOUT fd=%"
+ CURL_FORMAT_SOCKET_T, sock);
}
else {
Curl_pollset_set_in_only(data, ps, sock);
+ CURL_TRC_CF(data, cf, "adjust_pollset, POLLIN fd=%"
+ CURL_FORMAT_SOCKET_T, sock);
}
}
}
@@ -1512,7 +1516,7 @@ void Curl_ssl_peer_cleanup(struct ssl_peer *peer) free(peer->sni);
free(peer->hostname);
peer->hostname = peer->sni = peer->dispname = NULL;
- peer->is_ip_address = FALSE;
+ peer->type = CURL_SSL_PEER_DNS;
}
static void cf_close(struct Curl_cfilter *cf, struct Curl_easy *data)
@@ -1526,18 +1530,23 @@ static void cf_close(struct Curl_cfilter *cf, struct Curl_easy *data) cf->connected = FALSE;
}
-static int is_ip_address(const char *hostname)
+static ssl_peer_type get_peer_type(const char *hostname)
{
+ if(hostname && hostname[0]) {
#ifdef ENABLE_IPV6
- struct in6_addr addr;
+ struct in6_addr addr;
#else
- struct in_addr addr;
+ struct in_addr addr;
#endif
- return (hostname && hostname[0] && (Curl_inet_pton(AF_INET, hostname, &addr)
+ if(Curl_inet_pton(AF_INET, hostname, &addr))
+ return CURL_SSL_PEER_IPV4;
#ifdef ENABLE_IPV6
- || Curl_inet_pton(AF_INET6, hostname, &addr)
+ else if(Curl_inet_pton(AF_INET6, hostname, &addr)) {
+ return CURL_SSL_PEER_IPV6;
+ }
#endif
- ));
+ }
+ return CURL_SSL_PEER_DNS;
}
CURLcode Curl_ssl_peer_init(struct ssl_peer *peer, struct Curl_cfilter *cf)
@@ -1566,6 +1575,7 @@ CURLcode Curl_ssl_peer_init(struct ssl_peer *peer, struct Curl_cfilter *cf) }
/* change if ehostname changed */
+ DEBUGASSERT(!ehostname || ehostname[0]);
if(ehostname && (!peer->hostname
|| strcmp(ehostname, peer->hostname))) {
Curl_ssl_peer_cleanup(peer);
@@ -1585,8 +1595,8 @@ CURLcode Curl_ssl_peer_init(struct ssl_peer *peer, struct Curl_cfilter *cf) }
peer->sni = NULL;
- peer->is_ip_address = is_ip_address(peer->hostname)? TRUE : FALSE;
- if(peer->hostname[0] && !peer->is_ip_address) {
+ peer->type = get_peer_type(peer->hostname);
+ if(peer->type == CURL_SSL_PEER_DNS && peer->hostname[0]) {
/* not an IP address, normalize according to RFC 6066 ch. 3,
* max len of SNI is 2^16-1, no trailing dot */
size_t len = strlen(peer->hostname);
@@ -1715,32 +1725,17 @@ static ssize_t ssl_cf_recv(struct Curl_cfilter *cf, {
struct cf_call_data save;
ssize_t nread;
- size_t ntotal = 0;
CF_DATA_SAVE(save, cf, data);
*err = CURLE_OK;
- /* Do receive until we fill the buffer somehwhat or EGAIN, error or EOF */
- while(!ntotal || (len - ntotal) > (4*1024)) {
+ nread = Curl_ssl->recv_plain(cf, data, buf, len, err);
+ if(nread > 0) {
+ DEBUGASSERT((size_t)nread <= len);
+ }
+ else if(nread == 0) {
+ /* eof */
*err = CURLE_OK;
- nread = Curl_ssl->recv_plain(cf, data, buf + ntotal, len - ntotal, err);
- if(nread < 0) {
- if(*err == CURLE_AGAIN && ntotal > 0) {
- /* we EAGAINed after having reed data, return the success amount */
- *err = CURLE_OK;
- break;
- }
- /* we have a an error to report */
- goto out;
- }
- else if(nread == 0) {
- /* eof */
- break;
- }
- ntotal += (size_t)nread;
- DEBUGASSERT((size_t)ntotal <= len);
}
- nread = (ssize_t)ntotal;
-out:
CURL_TRC_CF(data, cf, "cf_recv(len=%zu) -> %zd, %d", len,
nread, *err);
CF_DATA_RESTORE(cf, save);
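get_peer_type() above replaces the old is_ip_address boolean with a three-way classification (DNS name, IPv4 literal, IPv6 literal), so that Curl_ossl_verifyhost() can pick GEN_IPADD with the correct address length and SNI is skipped only for IP literals. Below is a self-contained illustration of the same classification with POSIX inet_pton(); the enum and function names are invented for the example:

  #include <arpa/inet.h>
  #include <netinet/in.h>
  #include <stdio.h>

  enum peer_type { PEER_DNS, PEER_IPV4, PEER_IPV6 };

  static enum peer_type classify_peer(const char *hostname)
  {
    struct in_addr a4;
    struct in6_addr a6;
    if(hostname && hostname[0]) {
      if(inet_pton(AF_INET, hostname, &a4) == 1)
        return PEER_IPV4;
      if(inet_pton(AF_INET6, hostname, &a6) == 1)
        return PEER_IPV6;
    }
    return PEER_DNS; /* everything else is treated as a DNS name */
  }

  int main(void)
  {
    printf("%d %d %d\n",
           classify_peer("example.com"), /* PEER_DNS  */
           classify_peer("127.0.0.1"),   /* PEER_IPV4 */
           classify_peer("::1"));        /* PEER_IPV6 */
    return 0;
  }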
diff --git a/libs/libcurl/src/vtls/vtls_int.h b/libs/libcurl/src/vtls/vtls_int.h
index 858a0de8e7..563a5d11a9 100644
--- a/libs/libcurl/src/vtls/vtls_int.h
+++ b/libs/libcurl/src/vtls/vtls_int.h
@@ -76,6 +76,7 @@ struct ssl_connect_data {
int port; /* remote port at origin */
BIT(use_alpn); /* if ALPN shall be used in handshake */
BIT(reused_session); /* session-ID was reused for this */
+ BIT(peer_closed); /* peer has closed connection */
};
diff --git a/libs/libcurl/src/vtls/wolfssl.c b/libs/libcurl/src/vtls/wolfssl.c
index 349d077ec4..fe845c5752 100644
--- a/libs/libcurl/src/vtls/wolfssl.c
+++ b/libs/libcurl/src/vtls/wolfssl.c
@@ -232,7 +232,6 @@ static const struct group_name_map gnm[] = {
static int wolfssl_bio_cf_create(WOLFSSL_BIO *bio)
{
wolfSSL_BIO_set_shutdown(bio, 1);
- wolfSSL_BIO_set_init(bio, 1);
wolfSSL_BIO_set_data(bio, NULL);
return 1;
}
@@ -321,6 +320,8 @@ static int wolfssl_bio_cf_in_read(WOLFSSL_BIO *bio, char *buf, int blen) wolfSSL_BIO_clear_retry_flags(bio);
if(nread < 0 && CURLE_AGAIN == result)
BIO_set_retry_read(bio);
+ else if(nread == 0)
+ connssl->peer_closed = TRUE;
return (int)nread;
}
@@ -1059,7 +1060,8 @@ static void wolfssl_close(struct Curl_cfilter *cf, struct Curl_easy *data) /* Maybe the server has already sent a close notify alert.
Read it to avoid an RST on the TCP connection. */
(void)wolfSSL_read(backend->handle, buf, (int)sizeof(buf));
- (void)wolfSSL_shutdown(backend->handle);
+ if(!connssl->peer_closed)
+ (void)wolfSSL_shutdown(backend->handle);
wolfSSL_free(backend->handle);
backend->handle = NULL;
}
diff --git a/libs/libcurl/src/ws.c b/libs/libcurl/src/ws.c
index 0caca18f05..174b62aa38 100644
--- a/libs/libcurl/src/ws.c
+++ b/libs/libcurl/src/ws.c
@@ -363,7 +363,7 @@ struct ws_cw_ctx {
static CURLcode ws_cw_init(struct Curl_easy *data,
struct Curl_cwriter *writer)
{
- struct ws_cw_ctx *ctx = (struct ws_cw_ctx *)writer;
+ struct ws_cw_ctx *ctx = writer->ctx;
(void)data;
Curl_bufq_init2(&ctx->buf, WS_CHUNK_SIZE, 1, BUFQ_OPT_SOFT_LIMIT);
return CURLE_OK;
@@ -371,7 +371,7 @@ static CURLcode ws_cw_init(struct Curl_easy *data, static void ws_cw_close(struct Curl_easy *data, struct Curl_cwriter *writer)
{
- struct ws_cw_ctx *ctx = (struct ws_cw_ctx *)writer;
+ struct ws_cw_ctx *ctx = writer->ctx;
(void) data;
Curl_bufq_free(&ctx->buf);
}
@@ -423,7 +423,7 @@ static CURLcode ws_cw_write(struct Curl_easy *data, struct Curl_cwriter *writer, int type,
const char *buf, size_t nbytes)
{
- struct ws_cw_ctx *ctx = (struct ws_cw_ctx *)writer;
+ struct ws_cw_ctx *ctx = writer->ctx;
struct websocket *ws;
CURLcode result;
@@ -754,13 +754,26 @@ CURLcode Curl_ws_accept(struct Curl_easy *data, DEBUGASSERT(data->conn);
ws = data->conn->proto.ws;
if(!ws) {
+ size_t chunk_size = WS_CHUNK_SIZE;
ws = calloc(1, sizeof(*ws));
if(!ws)
return CURLE_OUT_OF_MEMORY;
data->conn->proto.ws = ws;
- Curl_bufq_init2(&ws->recvbuf, WS_CHUNK_SIZE, WS_CHUNK_COUNT,
+#ifdef DEBUGBUILD
+ {
+ char *p = getenv("CURL_WS_CHUNK_SIZE");
+ if(p) {
+ long l = strtol(p, NULL, 10);
+ if(l > 0 && l <= (1*1024*1024)) {
+ chunk_size = (size_t)l;
+ }
+ }
+ }
+#endif
+ DEBUGF(infof(data, "WS, using chunk size %zu", chunk_size));
+ Curl_bufq_init2(&ws->recvbuf, chunk_size, WS_CHUNK_COUNT,
BUFQ_OPT_SOFT_LIMIT);
- Curl_bufq_init2(&ws->sendbuf, WS_CHUNK_SIZE, WS_CHUNK_COUNT,
+ Curl_bufq_init2(&ws->sendbuf, chunk_size, WS_CHUNK_COUNT,
BUFQ_OPT_SOFT_LIMIT);
ws_dec_init(&ws->dec);
ws_enc_init(&ws->enc);
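The Curl_ws_accept() hunk above lets debug builds override the websocket buffer chunk size through the CURL_WS_CHUNK_SIZE environment variable, accepting values from 1 byte up to 1 MiB and keeping the compiled-in default otherwise. The same parse-and-clamp pattern in isolation; the default value and helper name are illustrative:

  #include <stdio.h>
  #include <stdlib.h>

  #define DEFAULT_CHUNK_SIZE (64 * 1024) /* placeholder default */

  static size_t chunk_size_from_env(void)
  {
    size_t chunk_size = DEFAULT_CHUNK_SIZE;
    const char *p = getenv("CURL_WS_CHUNK_SIZE");
    if(p) {
      long l = strtol(p, NULL, 10);
      if(l > 0 && l <= (1 * 1024 * 1024)) /* accept 1 byte .. 1 MiB */
        chunk_size = (size_t)l;
    }
    return chunk_size;
  }

  int main(void)
  {
    printf("using chunk size %zu\n", chunk_size_from_env());
    return 0;
  }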
@@ -834,7 +847,7 @@ CURLcode Curl_ws_accept(struct Curl_easy *data, struct ws_collect {
struct Curl_easy *data;
- void *buffer;
+ unsigned char *buffer;
size_t buflen;
size_t bufidx;
int frame_age;
@@ -886,7 +899,7 @@ static ssize_t ws_client_collect(const unsigned char *buf, size_t buflen, return -1;
}
*err = CURLE_OK;
- memcpy(ctx->buffer, buf, nwritten);
+ memcpy(ctx->buffer + ctx->bufidx, buf, nwritten);
ctx->bufidx += nwritten;
}
return nwritten;
@@ -1001,14 +1014,17 @@ static CURLcode ws_flush(struct Curl_easy *data, struct websocket *ws, if(!Curl_bufq_is_empty(&ws->sendbuf)) {
CURLcode result;
const unsigned char *out;
- size_t outlen;
- ssize_t n;
+ size_t outlen, n;
while(Curl_bufq_peek(&ws->sendbuf, &out, &outlen)) {
if(data->set.connect_only)
result = Curl_senddata(data, out, outlen, &n);
- else
- result = Curl_write(data, data->conn->writesockfd, out, outlen, &n);
+ else {
+ result = Curl_xfer_send(data, out, outlen, &n);
+ if(!result && !n && outlen)
+ result = CURLE_AGAIN;
+ }
+
if(result) {
if(result == CURLE_AGAIN) {
if(!complete) {
@@ -1027,8 +1043,8 @@ static CURLcode ws_flush(struct Curl_easy *data, struct websocket *ws, }
}
else {
- infof(data, "WS: flushed %zu bytes", (size_t)n);
- Curl_bufq_skip(&ws->sendbuf, (size_t)n);
+ infof(data, "WS: flushed %zu bytes", n);
+ Curl_bufq_skip(&ws->sendbuf, n);
}
}
}
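ws_flush() now sends via Curl_xfer_send() and treats a zero-byte send of a non-empty buffer as CURLE_AGAIN, keeping the unsent remainder queued for a later attempt. A generic stand-alone sketch of that flush behaviour over a non-blocking socket; this is not curl's internal API, and MSG_DONTWAIT plus the helper name are illustrative:

  #include <errno.h>
  #include <sys/types.h>
  #include <sys/socket.h>

  /* returns bytes still pending, or (size_t)-1 on a hard error */
  static size_t flush_pending(int fd, const unsigned char *buf, size_t len)
  {
    size_t off = 0;
    while(off < len) {
      ssize_t n = send(fd, buf + off, len - off, MSG_DONTWAIT);
      if(n > 0) {
        off += (size_t)n; /* part of the buffer went out, keep going */
        continue;
      }
      if(n == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
        break; /* transport is full for now, keep the remainder queued */
      return (size_t)-1; /* real error */
    }
    return len - off;
  }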
@@ -1041,8 +1057,8 @@ CURL_EXTERN CURLcode curl_ws_send(CURL *data, const void *buffer, unsigned int flags)
{
struct websocket *ws;
- ssize_t nwritten, n;
- size_t space;
+ ssize_t n;
+ size_t nwritten, space;
CURLcode result;
*sent = 0;
@@ -1073,15 +1089,14 @@ CURL_EXTERN CURLcode curl_ws_send(CURL *data, const void *buffer, /* raw mode sends exactly what was requested, and this is from within
the write callback */
if(Curl_is_in_callback(data)) {
- result = Curl_write(data, data->conn->writesockfd, buffer, buflen,
- &nwritten);
+ result = Curl_xfer_send(data, buffer, buflen, &nwritten);
}
else
result = Curl_senddata(data, buffer, buflen, &nwritten);
infof(data, "WS: wanted to send %zu bytes, sent %zu bytes",
buflen, nwritten);
- *sent = (nwritten >= 0)? (size_t)nwritten : 0;
+ *sent = nwritten;
return result;
}
@@ -1091,7 +1106,7 @@ CURL_EXTERN CURLcode curl_ws_send(CURL *data, const void *buffer, return result;
/* TODO: the current design does not allow partial writes, afaict.
- * It is not clear who the application is supposed to react. */
+ * It is not clear how the application is supposed to react. */
space = Curl_bufq_space(&ws->sendbuf);
DEBUGF(infof(data, "curl_ws_send(len=%zu), sendbuf len=%zu space %zu",
buflen, Curl_bufq_len(&ws->sendbuf), space));
@@ -1148,11 +1163,6 @@ static CURLcode ws_setup_conn(struct Curl_easy *data, }
-void Curl_ws_done(struct Curl_easy *data)
-{
- (void)data;
-}
-
static CURLcode ws_disconnect(struct Curl_easy *data,
struct connectdata *conn,
bool dead_connection)
diff --git a/libs/libcurl/src/ws.h b/libs/libcurl/src/ws.h
index af26e3f140..b0d5e8d1b5 100644
--- a/libs/libcurl/src/ws.h
+++ b/libs/libcurl/src/ws.h
@@ -75,7 +75,6 @@ struct websocket {
CURLcode Curl_ws_request(struct Curl_easy *data, REQTYPE *req);
CURLcode Curl_ws_accept(struct Curl_easy *data, const char *mem, size_t len);
-void Curl_ws_done(struct Curl_easy *data);
extern const struct Curl_handler Curl_handler_ws;
#ifdef USE_SSL
@@ -85,7 +84,6 @@ extern const struct Curl_handler Curl_handler_wss; #else
#define Curl_ws_request(x,y) CURLE_OK
-#define Curl_ws_done(x) Curl_nop_stmt
#define Curl_ws_free(x) Curl_nop_stmt
#endif