Diffstat (limited to 'libs')
139 files changed, 15484 insertions, 11013 deletions
diff --git a/libs/libcurl/docs/CHANGES b/libs/libcurl/docs/CHANGES
index a9e2dcf620..3fe822bc9e 100644
--- a/libs/libcurl/docs/CHANGES
+++ b/libs/libcurl/docs/CHANGES
@@ -6,6 +6,3418 @@ Changelog
+Version 8.1.2 (30 May 2023)
+
+Daniel Stenberg (30 May 2023)
+
+- RELEASE-NOTES: synced
+
+ 8.1.2 release
+
+- THANKS: contributors from 8.1.2
+
+- lib1560: verify more scheme guessing
+
+ - on 2nd level domains
+ - on names without dots
+
+ As mentioned in #11161, "imap.com" will be guessed IMAP
+
+ Closes #11219
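+
+  As an illustration (not part of the curl sources), the guessing can be
+  observed through the URL API; this sketch assumes a libcurl with
+  CURLU_GUESS_SCHEME support:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURLU *uh = curl_url();
+      char *scheme = NULL;
+      /* no scheme given: let the parser guess one from the host name */
+      if(!curl_url_set(uh, CURLUPART_URL, "imap.com/inbox",
+                       CURLU_GUESS_SCHEME) &&
+         !curl_url_get(uh, CURLUPART_SCHEME, &scheme, 0))
+        printf("guessed scheme: %s\n", scheme); /* expected: imap */
+      curl_free(scheme);
+      curl_url_cleanup(uh);
+      return 0;
+    }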
+
+- page-header: minor wording polish in the URL segment
+
+ Closes #11217
+
+- page-header: mention curl version and how to figure out current release
+
+ Closes #11216
+
+- RELEASE-NOTES: synced
+
+- configure: without pkg-config and no custom path, use -lnghttp2
+
+ Reported-by: correctmost on github
+ Fixes #11186
+ Closes #11210
+
+Stefan Eissing (28 May 2023)
+
+- curl: cache the --trace-time value for a second
+
+  - caches the computed HH:MM:SS string and reuses it for logging during
+    the same second.
+ - common function for plain log line start formatting
+
+ Closes #11211
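+
+  A rough sketch of the caching idea (illustrative only, using a
+  hypothetical helper, not the actual curl tool code):
+
+    #include <stdio.h>
+    #include <time.h>
+
+    /* reformat the HH:MM:SS prefix only when the second changes */
+    static const char *log_time_prefix(void)
+    {
+      static char buf[16];
+      static time_t cached;          /* second 'buf' was formatted for */
+      time_t now = time(NULL);
+      if(now != cached) {
+        strftime(buf, sizeof(buf), "%H:%M:%S", localtime(&now));
+        cached = now;
+      }
+      return buf;                    /* not thread-safe; just a sketch */
+    }
+
+    int main(void)
+    {
+      printf("%s trace line\n", log_time_prefix());
+      return 0;
+    }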
+
+Kev Jackson (28 May 2023)
+
+- libcurl.m4: remove trailing 'dnl' that causes this to break autoconf
+
+ Closes #11212
+
+Stefan Eissing (26 May 2023)
+
+- http3: send EOF indicator as early as possible
+
+ - ngtcp2 and quiche implementations relied on the DONE_SEND event
+    to forward the EOF for uploads to the libraries. This often
+    resulted in a last, 0-length EOF data chunk. Tracking the amount of
+    data left to upload allows the EOF to be indicated earlier.
+ - refs #11205 where CloudFlare DoH servers did not like to
+ receive the initial upload DATA without EOF and returned
+ a 400 Bad Request
+
+ Reported-by: Sergey Fionov
+ Fixes #11205
+ Closes #11207
+
+Daniel Stenberg (26 May 2023)
+
+- scripts/contri*sh: no longer grep -v ' '
+
+  Originally these scripts filtered out names without a space in order to
+  avoid nicknames not intended for credits. Such names are not too
+  commonly used, plus we now give credit even to those.
+
+  Additionally: non-Latin names, like Asian ones, often have no spaces at
+  all, so they were also filtered out and had to be added manually, an
+  error-prone operation where Asian names easily fell off by mistake.
+
+ Closes #11206
+
+- cf-socket: restore Curl_sock_assign_addr()
+
+  Regression, since the function is not private: it is also used by msh3.c
+
+ Follow-up to 8e85764b7bd7f05f5
+ Reported-by: Gisle Vanem
+ Fixes #11202
+ Closes #11204
+
+- RELEASE-NOTES: synced
+
+ Taken down to 8.1.2 now for pending patch release
+
+- libssh: when keyboard-interactive auth fails, try password
+
+ The state machine had a mistake in that it would not carry on to that
+ next step.
+
+  This also adds verbose output of what methods are available from the
+  server and renames the macros that switch to the next auth method to
+  try.
+
+ Reported-by: 左潇峰
+ Fixes #11196
+ Closes #11197
+
+Emanuele Torre (25 May 2023)
+
+- configure: fix build with arbitrary CC and LD_LIBRARY_PATH
+
+ Since ./configure and processes that inherit its environment variables
+ are the only callers of the run-compiler script, we can just save the
+ current value of the LD_LIBRARY_PATH and CC variables to another pair of
+ environment variables, and make run-compiler a static script that
+  simply restores CC and LD_LIBRARY_PATH to the saved values before
+  running the compiler.
+
+ This avoids having to inject the values of the variables in the script,
+  possibly causing problems if they contain spaces, quotes, and other
+ special characters.
+
+ Also add exports in the script just in case LD_LIBRARY_PATH and CC are
+ not already in the environment.
+
+ follow-up from 471dab2
+
+ Closes #11182
+
+Daniel Stenberg (25 May 2023)
+
+- urlapi: remove superfluous host name check
+
+  ... as it is checked more properly later.
+
+ Closes #11195
+
+Stefan Eissing (25 May 2023)
+
+- http2: fix EOF handling on uploads with auth negotiation
+
+ - doing a POST with `--digest` does an override on the initial request
+    with `Content-Length: 0`, but the http2 filter was unaware of that
+    and expected the original request body. It therefore did not
+    send a final DATA frame with the EOF flag to the server.
+ - The fix overrides any initial notion of post size when the `done_send`
+ event is triggered by the transfer loop, leading to the EOF that
+ is necessary.
+ - refs #11194. The fault did not happen in testing, as Apache httpd
+ never tries to read the request body of the initial request,
+ sends the 401 reply and closes the stream. The server used in the
+ reported issue however tried to read the EOF and timed out on the
+ request.
+
+ Reported-by: Aleksander Mazur
+ Fixes #11194
+  Closes #11200
+
+Daniel Stenberg (23 May 2023)
+
+- RELEASE-NOTES: synced
+
+ bump to 8.2.0
+
+- lib: remove unused functions, make single-use static
+
+ Closes #11174
+
+- scripts/singleuse.pl: add more API calls
+
+Christian Hesse (23 May 2023)
+
+- configure: quote the assignments for run-compiler
+
+ Building for multilib failed, as the compiler command contains an
+ extra argument. That needs quoting.
+
+ Regression from b78ca50cb3dda361f9c1
+
+ Fixes #11179
+ Closes #11180
+
+Daniel Stenberg (23 May 2023)
+
+- misc: fix spelling mistakes
+
+ Reported-by: musvaage on github
+ Fixes #11171
+ Closes #11172
+
+Version 8.1.1 (23 May 2023)
+
+Daniel Stenberg (23 May 2023)
+
+- RELEASE-NOTES: synced
+
+ curl 8.1.1
+
+- THANKS: contributors from the 8.1.1 release
+
+Dan Fandrich (22 May 2023)
+
+- docs: fix fuzzing documentation link
+
+ Follow-up to 4c712a1b
+
+- CI: add an Alpine build with MUSL
+
+ MUSL is another libc implementation which has its own unique issues
+ worth testing.
+
+ Ref: #11140
+ Closes #11178
+
+- runtests: add a missing \n at the end of a log message
+
+correctmost on github (22 May 2023)
+
+- SECURITY-PROCESS.md: link security advisory doc and fix typo
+
+ Closes #11177
+
+Daniel Stenberg (22 May 2023)
+
+- TODO: build curl with Windows Unicode support
+
+ Closes #7229
+
+- KNOWN_BUGS: hyper memory-leaks
+
+ Closes #10803
+
+Stefan Eissing (22 May 2023)
+
+- http/2: unstick uploads
+
+ - refs #11157 and #11175 where uploads get stuck or lead to RST streams
+ - fixes our h2 send behaviour to continue sending in the nghttp2 session
+ as long as it wants to. This will empty our send buffer as long as
+ the remote stream/connection window allows.
+ - in case the window is exhausted, the data remaining in the send buffer
+ will wait for a WINDOW_UPDATE from the server. Which is a socket event
+ that engages our transfer loop again
+ - the problem in the issue was that we did not exhaust the window, but
+    left data in the send buffer while no further socket events happened.
+    The server was just waiting for us to send more.
+  - relatedly, fixed an issue where closing a stream with KEEP_HOLD
+    set kept the transfer from shutting down as it should have, leading
+    to a timeout.
+
+ Closes #11176
+
+Daniel Stenberg (21 May 2023)
+
+- workflows/macos: add a job using gcc + debug + secure transport
+
+Jay Satiro (21 May 2023)
+
+- lib: fix conversion warnings with gcc on macOS
+
+Daniel Stenberg (21 May 2023)
+
+- sectransp.c: make the code c89 compatible
+
+ Follow-up to dd2bb485521c2ec713001b3a
+
+ Reported-by: FeignClaims on github
+ Fixes #11155
+ Closes #11159
+
+Emanuele Torre (21 May 2023)
+
+- Revert "urlapi: respect CURLU_ALLOW_SPACE and CURLU_NO_AUTHORITY for redirects"
+
+ This reverts commit df6c2f7b544f1f35f2a3e0be11f345affeb6fe9c.
+  (It only keeps the test case that checks redirection to an absolute URL
+  without hostname and CURLU_NO_AUTHORITY.)
+
+ I originally wanted to make CURLU_ALLOW_SPACE accept spaces in the
+ hostname only because I thought
+ curl_url_set(CURLUPART_URL, CURLU_ALLOW_SPACE) was already accepting
+ them, and they were only not being accepted in the hostname when
+ curl_url_set(CURLUPART_URL) was used for a redirection.
+
+ That is not actually the case, urlapi never accepted hostnames with
+ spaces, and a hostname with a space in it never makes sense.
+  I probably misread the output of my original test where they seemed to
+  be accepted when using CURLU_ALLOW_SPACE and not redirecting.
+
+  Some other URL parsers seem to allow spaces in the host part of the URL,
+ e.g. both python3's urllib.parse module, and Chromium's javascript URL
+ object allow spaces (chromium percent escapes the spaces with %20),
+ (they also both ignore TABs, and other whitespace characters), but those
+ URLs with spaces in the hostname are useless, neither python3's requests
+ module nor Chromium's window.location can actually use them.
+
+  There is no reason to add support for URLs with spaces in the host,
+  since it was not an inconsistency bug; let's revert that patch before it
+  makes it into a release. Sorry about that.
+
+ I also reverted the extra check for CURLU_NO_AUTHORITY since that does
+ not seem to be necessary, CURLU_NO_AUTHORITY already worked for
+ redirects.
+
+ Closes #11169
+
+Dan Fandrich (20 May 2023)
+
+- runtests: use the correct fd after select
+
+ The code was using the wrong fd when determining which runner was ready
+ with a response.
+
+ Ref: #10818
+ Closes #11160
+
+- test425: fix the log directory for the upload
+
+ This must be %LOGDIR to let it work with parallel tests.
+
+ Ref: #10969
+
+- runtests: handle interrupted reads from IPC pipes
+
+ These can be interrupted by signals, especially SIGINT to shut down, and
+ must be restarted so the IPC call arrives correctly. If the read just
+ returns an error instead, the IPC calling state will go out of sync and
+ a proper shutdown won't happen.
+
+ Ref: #10818
+
+Stefan Eissing (20 May 2023)
+
+- http2: upload improvements
+
+ Make send buffer smaller to have progress and "upload done" reporting
+ closer to reality. Fix handling of send "drain" condition to no longer
+ trigger once the transfer loop reports it is done sending. Also do not
+ trigger the send "drain" on RST streams.
+
+ Background:
+  - an upload stall that timed out was reported in #11157
+ - test_07_33a reproduces a problem with such a stall if the
+ server 404s the request and RSTs the stream.
+ - test_07_33b verifies a successful PUT, using the parameters
+ from #11157 and checks success
+
+ Ref: #11157
+ Closes #11165
+
+- http2: increase stream window size to 10 MB
+
+ Reported-by: pandada8 on github
+
+ Fixes #11162
+ Closes #11167
+
+Daniel Stenberg (20 May 2023)
+
+- lib: rename struct 'http_req' to 'httpreq'
+
+ Because FreeBSD 14 kidnapped the name.
+ Ref: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=271526
+
+ Fixes #11163
+ Closes #11164
+
+Viktor Szakats (20 May 2023)
+
+- cmake: avoid `list(PREPEND)` for compatibility
+
+ `list(PREPEND)` requires CMake v3.15, our minimum is v3.7.
+
+ Ref: https://cmake.org/cmake/help/latest/command/list.html#prepend
+
+ Regression from 1e3319a167d2f32d295603167486e9e88af9bb4e
+
+ Reported-by: Keitagit-kun on Github
+ Fixes #11141
+ Closes #11144
+
+Daniel Stenberg (19 May 2023)
+
+- RELEASE-NOTES: synced
+
+Stefan Eissing (19 May 2023)
+
+- ngtcp2: proper handling of uint64_t when adjusting send buffer
+
+ Fixes #11149
+ Closes #11153
+
+- ngtcp2: fix compiler warning about possible null-deref
+
+ - compiler analyzer did not include the call context for this
+ static function where the condition had already been checked.
+  - eliminating the problem by making stream a call parameter
+
+ Fixes #11147
+ Closes #11151
+
+Emanuele Torre (19 May 2023)
+
+- docs: document that curl_url_cleanup(NULL) is a safe no-op
+
+ This has always been the case, but it was not documented.
+
+ The paragraph was copied verbatim from curl_easy_cleanup.3
+
+ Closes #11150
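+
+  A trivial example of the now-documented behavior:
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURLU *uh = NULL;      /* never allocated */
+      curl_url_cleanup(uh);  /* safe no-op on a NULL handle */
+      return 0;
+    }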
+
+Antoine Pitrou (19 May 2023)
+
+- select: avoid returning an error on EINTR from select() or poll()
+
+ This was already done for the poll() and select() calls
+ made directly from Curl_poll(), but was missed in
+ Curl_wait_ms(), which is called when there are no fds
+ to wait on.
+
+ Fixes #11135
+ Closes #11143
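+
+  For reference, a generic sketch of the retry-on-EINTR pattern this fix
+  applies (illustrative only, not the Curl_wait_ms() source; a real
+  implementation would also shrink the remaining timeout):
+
+    #include <errno.h>
+    #include <poll.h>
+
+    static int wait_ms(int timeout_ms)
+    {
+      int rc;
+      do {
+        rc = poll(NULL, 0, timeout_ms);    /* no fds: pure timeout wait */
+      } while(rc == -1 && errno == EINTR); /* interrupted: wait again */
+      return rc;
+    }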
+
+Daniel Stenberg (19 May 2023)
+
+- vquic.c: make recvfrom_packets static, avoid compiler warning
+
+ warning: no previous prototype for 'recvfrom_packets'
+
+ Reported-by: Keitagit-kun on github
+ Fixes #11146
+ Closes #11148
+
+- urlapi: allow numerical parts in the host name
+
+  It can only be an IPv4 address if all parts are all digits and there are
+  no more than four parts; otherwise it is a host name. Even a slightly
+  malformed IPv4 address will now be passed through as a host name.
+
+ Regression from 17a15d88467 shipped in 8.1.0
+
+ Extended test 1560 accordingly.
+
+ Reported-by: Pavel Kalyugin
+ Fixes #11129
+ Closes #11131
+
+Emilio Cobos Álvarez (19 May 2023)
+
+- http2: double http request parser max line length
+
+ This works around #11138, by doubling the limit, and should be a
+ relatively safe fix.
+
+  Ideally the buffer would grow as needed and there would be no need for a
+  limit, but that might be follow-up material.
+
+ Fixes #11138
+ Closes #11139
+
+Emanuele Torre (18 May 2023)
+
+- configure: fix --help alignment
+
+ AC_ARG_ENABLE seems to only trim off whitespace from the start and end
+ of its help-string argument, while prepending two spaces of indentation
+ to all lines.
+
+ This means that the two spaces of indentation between the --enable-rtsp
+  and the --disable-rtsp line were not removed, causing ./configure --help
+ to print:
+
+ Optional Features:
+ [...]
+ --enable-rtsp Enable RTSP support
+ --disable-rtsp Disable RTSP support
+
+ I removed the indentation to fix the issue, now it prints:
+
+ Optional Features:
+ [...]
+ --enable-rtsp Enable RTSP support
+ --disable-rtsp Disable RTSP support
+
+ The --enable-hsts and --disable-hsts lines had the same problems, and
+ have been fixed too.
+
+ Closes #11142
+
+Deal(一线灵) (18 May 2023)
+
+- cmake: repair cross compiling
+
+ It cannot *run* code for testing purposes when cross-compiling.
+
+ Closes #11130
+
+Daniel Stenberg (18 May 2023)
+
+- configure: generate a script to run the compiler
+
+  in the CURL_RUN_IFELSE macro, with LD_LIBRARY_PATH set to the value of
+  the configure invocation, and not the value that might be used later,
+  intended for executing the output the compiler produces.
+
+ For example when the compiler uses the same library (like libz) that
+ configure checks for.
+
+ Reported-by: Jonas Bülow
+ Fixes #11114
+ Closes #11120
+
+Stefan Eissing (18 May 2023)
+
+- cf-socket: completely remove the disabled USE_RECV_BEFORE_SEND_WORKAROUND
+
+ Closes #11118
+
+Emanuele Torre (18 May 2023)
+
+- urlapi: respect CURLU_ALLOW_SPACE and CURLU_NO_AUTHORITY for redirects
+
+  curl_url_set(uh, CURLUPART_URL, redirurl, flags) was not respecting
+ CURLU_ALLOW_SPACE and CURLU_NO_AUTHORITY in the host part of redirurl
+ when redirecting to an absolute URL.
+
+ Closes #11136
+
+Colin Cross (18 May 2023)
+
+- hostip: move easy_lock.h include above curl_memory.h
+
+ Similar to #9561, move easy_lock.h above curl_memory.h to fix building
+ against musl libc.
+
+ Closes #11140
+
+Hind Montassif (18 May 2023)
+
+- curl_easy_getinfo: clarify on return data types
+
+ Closes #11126
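+
+  As a reminder of what the clarified text is about, the variable passed
+  to curl_easy_getinfo() must match the documented type of the info
+  requested; a minimal sketch:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        long code = 0;         /* CURLINFO_RESPONSE_CODE wants a long */
+        double elapsed = 0.0;  /* CURLINFO_TOTAL_TIME wants a double */
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        if(curl_easy_perform(curl) == CURLE_OK) {
+          curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
+          curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &elapsed);
+          printf("%ld in %.2f seconds\n", code, elapsed);
+        }
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }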
+
+Emanuele Torre (18 May 2023)
+
+- checksrc: disallow spaces before labels
+
+ Out of 415 labels throughout the code base, 86 of those labels were
+ not at the start of the line. Which means labels always at the start of
+ the line is the favoured style overall with 329 instances.
+
+ Out of the 86 labels not at the start of the line:
+ * 75 were indented with the same indentation level of the following line
+ * 8 were indented with exactly one space
+  * 2 were indented with one fewer indentation level than the following
+    line
+  * 1 was indented with the indentation level of the following line minus
+    three spaces (probably unintentional)
+
+ Co-Authored-By: Viktor Szakats
+
+ Closes #11134
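+
+  To illustrate the favoured style, the label goes at the start of the
+  line (a made-up snippet, not from the curl sources):
+
+    static int example(int value)
+    {
+      if(value < 0)
+        goto fail;  /* jump to the cleanup label below */
+      return 0;
+    fail:           /* label flush left, as checksrc now requires */
+      return 1;
+    }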
+
+Daniel Stenberg (18 May 2023)
+
+- cookie: update the comment on cookie length and size limits
+
+ To refer to the proper cookie RFC and the upcoming RFC refresh.
+
+ Closes #11127
+
+- url: provide better error message when URLs fail to parse
+
+ By providing the URL API error message into the error message.
+
+ Ref: #11129
+ Closes #11137
+
+- RELEASE-NOTES: synced
+
+ bumped to 8.1.1
+
+Jon Rumsey (18 May 2023)
+
+- os400: update chkstrings.c
+
+  Compensate for recent changes to urldata.h that reclassify
+  STRING_AWS_SIGV4.
+
+ Fixes #11132
+ Closes #11133
+
+Version 8.1.0 (17 May 2023)
+
+Daniel Stenberg (17 May 2023)
+
+- RELEASE-NOTES: synced
+
+- THANKS: contributors from the 8.1.0 release
+
+- hostip: include easy_lock.h before using GLOBAL_INIT_IS_THREADSAFE
+
+ Since that header file is the only place that define can be defined.
+
+ Reported-by: Marc Deslauriers
+
+ Follow-up to 13718030ad4b3209
+
+ Closes #11121
+
+Thomas Taylor (16 May 2023)
+
+- aws-sigv4.d: fix region identifier in example
+
+ Closes #11117
+
+Philip Heiduck (15 May 2023)
+
+- mlc_config.json: remove this linkcheck CI job config file
+
+ Closes #11113
+
+Daniel Silverstone (15 May 2023)
+
+- ssh: Add support for libssh2 read timeout
+
+ Hook the new (1.11.0 or newer) libssh2 support for setting a read timeout
+ into the SERVER_RESPONSE_TIMEOUT option. With this done, clients can use
+ the standard curl response timeout setting to also control the time that
+ libssh2 will wait for packets from a slow server. This is necessary to
+ enable use of very slow SFTP servers.
+
+ Signed-off-by: Daniel Silverstone <daniel.silverstone@codethink.co.uk>
+
+ Closes #10965
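+
+  From an application's point of view this is driven by the existing
+  response timeout option; a minimal sketch for SFTP (assumes curl built
+  against libssh2 1.11.0 or newer for the timeout to reach the library):
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "sftp://example.com/file.txt");
+        /* allow a slow SFTP server up to 300 seconds per response */
+        curl_easy_setopt(curl, CURLOPT_SERVER_RESPONSE_TIMEOUT, 300L);
+        curl_easy_perform(curl);
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }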
+
+Osama Albahrani (14 May 2023)
+
+- GIT-INFO: add --with-openssl
+
+ Closes #11110
+
+Daniel Stenberg (13 May 2023)
+
+- RELEASE-NOTES: synced
+
+Marcel Raad (13 May 2023)
+
+- md(4|5): don't use deprecated iOS functions
+
+ They are marked as deprecated in iOS 13.0, which might result in
+ warnings-as-errors.
+
+ Also, use `*_MIN_REQUIRED` instead of `*_MIN_ALLOWED`, which seems to
+ be what's currently used.
+
+ Bug: https://github.com/curl/curl/issues/11098
+ Closes https://github.com/curl/curl/pull/11102
+
+- md4: only build when used
+
+ Its only usage in curl_ntlm_core.c is guarded by `USE_CURL_NTLM_CORE`,
+ so let's use this here too.
+
+ Ref: https://github.com/curl/curl/issues/11098
+ Closes https://github.com/curl/curl/pull/11102
+
+Vítor Galvão (12 May 2023)
+
+- write-out.d: Use response_code in example
+
+ Closes #11107
+
+Shohei Maeda (12 May 2023)
+
+- url: fix null dispname for --connect-to option
+
+ Closes #11106
+
+Daniel Stenberg (12 May 2023)
+
+- test2306: verify getting a second response with folded headers
+
+  Reproduces issue #11101 and verifies the fix.
+
+ Verifies a17b2a503f
+
+- headers: clear (possibly) lingering pointer in init
+
+ The "prevhead" pointer is used for the headers storage but was not
+  cleared correctly in init, which could make it act up when a
+ handle is reused.
+
+ Reported-by: Steve Herrell
+ Fixes #11101
+ Closes #11103
+
+- RELEASE-NOTES: synced
+
+- ngtcp2: use 0.15.0
+
+ - nghttp3 0.11.0
+ - nghttp2 1.53.0
+
+ Adapt to new API calls
+
+ Closes #11031
+
+Jay Satiro (10 May 2023)
+
+- openssl: fix indent
+
+Daniel Stenberg (10 May 2023)
+
+- CURLOPT_DNS_CACHE_TIMEOUT.3: fix spelling
+
+ Follow-up to 9ed7d56e044f5aa1b29
+
+ Closes #11096
+
+- hostip: use time_t for storing oldest DNS entry
+
+ Theoretically, the oldest time could overflow an int. In practice that
+ won't happen, but let's do this to please analyzers.
+
+ Follow-up to 9ed7d56e044f5aa1b2928ccde6245d0
+
+ Pointed out by Coverity.
+ Closes #11094
+
+- http: free the url before storing a new copy
+
+ To avoid a memory-leak.
+
+ Reported-by: Hiroki Kurosawa
+
+ Closes #11093
+
+- compressed.d: clarify the words on "not notifying headers"
+
+ Reported-by: Dylan Anthony
+ Fixes #11091
+ Closes #11092
+
+- libssh2: free fingerprint better
+
+ Reported-by: Wei Chong Tan
+ Closes #11088
+
+- CURLOPT_IPRESOLVE.3: clarify that this for host names, not IP addresses
+
+ Reported-by: Harry Sintonen
+ Closes #11087
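+
+  A short sketch of what the option applies to, resolving a host name to
+  one address family only (an IP-literal URL is not resolved at all):
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        /* only use IPv6 addresses returned for the host name */
+        curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V6);
+        curl_easy_perform(curl);
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }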
+
+- hostip: enforce a maximum DNS cache size independent of timeout value
+
+ To reduce the damage an application can cause if using -1 or other
+  ridiculous timeout values and letting the cache live for a long time.
+
+  The maximum number of entries in the DNS cache is now, totally
+  arbitrarily, hard-coded to 29999.
+
+ Closes #11084
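+
+  The timeout itself stays configurable per handle; a minimal sketch of
+  the option whose extreme values (like -1, "keep forever") this new cap
+  guards against:
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        /* keep resolved names for ten minutes instead of the default */
+        curl_easy_setopt(curl, CURLOPT_DNS_CACHE_TIMEOUT, 600L);
+        curl_easy_perform(curl);
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }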
+
+- hostip: store dns timeout as 'int'
+
+  ... because it is set and held as an 'int' elsewhere and can never be
+  larger.
+
+- RELEASE-NOTES: synced
+
+- tool_operate: refuse (--data or --form) and --continue-at combo
+
+  libcurl assumes that a --continue-at resumption is done to continue an
+  upload using the read callback, but neither --data nor --form uses
+  that, so the combination will not do what the user wants. Whatever the
+  user wants with this strange combination.
+
+ Add test 426 to verify.
+
+ Reported-by: Smackd0wn on github
+ Fixes #11081
+ Closes #11083
+
+- transfer: refuse POSTFIELDS + RESUME_FROM combo
+
+ The code assumes that such a resume is wanting to continue an upload
+ using the read callback, and since POSTFIELDS is done without callback
+ libcurl will just misbehave.
+
+ This combo will make the transfer fail with CURLE_BAD_FUNCTION_ARGUMENT
+ with an explanation in the error message.
+
+ Reported-by: Smackd0wn on github
+ Fixes #11081
+ Closes #11083
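+
+  A hedged illustration of the combination that is now rejected; the
+  transfer is expected to fail with CURLE_BAD_FUNCTION_ARGUMENT instead
+  of misbehaving:
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        CURLcode res;
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/upload");
+        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel");
+        curl_easy_setopt(curl, CURLOPT_RESUME_FROM, 100L); /* bad combo */
+        res = curl_easy_perform(curl);
+        if(res == CURLE_BAD_FUNCTION_ARGUMENT)
+          printf("refused: %s\n", curl_easy_strerror(res));
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }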
+
+- ipv4.d/ipv6.d: they are "mutex", not "boolean"
+
+ ... which for example means they do not have --no-* versions.
+
+ Reported-by: Harry Sintonen
+ Fixes #11085
+ Closes #11086
+
+- docs/SECURITY-ADVISORY.md: how to write a curl security advisory
+
+ Closes #11080
+
+nobedee on github (5 May 2023)
+
+- MANUAL.md: add dict example for looking up a single definition
+
+ Closes #11077
+
+Dan Fandrich (5 May 2023)
+
+- runtests: fix -c option when run with valgrind
+
+ The curl binary argument wasn't being quoted properly. This seems to
+ have broken at some point after quoting was added in commit 606b29fe.
+
+ Reported-by: Daniel Stenberg
+ Ref: #11073
+ Fixes #11074
+ Closes #11076
+
+- runtests: support creating more than one runner process
+
+ The controller currently only creates and uses one, but more are now
+ possible.
+
+ Ref: #10818
+
+- runtests: spawn a new process for the test runner
+
+ When the -j option is given, a new process is spawned in which the test
+ programs are run and from which test servers are started. Only one
+ process can be started at once, but this is sufficient to test that the
+ infrastructure can isolate those functions in a new task. There should
+ be no visible difference between the two modes at the moment.
+
+ Ref: #10818
+ Closes #11064
+
+- runtests: turn singletest() into a state machine
+
+ This allows it to run in a non-blocking manner.
+
+ Ref: #10818
+
+- runtests: change runner interface to be asynchronous
+
+ Program arguments are marshalled and then written to the end of a pipe
+ which is later read from and the arguments unmarshalled before the
+ desired function is called normally. The function return values are
+ then marshalled and written into another pipe when is later read from
+  then marshalled and written into another pipe which is later read from
+
+ The implementation is currently blocking but can be made non-blocking
+ without any changes to the API. This allows calling multiple runners
+ without blocking in the future.
+
+ Ref: #10818
+
+- runtests: call citest_finishtest in singletest
+
+ This is where citest_starttest is called.
+
+ Ref: #10818
+
+- runtests: add a runner initialization function
+
+ This sets up the runner environment to start running tests.
+
+ Ref: #10818
+
+- runtests: remove directory from server filename variables
+
+ There will soon be multiple log directories so the paths will no longer
+ be static in runtests.pl. Also, get rid of $SERVER2IN which was not
+ used.
+
+ Ref: #10818
+
+- runtests: reduce package exports after refactoring
+
+  Some recent refactoring made these exports no longer necessary. Also,
+ stop displaying the Unix socket paths at startup since there will soon
+ be many of them and they're not that interesting.
+
+ Ref: #10818
+
+- runtests: use a function to obtain $LOGDIR for a test
+
+ This will no longer be static soon.
+
+ Ref: #10818
+
+Jay Satiro (5 May 2023)
+
+- tool_cb_hdr: Fix 'Location:' formatting for early VTE terminals
+
+ - Disable hyperlink formatting for the 'Location:' header value in VTE
+ 0.48.1 and earlier, since it is buggy in some of those versions.
+
+ Prior to this change those terminals may show the location header value
+ as gibberish or show it twice.
+
+  Ref: https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda#backward-compatibility
+
+ Fixes https://github.com/curl/curl/issues/10428
+ Closes https://github.com/curl/curl/pull/11071
+
+François Michel (3 May 2023)
+
+- quiche: disable pacing while pacing is not actually performed
+
+ Closes #11068
+
+Daniel Stenberg (2 May 2023)
+
+- easy_cleanup: require a "good" handle to act
+
+ By insisting that the passed in handle is "good" (the magic number is
+ intact), this can limit the potential damage if a bad pointer is passed
+ in. Like when this function is called twice on the same handle pointer.
+
+ Ref: #10964
+ Closes #11061
+
+Andreas Falkenhahn (1 May 2023)
+
+- amiga: Fix CA certificate paths for AmiSSL and MorphOS
+
+ AmiSSL stores certificates in `AmiSSL:Certs` and MorphOS stores them in
+ `MOSSYS:Data/SSL/curl-ca-bundle.crt`.
+
+ Closes https://github.com/curl/curl/pull/11059
+
+Daniel Stenberg (30 Apr 2023)
+
+- http2: (void)-mark when we explicitly ignore the return code
+
+ When h2_progress_egress() is called. Pointed out by Coverity.
+
+ Closes #11057
+
+- checksrc: find bad indentation in conditions without open brace
+
+ If the previous line starts with if/while/for AND ends with a closed
+ parenthesis and there's an equal number of open and closed parentheses
+ on that line, verify that this line is indented $indent more steps, if
+ not a cpp line.
+
+ Also adjust the fall-out from this fix.
+
+ Closes #11054
+
+Diogo Teles Sant'Anna (28 Apr 2023)
+
+- CI: Set minimal permissions on workflow ngtcp2-quictls.yml
+
+ Signed-off-by: Diogo Teles Sant'Anna <diogoteles@google.com>
+
+ Closes #11055
+
+Dan Fandrich (28 Apr 2023)
+
+- CI: use another glob syntax for matching files on Appveyor
+
+ The previous globbing syntax was not matching files recursively in
+ directories, so try appending a /* to more closely match the examples at
+ https://www.appveyor.com/docs/how-to/filtering-commits/
+
+Daniel Stenberg (28 Apr 2023)
+
+- multi: add multi-ignore logic to multi_socket_action
+
+ The multi-ignore logic that was previously applied to
+ curl_multi_perform() (#10750) is here applied to the loop within
+ curl_multi_socket_action() to make it use the same optimization: most
+ handles have the same signal-ignore option state so this drastically
+ reduces the number of ignore/unignore calls per libcurl function invoke.
+
+ Follow-up to bc90308328afb8
+
+ Closes #11045
+
+Stefan Eissing (28 Apr 2023)
+
+- http2: do flow window accounting for cancelled streams
+
+  - nghttp2 does not free the connection-level flow control window for
+    aborted streams
+ - when closing transfers, make sure that any buffered
+ response data is "given back" to the flow control window
+ - add tests test_02_22 and test_02_23 to reproduce
+
+ Closes #11052
+
+- pingpong: fix compiler warning "assigning an enum to unsigned char"
+
+ Closes #11050
+
+Daniel Stenberg (28 Apr 2023)
+
+- configure: fix detection of apxs (for httpd)
+
+ The condition check was turned the wrong way around!
+
+ Closes #11051
+
+Viktor Szakats (28 Apr 2023)
+
+- ci: `-Wno-vla` no longer necessary
+
+ We handle this issue in the source now.
+
+ Follow-up to b725fe1944b45406676ea3aff333ae3085a848d9
+
+ Reviewed-by: Marcel Raad
+ Reviewed-by: Daniel Stenberg
+ Closes #11048
+
+Marcel Raad (28 Apr 2023)
+
+- tests/http: make curl_setup.h the first include
+
+ This is required for the macros there to take effect for system
+ libraries. Specifically, including the system libraries first led to
+ warnings about `_FILE_OFFSET_BITS` being redefined in curl_config.h on
+ the Solaris autobuilds for ws-data.c and ws-pingpong.c.
+ Also make the curl includes come first for the other source files here
+ for consistency.
+
+ Closes https://github.com/curl/curl/pull/11046
+
+Emanuele Torre (27 Apr 2023)
+
+- checksrc: check for spaces before the colon of switch labels
+
+ Closes #11047
+
+Daniel Stenberg (27 Apr 2023)
+
+- RELEASE-NOTES: synced
+
+- libssh: tell it to use SFTP non-blocking
+
+ Reported-by: Andreas Huebner
+ Fixes #11020
+ Closes #11039
+
+Stefan Eissing (27 Apr 2023)
+
+- http2: enlarge the connection window
+
+ - fixes stalled connections
+
+ - Make the connection window large enough, so that there is
+ some room left should 99/100 streams be PAUSED by the application
+
+ Reported-by: Paweł Wegner
+ Fixes #10988
+ Closes #11043
+
+Daniel Stenberg (27 Apr 2023)
+
+- checksrc: fix SPACEBEFOREPAREN for conditions starting with "*"
+
+ The open paren check wants to warn for spaces before open parenthesis
+ for if/while/for but also for any function call. In order to avoid
+ catching function pointer declarations, the logic allows a space if the
+ first character after the open parenthesis is an asterisk.
+
+  I also spotted that we did not include "switch" in the check, but we should.
+
+ This check is a little lame, but we reduce this problem by not allowing
+ that space for if/while/for/switch.
+
+ Reported-by: Emanuele Torre
+ Closes #11044
+
+- docs: minor polish
+
+ - "an HTTP*" (not "a")
+ - remove a few contractions
+ - remove a spurious "a"
+ - reduce use of "I" in texts
+
+ Closes #11040
+
+- ws: fix CONT opcode check
+
+ Detected by Coverity. Follow-up to 930c00c259
+
+ Closes #11037
+
+Dan Fandrich (27 Apr 2023)
+
+- CI: switch the awslc builds to build out-of-tree
+
+ This is a common configuration that should be tested to avoid
+  regressions. The awslc cmake build was already out-of-tree so the
+ automake build now joins it.
+
+ Ref: #11006
+
+- tests/http: fix out-of-tree builds
+
+ Add both lib/ directories (src & build) to the search path so
+ curl_setup.h and its dependencies can be found.
+
+ Followup-to acd82c8b
+
+ Ref: #11006
+ Closes #11036
+
+Daniel Stenberg (27 Apr 2023)
+
+- urlapi: make internal function start with Curl_
+
+ Curl_url_set_authority() it is.
+
+ Follow-up to acd82c8bfd
+
+ Closes #11035
+
+YX Hao (26 Apr 2023)
+
+- cf-socket: turn off IPV6_V6ONLY on Windows if it is supported
+
+ IPV6_V6ONLY refs:
+ https://en.wikipedia.org/wiki/IPv6#IPv4-mapped_IPv6_addresses
+ https://github.com/golang/go/blob/master/src/net/ipsock_posix.go
+ https://en.wikipedia.org/wiki/Unix-like
+  https://learn.microsoft.com/en-us/windows/win32/winsock/ipproto-ipv6-socket-options
+
+ default value refs:
+ https://datatracker.ietf.org/doc/html/rfc3493#section-5.3
+  https://www.kernel.org/doc/html/latest/networking/ip-sysctl.html#proc-sys-net-ipv6-variables
+
+ Closes #10975
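+
+  For background, the socket option in question, shown here in its POSIX
+  spelling as a generic sketch (not the cf-socket.c code):
+
+    #include <sys/socket.h>
+    #include <netinet/in.h>
+
+    /* clear IPV6_V6ONLY so an AF_INET6 socket can also carry
+       IPv4-mapped traffic; Windows enables the flag by default,
+       most Unix-like systems do not */
+    static int allow_mapped_ipv4(int sock)
+    {
+      int off = 0;
+      return setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+                        &off, sizeof(off));
+    }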
+
+Daniel Stenberg (26 Apr 2023)
+
+- urldata: shrink *select_bits int => unsigned char
+
+ - dselect_bits
+ - cselect_bits
+
+ ... are using less than 8 bits. Changed types and moved them towards
+ the end of the structs to fit better.
+
+ Closes #11025
+
+Stefan Eissing (26 Apr 2023)
+
+- tests/http: more tests with specific clients
+
+ - Makefile support for building test specific clients in tests/http/clients
+ - auto-make of clients when invoking pytest
+ - added test_09_02 for server PUSH_PROMISEs using clients/h2-serverpush
+ - added test_02_21 for lib based downloads and pausing/unpausing transfers
+
+ curl url parser:
+ - added internal method `curl_url_set_authority()` for setting the
+ authority part of a url (used for PUSH_PROMISE)
+
+ http2:
+ - made logging of PUSH_PROMISE handling nicer
+
+ Placing python test requirements in requirements.txt files
+  - separate files for the base test suite and http tests since usage
+    and module lists differ
+ - using the files in the gh workflows
+
+  websocket test cases, fixes for ws and bufq
+ - bufq: account for spare chunks in space calculation
+ - bufq: reset chunks that are skipped empty
+ - ws: correctly encode frames with 126 bytes payload
+ - ws: update frame meta information on first call of collect
+ callback that fills user buffer
+ - test client ws-data: some test/reporting improvements
+
+ Closes #11006
+
+Jay Satiro (26 Apr 2023)
+
+- libssh2: fix crash in keyboard callback
+
+ - Always set the libssh2 'abstract' user-pointer to the libcurl easy
+ handle associated with the ssh session, so it is always passed to the
+ ssh keyboard callback.
+
+ Prior to this change and since 8b5f100 (precedes curl 8.0.0), if libcurl
+ was built without CURL_DEBUG then it could crash during the ssh auth
+ phase due to a null dereference in the ssh keyboard callback.
+
+ Reported-by: Andreas Falkenhahn
+
+ Fixes https://github.com/curl/curl/pull/11024
+ Closes https://github.com/curl/curl/pull/11026
+
+Daniel Stenberg (26 Apr 2023)
+
+- docs: clarify that more backends have HTTPS proxy support
+
+ Closes #11033
+
+- KNOWN_BUGS: remove two not-bugs
+
+ - 11.7 signal-based resolver timeouts
+
+ Not considered a bug anymore but just implementation details. People
+ should avoid using timeouts with the synchronous name resolver.
+
+ - 11.16 libcurl uses renames instead of locking for atomic operations
+
+ Not a bug, just a description of how it works
+
+ Closes #11032
+
+Harry Sintonen (26 Apr 2023)
+
+- hostip: add locks around use of global buffer for alarm()
+
+ When building with the sync name resolver and timeout ability we now
+ require thread-safety to be present to enable it.
+
+ Closes #11030
+
+Daniel Stenberg (26 Apr 2023)
+
+- curl_path: bring back support for SFTP path ending in /~
+
+ libcurl used to do a directory listing for this case (even though the
+ documentation says a URL needs to end in a slash for this), but
+ 4e2b52b5f7a3 modified the behavior.
+
+ This change brings back a directory listing for SFTP paths that are
+ specified exactly as /~ in the URL.
+
+ Reported-by: Pavel Mayorov
+ Fixes #11001
+ Closes #11023
+
+Emanuele Torre (26 Apr 2023)
+
+- docs/libcurl/curl_*escape.3: rename "url" argument to "input"/"string"
+
+ Also reword the DESCRIPTION section to mention "input"/"string" argument
+ in bold.
+
+ Closes #11027
+
+- docs/libcurl: minor cleanups
+
+ I was reading curl_unescape(3) and I noticed that there was an extra
+ space after the open parenthesis in the SYNOPSIS; I removed the extra
+ space.
+
+ I also ran a few grep -r commands to find and remove extra spaces
+ after '(' in other files, and to find and replace uses of `T*' instead
+  of `T *'. Some of the instances of `T*` were unnecessary casts that I
+ removed.
+
+ I also fixed a comment that was misaligned in CURLMOPT_SOCKETFUNCTION.3.
+
+ And I fixed some formatting inconsistencies: in curl_unescape(3), all
+  function parameters were mentioned in bold text except length, which was
+  mentioned as 'length'; and, in curl_easy_unescape(3), all parameters
+  were mentioned in bold text except url, which was italicised. Now they are
+ all mentioned in bold.
+  The documentation is not very consistent in how function parameters are
+ formatted: many pages italicise them, and others display them in bold
+ text; but I think it makes sense to at least be consistent with
+ formatting within the same page.
+
+ Closes #11027
+
+Daniel Stenberg (26 Apr 2023)
+
+- man pages: simplify the .TH sections
+
+ - remove the version numbers
+ - simplify the texts
+
+ The date and version number will be put there for releases when maketgz
+ runs the updatemanpages.pl script.
+
+ Closes #11029
+
+- hostcheck: fix host name wildcard checking
+
+  The leftmost "label" of the host name can now only match against a single
+  '*', like browsers have done for a long time.
+
+ - extended unit test 1397 for this
+ - move some SOURCE variables from unit/Makefile.am to unit/Makefile.inc
+
+ Reported-by: Hiroki Kurosawa
+ Closes #11018
+
+Dan Fandrich (25 Apr 2023)
+
+- smbserver: remove temporary files before exit
+
+ Each execution of test 1451 would leave a file in /tmp before. Since
+ Windows can't delete a file while it's open, all the temporary file
+ names are stored and deleted on exit.
+
+ Closes #10990
+
+Stefan Eissing (25 Apr 2023)
+
+- Websocket en-/decoding
+
+  - state is fully kept at the connection, since curl_ws_send() and
+    curl_ws_recv() have lifetimes beyond the usual transfers
+ - no more limit on frame sizes
+
+ Reported-by: simplerobot on github
+ Fixes #10962
+ Closes #10999
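+
+  For orientation, a sketch of the receive side of that API in
+  connect-only mode (error handling trimmed, host is a placeholder):
+
+    #include <stdio.h>
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL, "wss://example.com/ws");
+        curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 2L); /* ws mode */
+        if(curl_easy_perform(curl) == CURLE_OK) {
+          char buf[256];
+          size_t nread = 0;
+          const struct curl_ws_frame *meta = NULL;
+          /* frames can now exceed the buffer size: read until the
+             frame's bytesleft reaches zero */
+          do {
+            if(curl_ws_recv(curl, buf, sizeof(buf), &nread, &meta))
+              break;
+            fwrite(buf, 1, nread, stdout);
+          } while(meta && meta->bytesleft > 0);
+        }
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }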
+
+Patrick Monnerat (25 Apr 2023)
+
+- urldata: copy CURLOPT_AWS_SIGV4 value on handle duplication
+
+ Prior to this change STRING_AWS_SIGV4 (CURLOPT_AWS_SIGV4) was wrongly
+ marked as binary data that could not be duplicated.
+
+ Without this fix, this option's value is not copied upon calling
+ curl_easy_duphandle().
+
+ Closes https://github.com/curl/curl/pull/11021
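+
+  A small example of the scenario this fixes; after the change the
+  duplicated handle keeps the CURLOPT_AWS_SIGV4 string (the endpoint and
+  credentials below are placeholders):
+
+    #include <curl/curl.h>
+
+    int main(void)
+    {
+      CURL *copy;
+      CURL *curl = curl_easy_init();
+      if(curl) {
+        curl_easy_setopt(curl, CURLOPT_URL,
+                         "https://mybucket.s3.us-east-1.amazonaws.com/f");
+        curl_easy_setopt(curl, CURLOPT_AWS_SIGV4, "aws:amz:us-east-1:s3");
+        curl_easy_setopt(curl, CURLOPT_USERPWD, "ACCESS_KEY:SECRET_KEY");
+        copy = curl_easy_duphandle(curl); /* now inherits AWS_SIGV4 too */
+        curl_easy_cleanup(copy);
+        curl_easy_cleanup(curl);
+      }
+      return 0;
+    }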
+
+Stefan Eissing (25 Apr 2023)
+
+- http3: expire unpaused transfers in all HTTP/3 backends
+
+ Closes #11005
+
+- http2: always EXPIRE_RUN_NOW unpaused http/2 transfers
+
+ - just increasing the http/2 flow window does not necessarily
+ make a server send new data. It may already have exhausted
+ the window before
+
+ Closes #11005
+
+- http2: pass `stream` to http2_handle_stream_close to avoid NULL checks
+
+ Closes #11005
+
+- h2/h3: replace `state.drain` counter with `state.dselect_bits`
+
+ - `drain` was used by http/2 and http/3 implementations to indicate
+    that the transfer requires send/recv independent of its socket
+ poll state. Intended as a counter, it was used as bool flag only.
+ - a similar mechanism exists on `connectdata->cselect_bits` where
+ specific protocols can indicate something similar, only for the
+ whole connection.
+  - `cselect_bits` are cleared in transfer.c on use and, importantly,
+ also set when the transfer loop expended its `maxloops` tries.
+ `drain` was not cleared by transfer and the http2/3 implementations
+ had to take care of that.
+ - `dselect_bits` is cleared *and* set by the transfer loop. http2/3
+ does no longer clear it, only set when new events happen.
+
+ This change unifies the handling of socket poll overrides, extending
+  `cselect_bits` by an easy-handle specific value and a common treatment in
+ transfers.
+
+ Closes #11005
+
+Daniel Stenberg (25 Apr 2023)
+
+- socketpair: verify with a random value
+
+ ... instead of using the curl time struct, since it would use a few
+ uninitialized bytes and the sanitizers would complain. This is a neater
+ approach I think.
+
+ Reported-by: Boris Kuschel
+ Fixes #10993
+ Closes #11015
+
+Stefan Eissing (25 Apr 2023)
+
+- HTTP3: document the ngtcp2/nghttp3 versions to use for building curl
+
+ - refs #11011 to clarify this for people building curl themselves
+
+ Closes #11019
+
+Daniel Stenberg (25 Apr 2023)
+
+- lib: unify the upload/method handling
+
+ By making sure we set state.upload based on the set.method value and not
+ independently as set.upload, we reduce confusion and mixup risks, both
+ internally and externally.
+
+ Closes #11017
+
+- RELEASE-NOTES: synced
+
+Dan Fandrich (24 Apr 2023)
+
+- CI: don't run CI jobs if only another CI was changed
+
+ A few paths were missed in the last commit, as well as a job added since
+ then.
+
+ Followup-to 395b9175
+
+- CI: adjust labeler match patterns
+
+- runtests: support buffering log messages in runner & servers
+
+ Log messages generated with logmsg can now be buffered and returned from
+ the runner as a return value. This will be needed with parallel testing
+ to allow all messages for one test to be displayed together instead of
+ interspersed with messages of multiple tests. Buffering can be disabled
+ by setting a logging callback function with setlogfunc, which is
+ currently being done to preserve existing logging behaviour for now.
+
+ Some additional output is generated in verbose and debugprotocol modes,
+ which don't always use logmsg. These modes also impact some servers
+ which generate extra messages. No attempt is made to buffer everything
+ if these modes are enabled.
+
+ Ref: #10818
+ Closes #11016
+
+- runtests: more consistently use logmsg in server control code
+
+ Also, display an error when sshversioninfo returns one.
+
+ Ref: #10818
+
+- runtests: create runner functions for clearlocks and stopservers
+
+ runtests.pl now uses runner for all server actions beyond the initial
+ variable configuration.
+
+ Ref: #10818
+
+- runtests: tightened servers package exports
+
+ The defaults are intended for runtests.pl, whereas runner.pm needs to
+ explicitly specify them.
+
+- runtests: display logs on server failure in singletest()
+
+ This is closer to the place where logs are displayed on test failure.
+ Also, only display these logs if -p is given, which is the same flag
+ that controls display of test failure logs. Some server log files
+ need to be deleted later so that they stay around long enough to be
+ displayed on failure.
+
+ Ref: #10818
+
+- runtests: turn a print into a logmsg
+
+ Also enable another couple of useful messages in verbose mode.
+
+ Ref: #10818
+
+Daniel Stenberg (24 Apr 2023)
+
+- http: store the password in the correct variable
+
+ Typo from fc2f1e547a4a, detected by Coverity (because there's dead code
+ due to this).
+
+ Closes #11002
+
+Stefan Eissing (24 Apr 2023)
+
+- HTTP3/quiche: terminate h1 response header when no body is sent
+
+ - fixes a failure in test2501 where a response without body was missing
+ the final empty line
+
+ Closes #11003
+
+Dan Fandrich (22 Apr 2023)
+
+- runtests: move showdiff into runtests.pl
+
+ It's not used anywhere else.
+
+- devtest: add a new script for testing the test harness
+
+ This is currently useful for starting a test server on its own without
+ an associated test, which can be used for interactive curl testing or
+ for validating parts of the test harness itself. More commands can be
+ added to perform additional functions in the future.
+
+ Ref: #10818
+ Closes #11008
+
+- runtests: refactor the main test loop into two
+
+ The test loop now has an initial loop that first runs through all
+ possible tests to build a set of those to attempt on this run based on
+ features and keywords and only then goes through that new list to run
+  them. This actually makes it three loops through all test cases, as
+ there is an existing loop that gathers possible test numbers from the
+ test files on disk.
+
+ This has two minor effects on the output: all the tests that will be
+ skipped are displayed at the start (instead of being interspersed with
+ other tests) and the -l option no longer shows a count of tests at the
+ end or a (misleading) statement that tests have run successfully. The
+ skipped tests are also omitted from the test results sent to AppVeyor
+ and Azure in CI builds.
+
+ Another effect is a reduction in the amount of work considered part of
+ the "Test definition reading and preparation time" reported with -r
+ making those figures slightly lower than before.
+
+ Ref: #10818
+
+- runtests: track only the current test timings in runner.pm
+
+  This avoids passing these data through global variables, which
+ soon won't be possible.
+
+ Ref: #10818
+
+- runtests: skip test preprocessing when doing -l
+
+ This speeds up the output tremendously by avoiding unnecessary work.
+
+- runtests: simplify value returned regarding use of valgrind
+
+ As a side effect this will now also show in verbose mode that valgrind
+ is being skipped on tests that explicitly disable it, such as 600.
+
+ Ref: #10818
+
+- runtests: fix quoting in Appveyor and Azure test integration
+
+ Test 1442's name was not quoted correctly so wasn't registered in
+ Appveyor and it had the wrong name in Azure. The JSON string quotes were
+ also invalid, even though both servers happened to accept it regardless.
+
+ Closes #11010
+
+Daniel Stenberg (19 Apr 2023)
+
+- RELEASE-NOTES: synced
+
+Dan Fandrich (18 Apr 2023)
+
+- runtests: spread out the port numbers used by servers
+
+ The server ports are chosen randomly for each server, but the random
+ ranges chosen were inconsistently-sized and overlapping. Now, they are
+ spread out more so at least the first random port chosen for each server
+ is guaranteed to not also be chosen by another server. The starting port
+ numbers are also raised to put them in the Ephemeral Port range—not the
+ range defined by RFC 6335 but the one used by Linux, which starts lower
+ and gives us more room to work with.
+
+ Reported-by: Daniel Stenberg
+
+- runtests: fix problems on <killserver> failure
+
+ The verify time must be set in this case, like all cases. An error
+ message needs to be displayed as well.
+
+- runtests: fix perl warning when <tool> is wrong
+
+- runtests: don't try to stop stunnel before trying again
+
+ Calling stopserver() before retrying stunnel due to an error would stop
+ the dependent server (such as HTTP) meaning stunnel would have nothing
+ to talk to when it came up. Don't try to force a stop when it didn't
+ actually start. Also, don't mark the server as bad for future use when
+ it starts up on a retry.
+
+ Reported-by: eaglegai at github
+ Tested-by: eaglegai at github
+ Fixes #10976
+
+- runtests: don't accidentally randomly choose the same port
+
+ If a server couldn't be started on a port, a new one is randomly chosen
+ and the server is tried again. Avoid accidentally using a
+ randomly-chosen 0 port offset by adding 1 to the random number.
+
+ Found-by: Daniel Stenberg
+
+- runtests: don't attempt to use a port we know is in use
+
+ This reduces the startup time when there is a known conflict on the
+ random port chosen for a server. This was already done for stunnel, but
+ now it's done for all servers.
+
+- http-server: fix server name in a log message
+
+ This changed when the file was renamed in commit cbf57176
+
+- runtests: refactor into more packages
+
+ testutil.pm now contains a few miscellaneous functions that are used in
+ several places but have no better place to live. subvariables moves to
+ servers.pm since most variables that it substitutes relate to servers,
+ so this is the most appropriate place. Rename a few functions for better
+ naming consistency.
+
+ Ref: #10818
+ Closes #10995
+
+- runtests: call timestampskippedevents() in singletest
+
+ ..rather than by the runner
+
+- runtests: assume a newer Valgrind by default
+
+ The tests for an older Valgrind version should probably just be deleted,
+ given that they're testing for an 18-year-old version.
+
+- runtests: refactor test runner code into runner.pm
+
+ This is code that is directly responsible for running a single test.
+ This will eventually run in a separate process as part of the parallel
+ testing project.
+
+ Ref: #10818
+
+- runtests: skip unneeded work if test won't be running
+
+ This speeds up tests by avoiding unnecessary processing.
+
+ Ref: #10818
+
+- runtests: factor out singletest_postcheck
+
+ This will eventually need to be part of the test runner.
+
+ Ref: #10818
+
+- test303: kill server after test
+
+ Otherwise, an HTTP test closely following this one with a tight time
+ constraint (e.g. 672) could fail because the test server stays sitting
+ with the wait command for a while.
+
+Patrick Monnerat (18 Apr 2023)
+
+- OS400: provide ILE/RPG usage examples
+
+ Closes https://github.com/curl/curl/pull/10994
+
+- OS400: improve vararg emulation
+
+ - Use V7R4 RPG procedure overloading to improve vararg emulation.
+
+ From OS400 V7R4 and above, ILE/RPG implements a limited procedure
+ overloading feature that can be used to improve curl's typed
+ implementation of varargs procedures. This commit applies it to
+ curl_easy_setopt(), curl_multi_setopt(), curl_share_setopt() and
+ curl_easy_getinfo().
+
+ Closes https://github.com/curl/curl/pull/10994
+
+- OS400: fix and complete ILE/RPG binding
+
+  - Fix wrong definitions of CURL_ZERO_TERMINATED, curl_mime_data() and
+ curl_mime_data_ccsid().
+
+ - Add recent definitions, in particular blob, header API and WebSockets
+ API.
+
+ - Support for CURLVERSION_ELEVENTH.
+
+ - New functions for EBCDIC support.
+
+ Reflect these changes in README.OS400.
+
+ Closes https://github.com/curl/curl/pull/10994
+
+- OS400: implement EBCDIC support for recent features
+
+ - Support CURLVERSION_ELEVENTH.
+
+ - New function curl_url_strerror_ccsid().
+
+ - curl_easy_setopt_ccsid() supports blobs and 3 recent string options.
+
+ - New function curl_easy_header_ccsid().
+
+ - New generic latin1<-->ccsid conversion functions curl_from_ccsid() and
+ curl_to_ccsid() for user convenience.
+
+ - README.OS400 updated accordingly.
+
+ - Removed a leftover QsoSSL support identifier.
+
+ Closes https://github.com/curl/curl/pull/10994
+
+- OS400: rework build scripts
+
+ - Rename shell function "system" to "CLcommand" to avoid confusion with
+ built-in command.
+
+ - Reformat scripts. Fix some indentations. Avoid lines > 80 characters
+ where possible.
+
+ - Support ASCII runtime development files in a user-defined directory
+ path.
+
+ - FIX SONAME detection.
+
+ - Drop form API test program compilation (does not exist anymore).
+
+ Closes https://github.com/curl/curl/pull/10994
+
+Sevan Janiyan (18 Apr 2023)
+
+- tests/sshserver.pl: Define AddressFamily earlier
+
+  As the comment states, "Address family must be specified before
+  ListenAddress", otherwise the tests fail to run:
+  `"failed starting SSH server" 52 times (582, 583, 600, 601, 602, 603,
+  604, 605, 606 and 43 more)`
+
+ Closes #10983
+
+Stefan Eissing (18 Apr 2023)
+
+- quiche: Enable IDLE egress handling
+
+ Follow-up to 544abeea which added the handling but wrongly left it
+ commented out.
+
+ Closes https://github.com/curl/curl/pull/11000
+
+Daniel Stenberg (18 Apr 2023)
+
+- docs/examples/protofeats.c: Outputs all protocols and features
+
+ Showing off one way to get to char pointer arrays of info returned by
+ curl_version_info()
+
+ Closes #10991
+
+- tests/keywords.pl: remove
+
+ This script does not work since the introduction of the test
+ preprocessing. If we need this functionality, it probably needs to be
+ moved into the runtests tool or similar.
+
+ Reported-by: Dan Fandrich
+ Fixes #10895
+ Closes #10987
+
+Stefan Eissing (17 Apr 2023)
+
+- http2: support HTTP/2 to forward proxies, non-tunneling
+
+ - with `--proxy-http2` allow h2 ALPN negotiation to
+ forward proxies
+ - applies to http: requests against a https: proxy only,
+ as https: requests will auto-tunnel
+ - adding a HTTP/1 request parser in http1.c
+ - removed h2h3.c
+ - using new request parser in nghttp2 and all h3 backends
+ - adding test 2603 for request parser
+ - adding h2 proxy test cases to test_10_*
+
+  scorecard.py: request scoring accidentally always ran curl
+  with '-v'. Removed that; expect roughly doubled numbers.
+
+ labeller: added http1.* and h2-proxy sources to detection
+
+ Closes #10967
+
+Daniel Stenberg (17 Apr 2023)
+
+- curl_easy_unescape.3: rename the argument
+
+ and highlight it appropriately in the text.
+
+ Closes #10979
+
+Viktor Szakats (17 Apr 2023)
+
+- autotools: sync up clang picky warnings with cmake
+
+ Bringing missing options over from CMake.
+
+ Move around existing `-Wno-pointer-bool-conversion` option to come
+ _after_ `-Wconversion`.
+
+ Reviewed-by: Marcel Raad
+ Closes #10974
+
+Daniel Stenberg (17 Apr 2023)
+
+- tests/libtest/lib1900.c: remove
+
+ This file was left behind when the rest of the test was previously removed.
+
+ Follow-up to e50a877df74f
+
+- src/tool_operhlp.c: fix value stored to 'uerr' is never read
+
+ Ref: https://github.com/curl/curl/pull/10974#issuecomment-1510461343
+ Reported-by: Viktor Szakats
+ Closes #10982
+
+Viktor Szakats (16 Apr 2023)
+
+- cmake: speed up and extend picky clang/gcc options
+
+ Extend existing picky compiler options with ones missing compared to
+ autotools builds. Also sync options between clang and gcc.
+
+ Redesign the way we enable these options to avoid the slow option
+ detection almost completely.
+
+ This reduces the number of detections from 35 to zero for clang and
+ 3 for gcc, even after adding a bunch of new options.
+
+ clang 3.0 (2011-11-29) and gcc 2.95 (1999-07-31) now required.
+
+ Also show enabled picky options.
+
+ Ref: https://github.com/libssh2/libssh2/pull/952
+
+ Reviewed-by: Daniel Stenberg
+ Closes #10973
+
+Andreas Falkenhahn (16 Apr 2023)
+
+- nbtlm: use semicolons instead of commas for (void) args
+
+ Closes #10978
+
+Daniel Stenberg (15 Apr 2023)
+
+- multi: free up more data earlier in DONE
+
+ Before checking for more users of the connection and possibly bailing
+ out.
+
+ Fixes #10971
+ Reported-by: Paweł Wegner
+ Closes #10972
+
+- RELEASE-NOTES: synced
+
+- curl: do NOT append file name to path for upload when there's a query
+
+ Added test 425 to verify.
+
+ Reported-by: Dirk Rosenkranz
+ Bug: https://curl.se/mail/archive-2023-04/0008.html
+ Closes #10969
+
+- libcurl-thread.3: improved name resolver wording
+
+ And make better .SH sections
+
+ Closes #10966
+
+Colman Mbuya (14 Apr 2023)
+
+- CURLOPT_PROXY_SSL_VERIFYPEER.3: fix minor grammar mistake
+
+ Closes #10968
+
+Daniel Stenberg (14 Apr 2023)
+
+- curl: add --proxy-http2
+
+ For trying HTTP/2 with an HTTPS proxy.
+
+ Closes #10926
+
+- KNOWN_BUGS: remove fixed or outdated issues, move non-bugs
+
+ - remove h3 issues believed to be fixed
+
+ - make the flaky CI issue be generic and not Windows specific
+
+ - "TLS session cache does not work with TFO" now documented
+
+ This is now a documented restriction and not a bug. TFO in general is
+    rarely used and has other problems, making it a low-priority thing to
+ work on.
+
+ - remove "Renegotiate from server may cause hang for OpenSSL backend"
+
+ This is an OpenSSL issue, not a curl one. Even if it taints curl.
+
+ - rm "make distclean loops forever"
+
+ - rm "configure finding libs in wrong directory"
+
+ Added a section to docs/INSTALL.md about it.
+
+ - "A shared connection cache is not thread-safe"
+
+ Moved over to TODO and expanded for other sharing improvements we
+ could do
+
+ - rm "CURLOPT_OPENSOCKETPAIRFUNCTION is missing"
+
+ - rm "Blocking socket operations in non-blocking API"
+
+ Already listed as a TODO
+
+ - rm "curl compiled on OSX 10.13 failed to run on OSX 10.10"
+
+ Water under the bridge. No one cares about this anymore.
+
+ - rm "build on Linux links libcurl to libdl"
+
+ Verified to not be true (anymore).
+
+ - rm "libpsl is not supported"
+
+ The cmake build supports it since cafb356e19cda22
+
+ Closes #10963
+
+- url: fix PVS nits
+
+ - expression 'hostptr' is always true
+ - a part of conditional expression is always true: proxypasswd
+ - expression 'proxyuser' is always true
+ - avoid multiple Curl_now() calls in allocate_conn
+
+ Ref: #10929
+ Closes #10959
+
+- bufq: simplify since expression is always true
+
+ The check for 'len' is already done so it will remain true until
+ updated. Pointed out by PVS.
+
+ Ref: #10929
+ Closes #10958
+
+- hash: fix assigning same value
+
+ Pointed out by PVS
+
+ Ref: #10929
+ Closes #10956
+
+- cookie: address PVS nits
+
+ - avoid assigning the same value again
+ - remove superfluous check of co->domain
+ - reduce variable scope for namep/valuep
+
+ Ref: #10929
+ Closes #10954
+
+Stefan Eissing (14 Apr 2023)
+
+- cf-socket: Disable socket receive buffer by default
+
+ - Disable socket receive buffer unless USE_RECV_BEFORE_SEND_WORKAROUND
+ is in place.
+
+ While we would like to use the receive buffer, we have stalls in
+ parallel transfers where not all buffered data is consumed and no socket
+ events happen.
+
+ Note USE_RECV_BEFORE_SEND_WORKAROUND is a Windows sockets workaround
+ that has been disabled by default since b4b6e4f1, due to other bugs.
+
+ Closes https://github.com/curl/curl/pull/10961
+
+- cf-h2-proxy: fix processing ingress to stop too early
+
+ - progress ingress stopped too early, causing data
+ from the underlying filters to not be processed and
+    a report that no tunnel data was available
+  - this led to "hangs" where no socket activity was
+    seen but data rested in buffers
+
+ Closes #10952
+
+- http3: check stream_ctx more thoroughly in all backends
+
+ - callbacks and filter methods might be invoked at unexpected
+ times, e.g. when the transfer's stream_ctx has not been initialized
+ yet or, more likely, has already been taken down.
+  - check for existence of stream_ctx in such places and return
+ an error or silently succeed the call.
+
+ Closes #10951
+
+Daniel Stenberg (13 Apr 2023)
+
+- ftp: fix 'portsock' variable was assigned the same value
+
+ Pointed out by PVS
+
+ Ref: #10929
+ Closes #10955
+
+- ftp: remove dead code
+
+ This condition can never be true here since it is handled already 28
+ lines above.
+
+ Pointed out by PVS.
+
+ Ref: #10929
+ Closes #10957
+
+- cf-h1-proxy: skip an extra NULL assign
+
+ and use Curl_safefree() once to save another NULL assign. Found by PVS.
+
+ Ref. #10929
+ Closes #10953
+
+Philip Heiduck (13 Apr 2023)
+
+- GHA: suppress git clone output
+
+ Follow-up: https://github.com/curl/curl/commit/8203aa6ed405ec832d2c62f18dfda2
+ 93f89a23f9
+
+ Closes #10949
+
+Stefan Eissing (13 Apr 2023)
+
+- cf-socket: remove dead code discovered by PVS
+
+ Closes #10960
+
+Daniel Stenberg (13 Apr 2023)
+
+- http: skip a double NULL assign
+
+ and also use a local variable to shorten the long names and increase
+ readability in the function. Pointed out by PVS.
+
+ Ref: #10929
+ Closes #10950
+
+- mime: skip NULL assigns after Curl_safefree()
+
+ Pointed out by PVS.
+
+ Ref: #10929
+ Closes #10947
+
+- rtsp: skip NULL assigns after Curl_safefree()
+
+ ... since this is a macro that assigns NULL itself. Pointed out by PVS.
+
+ Ref: #10929
+ Closes #10946
+
+- smb: remove double assign
+
+    The same variable is assigned the same value already a few lines above.
+ Pointed out by PVS.
+
+ Ref: #10929
+ Closes #10945
+
+- transfer: skip extra assign
+
+ The 'result' variable already contains CURLE_OK at this point, no use in
+ setting it again. Pointed out by PVS.
+
+ Ref: #10929
+ Closes #10944
+
+- urlapi: skip a pointless assign
+
+ It stores a null byte after already having confirmed there is a null
+ byte there. Detected by PVS.
+
+ Ref: #10929
+ Closes #10943
+
+Philip Heiduck (13 Apr 2023)
+
+- GHA: suppress git clone output
+
+ Closes #10939
+
+Stefan Eissing (13 Apr 2023)
+
+- tests: make test_12_01 a bit more forgiving on connection counts
+
+- cf-socket: add socket recv buffering for most tcp cases
+
+ - use bufq as recv buffer, also for Windows pre-receive handling
+ - catch small reads followed by larger ones in a single socket
+ call. A common pattern on TLS connections.
+
+ Closes #10787
+
+Daniel Stenberg (13 Apr 2023)
+
+- urlapi: cleanups
+
+ - move host checks together
+ - simplify the scheme parser loop and the end of host name parser
+    - avoid intermediate buffer storing in multiple places
+ - reduce scope for several variables
+ - skip the Curl_dyn_tail() call for speed
+ - detect IPv6 earlier and skip extra checks for such hosts
+    - normalize directly in dynbuf instead of intermediate buffer
+    - split out the IPv6 parser into its own function
+ - call the IPv6 parser directly for ipv6 addresses
+ - remove (unused) special treatment of % in host names
+ - junkscan() once in the beginning instead of scattered
+ - make junkscan return error code
+ - remove unused query management from dedotdotify()
+ - make Curl_parse_login_details use memchr
+ - more use of memchr() instead of strchr() and less strlen() calls
+ - make junkscan check and return the URL length
+
+ An optimized build runs one of my benchmark URL parsing programs ~41%
+ faster using this branch. (compared against the shipped 7.88.1 library
+ in Debian)
+
+ Closes #10935
+
+Josh McCullough (13 Apr 2023)
+
+- http2: fix typo in infof() call
+
+ Closes #10940
+
+Daniel Stenberg (12 Apr 2023)
+
+- noproxy: pointer to local array 'hostip' is stored outside scope
+
+ Ref: #10929
+ Closes #10933
+
+Stefan Eissing (12 Apr 2023)
+
+- connect: fix https connection setup to treat ssl_mode correctly
+
+    - for the HTTPS protocol, a disabled ssl_mode is never acceptable.
+
+ Closes #10934
+
+Douglas R. Reno (12 Apr 2023)
+
+- CMakeLists.txt: fix typo for Haiku detection
+
+ Closes #10937
+
+Dan Fandrich (11 Apr 2023)
+
+- pathhelp: use the cached $use_cygpath when available
+
+- runtests: eliminate unneeded variable
+
+- runtests: make the # of server start attempts a constant
+
+- runtests: on startup failure call displaylogs only in serverfortest
+
+ This reduces the number of calls spread throughout the code.
+
+ Ref: #10818
+ Closes #10919
+
+- runtests: return an error code with startservers()
+
+ The code indicates the kind of failure encountered in starting a server,
+ which can be used by the caller to tailor the user experience.
+
+ Ref: #10818
+
+- runtests: abort early if runpingpongserver is given a bad server type
+
+- runtests: don't use the SMB server verification time as reference
+
+ %FTPTIME2 and %FTPTIME3 should be set by the FTP server only, for
+ consistency.
+
+- tests: factor out the test server management code
+
+ This now lives in servers.pm with some configuration variables moved to
+ globalconfig.pm
+
+ Ref: #10818
+
+- runtests: remove an inappropriate use of runclientoutput
+
+ This function is intended for running client code, not servers.
+
+- runtests: only add $LIBDIR to the path for checktestcmd
+
+ Since checkcmd is for finding servers, there will never be anything in
+ this directory of interest to them.
+
+ Ref: #10818
+
+- tests: log sshserver.pl messages to a file
+
+ The logmsg messages were thrown away before, so they are now available
+ for debugging.
+
+- runtests: also show DISABLED tests with -l
+
+ Other reasons for skipping tests are ignored for -l, so being explicitly
+ disabled should be too.
+
+- runtests: move the UNIX sockets into $PIDDIR
+
+ These were missed when the other server files were moved there.
+
+ Follow-up to 70d2fca2
+
+ Ref: #10818
+
+- tests: tighten up perl exports
+
+ This reduces namespace pollution a little.
+
+ Ref: #10818
+
+- tests: turn perl modules into full packages
+
+ This helps enforce more modularization and encapsulation. Enable and fix
+ warnings on a few packages. Also, rename ftp.pm to processhelp.pm since
+ there's really nothing ftp-specific in it.
+
+ Ref: #10818
+
+Daniel Stenberg (11 Apr 2023)
+
+- multi: remove a few superfluous assigns
+
+ PVS found these "The 'rc' variable was assigned the same value." cases.
+
+ Ref: #10929
+ Closes #10932
+
+- schannel: add clarifying comment
+
+ Explaining how the PVS warning in #10929 is wrong: Dereferencing of the
+ null pointer 'backend->cred' might take place.
+
+ Closes #10931
+
+- cookie: clarify that init with data set to NULL reads no file
+
+    ... and make Curl_cookie_add() require 'data' being set properly with an
+ assert.
+
+ The function has not worked with a NULL data for quite some time so this
+ just corrects the code and comment.
+
+    This is a different take than the proposed fix in #10927
+
+ Reported-by: Kvarec Lezki
+ Ref: #10929
+ Closes #10930
+
+Kvarec Lezki (11 Apr 2023)
+
+- vtls: remove int typecast for sizeof()
+
+ V220 Suspicious sequence of types castings: memsize -> 32-bit integer ->
+ memsize. The value being cast: 'sizeof
+ (buf->data)'. curl\lib\vtls\vtls.c 2025
+
+ https://pvs-studio.com/en/docs/warnings/v220/
+
+ Closes #10928
+
+Stefan Eissing (11 Apr 2023)
+
+- http2: fix copy-and-paste error reported by Coverity
+
+ - move all code handling HTTP/2 frames for a particular
+ stream into a separate function to keep from confusing
+ the call `data` with the stream `data`.
+
+ Closes #10924
+
+Dan Fandrich (11 Apr 2023)
+
+- tests: log a too-long Unix socket path in sws and socksd
+
+ Ref: #10919
+
+Daniel Stenberg (11 Apr 2023)
+
+- gen.pl: error on duplicated See-Also fields
+
+ Updated http2.d accordingly.
+
+ Closes #10925
+
+- http2: avoid possible null pointer dereference
+
+ Reported-by: Dan Fandrich
+ Fixes #10920
+ Closes #10923
+
+- lib1560: verify that more bad host names are rejected
+
+ when setting the hostname component of a URL
+
+ Closes #10922
+
+- curl_url_set.3: mention that users can set content rather freely
+
+ ... which then might render bad URLs if you extract a URL later.
+
+ Closes #10921
+
+Dan Fandrich (10 Apr 2023)
+
+- CI: retry failed downloads of aws-lc
+
+ Don't fail the build in case of a temporary server problem.
+
+- test1169: fix so it works properly everywhere
+
+ - Use an absolute path for the -L option since the module isn't in the
+ perl path
+ - Create the needed test file in a <file> section; <precheck> isn't
+ intended for this
+ - Fix the test number in the file name, which was wrong
+
+ Follow-up to f754990a
+
+ Ref: #10818
+ Fixes #10889
+ Closes #10917
+
+- tests: stop using strndup(), which isn't portable
+
+ It's not available on Solaris 10, for example. Since this is just test
+ code that doesn't need to use an optimized system version, replace it
+ with the implementation copied from tool_cb_hdr.c.
+
+- runtests: fix an incorrect comment about the ld_preload feature
+
+ Follow-up to 1f631864
+
+ Ref: #10818
+
+Daniel Stenberg (9 Apr 2023)
+
+- urlapi: prevent setting invalid schemes with *url_set()
+
+    A typical mistake would be to try to set "https://" - including the
+    separator - which is now rejected, as it would otherwise lead to
+    url_get(... URL...) extracting an invalid URL.
+
+ Extended test 1560 to verify.
+
+ Closes #10911
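+
+    A minimal sketch of how an application hits the new check through the
+    public URL API (the exact CURLUcode returned is not spelled out above):
+
+      #include <curl/curl.h>
+      #include <stdio.h>
+
+      int main(void)
+      {
+        CURLU *u = curl_url();
+        /* including the "://" separator in the scheme is the mistake */
+        CURLUcode rc = curl_url_set(u, CURLUPART_SCHEME, "https://", 0);
+        if(rc)
+          printf("rejected: %s\n", curl_url_strerror(rc));
+        curl_url_cleanup(u);
+        return 0;
+      }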
+
+Biswapriyo Nath (9 Apr 2023)
+
+- http2: remove unused Curl_http2_strerror function declaration
+
+ Curl_http2_strerror was renamed to http2_strerror in
+ 05b100aee247bb9bec8e9a1b0 and then http2_strerror was removed in
+ 5808a0d0f5ea0399d4a2a2
+
+ This also fixes the following compiler error
+
+ lib/http2.h:41:33: error: unknown type name 'uint32_t'
+ lib/http2.h:1:1: note: 'uint32_t' is defined in header '<stdint.h>'
+
+ Closes #10912
+
+Daniel Stenberg (8 Apr 2023)
+
+- RELEASE-NOTES: synced
+
+SuperIlu on github (8 Apr 2023)
+
+- config-dos.h: fix SIZEOF_CURL_OFF_T for MS-DOS/DJGPP
+
+ Fixes #10905
+ Closes #10910
+
+Daniel Stenberg (8 Apr 2023)
+
+- lib: remove CURLX_NO_MEMORY_CALLBACKS
+
+ The only user of this define was 'chkdecimalpoint' - a special purpose
+ test tool that was built but not used anymore (since 17c18fbc3 - Apr
+ 2020).
+
+ Closes #10908
+
+- CURLPROXY_HTTPS2: for HTTPS proxy that may speak HTTP/2
+
+ Setting this proxy type allows curl to negotiate and use HTTP/2 with
+ HTTPS proxies.
+
+ Closes #10900
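+
+    A minimal sketch of how an application could opt in; the proxy host
+    below is only a placeholder:
+
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURL *curl = curl_easy_init();
+        if(curl) {
+          curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+          curl_easy_setopt(curl, CURLOPT_PROXY, "proxy.example");
+          /* allow HTTP/2 to be attempted with the HTTPS proxy */
+          curl_easy_setopt(curl, CURLOPT_PROXYTYPE, (long)CURLPROXY_HTTPS2);
+          curl_easy_perform(curl);
+          curl_easy_cleanup(curl);
+        }
+        return 0;
+      }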
+
+Ali Khodkar (8 Apr 2023)
+
+- write-out.d: add missing periods
+
+ Closes #10897
+
+Daniel Stenberg (7 Apr 2023)
+
+- http2: remove check for !data after it was already dereferenced
+
+ Pointed out by Coverity
+
+ Closes #10906
+
+- http_proxy: provide missing arg to infof() call
+
+ Pointed out by Coverity
+
+ Closes #10904
+
+- content_encoding: only do transfer-encoding compression if asked to
+
+ To reduce surprises. Update test 387 and 418 accordingly.
+
+ Closes #10899
+
+- sws: comparison of unsigned expression < 0 is always false
+
+ Follow-up to 356dd0b73a75ed6d5
+
+ Closes #10903
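+
+    For context, the warning class in a standalone form (this is not the
+    actual sws code):
+
+      #include <stdio.h>
+
+      int main(void)
+      {
+        size_t n = 0;
+        /* an unsigned value can never be negative: this branch is dead */
+        if(n < 0)
+          puts("never reached");
+        return 0;
+      }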
+
+- lib/cmake: add HAVE_WRITABLE_ARGV check
+
+ Assisted-by: Jakub Zakrzewski
+ Closes #10896
+
+- configure: don't set HAVE_WRITABLE_ARGV on Windows
+
+ Ref: #10888
+ Closes #10896
+
+- vtls: fix build error when proxy-disabled
+
+ Closes #10901
+
+Stefan Eissing (6 Apr 2023)
+
+- tests: increase sws timeout for more robust testing
+
+ - for https CONNECT forwarding, this was fixed at 5 seconds
+ which led to spurious CI test failures
+ - add --keepalive parameter to sws to control this
+ - let httpserver use 30 seconds
+
+ Closes #10898
+
+- http2: move HTTP/2 stream vars into local context
+
+ - remove NGHTTP2 members of `struct HTTP`
+ - add `void *h2_ctx` to `struct HTTP`
+ - add `void *h3_ctx` to `struct HTTP`
+ - separate h2/h3 pointers are needed for eyeballing
+ - manage local stream_ctx in http implementations
+
+ Closes #10877
+
+- proxy: http2 proxy tunnel implementation
+
+ - currently only on debug build and when env variable
+ CURL_PROXY_TUNNEL_H2 is present.
+ - will ALPN negotiate with the proxy server and switch
+ tunnel filter based on the protocol negotiated.
+ - http/1.1 tunnel code moved into cf-h1-proxy.[ch]
+ - http/2 tunnel code implemented in cf-h2-proxy.[ch]
+ - tunnel start and ALPN set remains in http_proxy.c
+ - moving all haproxy related code into cf-haproxy.[ch]
+
+ VTLS changes
+ - SSL filters rely solely on the "alpn" specification they
+ are created with and no longer check conn->bits.tls_enable_alpn.
+ - checks on which ALPN specification to use (or none at all) are
+ done in vtls.c when creating the filter.
+
+ Testing
+ - added a nghttpx forward proxy to the pytest setup that
+ speaks HTTP/2 and forwards all requests to the Apache httpd
+ forward proxy server.
+ - extending test coverage in test_10 cases
+ - adding proxy tests for direct/tunnel h1/h2 use of basic auth.
+ - adding test for http/1.1 and h2 proxy tunneling to pytest
+
+ Closes #10780
+
+- vtls and h2 improvements
+
+ - eliminate receive loop in vtls to fill buffer. This may
+      lead to partial reads of data, which is counterproductive
+ - let http2 instead loop smarter to process pending network
+ data without transfer switches
+
+ scorecard improvements
+ - do not start caddy when only httpd is requested
+ - allow curl -v to stderr file on --curl-verbose
+
+ Closes #10891
+
+Daniel Stenberg (6 Apr 2023)
+
+- tests: 1078 1288 1297 use valid IPv4 addresses
+
+ With the enhanced URL parser, these tests failed because of their bad
+ IPv4 use.
+
+- urlapi: detect and error on illegal IPv4 addresses
+
+ Using bad numbers in an IPv4 numerical address now returns
+ CURLUE_BAD_HOSTNAME.
+
+ I noticed while working on trurl and it was originally reported here:
+ https://github.com/curl/trurl/issues/78
+
+ Updated test 1560 accordingly.
+
+ Closes #10894
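+
+    Roughly how this surfaces through the URL API; the particular
+    out-of-range address below is just an assumed example:
+
+      #include <curl/curl.h>
+      #include <stdio.h>
+
+      int main(void)
+      {
+        CURLU *u = curl_url();
+        /* 256 is not a valid IPv4 octet */
+        CURLUcode rc = curl_url_set(u, CURLUPART_URL,
+                                    "http://256.1.1.1/", 0);
+        if(rc == CURLUE_BAD_HOSTNAME)
+          puts("bad IPv4 address rejected");
+        curl_url_cleanup(u);
+        return 0;
+      }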
+
+- RELEASE-NOTES: synced
+
+- urlapi: URL encoding for the URL missed the fragment
+
+ Meaning that it would wrongly still store the fragment using spaces
+ instead of %20 if allowing space while also asking for URL encoding.
+
+ Discovered when playing with trurl.
+
+ Added test to lib1560 to verify the fix.
+
+ Closes #10887
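+
+    A small sketch of the flag combination in question, assuming the space
+    sits in the fragment part:
+
+      #include <curl/curl.h>
+      #include <stdio.h>
+
+      int main(void)
+      {
+        CURLU *u = curl_url();
+        char *out = NULL;
+        curl_url_set(u, CURLUPART_URL, "https://example.com/#a b",
+                     CURLU_ALLOW_SPACE | CURLU_URLENCODE);
+        if(!curl_url_get(u, CURLUPART_URL, &out, 0)) {
+          printf("%s\n", out); /* the fragment space now comes out as %20 */
+          curl_free(out);
+        }
+        curl_url_cleanup(u);
+        return 0;
+      }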
+
+- rtsp: convert mallocs to dynbuf for RTP buffering
+
+ Closes #10786
+
+- tool_writeout: add URL component variables
+
+ Output specific components from the used URL. The following variables
+ are added for this purpose:
+
+ url.scheme, url.user, url.password, url.options, url.host, url.port,
+ url.path, url.query, url.fragment, url.zoneid
+
+ Add the following for outputting parts of the "effective URL":
+
+ urle.scheme, urle.user, urle.password, urle.options, urle.host, urle.port,
+ urle.path, urle.query, urle.fragment, urle.zoneid
+
+ Added test 423 and 424 to verify.
+
+ Closes #10853
+
+Stefan Eissing (4 Apr 2023)
+
+- tests/http: improved httpd detection
+
+ - better error messages when not found/complete
+ - handling of `--without-test-httpd`
+
+ Reported-by: kwind on github
+ Fixes #10879
+ Closes #10883
+
+Daniel Stenberg (4 Apr 2023)
+
+- configure: make quiche require quiche_conn_send_ack_eliciting
+
+ curl now requires quiche version >= 1.17.1 to be used and this function
+ was added in this version and makes a convenient check.
+
+ This requirement is because this is the lowest quiche version that
+ supports peer-initiated key updates correctly.
+
+ Closes #10886
+
+Dan Fandrich (1 Apr 2023)
+
+- unit tests: use the unit test infrastructure better
+
+ Allow UNITTEST_STOP to return the error code, use the fail & abort
+ macros to indicate test failure and return success instead of fail if
+ the unit test can't test anything because of missing features at
+ compile-time. A couple of tests could never fail because they were
+ overriding the failure return code.
+
+- runtests: strip EOL on precheck output on Windows, too
+
+ Precheck failures would show on two lines in the test summary output
+ otherwise.
+
+- tests: move server config files under the pid dir
+
+ These files are generated by the test servers and must therefore be
+ found in the log directory to make them available to only those servers
+ once multiple test runners are executing in parallel. They must also not
+ be deleted with the log files, so they are stored in the pidfile
+ directory.
+
+ Ref: #10818
+ Closes #10875
+
+- runtests: use the ssh key filenames from the sshhelp package
+
+- tests: move pidfiles and portfiles under the log directory
+
+ This is to segregate all files written by a test process into a single
+ root to allow for future parallel testing.
+
+ Ref: #10818
+ Closes #10874
+
+- runtests: minor code cleanups
+
+- runtests: call processexists() and pidfromfile()
+
+ rather than duplicating the logic in several places.
+
+Viktor Szakats (31 Mar 2023)
+
+- cmake: do not add zlib headers for openssl
+
+ Logic copied earlier from wolfSSL. wolfSSL requires zlib headers for its
+ public headers. OpenSSL does not, so stop adding zlib headers for it.
+
+ Follow-up to 1e3319a167d2f32d295603167486e9e88af9bb4e
+
+ Closes #10878
+
+Stefan Eissing (31 Mar 2023)
+
+- rustls: fix error in recv handling
+
+    - when rustls is told to receive more TLS data and its internal
+ plaintext buffers are full, it returns an IOERROR
+ - avoid receiving TLS data while plaintext is not read empty
+
+ pytest:
+ - increase curl run timeout when invoking pytest with higher verbosity
+
+ Closes #10876
+
+- http3: improvements across backends
+
+ - ngtcp2: using bufq for recv stream data
+ - internal stream_ctx instead of `struct HTTP` members
+ for quiche, ngtcp2 and msh3
+ - no more QUIC related members in `struct HTTP`
+ - experimental use of recvmmsg(), disabled by default
+ - testing on my old debian box shows no throughput improvements.
+ - leaving it in, but disabled, for future revisit
+ - vquic: common UDP receive code for ngtcp2 and quiche
+ - vquic: common UDP send code for ngtcp2 and quiche
+ - added pytest skips for known msh3 failures
+ - fix unit2601 to survive torture testing
+ - quiche: using latest `master` from quiche and enabling large download
+ tests, now that key change is supported
+ - fixing test_07_21 where retry handling of starting a stream
+ was faulty
+ - msh3: use bufq for recv buffering headers and data
+ - msh3: replace fprintf debug logging with LOG_CF where possible
+ - msh3: force QUIC expire timers on recv/send to have more than
+ 1 request per second served
+
+ Closes #10772
+
+Dan Fandrich (30 Mar 2023)
+
+- test1471/2: add http as a required feature
+
+ curl bails out early with a different error message if http support is
+ compiled out.
+
+ Ref: #10705
+
+- tests: limit return code of unit tests and lib tests
+
+ Values greater than 125 have special meanings, so cap it there. Unit
+ tests and lib tests use the number of failures as the return code, so a
+ large number of failures (such as test 2601 as a torture test) can
+ exceed this causing the test to be erroneously reported as having
+ failed.
+
+ Ref: #10720
+
+- test1960: point to the correct path for the precheck tool
+
+ Otherwise, it might find the binary in .libs which can cause it to use
+ the system libcurl which can fail. This error is only visible by
+ noticing that the test is skipped.
+
+ Follow-up to e4dfe6fc
+
+ Ref: #10651
+
+- tests: use the proper %LOGDIR path on two tests
+
+ Follow-up to e7a021e1
+
+ Ref: #10818
+
+Daniel Stenberg (30 Mar 2023)
+
+- rtsp: fix Value stored to 'skip_size' is never read
+
+ Pointed out by scan-build
+
+ Follow-up to 6c6306f3008f2c9b20a64
+
+ Closes #10872
+
+Stefan Eissing (30 Mar 2023)
+
+- tests/http: relax connection check in test_07_02
+
+    Only 1 connection will be used when curl is slow, which happens when
+ address-sanitized in CI, for example
+
+ Closes #10865
+
+- http2: flow control and buffer improvements
+
+ - use bufq for send/receive of network data
+    - use bufq for send/receive of stream data
+ - use HTTP/2 flow control with no-auto updates to control the
+ amount of data we are buffering for a stream
+ HTTP/2 stream window set to 128K after local tests, defined
+ code constant for now
+    - eliminating PAUSEing nghttp2 processing when receiving data
+ since a stream can now take in all DATA nghttp2 forwards
+
+    Improved scorecard and adjusted http2 stream window sizes
+ - scorecard improved output formatting and options default
+ - scorecard now also benchmarks small requests / second
+
+ Closes #10771
+
+Dan Fandrich (30 Mar 2023)
+
+- runtests: show error message if file can't be written
+
+- tests: fix remaining servers to run with a dynamic log directory
+
+    This final commit in the series is sufficient to allow the tests to
+    succeed if $LOGDIR is changed in runtests.pl.
+
+ Ref: #10818
+ Closes #10866
+
+- tests: fix fake_ntlm to run with a dynamic log directory
+
+ Ref: #10818
+
+- tests: fix http servers to run with a dynamic log directory
+
+ Ref: #10818
+
+- tests: fix ftpserver to run with a dynamic log directory
+
+ Ref: #10818
+
+- tests: fix C servers to run with a dynamic log directory
+
+ Ref: #10818
+
+- tests: fix lib tests to run with a dynamic log directory
+
+ Ref: #10818
+
+- tests: fix unit tests to run with a dynamic log directory
+
+ Ref: #10818
+
+- tests: use %LOGDIR to refer to the log directory
+
+ This will allow it be set dynamically.
+
+ Ref: #10818
+
+- runtests: track verification time even if no files to compare
+
+- getpart: better handle case of file not found
+
+- testcurl: bump version date
+
+ It hadn't been updated in 9 years; it's time.
+
+- tests: switch to 3-argument open in test suite
+
+ The perl 2-argument open has been considered not-quite-deprecated since
+ the 3-argument form was introduced almost a quarter century ago.
+
+- tests: silence some Perl::Critic warnings in test suite
+
+    Not all warnings are fixed; many are more stylistic suggestions than
+ anything and IMHO don't do much to actually improve the code.
+
+ Ref: #10818
+ Closes #10861
+
+- docs: bump the minimum perl version to 5.6
+
+ It's actually been this way since at least 2012 (when a 3-argument open
+ was added to runtests.pl). Given the lack of complaints in the interim,
+ it's safe to call this 23 year old perl version the minimum.
+
+- runtests: memoize the getpart* subroutines to speed up access
+
+ The refactored code calls these functions with the same arguments more
+ often, so this prevents redundant test case file parsing.
+
+ Approved-by: Daniel Stenberg
+ Ref: #10818
+ Closes #10833
+
+- runtests: remove duplicated feature variables
+
+ Use the feature map stored in the hash table instead. Most of the
+ variables were only used only once, to set the value in the hash table.
+
+ Ref: #10818
+
+- runtests: also ignore test file problems when ignoring results
+
+ This simplifies error handling in the test verification code and makes
+ it more consistent.
+
+ Ref: #10818
+
+- runtests: more refactoring for clarity
+
+ Ref: #10818
+
+- runtests: don't start servers if -l is given
+
+- runtests: fix typos
+
+- runtests: refactor singletest() into separate functions
+
+ This takes it from a 1200 line behemoth into something more manageable.
+ The content and order of the functions is taken almost directly from
+ singletest() so the diff sans whitespace is quite short.
+
+ Ref: #10818
+
+- runtests: refactor singletest() into distinct sections
+
+ Namely:
+ - Verify that this test case should be run
+ - Start the servers needed to run this test case
+ - Check that test environment is fine to run this test case
+ - Prepare the test environment to run this test case
+ - Run the test command
+ - Clean up after test command
+ - Verify test succeeded
+
+ Ref: #10818
+
+- runtests: stop copying a few arrays where not needed
+
+ Unlike some other languages that just copy a pointer, perl copies the
+ entire array contents which takes time for a large array.
+
+ Ref: #10818
+
+- runtests: reduce redundant calls to getpart/getpartattr
+
+ These functions scan through the entire test file every time to find the
+ right section, so they can be slow for large test files.
+
+ Ref: #10818
+
+- tests: document that the unittest keyword is special
+
+ Also, add other features that were missing.
+
+Stefan Eissing (30 Mar 2023)
+
+- docs: add documentation for bufq
+
+ Closes #10869
+
+Daniel Stenberg (30 Mar 2023)
+
+- RELEASE-NOTES: synced
+
+Matt Jolly (30 Mar 2023)
+
+- hostip: refuse to resolve the .onion TLD
+
+ RFC 7686 states that:
+
+ > Applications that do not implement the Tor
+ > protocol SHOULD generate an error upon the use of .onion and
+ > SHOULD NOT perform a DNS lookup.
+
+ Let's do that.
+
+ https://www.rfc-editor.org/rfc/rfc7686#section-2
+
+ Add test 1471 and 1472 to verify
+
+ Fixes #543
+ Closes #10705
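+
+    A minimal sketch of what an application now sees; the host name is a
+    placeholder and the exact CURLcode is whatever the resolver layer
+    reports for a refused lookup:
+
+      #include <curl/curl.h>
+      #include <stdio.h>
+
+      int main(void)
+      {
+        CURL *curl = curl_easy_init();
+        if(curl) {
+          CURLcode res;
+          curl_easy_setopt(curl, CURLOPT_URL, "http://example.onion/");
+          res = curl_easy_perform(curl); /* fails without a DNS lookup */
+          printf("%d: %s\n", res, curl_easy_strerror(res));
+          curl_easy_cleanup(curl);
+        }
+        return 0;
+      }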
+
+Philip Heiduck (30 Mar 2023)
+
+- GHA: update ngtcp2-*.yml to v0.10.0
+
+ Closes #10612
+
+Stefan Eissing (30 Mar 2023)
+
+- tests/http: fix log formatting on wrong exit code
+
+ Closes #10868
+
+Daniel Stenberg (30 Mar 2023)
+
+- spellcheck.words: unify the AWS-LC spelling
+
+ Follow-up to 34ef4fab22d93
+
+ Closes #10867
+
+Jim King (30 Mar 2023)
+
+- openssl: interop with AWS-LC
+
+ * Configure changes to detect AWS-LC
+ * CMakeLists.txt changes to detect AWS-LC
+ * Compile-time branches needed to support AWS-LC
+ * Correctly set OSSL_VERSION and report AWS-LC release number
+ * GitHub Actions script to build with autoconf and cmake against AWS-LC
+
+ AWS-LC is a BoringSSL/OpenSSL derivative
+ For more information see https://github.com/awslabs/aws-lc/
+
+ Closes #10320
+
+Viktor Szakats (30 Mar 2023)
+
+- cmake: picky-linker fixes for openssl, ZLIB, H3 and more
+
+ - fix HTTP/3 support detection with OpenSSL/quictls built with ZLIB.
+ (Requires curl be built with ZLIB option also.)
+
+ - fix HTTP/3 support detection with OpenSSL/quictls/LibreSSL and `ld`
+ linker on Windows.
+
+ - fix HTTP/3 support detection with wolfSSL to automatically add
+ `ws2_32` to the lib list on Windows. For all linkers.
+
+ - reposition ZLIB (and other compression) detection _after_ TLS
+ detection, but before calling HTTP/3-support detection via
+ `CheckQuicSupportInOpenSSL`.
+
+ May be a regression from ebef55a61df0094b9790710a42f63c48e7de3c13
+ May fix #10832 (Reported-by: Micah Snyder)
+
+ This also seems to fix an odd case, where OpenSSL/quictls is correctly
+ detected, but its header path is not set while compiling, breaking
+ build at `src/curl_ntlm_core.c`. Reason for this remains undiscovered.
+
+ - satisfy "picky" linkers such as `ld` with MinGW, that are highly
+ sensitive to lib order, by also adding brotli to the beginning of the
+ lib list.
+
+ - satisfy "picky" linkers by adding certain Windows systems libs to
+ the lib list for OpenSSL/LibreSSL. (Might need additional ones for
+ other forks, such as `pthread` for BoringSSL.)
+
+ Note: It'd make sense to _always_ add `ws2_32`, `crypt32` (except
+ Windows App targets perhaps?), `bcrypt` (except old-mingw!) on Windows
+ at this point. They are almost always required, and if some aren't,
+ they are ignored by the linker with no effect on final binaries.
+
+ Closes #10857
+
+Stefan Eissing (30 Mar 2023)
+
+- vtls: use full buffer size when receiving data if possible
+
+    SSL backends like OpenSSL/wolfSSL and others return the content of one
+ TLS record on read, but usually there are more available.
+
+ Change the vtls cfilter recv() function to fill the given buffer until a
+ read would block.
+
+ Closes #10736
+
+dengjfzh on github (30 Mar 2023)
+
+- rtsp: skip malformed RTSP interleaved frame data
+
+ Some IP cameras send malformed RTSP interleaved frames sometimes, which
+    can cause curl_easy_perform to return 1 (CURLE_UNSUPPORTED_PROTOCOL). This
+ change attempts to skip clearly incorrect RTSP interleaving frame data.
+
+ Closes #10808
+
+Stefan Eissing (30 Mar 2023)
+
+- lib: add `bufq` and `dynhds`
+
+ Adding `bufq`:
+ - at init() time configured to hold up to `n` chunks of `m` bytes each.
+ - various methods for reading from and writing to it.
+ - `peek` support to get access to buffered data without copy
+ - `pass` support to allow buffer flushing on write if it becomes full
+ - use case: IO buffers for dynamic reads and writes that do not blow up
+ - distinct from `dynbuf` in that:
+ - it maintains a read position
+ - writes on a full bufq return CURLE_AGAIN instead of nuking itself
+ - Init options:
+ - SOFT_LIMIT: allow writes into a full bufq
+ - NO_SPARES: free empty chunks right away
+ - a `bufc_pool` that can keep a number of spare chunks to
+ be shared between different `bufq` instances
+
+ Adding `dynhds`:
+ - a straightforward list of name+value pairs as used for HTTP headers
+ - headers can be appended dynamically
+ - headers can be removed again
+ - headers can be replaced
+ - headers can be looked up
+ - http/1.1 formatting into a `dynbuf`
+ - configured at init() with limits on header counts and total string
+ sizes
+    - use case: pass an HTTP request or response around without being version
+      specific
+    - express an HTTP request without a curl easy handle (used in h2 proxy
+ tunnels)
+ - future extension possibilities:
+ - conversions of `dynhds` to nghttp2/nghttp3 name+value arrays
+
+ Closes #10720
+
+- pytest: improvements for suitable curl and error output
+
+ - will check built curl for http and https support and
+ skip all tests if not there
+ - will dump stdout/stderr/trace output on errored responses
+
+ Closes #10829
+
+Daniel Stenberg (29 Mar 2023)
+
+- lib: use correct printf flags for sockets and timediffs
+
+ Introduces CURL_FORMAT_SOCKET_T for outputting socket numbers.
+
+ Fixes #10737
+ Reported-by: Gisle Vanem
+ Closes #10855
+
+- telnet: make MSVC ignore warning for assignment within conditional
+
+ Follow-up to d92a5007b60e0af7d
+
+ Closes #10859
+
+- ws: handle reads before EAGAIN better
+
+ Reported-by: simplerobot on github
+ Fixes #10831
+ Closes #10856
+
+- test1592: add flaky keyword
+
+ Closes #10860
+
+Frank Gevaerts (28 Mar 2023)
+
+- lib/sha256.c: typo fix in comment (duplicated "is available")
+
+ Closes #10851
+
+Arne Soete (28 Mar 2023)
+
+- tests: update tests/httpd references to tests/http
+
+ tests/httpd was renamed to tests/http in #10654. This patch updates some
+ references in the README
+
+ Closes #10854
+
+Kamil Dudka (28 Mar 2023)
+
+- telnet: simplify the implementation of str_is_nonascii()
+
+ There is no need to traverse the string twice.
+
+ Closes #10852
+
+Frank Gevaerts (28 Mar 2023)
+
+- curl_easy_getinfo.3: typo fix (duplicated "from the")
+
+ Closes #10850
+
+Philip Heiduck (28 Mar 2023)
+
+- wolfssl.yml: bump to version 5.6.0
+
+ Closes #10843
+
+Daniel Stenberg (28 Mar 2023)
+
+- RELEASE-NOTES: synced
+
+Ronan Pigott (28 Mar 2023)
+
+- docs/cmdline-opts: document the dotless config path
+
+ The real xdg config path is $XDG_CONFIG_HOME/curlrc, without the dot.
+ The dotless name seems preferable, so let's match the documentation to
+ the behavior.
+
+ Closes #10849
+
+Daniel Stenberg (28 Mar 2023)
+
+- HTTP-COOKIES.md: mention the #HttpOnly_ prefix
+
+ Fixes #10847
+ Reported-by: Harry Sintonen
+ Closes #10848
+
+- dynbuf: never allocate larger than "toobig"
+
+    As dynbufs always have a fixed maximum size that they are not allowed
+    to grow beyond, making sure a larger buffer is never allocated ensures
+    that no memory gets allocated that can never be used.
+
+ Closes #10845
+
+- ftplistparser: replace realloc with dynbuf
+
+ Closes #10844
+
+- ftplistparser: use ISDIGIT()
+
+ Closes #10844
+
+- ftplistparser: move out private data from public struct
+
+ The public 'curl_fileinfo' struct contained three fields that are for
+ internal purposes only. This change makes them unused in the public
+ struct.
+
+ The new private struct fields are also renamed to make this separation
+ more obvious internally.
+
+ Closes #10844
+
+- openssl: fix indents - white space edits only
+
+ Closes #10840
+
+- url: remove call to Curl_llist_destroy in Curl_close
+
+ A list that is created with a NULL "destructor" does not need to be
+ destroyed. Not calling it is faster than calling it.
+
+ Closes #10846
+
+- multi: remove PENDING + MSGSENT handles from the main linked list
+
+ As they are not driving transfers or any socket activity, the main loop
+ does not need to iterate over these handles. A performance improvement.
+
+ They are instead only held in their own separate lists.
+
+    'data->multi' is kept as a pointer to the multi handle as long as the
+    easy handle is actually part of it, even when the handle is moved to the
+ pending/msgsent lists. It needs to know which multi handle it belongs
+ to, if for example curl_easy_cleanup() is called before the handle is
+ removed from the multi handle.
+
+    All 'data->multi' pointers of handles still part of the multi handle
+    get cleared by curl_multi_cleanup(), which "orphans" all previously
+ attached easy handles.
+
+ This is take 2. The first version was reverted for the 8.0.1 release.
+
+ Assisted-by: Stefan Eissing
+ Closes #10801
+
+Stefan Eissing (26 Mar 2023)
+
+- tests/http: add timeout to running curl in test cases
+
+ - we had a CI case once where `curl` seemingly did not
+ return and it was hard to guess what happened.
+ - make curl execution in test cases time out after 60 seconds
+
+ Closes #10783
+
+Daniel Stenberg (26 Mar 2023)
+
+- RELEASE-PROCEDURE: update to new schedule
+
+ Ref: https://curl.se/mail/lib-2023-03/0062.html
+
+ Assisted-by: Andy Alt
+    Assisted-by: Dan Fandrich
+
+ Closes #10827
+
+Patrick Monnerat (26 Mar 2023)
+
+- doc: curl_mime_init() strong easy handle binding has been relaxed in 7.87.0
+
+ Reported-by: Chloe Kudryavtsev
+ Fixes #10834
+ Closes #10835
+
+Jay Satiro (25 Mar 2023)
+
+- CURLOPT_WRITEFUNCTION.3: fix typo
+
+ Reported-by: Osaila@users.noreply.github.com
+
+ Fixes https://github.com/curl/curl/issues/10839
+
+Dan Fandrich (24 Mar 2023)
+
+- CI: skip some more builds when possible
+
+ When a commit only contains tests, documentation, or cmake files, skip
+ those builds that aren't affected by those.
+
+ The file filters available on the CI services don't seem to allow
+ skipping individual jobs, only the entire workflow, so we can't get any
+ more fine-grained than this.
+
+- CI: add and adjust labeler match patterns
+
+ Allow cmdline tool alongside other labels.
+
+Kai Pastor (25 Mar 2023)
+
+- CMake: make config version 8 compatible with 7
+
+ Reviewed-by: Jakub Zakrzewski
+ Closes #10819
+
+Daniel Stenberg (24 Mar 2023)
+
+- RELEASE-NOTES: synced
+
+ Bumped version-in-progress to 8.1.0
+
+- GHA: add a memory-sanitizer job
+
+ Closes #10815
+
+Dan Fandrich (23 Mar 2023)
+
+- CI: fix brew retries on GHA
+
+ The fix in the previous commit was complete for Cirrus but accidentally
+ left off a part for GHA.
+
+ Follow-up to c2b7249d
+
+- CI: skip Azure for more commits which change only GHA
+
+Daniel Stenberg (23 Mar 2023)
+
+- cmake: set SONAME for SunOS too
+
+ Provided-by: Brian Lund
+
+ Closes #10816
+
+Stefan Eissing (23 Mar 2023)
+
+- ngtcp2: adjust config and code checks for ngtcp2 without nghttp3
+
+ - make configure show on HTTP3 feature that both ngtcp2 and nghttp3
+ are in play
+ - define ENABLE_QUIC only when USE_NGTCP2 and USE_NGHTTP3 are defined
+ - add USE_NGHTTP3 in the ngtcp2 implementation
+
+ Fixes #10793
+ Closes #10821
+
+Daniel Stenberg (23 Mar 2023)
+
+- data.d: emphasize no conversion
+
+ When asking curl to send a POST, curl does not encode or change the data.
+
+ Ref: #10820
+ Closes #10823
+
+- server/getpart: clear the buffer before load
+
+ Fixes msan warnings:
+
+ ==54195==WARNING: MemorySanitizer: use-of-uninitialized-value
+ #0 0x55ece35e57cb in line_length /home/runner/work/curl/curl/tests/server
+ /getpart.c:111:25
+ #1 0x55ece35e3b83 in readline /home/runner/work/curl/curl/tests/server/ge
+ tpart.c:164:24
+ #2 0x55ece35e0269 in getpart /home/runner/work/curl/curl/tests/server/get
+ part.c:347:18
+ #3 0x55ece36180b6 in parse_servercmd /home/runner/work/curl/curl/tests/se
+ rver/sws.c:283:13
+
+ Closes #10822
+
+- ntlm: clear lm and nt response buffers before use
+
+ To avoid the risk of MemorySanitizer: use-of-uninitialized-value
+
+ Closes #10814
+
+- digest: clear target buffer
+
+ Closes #10814
+
+Douglas R. Reno (22 Mar 2023)
+
+- cmake: bring in the network library on Haiku.
+
+ When cross-compiling for Haiku, the networking library needs to be
+ brought in. Without this, an unknown type of "Error" is reported in
+ lib/curl_setup_once.h.
+
+ This is also needed when using CMake natively on Haiku to build libcurl.
+
+ Fixes #10296
+ Closes #10792
+
+Daniel Stenberg (22 Mar 2023)
+
+- runtests: die if curl version cannot be found
+
+ Closes #10813
+
+Stefan Eissing (22 Mar 2023)
+
+- multi: add handle asserts in DEBUG builds
+
+ For GOOD_EASY_HANDLE and GOOD_MULTI_HANDLE checks
+
+ - allow NULL pointers to "just" return an error as before
+    - fail hard on non-NULL pointers that no longer show the MAGICs
+
+ Closes #10812
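+
+    A generic, standalone illustration of the magic-number pattern these
+    checks rely on (the names and value below are made up, not curl's own):
+
+      #include <assert.h>
+
+      #define EASY_MAGIC 0x5ca1ab1eU
+
+      struct easy {
+        unsigned int magic; /* set while the handle is alive */
+      };
+
+      static int good_easy(struct easy *h)
+      {
+        /* NULL is a plain error; non-NULL without the magic asserts */
+        assert(!h || h->magic == EASY_MAGIC);
+        return h && h->magic == EASY_MAGIC;
+      }
+
+      int main(void)
+      {
+        struct easy e = { EASY_MAGIC };
+        return good_easy(&e) ? 0 : 1;
+      }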
+
+Jon Rumsey (22 Mar 2023)
+
+- gskit: various compile errors in OS400
+
+ Various compile failures in gskit.c;
+
+ - pipe_ssloverssl() needs Curl_easy data parameter for
+ Curl_conn_cf_get_socket(cf, data)
+ - key_passwd is in ssl_config, not conn_config
+ - close_on() has 2 parameters, not 4
+ - getsockopt() needs to call Curl_conn_cf_get_socket(), not
+ cxn->sock[FIRSTSOCKET]
+
+ Fixes #10799
+ Closes #10800
+
+Daniel Stenberg (22 Mar 2023)
+
+- tool_operate: pass a long as CURLOPT_HEADEROPT argument
+
+ Closes #10798
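+
+    For context, curl_easy_setopt() is a varargs call, so numeric options
+    must be passed as a long; a minimal sketch with this option:
+
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURL *curl = curl_easy_init();
+        if(curl) {
+          /* an int argument could have the wrong width on some ABIs */
+          curl_easy_setopt(curl, CURLOPT_HEADEROPT,
+                           (long)CURLHEADER_SEPARATE);
+          curl_easy_cleanup(curl);
+        }
+        return 0;
+      }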
+
+- GHA: run all linux test jobs with valgrind
+
+ Closes #10798
+
+- GHA-linux: add an address-sanitizer build
+
+ Closes #10810
+
Version 8.0.1 (20 Mar 2023)
Daniel Stenberg (20 Mar 2023)
@@ -5831,4057 +9243,3 @@ Daniel Stenberg (18 Nov 2022)
 Reported-by: Andy Stamp
Fixes #9937
Closes #9939
-
-Alexandre Ferrieux (18 Nov 2022)
-
-- CURLOPT_QUICK_EXIT: don't wait for DNS thread on exit
-
- Fixes #2975
- Closes #9147
-
-Daniel Stenberg (17 Nov 2022)
-
-- HTTP-COOKIES.md: mention that http://localhost is a secure context
-
- Reported-by: Trail of Bits
-
- Closes #9938
-
-- lib: parse numbers with fixed known base 10
-
- ... instead of using 0 argument that allows decimal, hex or octal when
- the number is documented and assumed to use base 10.
-
- Closes #9933
-
-- RELEASE-NOTES: synced
-
-- scripts/delta: adapt to curl.h changes for the opt counter
-
-- cookie: expire cookies at once when max-age is negative
-
- Update test 329 to verify
-
- Reported-by: godmar on github
- Fixes #9930
- Closes #9932
-
-Stefan Eissing (17 Nov 2022)
-
-- proxy: haproxy filter is only available when PROXY and HTTP are
-
- Closes #9935
-
-Daniel Stenberg (16 Nov 2022)
-
-- OtherTests.cmake: check for cross-compile, not for toolchain
-
-    Build systems like vcpkg always set `CMAKE_TOOLCHAIN_FILE` so it should
- not be used as a sign that this is a cross-compile.
-
- Also indented the function correctly.
-
- Reported-by: Philip Chan
- Fixes #9921
- Closes #9923
-
-- ntlm: improve comment for encrypt_des
-
- Reported-by: Andrei Rybak
- Fixes #9903
- Closes #9925
-
-- include/curl/curl.h: bump the deprecated requirements to gcc 5.3
-
- Reported-by: Stephan Guilloux
- Fixes #9917
- Closes #9918
-
-Stefan Eissing (15 Nov 2022)
-
-- proxy: refactor haproxy protocol handling as connection filter
-
- Closes #9893
-
-Patrick Monnerat (15 Nov 2022)
-
-- lib: feature deprecation warnings in gcc >= 4.3
-
- Add a deprecated attribute to functions and enum values that should not
- be used anymore.
- This uses a gcc 4.3 dialect, thus is only available for this version of
- gcc and newer. Note that the _Pragma() keyword is introduced by C99, but
- is available as part of the gcc dialect even when compiling in C89 mode.
-
- It is still possible to disable deprecation at a calling module compile
- time by defining CURL_DISABLE_DEPRECATION.
-
- Gcc type checking macros are made aware of possible deprecations.
-
- Some testing support Perl programs are adapted to the extended
- declaration syntax.
-
- Several test and unit test C programs intentionally use deprecated
- functions/options and are annotated to not generate a warning.
-
- New test 1222 checks the deprecation status in doc and header files.
-
- Closes #9667
-
-Daniel Stenberg (15 Nov 2022)
-
-- log2changes.pl: wrap long lines at 80 columns
-
- Also, only use author names in the output.
-
- Fixes #9896
- Reported-by: John Sherrill
- Closes #9897
-
-- cfilters: use %zu for outputting size_t
-
- Detected by Coverity CID 1516894
-
- Closes #9907
-
-- Curl_closesocket: avoid using 'conn' if NULL
-
- ... in debug-only code.
-
- Reported by Coverity CID 1516896
-
- Closes #9907
-
-- url: only acknowledge fresh_reuse for non-followed transfers
-
- ... to make sure NTLM auth sticks to the connection it needs, as
- verified by 2032.
-
- Follow-up to fa0b9227616e
-
- Assisted-by: Stefan Eissing
- Closes #9905
-
-- netrc.d: provide mutext info
-
- Reported-by: xianghongai on github
- Fixes #9899
- Closes #9901
-
-- cmdline-opts/page-footer: remove long option nroff formatting
-
- As gen.pl adds them
-
-- nroff-scan.pl: detect double highlights
-
-- cmdline-opts/gen.pl: fix the linkifier
-
- Improved logic for finding existing --options in text and replacing with
- the full version with nroff syntax. This also makes the web version link
- options better.
-
- Reported-by: xianghongai on github
- Fixes #9899
- Closes #9902
-
-Patrick Monnerat (14 Nov 2022)
-
-- tool: use feature names instead of bit mask, when possible
-
- If the run-time libcurl is too old to support feature names, the name
- array is created locally from the bit masks. This is the only sequence
- left that uses feature bit masks.
-
- Closes #9583
-
-- docs: curl_version_info is not thread-safe before libcurl initialization
-
- Closes #9583
-
-- version: add a feature names array to curl_version_info_data
-
- Field feature_names contains a null-terminated sorted array of feature
- names. Bitmask field features is deprecated.
-
- Documentation is updated. Test 1177 and tests/version-scan.pl updated to
- match new documentation format and extended to check feature names too.
-
- Closes #9583
-
-Stefan Eissing (14 Nov 2022)
-
-- negtelnetserver.py: have it call its close() method
-
- Closes #9894
-
-Nathan Moinvaziri (13 Nov 2022)
-
-- ntlm: silence ubsan warning about copying from null target_info pointer.
-
- runtime error: null pointer passed as argument 2, which is declared to
- never be null
-
- Closes #9898
-
-Daniel Stenberg (12 Nov 2022)
-
-- RELEASE-NOTES: synced
-
-Stefan Eissing (12 Nov 2022)
-
-- Websocket: fixes for partial frames and buffer updates.
-
- - buffers updated correctly when handling partial frames
- - callbacks no longer invoked for incomplete payload data of 0 length
- - curl_ws_recv no longer returns with 0 length partial payload
-
- Closes #9890
-
-Daniel Stenberg (12 Nov 2022)
-
-- tool_operate: provide better errmsg for -G with bad URL
-
- If the URL that -G would try to add a query to could not be parsed, it would
- display
-
- curl: (27) Out of memory
-
- It now instead shows:
-
- curl: (2) Could not parse the URL, failed to set query
-
- Reported-by: Alex Xu
- Fixes #9889
- Closes #9892
-
-- vtls: fix build without proxy support
-
- Follow-up to dafdb20a26d0c890
-
- Closes #9895
-
-- tool_getparam: make --no-get work as the opposite of --get
-
- ... as documented.
-
- Closes #9891
-
-- http: mark it 'this_is_a_follow' in the Location: logic
-
- To make regular auth "reloads" to not count as redirects.
-
- Verified by test 3101
-
- Fixes #9885
- Closes #9887
-
-Viktor Szakats (11 Nov 2022)
-
-- config-win32: fix SIZEOF_OFF_T for MSVC and old MinGW
-
- The previously set default value of 8 (64-bit) is only correct for
- mingw-w64 and only when we set `_FILE_OFFSET_BITS` to 64 (the default
- when building curl). For MSVC, old MinGW and other Windows compilers,
- the correct value is 4 (32-bit). Adjust condition accordingly. Also
- drop the manual override option.
-
- Regression in 7.86.0 (from 68fa9bf3f5d7b4fcbb57619f70cb4aabb79a51f6)
-
- Bug: https://github.com/curl/curl/pull/9712#issuecomment-1307330551
-
- Reported-by: Peter Piekarski
- Reviewed-by: Jay Satiro
-
- Closes #9872
-
-Daniel Stenberg (11 Nov 2022)
-
-- lib: remove bad set.opt_no_body assignments
-
- This struct field MUST remain what the application set it to, so that
- handle reuse and handle duplication work.
-
- Instead, the request state bit 'no_body' is introduced for code flows
- that need to change this in run-time.
-
- Closes #9888
-
-Stefan Eissing (11 Nov 2022)
-
-- lib: connection filters (cfilter) addition to curl:
-
- - general construct/destroy in connectdata
- - default implementations of callback functions
- - connect: cfilters for connect and accept
- - socks: cfilter for socks proxying
- - http_proxy: cfilter for http proxy tunneling
- - vtls: cfilters for primary and proxy ssl
- - change in general handling of data/conn
- - Curl_cfilter_setup() sets up filter chain based on data settings,
- if none are installed by the protocol handler setup
- - Curl_cfilter_connect() boot straps filters into `connected` status,
- used by handlers and multi to reach further stages
- - Curl_cfilter_is_connected() to check if a conn is connected,
- e.g. all filters have done their work
- - Curl_cfilter_get_select_socks() gets the sockets and READ/WRITE
- indicators for multi select to work
- - Curl_cfilter_data_pending() asks filters if the have incoming
- data pending for recv
- - Curl_cfilter_recv()/Curl_cfilter_send are the general callbacks
- installed in conn->recv/conn->send for io handling
- - Curl_cfilter_attach_data()/Curl_cfilter_detach_data() inform filters
- and addition/removal of a `data` from their connection
- - adding vtl functions to prevent use of Curl_ssl globals directly
- in other parts of the code.
-
- Reviewed-by: Daniel Stenberg
- Closes #9855
-
-- curl-rustls.m4: on macOS, rustls also needs the Security framework
-
- Closes #9883
-
-Daniel Stenberg (10 Nov 2022)
-
-- rtsp: only store first_host once
-
- Suggested-by: Erik Janssen
- URL: https://github.com/curl/curl/pull/9870#issuecomment-1309499744
- Closes #9882
-
-Fata Nugraha (10 Nov 2022)
-
-- test3028: verify PROXY
-
-- http: do not send PROXY more than once
-
-    Unlike `CONNECT`, currently we don't keep track of whether `PROXY` is
- already sent or not. This causes `PROXY` header to be sent twice during
- `MSTATE_TUNNELING` and `MSTATE_PROTOCONNECT`.
-
- Closes #9878
- Fixes #9442
-
-Jay Satiro (10 Nov 2022)
-
-- lib: add CURL_WRITEFUNC_ERROR to signal write callback error
-
- Prior to this change if the user wanted to signal an error from their
- write callbacks they would have to use logic to return a value different
- from the number of bytes (nmemb) passed to the callback. Also, the
- inclination of some users has been to just return 0 to signal error,
- which is incorrect as that may be the number of bytes passed to the
- callback.
-
- To remedy this the user can now return CURL_WRITEFUNC_ERROR instead.
-
- Ref: https://github.com/curl/curl/issues/9873
-
- Closes https://github.com/curl/curl/pull/9874
-
-Daniel Stenberg (9 Nov 2022)
-
-- Revert "GHA: add scorecard.yml"
-
- This reverts commit ca76c79b34f9d90105674a2151bf228ff7b13bef.
-
-- GHA: add scorecard.yml
-
- add a "scorecard" scanner job
-
-Lorenzo Miniero (9 Nov 2022)
-
-- test3100: RTSP Basic authentication
-
- Closes #9449
-
-Daniel Stenberg (9 Nov 2022)
-
-- rtsp: fix RTSP auth
-
- Verified with test 3100
-
- Fixes #4750
- Closes #9870
-
-- KNOWN_BUGS: remove eight entries
-
- - 1.2 Multiple methods in a single WWW-Authenticate: header
-
- This is not considered a bug anymore but a restriction and one that we
- keep because we have NEVER gotten this reported by users in the wild and
- because of this I consider this a fringe edge case we don't need to
- support.
-
- - 1.6 Unnecessary close when 401 received waiting for 100
-
- This is not a bug, but possibly an optimization that *can* be done.
-
- - 1.7 Deflate error after all content was received
-
- This is not a curl bug. This happens due to broken servers.
-
- - 2.1 CURLINFO_SSL_VERIFYRESULT has limited support
-
- This is not a bug. This is just the nature of the implementation.
-
- - 2.2 DER in keychain
-
- This is not a bug.
-
- - 5.7 Visual Studio project gaps
-
- This is not a bug.
-
- - 15.14 cmake build is not thread-safe
-
- Fixed in 109e9730ee5e2b
-
- - 11.3 Disconnects do not do verbose
-
- This is not a bug.
-
- Closes #9871
-
-Hirotaka Tagawa (9 Nov 2022)
-
-- headers: add endif comments
-
- Closes #9853
-
-Daniel Stenberg (8 Nov 2022)
-
-- test1221: verify --url-query
-
-- curl: add --url-query
-
- This option adds a piece of data, usually a name + value pair, to the
- end of the URL query part. The syntax is identical to that used for
- --data-urlencode with one extension:
-
- If the argument starts with a '+' (plus), the rest of the string is
- provided as-is unencoded.
-
- This allows users to "build" query parts with options and URL encoding
- even when not doing GET requests, which the already provided option -G
- (--get) is limited to.
-
- This idea was born in a Twitter thread.
-
- Closes #9691
-
-- maketgz: set the right version in lib/libcurl.plist
-
- Follow-up to e498a9b1fe5964a18eb2a3a99dc52
-
- Make sure the tarball gets a version of the libcurl.plist file that is
- updated with the new version string.
-
- Reported-by: jvreelanda on github
- Fixes #9866
- Closes #9867
-
-- RELEASE-NOTES: synced
-
- Bumped version to 7.87.0
-
-Michael Drake (8 Nov 2022)
-
-- curl.h: add CURLOPT_CA_CACHE_TIMEOUT option
-
- Adds a new option to control the maximum time that a cached
- certificate store may be retained for.
-
- Currently only the OpenSSL backend implements support for
- caching certificate stores.
-
- Closes #9620
-
-- openssl: reduce CA certificate bundle reparsing by caching
-
- Closes #9620
-
-Rose (8 Nov 2022)
-
-- lib: fix some type mismatches and remove unneeded typecasts
-
- Many of these castings are unneeded if we change the variables to work
- better with each other.
-
- Ref: https://github.com/curl/curl/pull/9823
-
- Closes https://github.com/curl/curl/pull/9835
-
-Daniel Stenberg (8 Nov 2022)
-
-- cookie: compare cookie prefixes case insensitively
-
- Adapted to language in rfc6265bis draft-11.
-
- Closes #9863
-
- Reviewed-by: Daniel Gustafsson
-
-- tool_operate: when aborting, make sure there is a non-NULL error buffer
-
- To store custom errors in. Or SIGSEGVs will follow.
-
- Reported-by: Trail of Bits
- Closes #9865
-
-- WEBSOCKET.md: fix broken link
-
- Reported-by: Felipe Gasper
- Bug: https://curl.se/mail/lib-2022-10/0097.html
- Closes #9864
-
-- CURLOPT_DEBUGFUNCTION.3: do not assume nul-termination in example
-
- Reported-by: Oskar Sigvardsson
-
- Bug: https://curl.se/mail/lib-2022-11/0016.html
-
- Closes #9862
-
-Stefan Eissing (7 Nov 2022)
-
-- websockets: fix handling of partial frames
-
-    Buffer usage and send length calculations are fixed when a partial
- websocket frame has been received.
-
- Closes #9861
-
-Daniel Stenberg (7 Nov 2022)
-
-- mailmap: unify Stefan Eissing
-
-Stefan Eissing (7 Nov 2022)
-
-- hyper: fix handling of hyper_task's when reusing the same address
-
- Fixes #9840
- Closes #9860
-
-Jay Satiro (7 Nov 2022)
-
-- ws: return CURLE_NOT_BUILT_IN when websockets not built in
-
- - Change curl_ws_recv & curl_ws_send to return CURLE_NOT_BUILT_IN when
- websockets support is not built in.
-
- Prior to this change they returned CURLE_OK.
-
- Closes #9851
-
-Daniel Stenberg (7 Nov 2022)
-
-- noproxy: tailmatch like in 7.85.0 and earlier
-
-    A regression in 7.86.0 (via 1e9a538e05c010) made the tailmatch work
- differently than before. This restores the logic to how it used to work:
-
- All names listed in NO_PROXY are tailmatched against the used domain
- name, if the lengths are identical it needs a full match.
-
- Update the docs, update test 1614.
-
- Reported-by: Stuart Henderson
- Fixes #9842
- Closes #9858
-
-- configure: require fork for NTLM-WB
-
- Reported-by: ウさん
-
- Fixes #9847
- Closes #9856
-
-- docs/EARLY-RELEASE.md: how to determine an early release
-
- URL: https://curl.se/mail/lib-2022-10/0079.html
-
- Closes #9820
-
-- RELEASE-NOTES: synced
-
-Zespre Schmidt (3 Nov 2022)
-
-- docs: add missing parameters for --retry flag
-
- Closes #9848
-
-Adam Averay (3 Nov 2022)
-
-- libcurl-errors.3: remove duplicate word
-
- Closes #9846
-
-Eric Vigeant (3 Nov 2022)
-
-- cur_path: do not add '/' if homedir ends with one
-
- When using SFTP and a path relative to the user home, do not add a
- trailing '/' to the user home dir if it already ends with one.
-
- Closes #9844
-
-Viktor Szakats (1 Nov 2022)
-
-- windows: fail early with a missing windres in autotools
-
- `windres` is not always auto-detected by autotools when building for
- Windows. When this happened, the build failed with a confusing error due
- to the empty `RC` command:
-
- ```
- /bin/bash ../libtool --tag=RC --mode=compile -I../include -DCURL_EMBED_MANIF
- EST -i curl.rc -o curl.o
- [...]
- Usage: /sandbox/curl/libtool [OPTION]... [MODE-ARG]...
- Try 'libtool --help' for more information.
- libtool: error: unrecognised option: '-I../include'
- ```
-
- Improve this by verifying if `RC` is set, and fail with a clear error
- otherwise.
-
- Follow-up to 6de7322c03d5b4d91576a7d9fc893e03cc9d1057
-
- Ref: https://curl.se/mail/lib-2022-10/0049.html
- Reported-by: Thomas Glanzmann
- Closes #9781
-
-- lib: sync guard for Curl_getaddrinfo_ex() definition and use
-
- `Curl_getaddrinfo_ex()` gets _defined_ with `HAVE_GETADDRINFO` set. But,
- `hostip4.c` _used_ it with `HAVE_GETADDRINFO_THREADSAFE` set alone. It
- meant a build with the latter, but without the former flag could result
- in calling this function but not defining it, and failing to link.
-
-    Patch this by adding an extra check for `HAVE_GETADDRINFO` around the
- call.
-
- Before this patch, build systems prevented this condition. Now they
- don't need to.
-
- While here, simplify the related CMake logic on Windows by setting
- `HAVE_GETADDRINFO_THREADSAFE` to the detection result of
- `HAVE_GETADDRINFO`. This expresses the following intent clearer than
- the previous patch and keeps the logic in a single block of code:
- When we have `getaddrinfo()` on Windows, it's always threadsafe.
-
- Follow-up to 67d88626d44ec04b9e11dca4cfbf62cd29fe9781
-
- Reviewed-by: Jay Satiro
- Closes #9734
-
-- tidy-up: process.h detection and use
-
- This patch aims to cleanup the use of `process.h` header and the macro
- `HAVE_PROCESS_H` associated with it.
-
- - `process.h` is always available on Windows. In curl, it is required
- only for `_beginthreadex()` in `lib/curl_threads.c`.
-
- - `process.h` is also available in MS-DOS. In curl, its only use was in
- `lib/smb.c` for `getpid()`. But `getpid()` is in fact declared by
- `unistd.h`, which is always enabled via `lib/config-dos.h`. So the
- header is not necessary.
-
- - `HAVE_PROCESS_H` was detected by CMake, forced to 1 on Windows and
- left to real detection for other platforms.
- It was also set to always-on in `lib/config-win32.h` and
- `lib/config-dos.h`.
- In autotools builds, there was no detection and the macro was never
- set.
-
- Based on these observations, in this patch we:
-
- - Rework Windows `getpid` logic in `lib/smb.c` to always use the
- equivalent direct Win32 API function `GetCurrentProcessId()`, as we
- already did for Windows UWP apps. This makes `process.h` unnecessary
- here on Windows.
-
- - Stop #including `process.h` into files where it was not necessary.
- This is everywhere, except `lib/curl_threads.c`.
-
- > Strangely enough, `lib/curl_threads.c` compiled fine with autotools
-    > because `process.h` is also indirectly included via `unistd.h`. This
- > might have been broken in autotools MSVC builds, where the latter
- > header is missing.
-
- - Delete all remaining `HAVE_PROCESS_H` feature guards, for they were
- unnecessary.
-
- - Delete `HAVE_PROCESS_H` detection from CMake and predefined values
- from `lib/config-*.h` headers.
-
- Reviewed-by: Jay Satiro
- Closes #9703
-
-Daniel Stenberg (1 Nov 2022)
-
-- lib1301: unit103 turned into a libtest
-
- It is not a unit test so moved over to libtests.
-
-- strcase: use curl_str(n)equal for case insensitive matches
-
- No point in having two entry points for the same functions.
-
- Also merged the *safe* function treatment into these so that they can
- also be used when one or both pointers are NULL.
-
- Closes #9837
-
-- README.md: remove badges and xmas-tree garnish
-
- URL: https://curl.se/mail/lib-2022-10/0050.html
-
- Closes #9833
-
-Patrick Monnerat (1 Nov 2022)
-
-- gen.pl: do not generate CURLHELP bitmask lines > 79 characters
-
- If a command line option is in many help categories, there is a risk
- that CURLHELP bitmask source lines generated for listhelp are longer
- than 79 characters.
-
- This change takes care of folding such long lines.
-
-    Closes #9834
-
-Marc Hoersken (30 Oct 2022)
-
-- CI/cirrus: remove superfluous double-quotes and sudo
-
- Follow up to #9565 and #9677
- Closes #9738
-
-- tests/sshserver.pl: re-enable ssh-rsa while using openssh 8.8+
-
- Ref: #9738
-
-Daniel Stenberg (30 Oct 2022)
-
-- style: use space after comment start and before comment end
-
- /* like this */
-
- /*not this*/
-
- checksrc is updated accordingly
-
- Closes #9828
-
-Patrick Schlangen (30 Oct 2022)
-
-- docs: remove performance note in CURLOPT_SSL_VERIFYPEER
-
- This note became obsolete since PR #7892 (see also discussion in the PR
- comments).
-
- Closes #9832
-
-Daniel Stenberg (30 Oct 2022)
-
-- tests/server: make use of strcasecompare from lib/
-
- ... instead of having a second private implementation.
-
- Idea triggered by #9830
-
- Closes #9831
-
-- curl: timeout in the read callback
-
- The read callback can timeout if there's nothing to read within the
- given maximum period. Example use case is when doing "curl -m 3
- telnet://example.com" or anything else that expects input on stdin or
- similar that otherwise would "hang" until something happens and then not
- respect the timeout.
-
- This fixes KNOWN_BUG 8.1, first filed in July 2009.
-
- Bug: https://sourceforge.net/p/curl/bugs/846/
-
- Closes #9815
-
-- noproxy: fix tail-matching
-
- Also ignore trailing dots in both host name and comparison pattern.
-
- Regression in 7.86.0 (from 1e9a538e05c0)
-
- Extended test 1614 to verify better.
-
- Reported-by: Henning Schild
- Fixes #9821
- Closes #9822
-
-- docs: explain the noproxy CIDR notation support
-
- Follow-up to 1e9a538e05c0107c
-
- Closes #9818
-
-Jon Rumsey (27 Oct 2022)
-
-- os400: use platform socklen_t in Curl_getnameinfo_a
-
- Curl_getnameinfo_a() is prototyped before including curl.h as an
- ASCII'fied wrapper for getnameinfo(), which itself is prototyped with
- socklen_t arguments, so this should use the platform socklen_t and not
- curl_socklen_t too.
-
- Update setup-os400.h
-
- Fixes #9811
- Closes #9812
-
-Daniel Stenberg (27 Oct 2022)
-
-- noproxy: also match with adjacent comma
-
- If the host name is an IP address and the noproxy string contained that
- IP address with a following comma, it would erroneously not match.
-
- Extended test 1614 to verify this combo as well.
-
- Reported-by: Henning Schild
-
- Fixes #9813
- Closes #9814
-
-Randall S. Becker (27 Oct 2022)
-
-- build: fix for NonStop
-
- - Include arpa/inet.h in all units where htonl is called.
-
- Signed-off-by: Randall S. Becker <rsbecker@nexbridge.com>
-
- Closes https://github.com/curl/curl/pull/9816
-
-- system.h: support 64-bit curl_off_t for NonStop 32-bit
-
- - Correctly define curl_off_t on NonStop (ie __TANDEM) ia64 and x86 for
- 32-bit builds.
-
- Signed-off-by: Randall S. Becker <randall.becker@nexbridge.ca>
-
- Closes https://github.com/curl/curl/pull/9817
-
-Daniel Stenberg (27 Oct 2022)
-
-- spellcheck.words: remove 'github' as an accepted word
-
- Prefer the properly cased version: GitHub
-
- Use markdown for links and GitHub in text.
-
- Closes #9810
-
-Ayesh Karunaratne (27 Oct 2022)
-
-- misc: typo and grammar fixes
-
- - Replace `Github` with `GitHub`.
- - Replace `windows` with `Windows`
- - Replace `advice` with `advise` where a verb is used.
- - A few fixes on removing repeated words.
- - Replace `a HTTP` with `an HTTP`
-
- Closes #9802
-
-Viktor Szakats (27 Oct 2022)
-
-- windows: fix linking .rc to shared curl with autotools
-
- `./configure --enable-shared --disable-static` fails when trying to link
- a shared `curl.exe`, due to `libtool` magically changing the output
- filename of `windres` to one that it doesn't find when linking:
-
- ```
- /bin/sh ../libtool --tag=RC --mode=compile windres -I../../curl/include -DCURL_EMBED_MANIFEST -i ../../curl/src/curl.rc -o curl.o
- libtool: compile: windres -I../../curl/include -DCURL_EMBED_MANIFEST -i ../../curl/src/curl.rc -o .libs/curl.o
- [...]
- CCLD curl.exe
- clang: error: no such file or directory: 'curl.o'
- ```
-
- Let's resolve this by skipping `libtool` and calling `windres` directly
- when building `src` (aka `curl.exe`). Leave `lib` unchanged, as it does
- need the `libtool` magic. This solution is compatible with building
- a static `curl.exe`.
-
- This build scenario is not CI-tested.
-
- While here, delete an obsolete comment about a permanent `libtool`
- warning that we've resolved earlier.
-
- Regression from 6de7322c03d5b4d91576a7d9fc893e03cc9d1057
-
- Reported-by: Christoph Reiter
- Fixes #9803
- Closes #9805
-
-- cmake: really enable warnings with clang
-
- Even though `PICKY_COMPILER=ON` is the default, warnings were not
- enabled when using llvm/clang, because `CMAKE_COMPILER_IS_CLANG` was
- always false (in my tests at least).
-
- This is the single use of this variable in curl, and in a different
- place we already use `CMAKE_C_COMPILER_ID MATCHES "Clang"`, which works
- as expected, so change the condition to use that instead.
-
- Also fix the warnings uncovered by the above:
-
- - lib: add casts to silence clang warnings
-
- - schannel: add casts to silence clang warnings in ALPN code
-
- Assuming the code is correct, solve the warnings with a cast.
- This particular build case isn't CI tested.
-
- There is a chance the warning is relevant for some platforms, perhaps
- Windows 32-bit ARM7.
-
- Closes #9783
-
-Joel Depooter (26 Oct 2022)
-
-- sendf: remove unnecessary if condition
-
- At this point, the psnd->buffer will always exist. We have already
- allocated a new buffer if one did not previously exist, and returned
- from the function if the allocation failed.
-
- Closes #9801
-
-Viktor Szakats (26 Oct 2022)
-
-- winidn: drop WANT_IDN_PROTOTYPES
-
- `WANT_IDN_PROTOTYPES` was necessary to avoid using a header that came
- via an optional package. MS stopped distributing this package some
- years ago and the winidn definitions are part of standard headers (via
- `windows.h`) since Vista.
-
- Auto-detect Vista inside `lib/idn_win32.c` and enable the manual
- definitions if building for an older Windows.
-
- This allows deleting this manual knob from all build systems.
-
- Also drop the `_SAL_VERSION` sub-case:
-
- Our manual definitions are now only enabled with old systems. We assume
- that code analysis is not run on such systems, allowing us to delete the
- SAL-friendly flavour of these.
-
- Reviewed-by: Jay Satiro
- Closes #9793
-
-Daniel Stenberg (26 Oct 2022)
-
-- misc: remove duplicated include files
-
- Closes #9796
-
-- scripts/checksrc.pl: detect duplicated include files
-
- After an idea by Dan Fandrich in #9794
-
- Closes #9796
-
-- RELEASE-NOTES: synced
-
- And bumped version to 7.86.1 for now
-
-- CURLMOPT_SOCKETFUNCTION.3: clarify CURL_POLL_REMOVE
-
- The removal is brief or long, don't assume.
-
- Reported-by: Luca Niccoli
-
- Fixes #9799
- Closes #9800
-
-Version 7.86.0 (26 Oct 2022)
-
-Daniel Stenberg (26 Oct 2022)
-
-- RELEASE: synced
-
- The 7.86.0 release
-
-- THANKS: added from the 7.86.0 release
-
-Viktor Szakats (25 Oct 2022)
-
-- noproxy: include netinet/in.h for htonl()
-
- Solve the Amiga build warning by including `netinet/in.h`.
-
- `krb5.c` and `socketpair.c` are using `htonl()` too. This header is
- already included in those sources.
-
- Regression from 1e9a538e05c0107c54ef81d9de7cd0b27cd13309
-
- Reviewed-by: Daniel Stenberg
- Closes #9787
-
-Marc Hoersken (24 Oct 2022)
-
-- CI: fix AppVeyor status failing for starting jobs
-
-Daniel Stenberg (24 Oct 2022)
-
-- test445: verifies the protocols-over-http-proxy flaw and fix
-
-- http_proxy: restore the protocol pointer on error
-
- Reported-by: Trail of Bits
-
- Closes #9790
-
-- multi: remove duplicate include of connect.h
-
- Reported-by: Martin Strunz
- Fixes #9794
- Closes #9795
-
-Daniel Gustafsson (24 Oct 2022)
-
-- idn: fix typo in test description
-
- s/enabked/enabled/i
-
-Daniel Stenberg (24 Oct 2022)
-
-- url: use IDN decoded names for HSTS checks
-
- Reported-by: Hiroki Kurosawa
-
- Closes #9791
-
-- unit1614: fix disabled-proxy build
-
- Follow-up to 1e9a538e05c01
-
- Closes #9792
-
-Daniel Gustafsson (24 Oct 2022)
-
-- cookies: optimize control character check
-
- When checking for invalid octets the strcspn() call will return the
- position of the first found invalid char or the first NULL byte.
- This means that we can check the indicated position in the search-
- string saving a strlen() call.
-
- Closes: #9736
- Reviewed-by: Jay Satiro <raysatiro@yahoo.com>
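-
- A rough standalone illustration of the trick (a hypothetical helper, not
- the actual cookie.c code, and the rejected byte set here is only an
- example): strcspn() already reports the index of the first invalid byte
- or of the terminating NUL, so inspecting that single position is enough.
-
- ```c
- #include <stdbool.h>
- #include <string.h>
-
- /* illustrative set of rejected control bytes */
- #define BADBYTES "\x01\x02\x03\x04\x05\x06\x07\x08\x0a\x0b\x0c\x0d\x0e\x0f"
-
- /* true if 'value' contains none of the rejected bytes; no strlen() is
-    needed because hitting the NUL terminator means the scan was clean */
- static bool value_is_clean(const char *value)
- {
-   size_t pos = strcspn(value, BADBYTES);
-   return value[pos] == '\0';
- }
- ```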
-
-Daniel Stenberg (24 Oct 2022)
-
-- netrc: replace fgets with Curl_get_line
-
- Make the parser only accept complete lines and avoid problems with
- overly long lines.
-
- Reported-by: Hiroki Kurosawa
-
- Closes #9789
-
-- RELEASE-NOTES: add "Planned upcoming removals include"
-
- URL: https://curl.se/mail/archive-2022-10/0001.html
-
- Suggested-by: Dan Fandrich
-
-Viktor Szakats (23 Oct 2022)
-
-- ci: bump to gcc-11 for macos
-
- Ref: https://github.blog/changelog/2022-10-03-github-actions-jobs-running-on-macos-latest-are-now-running-on-macos-12/
- Ref: https://github.com/actions/runner-images/blob/main/images/macos/macos-12-Readme.md
-
- Reviewed-by: Max Dymond
- Closes #9785
-
-- Makefile.m32: reintroduce CROSSPREFIX and -W -Wall [ci skip]
-
- - Reintroduce `CROSSPREFIX`:
-
- If set, we add it to the `CC` and `AR` values, and to the _default_
- value of `RC`, which is `windres`. This allows controlling each of
- these individually, while also simplifying configuration
- via `CROSSPREFIX`.
-
- This variable worked differently earlier. Hopefully this new solution
- hits a better compromise in usefulness/complexity/flexibility.
-
- Follow-up to: aa970c4c08775afcd0c2853be89b0a6f02582d50
-
- - Enable warnings again:
-
- This time with an option to override it via `CFLAGS`. Warnings are
- also enabled by default in CMake, `makefile.dj` and `makefile.amiga`
- builds (not in autotools though).
-
- Follow-up to 10fbd8b4e3f83b967fd9ad9a41ab484c0e7e7ca3
-
- Closes #9784
-
-- noproxy: silence unused variable warnings with no ipv6
-
- Follow-up to 36474f1050c7f4117e3c8de6cc9217cfebfc717d
-
- Reviewed-by: Daniel Stenberg
- Closes #9782
-
-Daniel Stenberg (22 Oct 2022)
-
-- test644: verify --xattr (with redirect)
-
-- tool_xattr: save the original URL, not the final redirected one
-
- Adjusted test 1621 accordingly.
-
- Reported-by: Viktor Szakats
- Fixes #9766
- Closes #9768
-
-- docs: make sure libcurl opts examples pass in long arguments
-
- Reported-by: Sergey
- Fixes #9779
- Closes #9780
-
-Marc Hoersken (21 Oct 2022)
-
-- CI: fix AppVeyor job links only working for most recent build
-
- Ref: https://github.com/curl/curl/pull/9768#issuecomment-1286675916
- Reported-by: Daniel Stenberg
-
- Follow up to #9769
-
-Viktor Szakats (21 Oct 2022)
-
-- noproxy: fix builds without AF_INET6
-
- Regression from 1e9a538e05c0107c54ef81d9de7cd0b27cd13309
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9778
-
-Daniel Stenberg (21 Oct 2022)
-
-- noproxy: support proxies specified using cidr notation
-
- For both IPv4 and IPv6 addresses. Now also checks IPv6 addresses "correctly"
- and not with string comparisons.
-
- Split out the noproxy checks and functionality into noproxy.c
-
- Added unit test 1614 to verify checking functions.
-
- Reported-by: Mathieu Carbonneaux
-
- Fixes #9773
- Fixes #5745
- Closes #9775
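-
- As a usage illustration (host and proxy names below are made up), the
- no-proxy list can now carry CIDR ranges next to plain host names:
-
- ```c
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     curl_easy_setopt(curl, CURLOPT_URL, "http://192.168.1.7/");
-     curl_easy_setopt(curl, CURLOPT_PROXY, "http://proxy.example:3128");
-     /* skip the proxy for the whole private range, in CIDR notation */
-     curl_easy_setopt(curl, CURLOPT_NOPROXY, "192.168.0.0/16, example.com");
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```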
-
-- urlapi: remove two variable assigns
-
- To please scan-build:
-
- urlapi.c:1163:9: warning: Value stored to 'qlen' is never read
- qlen = Curl_dyn_len(&enc);
- ^ ~~~~~~~~~~~~~~~~~~
- urlapi.c:1164:9: warning: Value stored to 'query' is never read
- query = u->query = Curl_dyn_ptr(&enc);
- ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Follow-up to 7d6cf06f571d57
-
- Closes #9777
-
-Jeremy Maitin-Shepard (21 Oct 2022)
-
-- cmake: improve usability of CMake build as a sub-project
-
- - Renames `uninstall` -> `curl_uninstall`
- - Ensures all export rules are guarded by CURL_ENABLE_EXPORT_TARGET
-
- Closes #9638
-
-Don J Olmstead (21 Oct 2022)
-
-- easy_lock: check for HAVE_STDATOMIC_H as well
-
- The check for `HAVE_STDATOMIC_H` looks to see if the `stdatomic.h`
- header is present.
-
- Closes #9755
-
-Daniel Stenberg (21 Oct 2022)
-
-- RELEASE-NOTES: synced
-
-Brad Harder (20 Oct 2022)
-
-- CURLMOPT_PIPELINING.3: dedup manpage xref
-
- Closes #9776
-
-Marc Hoersken (20 Oct 2022)
-
-- CI: report AppVeyor build status for each job
-
- Also give each job on AppVeyor CI a human-readable name.
-
- This aims to make job and therefore build failures more visible.
-
- Reviewed-by: Marcel Raad
- Closes #9769
-
-Viktor Szakats (20 Oct 2022)
-
-- amiga: set SIZEOF_CURL_OFF_T=8 by default [ci skip]
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9771
-
-- connect: fix builds without AF_INET6
-
- Regression from 2b309560c1e5d6ed5c0e542e6fdffa968b0521c9
-
- Reviewed-by: Daniel Stenberg
- Reviewed-by: Jay Satiro
-
- Closes #9770
-
-Daniel Stenberg (20 Oct 2022)
-
-- test1105: adjust <data> to work with a hyper build
-
- Closes #9767
-
-- urlapi: fix parsing URL without slash with CURLU_URLENCODE
-
- When CURLU_URLENCODE is set, the parser would mistreat the path
- component if the URL was specified without a slash like in
- http://local.test:80?-123
-
- Extended test 1560 to reproduce and verify the fix.
-
- Reported-by: Trail of Bits
-
- Closes #9763
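-
- A small standalone check of the case this covers could look like the
- sketch below (illustrative only; the printed values reflect the fixed
- behavior, with the query kept separate from a "/" path):
-
- ```c
- #include <stdio.h>
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURLU *u = curl_url();
-   char *path = NULL;
-   char *query = NULL;
-
-   /* a URL without a slash before the query part */
-   curl_url_set(u, CURLUPART_URL, "http://local.test:80?-123",
-                CURLU_URLENCODE);
-   curl_url_get(u, CURLUPART_PATH, &path, 0);
-   curl_url_get(u, CURLUPART_QUERY, &query, 0);
-   printf("path: %s query: %s\n", path, query); /* expect "/" and "-123" */
-
-   curl_free(path);
-   curl_free(query);
-   curl_url_cleanup(u);
-   return 0;
- }
- ```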
-
-Marc Hoersken (19 Oct 2022)
-
-- tests: avoid CreateThread if _beginthreadex is available
-
- CreateThread is not threadsafe if mixed with CRT calls.
- _beginthreadex on the other hand can be mixed with CRT.
-
- Reviewed-by: Marcel Raad
- Closes #9705
-
-Joel Depooter (19 Oct 2022)
-
-- schannel: Don't reset recv/send function pointers on renegotiation
-
- These function pointers will have been set when the initial TLS
- handshake was completed. If they are unchanged, there is no need to set
- them again. If they have been changed, as is the case with HTTP/2, we
- don't want to override that change. That would result in the
- http2_recv/send functions being completely bypassed.
-
- Prior to this change a connection that uses Schannel with HTTP/2 would
- fail on renegotiation with error "Received HTTP/0.9 when not allowed".
-
- Fixes https://github.com/curl/curl/issues/9451
- Closes https://github.com/curl/curl/pull/9756
-
-Viktor Szakats (18 Oct 2022)
-
-- hostip: guard PF_INET6 use
-
- Some platforms (e.g. Amiga OS) do not have `PF_INET6`. Adjust the code
- for these.
-
- ```
- hostip.c: In function 'fetch_addr':
- hostip.c:308:12: error: 'PF_INET6' undeclared (first use in this function)
- pf = PF_INET6;
- ^~~~~~~~
- ```
-
- Regression from 1902e8fc511078fb5e26fc2b907b4cce77e1240d
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9760
-
-- amiga: do not hardcode openssl/zlib into the os config [ci skip]
-
- Enable them in `lib/makefile.amiga` and `src/makefile.amiga` instead.
-
- This allows builds without openssl and/or zlib. E.g. with the
- <https://github.com/bebbo/amiga-gcc> cross-compiler.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9762
-
-- amigaos: add missing curl header [ci skip]
-
- Without it, `CURLcode` and `CURLE_*` are undefined. `lib/hostip.h` and
- conditional local code need them.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9761
-
-Daniel Stenberg (18 Oct 2022)
-
-- cmdline/docs: add a required 'multi' keyword for each option
-
- The keyword specifies how option works when specified multiple times:
-
- - single: the last provided value replaces the earlier ones
- - append: it supports being provided multiple times
- - boolean: on/off values
- - mutex: flag-like option that disables another flag
-
- The 'gen.pl' script then outputs the proper and unified language for
- each option's multi-use behavior in the generated man page.
-
- The multi: header is required in each .d file and will cause a build error
- if missing or set to an unknown value.
-
- Closes #9759
-
-- CURLOPT_AUTOREFERER.3: highlight the privacy leak risk
-
- Closes #9757
-
-- mprintf: reject two kinds of precision for the same argument
-
- An input like "%.*1$.9999d" would first use the precision taken as an
- argument *and* then the precision specified in the string, which is
- confusing and wrong. pass1 will now instead return error on this double
- use.
-
- Adjusted unit test 1398 to verify
-
- Reported-by: Peter Goodman
-
- Closes #9754
-
-- ftp: remove redundant if
-
- Reported-by: Trail of Bits
-
- Closes #9753
-
-- tool_operate: more transfer cleanup after parallel transfer fail
-
- In some circumstances when doing parallel transfers, the
- single_transfer_cleanup() would not be called and then 'inglob' could
- leak.
-
- Test 496 verifies
-
- Reported-by: Trail of Bits
- Closes #9749
-
-- mqtt: spell out CONNECT in comments
-
- Instead of calling it 'CONN' in several comments, use the full and
- correct protocol packet name.
-
- Suggested by Trail of Bits
-
- Closes #9751
-
-- CURLOPT_POSTFIELDS.3: refer to CURLOPT_MIMEPOST
-
- Not the deprecated CURLOPT_HTTPPOST option.
-
- Also added two see-alsos.
-
- Reported-by: Trail of Bits
- Closes #9752
-
-- RELEASE-NOTES: synced
-
-Jay Satiro (17 Oct 2022)
-
-- ngtcp2: Fix build errors due to changes in ngtcp2 library
-
- ngtcp2/ngtcp2@b0d86f60 changed:
-
- - ngtcp2_conn_get_max_udp_payload_size =>
- ngtcp2_conn_get_max_tx_udp_payload_size
-
- - ngtcp2_conn_get_path_max_udp_payload_size =>
- ngtcp2_conn_get_path_max_tx_udp_payload_size
-
- ngtcp2/ngtcp2@ec59b873 changed:
-
- - 'early_data_rejected' member added to ng_callbacks.
-
- Assisted-by: Daniel Stenberg
- Reported-by: jurisuk@users.noreply.github.com
-
- Fixes https://github.com/curl/curl/issues/9747
- Closes https://github.com/curl/curl/pull/9748
-
-Daniel Stenberg (16 Oct 2022)
-
-- curl_path: return error if given a NULL homedir
-
- Closes #9740
-
-- libssh: if sftp_init fails, don't get the sftp error code
-
- This flow extracted the wrong code (sftp code instead of ssh code), and
- the code is sometimes (erroneously) returned as zero anyway, so skip
- getting it and set a generic error.
-
- Reported-by: David McLaughlin
- Fixes #9737
- Closes #9740
-
-- mqtt: return error for too long topic
-
- Closes #9744
-
-Rickard Hallerbäck (16 Oct 2022)
-
-- tool_paramhlp: make the max argument a 'double'
-
- To fix compiler warnings "Implicit conversion from 'long' to 'double'
- may lose precision"
-
- Closes #9700
-
-Philip Heiduck (15 Oct 2022)
-
-- cirrus-ci: add more macOS builds with m1 based on x86_64 builds
-
- Also refactor macOS builds to use task matrix.
-
- Assisted-by: Marc Hörsken
- Closes #9565
-
-Viktor Szakats (14 Oct 2022)
-
-- cmake: set HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID on Windows
-
- `lib/config-win32.h` enables this configuration option unconditionally.
- Make it apply to CMake builds as well.
-
- While here, delete a broken check for
- `HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID` from `CMakeLists.txt`. This came with
- the initial commit [1], but did not include the actual verification code
- inside `CMake/CurlTests.c`, so it always failed. A later commit [2]
- added a second test, for non-Windows platforms.
-
- Enabling this flag causes test 1056 to fail with CMake builds, as they
- do with autotools builds. Let's apply the same solution and ignore the
- results here as well.
-
- [1] 4c5307b45655ba75ab066564afdc0c111a8b9291
- [2] aec7c5a87c8482b6ddffa352d7d220698652262e
-
- Reviewed-by: Daniel Stenberg
- Assisted-by: Marcel Raad
-
- Closes #9726
-
-- cmake: set HAVE_GETADDRINFO_THREADSAFE on Windows
-
- autotools enables this configuration option unconditionally for Windows
- [^1]. Do the same in CMake.
-
- The above will make this work for all reasonably recent environments.
- The logic present in `lib/config-win32.h` [^2] has the following
- exceptions which we did not cover in this CMake update:
-
- - Builds targeting Windows 2000 and earlier
- - MS Visual C++ 5.0 (1997) and earlier
-
- Also make sure to disable this feature when `HAVE_GETADDRINFO` isn't
- set, to avoid a broken build. We might want to handle that in the C
- sources in a future commit.
-
- [^1]: https://github.com/curl/curl/blob/68fa9bf3f5d7b4fcbb57619f70cb4aabb79a51f6/m4/curl-functions.m4#L2067-L2070
-
- [^2]: https://github.com/curl/curl/blob/68fa9bf3f5d7b4fcbb57619f70cb4aabb79a51f6/lib/config-win32.h#L511-L528
-
- Closes #9727
-
-- cmake: sync HAVE_SIGNAL detection with autotools
-
- `HAVE_SIGNAL` means the availability of the `signal()` function in
- autotools, while in CMake it meant the availability of that function
- _and_ the symbol `SIGALRM`.
-
- The latter is not available on Windows, but the function is, which means
- on Windows, autotools did define `HAVE_SIGNAL`, but CMake did not,
- introducing a slight difference into the binaries.
-
- This patch syncs CMake behaviour with autotools to look for the function
- only.
-
- The logic came with the initial commit adding CMake support to curl, so
- the commit history doesn't reveal the reason behind it. In any case,
- it's best to check the existence of `SIGALRM` directly in the source
- before use. For now, curl builds fine with `HAVE_SIGNAL` enabled and
- `SIGALRM` missing.
-
- Follow-up to 68fa9bf3f5d7b4fcbb57619f70cb4aabb79a51f6
-
- Closes #9725
-
-- cmake: delete duplicate HAVE_GETADDRINFO test
-
- A custom `HAVE_GETADDRINFO` check came with the initial CMake commit
- [1]. A later commit [2] added a standard check for it as well. The
- standard check run before the custom one, so CMake ignored the latter.
-
- The custom check was also non-portable, so this patch deletes it in
- favor of the standard check.
-
- [1] 4c5307b45655ba75ab066564afdc0c111a8b9291
- [2] aec7c5a87c8482b6ddffa352d7d220698652262e
-
- Closes #9731
-
-Daniel Stenberg (14 Oct 2022)
-
-- tool_formparse: unroll the NULL_CHECK and CONST_FREE macros
-
- To make the code read more obvious
-
- Assisted-by: Jay Satiro
-
- Closes #9710
-
-Christopher Sauer (14 Oct 2022)
-
-- docs/INSTALL: update Android Instructions for newer NDKs
-
- Closes #9732
-
-Daniel Stenberg (14 Oct 2022)
-
-- markdown-uppercase: ignore quoted sections
-
- Sections within the markdown ~~~ or ``` are now ignored.
-
- Closes #9733
-
-- RELEASE-NOTES: synced
-
-- test8: update as cookies no longer can have "embedded" TABs in content
-
-- test1105: extend to verify TAB in name/content discarding cookies
-
-- cookie: reject cookie names or content with TAB characters
-
- TABs in name and content seem allowed by RFC 6265: "the algorithm strips
- leading and trailing whitespace from the cookie name and value (but
- maintains internal whitespace)"
-
- Cookies with TABs in the names are rejected by Firefox and Chrome.
-
- TABs in content are stripped out by Firefox, while Chrome discards the
- whole cookie.
-
- TABs in cookies also cause issues in saved netscape cookie files.
-
- Reported-by: Trail of Bits
-
- URL: https://curl.se/mail/lib-2022-10/0032.html
- URL: https://github.com/httpwg/http-extensions/issues/2262
-
- Closes #9659
-
-- curl/add_parallel_transfers: better error handling
-
- 1 - consider the transfer handled at once when in the function, to avoid
- the same list entry to get added more than once in rare error
- situations
-
- 2 - set the ERRORBUFFER for the handle first after it has been added
- successfully
-
- Reported-by: Trail of Bits
-
- Closes #9729
-
-- netrc: remove the two 'changed' arguments
-
- As no user of these functions used the returned content.
-
-- test495: verify URL encoded user name + netrc-optional
-
- Reproduced issue #9709
-
-- netrc: use the URL-decoded user
-
- When the user name is provided in the URL it is URL encoded there, but
- when used for authentication the encoded version should be used.
-
- Regression introduced after 7.83.0
-
- Reported-by: Jonas Haag
- Fixes #9709
- Closes #9715
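-
- For illustration, decoding a URL-embedded user name with the public
- unescape API (made-up example value) gives the form that should be
- matched against .netrc entries:
-
- ```c
- #include <stdio.h>
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   int outlen = 0;
-   /* "daniel%40haxx" in a URL corresponds to the user "daniel@haxx" */
-   char *decoded = curl_easy_unescape(curl, "daniel%40haxx", 0, &outlen);
-   printf("decoded user: %s (%d bytes)\n", decoded, outlen);
-   curl_free(decoded);
-   curl_easy_cleanup(curl);
-   return 0;
- }
- ```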
-
-Shaun Mirani (13 Oct 2022)
-
-- url: allow non-HTTPS HSTS-matching for debug builds
-
- Closes #9728
-
-Daniel Stenberg (13 Oct 2022)
-
-- test1275: remove the check of stderr
-
- To avoid the mysterious test failures on Windows, instead rely on the
- error code returned on failure.
-
- Fixes #9716
- Closes #9723
-
-Viktor Szakats (13 Oct 2022)
-
-- lib: set more flags in config-win32.h
-
- The goal is to add any flag that affect the created binary, to get in
- sync with the ones built with CMake and autotools.
-
- I took these flags from curl-for-win [0], where they've been tested with
- mingw-w64 and proven to work well.
-
- This patch brings them to curl as follows:
-
- - Enable unconditionally those force-enabled via
- `CMake/WindowsCache.cmake`:
-
- - `HAVE_SETJMP_H`
- - `HAVE_STRING_H`
- - `HAVE_SIGNAL` (CMake equivalent is `HAVE_SIGNAL_FUNC`)
-
- - Expand existing guards with mingw-w64:
-
- - `HAVE_STDBOOL_H`
- - `HAVE_BOOL_T`
-
- - Enable Win32 API functions for Windows Vista and later:
-
- - `HAVE_INET_NTOP`
- - `HAVE_INET_PTON`
-
- - Set sizes, if not already set:
-
- - `SIZEOF_OFF_T = 8`
- - `_FILE_OFFSET_BITS = 64` when `USE_WIN32_LARGE_FILES` is set,
- and using mingw-w64.
-
- - Add the remaining for mingw-w64 only. Feel free to expand as desired:
-
- - `HAVE_LIBGEN_H`
- - `HAVE_FTRUNCATE`
- - `HAVE_BASENAME`
- - `HAVE_STRTOK_R`
-
- Future TODO:
-
- - `HAVE_SIGNAL` has a different meaning in CMake. It's enabled when both
- the `signal()` function and the `SIGALRM` macro are found. In
- autotools and this header, it means the function only. For the
- function alone, CMake uses `HAVE_SIGNAL_FUNC`.
-
- [0] https://github.com/curl/curl-for-win/blob/c9b9a5f273c94c73d2b565ee892c4dff0ca97a8c/curl-m32.sh#L53-L58
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9712
-
-Daniel Stenberg (13 Oct 2022)
-
-- tests: add tests/markdown-uppercase.pl to dist tarball
-
- Follow-up to aafb06c5928183d
-
- Closes #9722
-
-- tool_paramhelp: asserts verify maximum sizes for string loading
-
- The two defines MAX_FILE2MEMORY and MAX_FILE2STRING define the largest
- strings accepted when loading files into memory, but as the size is
- later used as input to functions that take the size as an 'int'
- argument, the sizes must not be larger than INT_MAX.
-
- These two new assert()s make the code error out if someone would bump
- the sizes without this consideration.
-
- Reported-by: Trail of Bits
-
- Closes #9719
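-
- The guard is conceptually just this (a simplified sketch; the limit
- values shown are illustrative, not the real defines):
-
- ```c
- #include <assert.h>
- #include <limits.h>
-
- #define MAX_FILE2STRING (256*1024*1024)  /* illustrative value */
- #define MAX_FILE2MEMORY (1024*1024*1024) /* illustrative value */
-
- static void check_limits(void)
- {
-   /* fail loudly if someone bumps the limits past what an 'int' holds */
-   assert(MAX_FILE2STRING < INT_MAX);
-   assert(MAX_FILE2MEMORY < INT_MAX);
- }
- ```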
-
-- http: try parsing Retry-After: as a number first
-
- Since the date parser allows YYYYMMDD as a date format (due to it being
- a bit too generic for parsing this particular header), a large integer
- number could wrongly match that pattern and cause the parser to generate
- a wrong value.
-
- No date format accepted for this header starts with a decimal number, so
- by reversing the check and trying a number first we can deduct that if
- that works, it was not a date.
-
- Reported-by: Trail of Bits
-
- Closes #9718
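-
- The reversed order can be sketched like this (hypothetical helper built
- on the public curl_getdate() parser, not the in-tree code):
-
- ```c
- #include <stdlib.h>
- #include <time.h>
- #include <curl/curl.h>
-
- /* seconds to wait for a Retry-After value that is either delta-seconds
-    or an HTTP date; numbers are tried first so that something like
-    "20440616" is not misread as a YYYYMMDD date */
- static long retry_after_seconds(const char *value, time_t now)
- {
-   char *end = NULL;
-   long secs = strtol(value, &end, 10);
-   if(end && *end == '\0' && secs >= 0)
-     return secs; /* it was a plain number */
-   else {
-     time_t date = curl_getdate(value, &now);
-     return (date == -1 || date < now) ? 0 : (long)(date - now);
-   }
- }
- ```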
-
-Patrick Monnerat (13 Oct 2022)
-
-- doc: fix deprecation versions inconsistencies
-
- Ref: https://curl.se/mail/lib-2022-10/0026.html
-
- Closes #9711
-
-Daniel Stenberg (13 Oct 2022)
-
-- http_aws_sigv4: fix strlen() check
-
- The check was off-by-one leading to buffer overflow.
-
- Follow-up to 29c4aa00a16872
-
- Detected by OSS-Fuzz
-
- Closes #9714
-
-- curl/main_checkfds: check the fcntl return code better
-
- fcntl() can (in theory) return a non-zero number for success, so a
- better test for error is checking for -1 explicitly.
-
- Follow-up to 41e1b30ea1b77e9ff
-
- Mentioned-by: Dominik Klemba
-
- Closes #9708
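-
- The check boils down to something like this (simplified sketch, not the
- tool's exact code):
-
- ```c
- #include <stdbool.h>
- #include <fcntl.h>
-
- /* a descriptor counts as open unless fcntl() reports failure; only -1
-    means error, as a successful call may in theory return any other
-    non-negative value */
- static bool fd_is_open(int fd)
- {
-   return fcntl(fd, F_GETFD) != -1;
- }
- ```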
-
-Viktor Szakats (12 Oct 2022)
-
-- tidy-up: delete unused HAVE_STRUCT_POLLFD
-
- It was only defined in `lib/config-win32.h`, when building for Vista.
-
- It was only used in `select.h`, in a condition that also included a
- check for `POLLIN` which is a superior choice for this detection and
- which was already used by cmake and autotools builds.
-
- Delete both instances of this macro.
-
- Closes #9707
-
-Daniel Stenberg (12 Oct 2022)
-
-- test1275: verify uppercase after period in markdown
-
- Script based on the #9474 pull-request logic, but implemented in perl.
-
- Updated docs/URL-SYNTAX.md accordingly.
-
- Suggested-by: Dan Fandrich
-
- Closes #9697
-
-12932 (12 Oct 2022)
-
-- misc: nitpick grammar in comments/docs
-
- because the 'u' in URL is actually a consonant *sound* it is only
- correct to write "a URL"
-
- sorry this is a bit nitpicky :P
-
- https://english.stackexchange.com/questions/152/when-should-i-use-a-vs-an
- https://www.techtarget.com/whatis/feature/Which-is-correct-a-URL-or-an-URL
-
- Closes #9699
-
-Viktor Szakats (11 Oct 2022)
-
-- Makefile.m32: drop CROSSPREFIX and our CC/AR defaults [ci skip]
-
- This patch aimed to fix a regression [0], where `CC` initialization
- moved beyond its first use. But, on closer inspection it turned out that
- the `CC` initialization does not work as expected due to GNU Make
- filling it with `cc` by default. So unless implicit values were
- explicitly disabled via a GNU Make option, the default value of
- `$CROSSPREFIX` + `gcc` was never used. At the same time the implicit
- value `cc` maps to `gcc` in (most/all?) MinGW envs.
-
- `AR` has the same issue, with a default value of `ar`.
-
- We could reintroduce a separate variable to fix this without ill
- effects, but for simplicity and flexibility, it seems better to drop
- support for `CROSSPREFIX`, along with our own `CC`/`AR` init logic, and
- require the caller to initialize `CC`, `AR` and `RC` to the full
- (prefixed if necessary) names of these tools, as desired.
-
- We keep `RC ?= windres` because `RC` is empty by default.
-
- Also fix grammar in a comment.
-
- [0] 10fbd8b4e3f83b967fd9ad9a41ab484c0e7e7ca3
-
- Closes #9698
-
-- smb: replace CURL_WIN32 with WIN32
-
- PR #9255 aimed to fix a Cygwin/MSYS issue (#8220). It used the
- `CURL_WIN32` macro, but that one is not defined here, while compiling
- curl itself. This patch changes this to `WIN32`, assuming this was the
- original intent.
-
- Regression from 1c52e8a3795ccdf8ec9c308f4f8f19cf10ea1f1a
-
- Reviewed-by: Marcel Raad
-
- Closes #9701
-
-Matthias Gatto (11 Oct 2022)
-
-- aws_sigv4: fix header computation
-
- Handle canonical headers and signed headers creation as explained here:
- https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-
- The algorithm says that the signed and canonical headers must contain at
- least host and x-amz-date.
-
- So we check whether those are present in the curl http headers list.
- If they are, we use the ones entered by the curl user, otherwise we
- generate them. Then we lowercase and remove spaces from each http header
- plus host and x-amz-date, and sort them all in alphabetical order.
-
- This patch also fixes a bug with the host header, which was ignoring the
- port.
-
- Closes #7966
-
-Aftab Alam (11 Oct 2022)
-
-- README.md: link the curl logo to the website
-
- - Link the curl:// image to https://curl.se/
-
- Closes https://github.com/curl/curl/pull/9675
-
-Dustin Howett (11 Oct 2022)
-
-- schannel: when importing PFX, disable key persistence
-
- By default, the PFXImportCertStore API persists the key in the user's
- key store (as though the certificate was being imported for permanent,
- ongoing use.)
-
- The documentation specifies that keys that are not to be persisted
- should be imported with the flag PKCS12_NO_PERSIST_KEY.
- NOTE: this flag is only supported on versions of Windows newer than XP
- and Server 2003.
-
- --
-
- This is take 2 of the original fix. It extends the lifetime of the
- client certificate store to that of the credential handle. The original
- fix which landed in 70d010d and was later reverted in aec8d30 failed to
- work properly because it did not do that.
-
- Minor changes were made to the schannel credential context to support
- closing the client certificate store handle at the end of an SSL session.
-
- --
-
- Reported-by: ShadowZzj@users.noreply.github.com
-
- Fixes https://github.com/curl/curl/issues/9300
- Supersedes https://github.com/curl/curl/pull/9363
- Closes https://github.com/curl/curl/pull/9460
-
-Viktor Szakats (11 Oct 2022)
-
-- Makefile.m32: support more options [ci skip]
-
- - Add support for these options:
- `-wolfssl`, `-wolfssh`, `-mbedtls`, `-libssh`, `-psl`
-
- Caveats:
- - `-wolfssh` requires `-wolfssl`.
- - `-wolfssl` cannot be used with OpenSSL backends in parallel.
- - `-libssh` has build issues with BoringSSL and LibreSSL, and also
- what looks like a world-writable-config vulnerability on Windows.
- Consider it experimental.
- - `-psl` requires `-idn2` and extra libs passed via
- `LIBS=-liconv -lunistring`.
-
- - Detect BoringSSL/wolfSSL and set ngtcp2 crypto lib accordingly.
- - Generalize MultiSSL detection.
- - Use else-if syntax. Requires GNU Make 3.81 (2006-04-01).
- - Document more customization options.
-
- This brings over some configuration logic from `curl-for-win`.
-
- Closes #9680
-
-- cmake: enable more detection on Windows
-
- Enable `HAVE_UNISTD_H`, `HAVE_STRTOK_R` and `HAVE_STRCASECMP` detection
- on Windows, instead of having predefined values.
-
- With these features detected correctly, CMake Windows builds get closer
- to the autotools and `config-win32.h` ones.
-
- This also fixes detecting `HAVE_FTRUNCATE` correctly, which required
- `unistd.h`.
-
- Fixing `ftruncate()` in turn causes a build warning/error with legacy
- MinGW/MSYS1 due to an offset type size mismatch. This environment fails
- to detect `HAVE_FILE_OFFSET_BITS`, which may be the reason. This patch
- force-disables `HAVE_FTRUNCATE` for this platform.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9687
-
-- autotools: allow unix sockets on Windows
-
- Fixes: https://github.com/curl/curl-for-win/blob/73a070d96fd906fdee929e2f1f00a9149fb39239/curl-autotools.sh#L44-L47
-
- On Windows this feature is present, but not the header used in the
- detection logic. It also requires an elaborate enabler logic
- (as seen in `lib/curl_setup.h`). Let's always allow it and let the
- lib code deal with the details.
-
- Closes #9688
-
-- cmake: add missing inet_ntop check
-
- This adds the missing half of the check, next to the other half
- already present in `lib/curl_config.h.cmake`.
-
- Force disable `HAVE_INET_NTOP` for old MSVC where it caused compiler
- warnings.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9689
-
-Daniel Stenberg (11 Oct 2022)
-
-- RELEASE-NOTES: synced
-
-bsergean on github (11 Oct 2022)
-
-- asyn-ares: set hint flags when calling ares_getaddrinfo
-
- The hint flag is ARES_AI_NUMERICSERV, and setting it saves a call to
- getservbyname or getservbyname_r.
-
- Closes #9694
-
-Daniel Stenberg (11 Oct 2022)
-
-- header.d: add category smtp and imap
-
- They were previously (erroneously) added manually to tool_listhelp.c
- which would make them get removed again when the file is updated next
- time, unless added correctly here in header.d
-
- Follow-up to 2437fac01
-
- Closes #9690
-
-- curl/get_url_file_name: use libcurl URL parser
-
- To avoid URL tricks, use the URL parser for this.
-
- This update changes curl's behavior slightly in that it will ignore the
- possible query part from the URL and only use the file name from the
- actual path from the URL. I consider it a bugfix.
-
- "curl -O localhost/name?giveme-giveme" will now save the output in the
- local file named 'name'
-
- Updated test 1210 to verify
-
- Assisted-by: Jay Satiro
-
- Closes #9684
-
-Martin Ågren (11 Oct 2022)
-
-- docs: fix grammar around needing pass phrase
-
- "You never needed a pass phrase" reads like it's about to be followed by
- something like "until version so-and-so", but that is not what is
- intended. Change to "You never need a pass phrase". There are two
- instances of this text, so make sure to update both.
-
-Xiang Xiao (10 Oct 2022)
-
-- cmake: add the check of HAVE_SOCKETPAIR
-
- which is used by Curl_socketpair
-
- Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>
-
- Closes #9686
-
-Daniel Stenberg (10 Oct 2022)
-
-- curl/add_file_name_to_url: use the libcurl URL parser
-
- instead of the custom error-prone parser, to extract and update the path
- of the given URL
-
- Closes #9683
-
-- single_transfer: use the libcurl URL parser when appending query parts
-
- Instead of doing "manual" error-prone parsing in another place.
-
- Used when --data contents is added to the URL query when -G is provided.
-
- Closes #9681
-
-- ws: fix buffer pointer use in the callback loop
-
- Closes #9678
-
-Petr Štetiar (10 Oct 2022)
-
-- curl-wolfssl.m4: error out if wolfSSL is not usable
-
- When I explicitly declare that I would like to have curl built with
- wolfSSL support using the `--with-wolfssl` configure option, then I would
- expect that I either end up with curl having that support, for example
- in the form of https support, or it wouldn't be available at all.
-
- Downstream projects, for example OpenWrt, already build the curl wolfSSL
- variant with `--with-wolfssl`, but in certain corner cases it does fail:
-
- configure:25299: checking for wolfSSL_Init in -lwolfssl
- configure:25321: x86_64-openwrt-linux-musl-gcc -o conftest [snip]
- In file included from target-x86_64_musl/usr/include/wolfssl/wolfcrypt/dsa.h:33,
- from target-x86_64_musl/usr/include/wolfssl/wolfcrypt/asn_public.h:35,
- from target-x86_64_musl/usr/include/wolfssl/ssl.h:35,
- from conftest.c:47:
- target-x86_64_musl/usr/include/wolfssl/wolfcrypt/integer.h:37:14: fatal error: wolfssl/wolfcrypt/sp_int.h: No such file or directory
- #include <wolfssl/wolfcrypt/sp_int.h>
- ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
- compilation terminated.
-
- and in the end thus produces curl without https support:
-
- curl: (1) Protocol "https" not supported or disabled in libcurl
-
- So fix it by making a working wolfSSL mandatory and erroring out in the
- configure step when that's not the case:
-
- checking for wolfSSL_Init in -lwolfssl... no
- configure: error: --with-wolfssl but wolfSSL was not found or doesn't work
-
- References: https://github.com/openwrt/packages/issues/19005
- References: https://github.com/openwrt/packages/issues/19547
- Signed-off-by: Petr Štetiar <ynezz@true.cz>
-
- Closes #9682
-
-Daniel Stenberg (10 Oct 2022)
-
-- tool_getparam: pass in the snprintf("%.*s") string length as 'int'
-
- Reported by Coverity CID 1515928
-
- Closes #9679
-
-Paul Seligman (9 Oct 2022)
-
-- ws: minor fixes for web sockets without the CONNECT_ONLY flag
-
- - Fixed an issue where is_in_callback was getting cleared when using web
- sockets with debug logging enabled
- - Ensure the handle has is_in_callback set when calling out to fwrite_func
- - Change the write vs. send_data decision to whether or not the handle
- is in CONNECT_ONLY mode.
- - Account for buflen not including the header length in curl_ws_send
-
- Closes #9665
-
-Marc Hoersken (8 Oct 2022)
-
-- CI/cirrus: merge existing macOS jobs into a job matrix
-
- Ref: #9627
- Reviewed-by: Philip H.
-
- Closes #9672
-
-Daniel Stenberg (8 Oct 2022)
-
-- strcase: add and use Curl_timestrcmp
-
- This is a strcmp() alternative function for comparing "secrets",
- designed to take the same time no matter the content to not leak
- match/non-match info to observers based on how fast it is.
-
- The time this function takes is only a function of the shortest input
- string.
-
- Reported-by: Trail of Bits
-
- Closes #9658
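-
- The usual shape of such a function accumulates differences with bitwise
- OR instead of returning at the first mismatch. A minimal sketch of the
- idea (not the actual Curl_timestrcmp implementation):
-
- ```c
- #include <stddef.h>
-
- /* compare two secrets without leaking where they differ: the loop never
-    exits early on a mismatch, only when the shorter string ends */
- static int timing_safe_strcmp(const char *a, const char *b)
- {
-   unsigned char diff = 0;
-   size_t i = 0;
-
-   if(!a || !b)
-     return 1; /* treat NULL as a mismatch */
-
-   for(;;) {
-     diff |= (unsigned char)a[i] ^ (unsigned char)b[i];
-     if(!a[i] || !b[i])
-       break;
-     i++;
-   }
-   return diff != 0; /* zero means the strings matched */
- }
- ```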
-
-- tool_getparam: split out data_urlencode() into its own function
-
- Closes #9673
-
-- connect: fix Curl_updateconninfo for TRNSPRT_UNIX
-
- Reported-by: Vasiliy Ulyanov
- Fixes #9664
- Closes #9670
-
-- ws: fix Coverity complaints
-
- Coverity pointed out several flaws where variables remained
- uninitialized after forks.
-
- Follow-up to e3f335148adc6742728f
-
- Closes #9666
-
-Marc Hoersken (7 Oct 2022)
-
-- CI/GHA: merge msh3 and openssl3 builds into linux workflow
-
- Continue work on merging all Linux workflows into one file.
-
- Follow up to #9501
- Closes #9646
-
-Daniel Stenberg (7 Oct 2022)
-
-- curl_ws_send.3: call the argument 'fragsize'
-
- Since WebSocket works with "fragments" not "frames"
-
- Closes #9668
-
-- easy: avoid Intel error #2312: pointer cast involving 64-bit pointed-to type
-
- Follow-up to e3f335148adc6742728ff8
-
- Closes #9669
-
-- tool_main: exit at once if out of file descriptors
-
- If the main_checkfds function cannot create new file descriptors in an
- attempt to detect if stdin, stdout or stderr are closed.
-
- Also changed the check to use fcntl() to check if the descriptors are
- open, which avoids superfluously calling pipe() if they all already are.
-
- Follow-up to facfa19cdd4d0094
-
- Reported-by: Trail of Bits
-
- Closes #9663
-
-- websockets: remodeled API to support 63 bit frame sizes
-
- curl_ws_recv() now receives data to fill up the provided buffer, but can
- return a partial fragment. The function now also gets a pointer to a
- curl_ws_frame struct with metadata that also mentions the offset and
- total size of the fragment (of which you might be receiving a smaller
- piece). This way, large incoming fragments will be "streamed" to the
- application. When the curl_ws_frame struct field 'bytesleft' is 0, the
- final fragment piece has been delivered.
-
- curl_ws_recv() was also adjusted to work with a buffer size smaller than
- the fragment size. (Possibly needless to say as the fragment size can
- now be 63 bit large).
-
- curl_ws_send() now supports sending a piece of a fragment, in a
- streaming manner, in addition to sending the entire fragment in a single
- call if it is small enough. To send a huge fragment, curl_ws_send() can
- be used to send it in many small calls by first telling libcurl about
- the total expected fragment size, and then sending the payload in N
- separate invocations, and libcurl will stream those over the wire.
-
- The struct curl_ws_meta() returns is now called 'curl_ws_frame' and it
- has been extended with two new fields: *offset* and *bytesleft*. To help
- describe the passed on data chunk when a fragment is delivered in many
- smaller pieces.
-
- The documentation has been updated accordingly.
-
- Closes #9636
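-
- Receiving a large fragment in pieces with the remodeled API then looks
- roughly like this (a sketch against the interface described above;
- error handling such as CURLE_AGAIN is left out):
-
- ```c
- #include <stdio.h>
- #include <curl/curl.h>
-
- /* drain one incoming fragment in buffer-sized pieces; 'curl' is an easy
-    handle already connected in WebSocket CONNECT_ONLY mode */
- static CURLcode recv_fragment(CURL *curl)
- {
-   char buf[256];
-   const struct curl_ws_frame *meta;
-   size_t nread;
-   CURLcode result;
-
-   do {
-     result = curl_ws_recv(curl, buf, sizeof(buf), &nread, &meta);
-     if(result)
-       return result;
-     fprintf(stderr, "got %u bytes at offset %ld, %ld left\n",
-             (unsigned int)nread, (long)meta->offset,
-             (long)meta->bytesleft);
-   } while(meta->bytesleft); /* 0: the final piece has been delivered */
-
-   return CURLE_OK;
- }
- ```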
-
-Patrick Monnerat (7 Oct 2022)
-
-- docs/examples: avoid deprecated options in examples where possible
-
- Example programs targeting a deprecated feature/option are commented with
- a warning about it.
- Other examples are adapted to not use deprecated options.
-
- Closes #9661
-
-Viktor Szakats (6 Oct 2022)
-
-- cmake: fix enabling websocket support
-
- Follow-up from 664249d095275ec532f55dd1752d80c8c1093a77
-
- Closes #9660
-
-- tidy-up: delete parallel/unused feature flags
-
- Detecting headers and lib separately makes sense when headers come in
- variations or with extra ones, but this wasn't the case here. These were
- duplicate/parallel macros that we had to keep in sync with each other
- for a working build. This patch leaves a single macro for each of these
- dependencies:
-
- - Rely on `HAVE_LIBZ`, delete parallel `HAVE_ZLIB_H`.
-
- Also delete CMake logic making sure these two were in sync, along with
- a toggle to turn off that logic, called `CURL_SPECIAL_LIBZ`.
-
- Also delete stray `HAVE_ZLIB` defines.
-
- There is also a `USE_ZLIB` variant in `lib/config-dos.h`. This patch
- retains it for compatibility and deprecates it.
-
- - Rely on `USE_LIBSSH2`, delete parallel `HAVE_LIBSSH2_H`.
-
- Also delete `LIBSSH2_WIN32`, `LIBSSH2_LIBRARY` from
- `winbuild/MakefileBuild.vc`, these have a role when building libssh2
- itself. And `CURL_USE_LIBSSH`, which had no use at all.
-
- Also delete stray `HAVE_LIBSSH2` defines.
-
- - Rely on `USE_LIBSSH`, delete parallel `HAVE_LIBSSH_LIBSSH_H`.
-
- Also delete `LIBSSH_WIN32`, `LIBSSH_LIBRARY` and `HAVE_LIBSSH` from
- `winbuild/MakefileBuild.vc`, these were the result of copy-pasting the
- libssh2 line, and were not having any use.
-
- - Delete unused `HAVE_LIBPSL_H` and `HAVE_LIBPSL`.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9652
-
-Daniel Stenberg (6 Oct 2022)
-
-- netrc: compare user name case sensitively
-
- User name comparisions in netrc need to match the case.
-
- Closes #9657
-
-- CURLOPT_COOKIEFILE: insist on "" for enable-without-file
-
- The former wording also suggested using a non-existing file to just
- enable the cookie engine. That could lead to developers a bit carelessly
- guessing a file name that will not exist, and then in the future, due to
- circumstances, such a file could be made to exist and libcurl would
- accidentally read cookies not actually meant for it.
-
- Reported-by: Trail of bits
-
- Closes #9654
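-
- In application code the "enable without a file" case then simply reads
- (minimal sketch):
-
- ```c
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
-     /* a blank string enables the cookie engine without trying to read
-        cookies from any file on disk */
-     curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "");
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```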
-
-- tests/Makefile: remove run time stats from ci-test
-
- The ci-test is the normal makefile target invoked in CI jobs. This has
- been using the -r option to runtests.pl since a long time, but I find
- that it mostly just adds many lines to the test output report without
- anyone caring much about those stats.
-
- Remove it.
-
- Closes #9656
-
-Patrick Monnerat (6 Oct 2022)
-
-- tool: reorganize function c_escape around a dynbuf
-
- This is a bit shorter and a lot safer.
-
- Substrings of unescaped characters are added by a single call to reduce
- overhead.
-
- Extend test 1465 to handle more kind of escapes.
-
- Closes #9653
-
-Jay Satiro (5 Oct 2022)
-
-- CURLOPT_HTTPPOST.3: bolden the deprecation notice
-
- Ref: https://github.com/curl/curl/pull/9621
-
- Closes https://github.com/curl/curl/pull/9637
-
-John Bampton (5 Oct 2022)
-
-- misc: fix spelling in docs and comments
-
- also: remove outdated sentence
-
- Closes #9644
-
-Patrick Monnerat (5 Oct 2022)
-
-- tool: avoid generating ambiguous escaped characters in --libcurl
-
- C string hexadecimal-escaped characters may have more than 2 digits.
- This results in a wrong C compiler interpretation of a 2-digit escaped
- character when followed by a hex digit character.
-
- The solution retained here is to represent such characters as 3-digit
- octal escapes.
-
- Adjust and extend test 1465 for this case.
-
- Closes #9643
-
-Daniel Stenberg (5 Oct 2022)
-
-- configure: the ngtcp2 option should default to 'no'
-
- While still experimental.
-
- Bug: https://curl.se/mail/lib-2022-10/0007.html
- Reported-by: Daniel Hallberg
-
- Closes #9650
-
-- CURLOPT_MIMEPOST.3: add an (inline) example
-
- Reported-by: Jay Satiro
- Bug: https://github.com/curl/curl/pull/9637#issuecomment-1268070723
-
- Closes #9649
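-
- The gist of such an example is along these lines (a typical mime post
- sketch; URL and field values are placeholders):
-
- ```c
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     curl_mime *mime = curl_mime_init(curl);
-     curl_mimepart *part = curl_mime_addpart(mime);
-
-     /* one simple name/data form field */
-     curl_mime_name(part, "name");
-     curl_mime_data(part, "daniel", CURL_ZERO_TERMINATED);
-
-     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/upload");
-     curl_easy_setopt(curl, CURLOPT_MIMEPOST, mime);
-     curl_easy_perform(curl);
-
-     curl_mime_free(mime);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```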
-
-Viktor Szakats (5 Oct 2022)
-
-- Makefile.m32: exclude libs & libpaths for shared mode exes [ci skip]
-
- Exclude linker flags specifying dependency libs and libpaths when
- building against `libcurl.dll`. In that case these options are not
- necessary (but may cause errors if not/wrongly configured).
-
- Also move and reword a comment on `CPPFLAGS` to not apply to
- `UNICODE` options. These are necessary for all build targets.
-
- Closes #9651
-
-Jay Satiro (5 Oct 2022)
-
-- runtests: fix uninitialized value on ignored tests
-
- - Don't show TESTFAIL message (ie tests failed which aren't ignored) if
- only ignored tests failed.
-
- Before:
- IGNORED: failed tests: 571 612 1056
- TESTDONE: 1214 tests out of 1217 reported OK: 99%
- Use of uninitialized value $failed in concatenation (.) or string at
- ./runtests.pl line 6290.
- TESTFAIL: These test cases failed:
-
- After:
- IGNORED: failed tests: 571 612 1056
- TESTDONE: 1214 tests out of 1217 reported OK: 99%
-
- Closes https://github.com/curl/curl/pull/9648
-
-- cirrus: use make LDFLAGS=-all-static instead of curl_LDFLAGS
-
- - Correct the use of -all-static for static Windows CI builds.
-
- curl_LDFLAGS was removed from the makefile when metalink support was
- removed. LDFLAGS=-all-static is passed to make only, because it is not a
- valid option for configure compilation tests.
-
- Closes https://github.com/curl/curl/pull/9633
-
-Viktor Szakats (4 Oct 2022)
-
-- Makefile.m32: fix regression with tool_hugehelp [ci skip]
-
- In a recent commit I mistakenly deleted this logic, after seeing a
- reference to a filename ending with `.cvs` and thinking it must have
- been long gone. Turns out this is an existing file. Restore the rule
- and the necessary `COPY` definitions with it.
-
- The restored logic is required for a successful build on a bare source
- tree (as opposed to a source release tarball).
-
- Also shorten an existing condition similar to the one added in this
- patch.
-
- Regression since 07a0047882dd3f1fbf73486c5dd9c15370877ad6
-
- Closes #9645
-
-- Makefile.m32: deduplicate build rules [ci skip]
-
- After this patch, we reduce the three copies of most `Makefile.m32`
- logic to one. This now resides in `lib/Makefile.m32`. It makes future
- updates easier, the code shorter, with a small amount of added
- complexity.
-
- `Makefile.m32` reduction:
-
- | | bytes | LOC total | blank | comment | code |
- |-------------------|-------:|----------:|-------:|---------:|------:|
- | 7.85.0 | 34772 | 1337 | 79 | 192 | 1066 |
- | before this patch | 17601 | 625 | 62 | 106 | 457 |
- | after this patch | 11680 | 392 | 52 | 104 | 236 |
-
- Details:
-
- - Change rules to create objects for the `v*` subdirs in the `lib` dir.
- This allows using a shared compile rule and assumes that filenames
- are not (and will not be) colliding across these directories.
- `Makefile.m32` now also stores a list of these subdirs. They are
- changing rarely though.
-
- - Sync as much as possible between the three `Makefile.m32` scripts'
- rules and their source/target sections.
-
- - After this patch `CPPFLAGS` are all applied to the `src` sources once
- again. This matches the behaviour of cmake/autotools. Only zlib ones
- are actually required there.
-
- - Use `.rc` names from `Makefile.inc` instead of keeping a duplicate.
-
- - Change examples to link `libcurl.dll` by default. This makes building
- trivial, even as a cross-build:
- `CC=x86_64-w64-mingw32-gcc make -f Makefile.m32`
- To run them, you need to move/copy or add-to-path `libcurl.dll`.
- You can select static mode via `CFG=-static`.
-
- - List more of the `Makefile.m32` config variables.
-
- - Drop `.rc` support from examples. It made it fragile without much
- benefit.
-
- - Include a necessary system lib for the `externalsocket.c` example.
-
- - Exclude unnecessary systems libs when building in `-dyn` mode.
-
- Closes #9642
-
-Daniel Stenberg (4 Oct 2022)
-
-- RELEASE-NOTES: synced
-
-- CURLOPT_COOKIELIST.3: fix formatting mistake
-
- Also, updated manpage-syntax.pl to make it detect this error in test
- 1173.
-
- Reported-by: ProceduralMan on github
- Fixes #9639
- Closes #9640
-
-Jay Satiro (4 Oct 2022)
-
-- connect: change verbose IPv6 address:port to [address]:port
-
- - Use brackets for the IPv6 address shown in verbose message when the
- format is address:port so that it is less confusing.
-
- Before: Trying 2606:4700:4700::1111:443...
- After: Trying [2606:4700:4700::1111]:443...
-
- Bug: https://curl.se/mail/archive-2022-02/0041.html
- Reported-by: David Hu
-
- Closes #9635
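-
- Producing the bracketed form is essentially (an illustrative snippet,
- not the exact connect.c code):
-
- ```c
- #include <stdio.h>
- #include <string.h>
-
- /* print "address:port" for IPv4, "[address]:port" when the address
-    contains a colon and is therefore an IPv6 literal */
- static void print_trying(const char *addr, int port)
- {
-   const int ipv6 = (strchr(addr, ':') != NULL);
-   printf("  Trying %s%s%s:%d...\n",
-          ipv6 ? "[" : "", addr, ipv6 ? "]" : "", port);
- }
- ```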
-
-Viktor Szakats (3 Oct 2022)
-
-- Makefile.m32: major rework [ci skip]
-
- This patch overhauls `Makefile.m32` scripts, fixing a list of quirks,
- making its behaviour and customization envvars align better with other
- build systems, aiming for less code, that is easier to read, use and
- maintain.
-
- Details:
- - Rename customization envvars:
- `CURL_CC` -> `CC`
- `CURL_RC` -> `RC`
- `CURL_AR` -> `AR`
- `CURL_LDFLAG_EXTRAS_DLL` -> `CURL_LDFLAGS_LIB`
- `CURL_LDFLAG_EXTRAS_EXE` -> `CURL_LDFLAGS_BIN`
- - Drop `CURL_STRIP` and `CURL_RANLIB`. These tools are no longer used.
- - Accept `CFLAGS`, `CPPFLAGS`, `RCFLAGS`, `LDFLAGS` and `LIBS` envvars.
- - Drop `CURL_CFLAG_EXTRAS`, `CURL_LDFLAG_EXTRAS`, `CURL_RCFLAG_EXTRAS` in
- favor of the above.
- - Do not automatically enable `zlib` with `libssh2`. `zlib` is optional
- with `libssh2`.
- - Omit unnecessary `CPPFLAGS` options when building `curl.exe` and
- examples.
- - Drop support for deprecated `-winssl` `CFG` option. Use `-schannel`
- instead.
- - Avoid late evaluation where not necessary (`=` -> `:=`).
- - Drop support for `CURL_DLL_A_SUFFIX` to override the implib suffix.
- Instead, use the standard naming scheme by default: `libcurl.dll.a`.
- The toolchain recognizes the name, and selects it automatically when
- asking for a `-shared` vs. `-static` build.
- - Stop applying `strip` to `libcurl.a`. Follow-up from
- 16a58e9f93c7e89e1f87720199388bcfcfa148a4. There was no debug info to
- strip since then.
- - Stop setting `-O3`, `-W`, `-Wall` options. You can add these to
- `CFLAGS` as desired.
- - Always enable `-DCURL_DISABLE_OPENSSL_AUTO_LOAD_CONFIG` with OpenSSL,
- to avoid that vulnerability on Windows.
- - Add `-lbrotlicommon` to `LIBS` when using `brotli`.
- - Do not enable `-nghttp3` without `-ngtcp2`.
- - `-ssh2` and `-rtmp` options no longer try to auto-select a TLS-backend.
- You need to set the backend explicitly. This scales better and avoids
- issues with certain combinations (e.g. `libssh2` + `wolfssl` with no
- `schannel`).
- - Default to OpenSSL TLS-backend with `ngtcp2`. Possible to override via
- `NGTCP2_LIBS`.
- - Old, alternate method of enabling components (e.g. `SSH2=1`) no longer
- supported.
- - Delete `SPNEGO` references. They were no-ops.
- - Drop support for Win9x environments.
- - Allow setting `OPENSSL_LIBS` independently from `OPENSSL_LIBPATH`.
- - Support autotools/CMake `libssh2` builds by default.
- - Respect `CURL_DLL_SUFFIX` in `-dyn` mode when building `curl.exe` and
- examples.
- - Assume standard directory layout with `LIBCARES_PATH`. (Instead of the
- long gone embedded one.)
- - Stop static linking with c-ares by default. Add
- `CPPFLAGS=-DCARES_STATICLIB` to enable it.
- - Reorganize internal layout to avoid redundancy and emit clean diffs
- between src/lib and example make files.
- - Delete unused variables.
- - Code cleanups/rework.
- - Comment and indentation fixes.
-
- Closes #9632
-
-- scripts/release-notes.pl: strip ci skip tag [ci skip]
-
- Ref: https://github.com/curl/curl/commit/e604a82cae922bf86403a94f5803ac5e4303ae97#commitcomment-85637701
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9634
-
-- Makefile.m32: delete legacy component bits [ci skip]
-
- - Drop auto-detection of OpenSSL 1.0.2 and earlier. Now always defaulting
- to OpenSSL 1.1.0 and later, LibreSSL and BoringSSL.
-
- - Drop `Invalid path to OpenSSL package` detection. OpenSSL has been
- using a standard file layout since 1.1.0, so this seems unnecessary
- now.
-
- - Drop special logic to enable Novell LDAP SDK support.
-
- - Drop special logic to enable OpenLDAP LDAP SDK support. This seems
- to be distinct from native OpenLDAP, with support implemented inside
- `lib/ldap.c` (vs. `lib/openldap.c`) back when the latter did not exist
- yet in curl.
-
- - Add `-lwldap32` only if there is no other LDAP library (either native
- OpenLDAP, or SDKs above) present.
-
- - Update `doc/INSTALL.md` accordingly.
-
- After this patch, it's necessary to make configuration changes when using
- OpenSSL 1.0.2 or earlier, or the two LDAP SDKs.
-
- OpenSSL 1.0.2 and earlier:
- ```
- export OPENSSL_INCLUDE = <path-to-openssl>/outinc
- export OPENSSL_LIBPATH = <path-to-openssl>/out
- export OPENSSL_LIBS = -lssl32 -leay32 -lgdi32
- ```
-
- Novell LDAP SDK, previously enabled via `USE_LDAP_NOVELL=1`:
- ```
- export CURL_CFLAG_EXTRAS = -I<path-to-sdk>/inc -DCURL_HAS_NOVELL_LDAPSDK
- export CURL_LDFLAG_EXTRAS = -L<path-to-sdk>/lib/mscvc -lldapsdk -lldapssl -lldapx
- ```
-
- OpenLDAP LDAP SDK, previously enabled via `USE_LDAP_OPENLDAP=1`:
- ```
- export CURL_CFLAG_EXTRAS = -I<path-to-sdk>/include -DCURL_HAS_OPENLDAP_LDAPSDK
- export CURL_LDFLAG_EXTRAS = -L<path-to-sdk>/lib -lldap -llber
- ```
-
- I haven't tested these scenarios, and in general we recommend using
- a recent OpenSSL release. Also, WinLDAP (the Windows default) and
- OpenLDAP (via `-DUSE_OPENLDAP`) are the LDAP options actively worked on
- in curl.
-
- Closes #9631
-
-Daniel Stenberg (2 Oct 2022)
-
-- vauth/ntlm.h: make line shorter than 80 columns
-
- Follow-up from 265fbd937
-
-Viktor Szakats (1 Oct 2022)
-
-- docs: update sourceforge project links [ci skip]
-
- SourceForge projects can now choose between two hostnames, with .io and
- .net ending. Both support HTTPS by default now. Opening the other variant
- will be permanently redirected to the one chosen by the project.
-
- The .io -> .net redirection is done insecurely.
-
- Let's update the URLs to point to the current canonical endpoints to
- avoid any redirects.
-
- Closes #9630
-
-Daniel Stenberg (1 Oct 2022)
-
-- curl_url_set.3: document CURLU_APPENDQUERY proper
-
- Listed among the other supported flags.
-
- Reported-by: Robby Simpson
- Fixes #9628
- Closes #9629
-
-Viktor Szakats (1 Oct 2022)
-
-- Makefile.m32: cleanups and fixes [ci skip]
-
- - Add `-lcrypt32` once, and add it always for simplicity.
- - Delete broken link and reference to the pre-Vista WinIDN add-on.
- MS no longer distribute it.
- - Delete related `WINIDN_PATH` option. IDN is a system lib since Vista.
- - Sync `LIBCARES_PATH` default with the rest of dependencies.
- - Delete version numbers from dependency path defaults.
- - `libgsasl` package is now called `gsasl`.
- - Delete `libexpat` and `libxml2` references. No longer used by curl.
- - Delete `Edit the path below...` comments. We recommend to predefine
- those envvars instead.
- - `libcares.a` is not an internal dependency anymore. Stop using it as
- such.
- - `windres` `--include-dir` -> `-I`, `-F` -> `--target=` for readability.
- - Delete `STRIP`, `CURL_STRIP`, `AR` references from `src/Makefile.m32`.
- They were never used.
- - Stop to `clean` some objects twice in `src/Makefile.m32`.
- - Delete cvs-specific leftovers.
- - Finish resource support in examples make file.
- - Delete `-I<root>/lib` from examples make file.
- - Fix copyright start year in examples make file.
- - Delete duplicate `ftpuploadresume` input in examples make file.
- - Sync OpenSSL lib order, `SYNC` support, `PROOT` use, dependency path
- defaults, variables names and other internal bits between the three
- make files.
- - `lib/Makefile.m32` accepted custom options via `DLL_LIBS` envvar. This
- was lib-specific and possibly accidental. Use `CURL_LDFLAG_EXTRAS_DLL`
- envvar for the same effect.
- - Fix linking `curl.exe` and examples to wrong static libs with
- auto-detected OpenSSL 1.0.2 or earlier.
- - Add `-lgdi32` for OpenSSL 1.0.2 and earlier only.
- - Add link to Novell LDAP SDK and use a relative default path. Latest
- version is from 2016, linked to an outdated OpenSSL 1.0.1.
- - Whitespace and comment cleanups.
-
- TODO in a next commit:
-
- Delete built-in detection/logic for OpenSSL 1.0.2 and earlier, the Novell
- LDAP SDK and the other LDAP SDK (which is _not_ OpenLDAP). Write up the
- necessary custom envvars to configure them.
-
- Closes #9616
-
-Daniel Stenberg (30 Sep 2022)
-
-- RELEASE-NOTES: synced
-
-Matt Holt (30 Sep 2022)
-
-- HTTP3.md: update Caddy example
-
- Closes #9623
-
-Daniel Stenberg (30 Sep 2022)
-
-- easy: fix the altsvc init for curl_easy_duphandle
-
- It was using the old #ifdef which nothing sets anymore
-
- Closes #9624
-
-- GHA: build tests in a separate step from the running of them
-
- ... to make the output smaller for when you want to look at test
- failures.
-
- Removed the examples build from msh3
-
- Closes #9619
-
-Viktor Szakats (29 Sep 2022)
-
-- ldap: delete stray CURL_HAS_MOZILLA_LDAP reference
-
- Added in 68b215157fdf69612edebdb220b3804822277822, while adding openldap
- support. This is also the single mention of this constant in the source
- tree and also in that commit. Based on these, it seems like an accident.
-
- Delete this reference.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9625
-
-- docs: spelling nits
-
- - MingW -> MinGW (Minimalist GNU for Windows)
- - f.e. -> e.g.
- - some whitespace and punctuation.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9622
-
-Philip Heiduck (29 Sep 2022)
-
-- cirrus-ci: add macOS build with m1
-
- Signed-off-by: Philip H <47042125+pheiduck@users.noreply.github.com>
-
- Closes #9565
-
-Patrick Monnerat (29 Sep 2022)
-
-- lib: sanitize conditional exclusion around MIME
-
- The introduction of CURL_DISABLE_MIME came with some additional bugs:
- - Disabled MIME is compiled-in anyway if SMTP and/or IMAP is enabled.
- - CURLOPT_MIMEPOST, CURLOPT_MIME_OPTIONS and CURLOPT_HTTPHEADER are
- conditioned on HTTP, although also needed for SMTP and IMAP MIME mail
- uploads.
-
- In addition, the CURLOPT_HTTPHEADER and --header documentation does not
- mention their use for MIME mail.
-
- This commit fixes the problems above.
-
- Closes #9610
-
-Thiago Suchorski (29 Sep 2022)
-
-- docs: minor grammar fixes
-
- Closes #9609
-
-Daniel Stenberg (28 Sep 2022)
-
-- CURLSHOPT_UNLOCKFUNC.3: the callback has no 'access' argument
-
- Probably a copy and paste error from the lock function man page.
-
- Reported-by: Robby Simpson
- Fixes #9612
- Closes #9613
-
-- CURLOPT_ACCEPT_ENCODING.3: remove "four" as they are five
-
- ... instead just list the supported encodings.
-
- Reported-by: ProceduralMan on github
- Fixes #9614
- Closes #9615
-
-Dan Fandrich (28 Sep 2022)
-
-- tests: Remove a duplicated keyword
-
-- docs: document more server names for test files
-
-Daniel Stenberg (28 Sep 2022)
-
-- altsvc: reject bad port numbers
-
- The existing code tried but did not properly reject alternative services
- using negative or too large port numbers.
-
- With this fix, the logic now also flushes the old entries immediately
- before adding a new one, so that a following header with an illegal entry
- does not flush the already stored entry.
-
- Report from the ongoing source code audit by Trail of Bits.
-
- Adjusted test 356 to verify.
-
- Closes #9607
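-
- A minimal sketch of the kind of range check involved (illustration only,
- not curl's actual parser; the helper name is made up):
-
- ```
- #include <stdlib.h>
- #include <errno.h>
-
- /* return the port as 1..65535, or -1 for anything invalid */
- static int parse_port(const char *p)
- {
-   char *end = NULL;
-   long val;
-   errno = 0;
-   val = strtol(p, &end, 10);
-   if(errno || (end == p) || *end || (val < 1) || (val > 65535))
-     return -1;
-   return (int)val;
- }
- ```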
-
-- functypes: provide the recv and send arg and return types
-
- This header is for providing the argument types for recv() and send()
- when built without a dedicated config-[platform].h file.
-
- Remove the slow brute-force checks from configure and cmake.
-
- This change also removes the use of the types for select, as they were
- not used in code.
-
- Closes #9592
-
-- urlapi: reject more bad characters from the host name field
-
- Extended test 1560 to verify
-
- Report from the ongoing source code audit by Trail of Bits.
-
- Closes #9608
-
-- configure: deprecate builds with small curl_off_t
-
- If curl_off_t turns out to be smaller than 8 bytes,
- --with-n64-deprecated needs to be used to allow the build to
- continue. This is to highlight the fact that support for such builds is
- going away next year.
-
- Also mentioned in DEPRECATED.md
-
- Closes #9605
-
-Patrick Monnerat (27 Sep 2022)
-
-- http, vauth: always provide Curl_allow_auth_to_host() functionality
-
- This function is currently located in the lib/http.c module and is
- therefore disabled by the CURL_DISABLE_HTTP conditional token.
-
- As it may be called by TLS backends, disabling HTTP results in an
- undefined reference error at link time.
-
- Move this function to vauth/vauth.c to always provide it and rename it
- as Curl_auth_allowed_to_host() to respect the vauth module naming
- convention.
-
- Closes #9600
-
-Daniel Stenberg (27 Sep 2022)
-
-- ngtcp2: fix C89 compliance nit
-
-- openssl: make certinfo available for QUIC
-
- Curl_ossl_certchain() is now an exported function in lib/vtls/openssl.c that
- can also be used from quiche.c and ngtcp2.c to get the cert chain for QUIC
- connections as well.
-
- The *certchain function was moved to the top of the file for this reason.
-
- Reported-by: Eloy Degen
- Fixes #9584
- Closes #9597
-
-- RELEASE-NOTES: synced
-
-- DEPRECATE.md: Support for systems without 64 bit data types
-
- Closes #9604
-
-Patrick Monnerat (27 Sep 2022)
-
-- tests: skip mime/form tests when mime is not built-in
-
- Closes #9596
-
-Daniel Stenberg (27 Sep 2022)
-
-- url: rename function due to name-clash in Watt-32
-
- Follow-up to 2481dbe5f4f58 and applies the change the way it was
- intended.
-
-Viktor Szakats (26 Sep 2022)
-
-- windows: adjust name of two internal public functions
-
- According to `docs/INTERNALS.md`, internal function names spanning source
- files start with uppercase `Curl_`. Bring these two functions in
- alignment with this.
-
- This also stops exporting them from `libcurl.dll` in autotools builds.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9598
-
-Gisle Vanem (26 Sep 2022)
-
-- url: rename function due to name-clash in Watt-32
-
- Since the commit 764c958c52edb427f39, there was a new function called
- resolve_ip(). This clashes with an internal function in Watt-32.
-
- Closes #9585
-
-Jay Satiro (26 Sep 2022)
-
-- schannel: ban server ALPN change during recv renegotiation
-
- By the time schannel_recv is renegotiating the connection, libcurl has
- already decided on a protocol and it is too late for the server to
- select a protocol via ALPN except for the originally selected protocol.
-
- Ref: https://github.com/curl/curl/issues/9451
-
- Closes https://github.com/curl/curl/pull/9463
-
-Daniel Stenberg (26 Sep 2022)
-
-- url: a zero-length userinfo part in the URL is still a (blank) user
-
- Adjusted test 1560 to verify
-
- Reported-by: Jay Satiro
-
- Fixes #9088
- Closes #9590
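-
- A small standalone illustration using the URL API (not from the curl
- sources; the URL is a placeholder):
-
- ```
- #include <stdio.h>
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURLU *u = curl_url();
-   char *user = NULL;
-   curl_url_set(u, CURLUPART_URL, "https://@example.com/", 0);
-   /* expected after this fix: CURLUE_OK with an empty string, rather
-      than "no user" */
-   if(curl_url_get(u, CURLUPART_USER, &user, 0) == CURLUE_OK)
-     printf("user: '%s'\n", user);
-   curl_free(user);
-   curl_url_cleanup(u);
-   return 0;
- }
- ```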
-
-Viktor Szakats (25 Sep 2022)
-
-- autotools: allow --enable-symbol-hiding with windows
-
- This local autotools logic was put in place in
- 9e24b9c7afbcb81120af4cf3f6cdee49a06d8224 (in 2012) which disabled it for
- Windows unconditionally. Testing reveals that it actually works with
- tested toolchains (mingw-w64 and CI ones), so let's allow this build
- feature on that platform. Bringing this in sync with CMake, which already
- supported this.
-
- Reviewed-by: Jay Satiro
-
- Closes #9586
-
-- autotools: reduce brute-force when detecting recv/send arg list
-
- autotools uses brute-force to detect `recv`/`send`/`select` argument
- lists, by iterating through _all_ argument type combinations on each
- `./configure` run. This logic exists since
- 01fa02d0b545e1433dced2430561f8c0c72b74a9 (from 2006) and was a bit later
- extended with Windows support.
-
- This results in a worst-case number of compile + link cycles as below:
- - `recv`: 96
- - `send`: 192
- - `select`: 60
- Total: 348 (the number of curl C source files is 195, for comparison)
-
- Notice that e.g. curl-for-win autotools builds require two `./configure`
- invocations, doubling these numbers.
-
- `recv` on Windows was especially unlucky because `SOCKET` (the correct
- choice there) was listed _last_ in one of the outer trial loops. This
- resulted in lengthy waits while autotools was trying all invalid
- combinations first, wasting cycles, disk writes and slowing down
- iteration.
-
- This patch reduces the amount of idle work by reordering the tests in
- a way to succeed first on a well-known platform such as Windows, and
- also on non-Windows by testing for POSIX prototypes first, on the
- assumption that these are the most likely candidates these days. (We do
- not touch `select`, where the order was already optimal for these
- platforms.)
-
- For non-Windows, this means to try a return value of `ssize_t` first,
- then `int`, reordering the buffer argument type to try `void *` first,
- then `byte *`, and prefer the `const` flavor with `send`. While here,
- also stop testing for the `SOCKET` type in non-Windows builds.
-
- After the patch, detection on Windows is instantaneous. It should also be
- faster on popular platforms such as Linux and BSD-based ones.
-
- If there are known-good variations for other platforms, they can also be
- fast-tracked like above, given a way to check for that platform inside
- the autotools logic.
-
- Reviewed-by: Daniel Stenberg
-
- Closes #9591
-
-Daniel Stenberg (23 Sep 2022)
-
-- TODO: Provide the error body from a CONNECT response
-
- Spellchecked-by: Jay Satiro
-
- Closes #9513
- Closes #9581
-
-Viktor Szakats (23 Sep 2022)
-
-- windows: autotools .rc warnings fixup
-
- Move `LT_LANG([Windows Resource])` after `XC_LIBTOOL`, fixing:
-
- - Warnings when running `autoreconf -fi`.
-
- - Warning when compiling .rc files:
- libtool: compile: unable to infer tagged configuration
- libtool: error: specify a tag with '--tag'
-
- Follow up to 6de7322c03d5b4d91576a7d9fc893e03cc9d1057
- Ref: https://github.com/curl/curl/pull/9521#issuecomment-1256291156
-
- Suggested-by: Patrick Monnerat
- Closes #9582
-
-Randall S. Becker (23 Sep 2022)
-
-- curl_setup: disable use of FLOSS for 64-bit NonStop builds
-
- Older 32-bit builds currently need FLOSS. This dependency may be removed
- in future OS releases.
-
- Signed-off-by: Randall S. Becker <randall.becker@nexbridge.ca>
-
- Closes #9575
-
-Patrick Monnerat (23 Sep 2022)
-
-- tool: remove dead code
-
- Add a debug assertion to verify protocols included/excluded in a set
- are always tokenized.
-
- Follow-up to commit 677266c.
-
- Closes #9576
-
-- lib: prepare the incoming of additional protocols
-
- Move the curl_prot_t to its own conditional block. Introduce symbol
- PROTO_TYPE_SMALL to control it.
-
- Fix a cast in a curl_prot_t assignment.
- Remove an outdated comment.
-
- Follow-up to cd5ca80.
-
- Closes #9534
-
-Daniel Stenberg (23 Sep 2022)
-
-- msh3: change the static_assert to make the code C89
-
-- bearssl: make it proper C89 compliant
-
-- curl-compilers.m4: for gcc + want warnings, set gnu89 standard
-
- To better verify that the code is C89
-
- Closes #9542
-
-Patrick Monnerat (22 Sep 2022)
-
-- lib517: fix C89 constant signedness
-
- In C90, a positive decimal constant that overflows an int but fits in an
- unsigned int silently gets an unsigned type, unlike in later C standards.
-
- lib517.c:129:3: warning: this decimal constant is unsigned only in ISO C90
- {"Sun, 06 Nov 2044 08:49:37 GMT", 2362034977 },
- ^
-
- Closes #9572
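-
- A tiny illustration of the ambiguity and one way to avoid it (a sketch,
- not necessarily the exact fix applied in lib517):
-
- ```
- #include <stdio.h>
-
- int main(void)
- {
-   /* 2362034977 overflows a 32-bit int; in C90 the constant silently
-      becomes unsigned, which later standards do not do. An explicit
-      suffix keeps the intent clear either way. */
-   unsigned long stamp = 2362034977UL;
-   printf("%lu\n", stamp);
-   return 0;
- }
- ```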
-
-Daniel Stenberg (22 Sep 2022)
-
-- mprintf: use snprintf if available
-
- This is the single place in libcurl code where it uses the "native"
- s(n)printf() function. Used for writing floats. The use has been
- reviewed and vetted and uses a HUGE target buffer, but switching to
- snprintf() still makes this safer and removes build-time warnings.
-
- Reported-by: Philip Heiduck
-
- Fixes #9569
- Closes #9570
-
-- docs: tag curl options better in man pages
-
- As it makes them links in the HTML versions.
-
- Verified by the extended test 1176
-
-- symbols-in-versions: CURLOPT_ENCODING is deprecated since 7.21.6
-
-- manpage-syntax.pl: all libcurl option symbols should be \fI-tagged
-
- ... as that makes them links to their corresponding man page.
-
- This script is used for test 1173.
-
- Closes #9574
-
-- RELEASE-NOTES: synced
-
-Patrick Monnerat (22 Sep 2022)
-
-- tool: remove protocol count limitation
-
- Replace bit mask protocol sets by null-terminated arrays of protocol
- tokens. These are the addresses of the protocol names returned by
- curl_version_info().
-
- Protocol names are sorted case-insensitively before output so that CI
- test matches stay consistent.
-
- The protocol list returned by curl_version_info() is augmented with all
- RTMP protocol variants.
-
- Test 1401 adjusted for new alpha ordered output.
-
- Closes #9546
-
-Daniel Stenberg (22 Sep 2022)
-
-- test972: verify the output without using external tool
-
- It seems too restrictive to assume and use an external tool to verify
- the JSON. This now verifies the output byte by byte. We could consider
- building a local "JSON verifier" in the future.
-
- Remove 'jsonlint' from the CI job.
-
- Reported-by: Marcel Raad
- Fixes #9563
- Closes #9564
-
-- hostip: lazily wait to figure out if IPv6 works until needed
-
- The check may take many milliseconds, so now it is performed once the
- value is first needed. Also, this change makes sure that the value is
- not used if the resolve is set to be IPv4-only.
-
- Closes #9553
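-
- A simplified sketch of the lazy-probe pattern (POSIX-only, not
- thread-safe, and not curl's actual implementation):
-
- ```
- #include <sys/socket.h>
- #include <unistd.h>
-
- /* probe IPv6 support only when first asked for, then cache the answer */
- static int ipv6_works(void)
- {
-   static int cached = -1; /* -1 means "not probed yet" */
-   if(cached == -1) {
-     int s = socket(AF_INET6, SOCK_DGRAM, 0);
-     cached = (s >= 0);
-     if(s >= 0)
-       close(s);
-   }
-   return cached;
- }
- ```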
-
-- curl.h: fix mention of wrong error code in comment
-
- The same error and comment were also used and is now corrected in
- CURLOPT_SSH_KEYFUNCTION.3
-
-- symbol-scan.pl: scan and verify .3 man pages
-
- This script now also finds all .3 man pages in docs/include and
- docs/include/opts, extracts all uses of CURL* symbols and verifies that all
- symbols mentioned in docs are defined in public headers.
-
- A "global symbol" is one of those matching a known prefix and the script make
- s
- an attempt to check all/most of them. Just using *all* symbols that match
- CURL* proved matching a little too many other references as well and turned
- difficult turning into something useful.
-
- Closes #9544
-
-- symbols-in-versions: add missing LIBCURL* symbols
-
-- symbol-scan.pl: also check for LIBCURL* symbols
-
- Closes #9544
-
-- docs/libcurl/symbols-in-versions: add several missing symbols
-
-- test1119: scan all public headers
-
- Previously this test only scanned a subset of the headers, which made us
- accidentally miss symbols that were provided in the others. Now, the script
- iterates over all headers present in include/curl.
-
- Closes #9544
-
-Patrick Monnerat (21 Sep 2022)
-
-- examples/chkspeed: improve portability
-
- The example program chkspeed uses strncasecmp() which is not portable
- across systems. Replace calls to this function by tests on characters.
-
- Closes #9562
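-
- The approach, roughly (illustration only; the helper name is made up):
-
- ```
- #include <ctype.h>
-
- /* case-insensitive prefix test done character by character */
- static int has_prefix_ci(const char *s, const char *prefix)
- {
-   while(*prefix) {
-     if(tolower((unsigned char)*s) != tolower((unsigned char)*prefix))
-       return 0;
-     s++;
-     prefix++;
-   }
-   return 1;
- }
- ```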
-
-Daniel Stenberg (21 Sep 2022)
-
-- easy: fix the #include order
-
- The mentioned "last 3 includes" order should be respected. easy_lock.h should
- be included before those three.
-
- Reported-by: Yuriy Chernyshov
- Fixes #9560
- Closes #9561
-
-- docs: spellfixes
-
- Pointed by the new CI job
-
-- GHA: spellcheck
-
- This spellchecker checks markdown files. For this reason this job
- converts all man pages in the repository to markdown with pandoc before
- the check runs.
-
- The perl script 'cleanspell' filters out details from the man page in
- the process, to avoid the spellchecker trying to spellcheck things it
- can't. Like curl specific symbols and the SYNOPSIS and EXAMPLE sections
- of libcurl man pages.
-
- The spell checker does not check words in sections that are within pre,
- strong and em tags.
-
- 'spellcheck.words' is a custom word list with additional accepted words.
-
- Closes #9523
-
-- connect: fix the wrong error message on connect failures
-
- The "Failed to connect to" message after a connection failure would
- include the strerror message based on the presumed previous socket
- error, but at times it seems that the error number is not set when reaching
- this code and therefore it would include the wrong error message.
-
- The strerror message is now removed from here and the curl_easy_strerror
- error is used instead.
-
- Reported-by: Edoardo Lolletti
- Fixes #9549
- Closes #9554
-
-- httpput-postfields.c: shorten string for C89 compliance
-
- httpput-postfields.c:41:3: error: string length ‘522’ is greater than the
- length ‘509’ ISO C90 compilers are required to support [-Woverlength-strings]
- 41 | "this chapter.";
- | ^~~~~~~~~~~~~~~
-
- Closes #9555
-
-- ws: fix a C89 compliance nit
-
- Closes #9541
-
-Patrick Monnerat (21 Sep 2022)
-
-- unit test 1655: make it C89-compliant
-
- Initializations performed in unit test 1655 use automatic variables in
- aggregates and thus can only be computed at run-time. Using gcc in C89
- dialect mode produces warning messages like:
-
- unit1655.c:96:7: warning: initializer element is not computable at load time
- [-Wpedantic]
- 96 | { toolong, DOH_DNS_NAME_TOO_LONG }, /* expect early failure */
- | ^~~~~~~
-
- Fix the problem by converting these automatic pointer variables to
- static arrays.
-
- Closes #9551
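-
- A condensed illustration of the warning and the fix (made-up data, not
- the test's actual tables):
-
- ```
- struct testcase { const char *name; int code; };
-
- void broken(void)
- {
-   char toolong[300] = {0};
-   /* C89: the address of an automatic variable is not a constant
-      expression, so -Wpedantic flags this initializer */
-   struct testcase cases[] = { { toolong, 1 } };
-   (void)cases;
- }
-
- /* the fix: static storage, whose address is a load-time constant */
- static char toolong2[300];
- static struct testcase fixed[] = { { toolong2, 1 } };
- ```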
-
-Tobias Schaefer (20 Sep 2022)
-
-- curl_strequal.3: fix typo
-
- Closes #9548
-
-Dmitry Karpov (20 Sep 2022)
-
-- resolve: make forced IPv4 resolve only use A queries
-
- This protects IPv4-only transfers from undesired bad IPv6-related side
- effects and make IPv4 transfers in dual-stack libcurl behave the same
- way as in IPv4 single-stack libcurl.
-
- Closes #9540
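-
- Usage example from the application side (the URL is a placeholder):
-
- ```
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
-     /* IPv4-only transfer: with this change only A queries are sent */
-     curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```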
-
-Daniel Stenberg (20 Sep 2022)
-
-- RELEASE-NOTES: synced
-
-- winbuild/MakefileBuild.vc: handle spaces in libssh(2) include paths
-
- Patched-by: Mark Itzcovitz
- Bug: https://curl.se/mail/lib-2022-09/0038.html
-
- Closes #9536
-
-- TODO: Reduce CA certificate bundle reparsing
-
- By adding some sort of cache.
-
- Reported-by: Michael Drake
- Closes #9379
- Closes #9538
-
-Marc Hoersken (19 Sep 2022)
-
-- CI/GHA: cancel outdated CI runs on new PR changes
-
- Avoid letting outdated CI runs continue if a PR receives
- new changes. Outside a PR we let them continue running
- by tying the concurrency to the commit hash instead.
-
- Also only let one CodeQL or Hacktoberfest job run at a time.
-
- Other CI platforms we use have this build in, but GitHub
- unfortunately neither by default nor with a simple option.
-
- This saves CI resources and therefore a little energy.
-
- Approved-by: Daniel Stenberg
- Approved-by: Max Dymond
- Closes #9533
-
-Daniel Stenberg (19 Sep 2022)
-
-- docs: fix proselint complaints
-
-- GHA: run proselint on markdown files
-
- Co-authored-by: Marc Hörsken
-
- Closes #9520
-
-- lib: the number four in a sequence is the "fourth"
-
- Spelling is hard
-
- Closes #9535
-
-John Bampton (19 Sep 2022)
-
-- misc: fix spelling in two source files
-
- Closes #9529
-
-Viktor Szakats (18 Sep 2022)
-
-- windows: add .rc support to autotools builds
-
- After this update autotools builds will compile and link `.rc` resources
- to Windows executables. Bringing this feature on par with CMake and
- Makefile.m32 builds. And also making it unnecessary to improvise these
- steps manually, while monkey patching build files, e.g. [0].
-
- You can customize the resource compiler via the `RC` envvar, and its
- options via `RCFLAGS`.
-
- This harmless warning may appear throughout the build, even though the
- autotools manual documents [1] `RC` as a valid tag, and it fails when
- omitting one:
- `libtool: error: ignoring unknown tag RC`
-
- [0] https://github.com/curl/curl-for-win/blob/535f19060d4b708f72e75dd849409ce50baa1b84/curl-autotools.sh#L376-L382
- [1] https://www.gnu.org/software/libtool/manual/html_node/Tags.html
-
- Closes #9521
-
-Marc Hoersken (18 Sep 2022)
-
-- CI/linkcheck: only run if a Markdown file is changed
-
- This saves CI resources and therefore a little energy.
-
- Reviewed-by: Max Dymond
- Closes #9531
-
-- README.md: add GHA status badges for Linux and macOS builds
-
- This makes sense now that Linux builds are being consolidated.
-
- Approved-by: Daniel Stenberg
- Closes #9530
-
- [skip ci]
-
-Daniel Stenberg (17 Sep 2022)
-
-- misc: null-terminate
-
- Make use of this term consistently.
-
- Closes #9527
-
-Marc Hoersken (17 Sep 2022)
-
-- CI/GHA: merge intel CC and more TLS libs into linux workflow
-
- Continue work on merging all Linux workflows into one file.
-
- Reviewed-by: Max Dymond
- Follow up to #9501
- Closes #9514
-
-Patrick Monnerat (17 Sep 2022)
-
-- lib1597: make it C89-compliant again
-
- Automatic variable addresses cannot be used in an initialisation
- aggregate.
-
- Follow-up to 9d51329
-
- Reported-by: Daniel Stenberg
- Fixes: #9524
- Closes #9525
-
-Daniel Stenberg (17 Sep 2022)
-
-- tool_libinfo: silence "different 'const' qualifiers" in qsort()
-
- MSVC 15.0.30729.1 warned about it
-
- Follow-up to dd2a024323dcc
-
- Closes #9522
-
-Patrick Monnerat (16 Sep 2022)
-
-- docs: tell about disabled protocols in CURLOPT_*PROTOCOLS_STR.
-
- Disabled protocols are now handled as if they were unknown.
- Also update the possible protocol list.
-
-- cli tool: do not use disabled protocols
-
- As they are now rejected by the library, take care of not passing
- disabled protocol names to CURLOPT_PROTOCOLS_STR and
- CURLOPT_REDIR_PROTOCOLS_STR.
-
- Rather than using the CURLPROTO_* constants, dynamically assign protocol
- numbers based on the order they are listed by curl_version_info().
-
- New type proto_set_t implements protocol bit masks: it should therefore
- be large enough to accommodate all library-enabled protocols. If not,
- protocol numbers beyond the bit count of proto_set_t are recognized but
- "inaccessible": when used, a warning is displayed and the value is
- ignored. Should proto_set_t overflow, enabled protocols are reordered to
- force those having a public CURLPROTO_* representation to be accessible.
-
- Code has been added to subordinate RTMP?* protocols to the presence of
- RTMP in the enabled protocol list, being returned by curl_version_info()
- or not.
-
-- setopt: use the handler table for protocol name to number conversions
-
- This also returns error CURLE_UNSUPPORTED_PROTOCOL rather than
- CURLE_BAD_FUNCTION_ARGUMENT when a listed protocol name is not found.
-
- A new schemelen parameter is added to Curl_builtin_scheme() to support
- this extended use.
-
- Note that disabled protocols are not recognized anymore.
-
- Tests adapted accordingly.
-
- Closes #9472
-
-Daniel Stenberg (16 Sep 2022)
-
-- altsvc: use 'h3' for h3
-
- Since the official and real version has been out for a while now and servers
- are deployed out there using it, there is no point in sticking to h3-29.
-
- Reported-by: ウさん
- Fixes #9515
- Closes #9516
-
-chemodax (16 Sep 2022)
-
-- winbuild: Use NMake batch-rules for compilation
-
- - Invoke cl compiler once for each group of .c files.
-
- This significantly improves compilation time. For example, in my
- environment: 40 s --> 20 s.
-
- Prior to this change cl was invoked per .c file.
-
- Closes https://github.com/curl/curl/pull/9512
-
-Daniel Stenberg (16 Sep 2022)
-
-- ws: the infof() flags should be %zu
-
- Follow-up to e5e9e0c5e49ae0
-
- Closes #9518
-
-- curl: warn for --ssl use, considered insecure
-
- Closes #9519
-
-Sergey Bronnikov (16 Sep 2022)
-
-- curl_escape.3: fix typo
-
- lengthf -> length
-
- Closes #9517
-
-Daniel Stenberg (16 Sep 2022)
-
-- mailmap: merge Philip Heiduck's two addresses into one
-
-- test1948: verify PUT + POST reusing the same handle
-
- Reproduced #9507, verifies the fix
-
-- setopt: when POST is set, reset the 'upload' field
-
- Reported-by: RobBotic1 on github
- Fixes #9507
- Closes #9511
-
-Marc Hoersken (15 Sep 2022)
-
-- github: initial CODEOWNERS setup for CI configuration
-
- Reviewed-by: Daniel Stenberg
- Reviewed-by: Marcel Raad
- Reviewed-by: Max Dymond
-
- Closes #9505
-
- [skip ci]
-
-Philip Heiduck (15 Sep 2022)
-
-- CI: optimize some more dependencies install
-
- Signed-off-by: Philip Heiduck <pheiduck@Philips-MBP.lan>
-
- Closes #9500
-
-Marc Hoersken (15 Sep 2022)
-
-- CI/GHA: merge event-based and NSS into new linux workflow
-
- Continue work on merging all Linux workflows into one file.
-
- Follow up to #9501
- Closes #9506
-
-Daniel Stenberg (15 Sep 2022)
-
-- include/curl/websockets.h: add extern "C" for C++
-
- Reported-by: n0name321 on github
- Fixes #9509
- Closes #9510
-
-- lib1560: extended to verify detect/reject of unknown schemes
-
- ... when no guessing is allowed.
-
-- urlapi: detect scheme better when not guessing
-
- When the parser is not allowed to guess scheme, it should consider the
- word ending at the first colon to be the scheme, independently of number
- of slashes.
-
- The parser now checks that the scheme is known before it counts slashes,
- to improve the error message for URLs with unknown schemes and maybe no
- slashes.
-
- When following redirects, no scheme guessing is allowed and therefore
- this change effectively prevents redirects to unknown schemes such as
- "data".
-
- Fixes #9503
-
-- strerror: improve two URL API error messages
-
-Marc Hoersken (14 Sep 2022)
-
-- CI/GHA: merge bearssl and hyper into initial linux workflow
-
- Begin work on merging all Linux workflows into one file.
-
- Closes #9501
-
-Daniel Stenberg (14 Sep 2022)
-
-- RELEASE-NOTES: synced
-
-- cmake: define BUILDING_LIBCURL in lib/CMakeLists, not config.h
-
- Since the config file might also get included by the tool code at times.
- This syncs with how other builds do it.
-
- Closes #9498
-
-- tool_hugehelp: make hugehelp a blank macro when disabled
-
- Closes #9485
-
-- getparameter: return PARAM_MANUAL_REQUESTED for -M even when disabled
-
- ... to improve the output in this situation. Now it doesn't say "option
- unknown" anymore.
-
- Closes #9485
-
-- setopt: fix compiler warning
-
- Follow-up to cd5ca80f00d2
-
- closes #9502
-
-Philip Heiduck (13 Sep 2022)
-
-- CI: skip make, do make install at once for dependencies
-
- Signed-off-by: Philip Heiduck <pheiduck@Philips-MBP.lan>
-
- Closes #9477
-
-Daniel Stenberg (13 Sep 2022)
-
-- formdata: typecast the va_arg return value
-
- To avoid "enumerated type mixed with another type" warnings
-
- Follow-up from 0f52dd5fd5aa3592691a
-
- Closes #9499
-
-- RELEASE-PROCEDURE.md: mention patch releases
-
- - When to make them and how to argue for them
- - Refreshed the release date list
-
- Closes #9495
-
-- urldata: use a curl_prot_t type for storing protocol bits
-
- This internal-use-only storage type can be bumped to a curl_off_t once
- we need to use bit 32 as the previous 'unsigned int' can no longer hold
- them all then.
-
- The websocket protocols take bit 30 and 31 so they are the last ones
- that fit within 32 bits - but cannot properly be exported through APIs
- since those use *signed* 32 bit types (long) in places.
-
- Closes #9481
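-
- A rough sketch of the idea (the PROT_* names are made up; only the type
- width and the ws/wss bit positions follow the description above):
-
- ```
- typedef unsigned int curl_prot_t; /* internal-use-only storage */
-
- #define PROT_HTTP ((curl_prot_t)1 << 0)
- #define PROT_WS   ((curl_prot_t)1 << 30) /* the last two bits that ... */
- #define PROT_WSS  ((curl_prot_t)1 << 31) /* ... still fit in 32 bits */
- ```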
-
-zhanghu on xiaomi (13 Sep 2022)
-
-- formdata: fix warning: 'CURLformoption' is promoted to 'int'
-
- curl/lib/formdata.c: In function 'FormAdd':
- curl/lib/formdata.c:249:31: warning: 'CURLformoption' is promoted to 'int'
- when passed through '...'
- 249 | option = va_arg(params, CURLformoption);
- | ^
- curl/lib/formdata.c:249:31: note: (so you should pass 'int' not
- 'CURLformoption' to 'va_arg')
- curl/lib/formdata.c:249:31: note: if this code is reached, the program
- will abort
-
- Closes #9484
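-
- The pattern in miniature (hypothetical names, not the formdata code):
-
- ```
- #include <stdarg.h>
-
- typedef enum { OPT_END = 0, OPT_NAME, OPT_VALUE } option_t;
-
- static void walk(int first, ...)
- {
-   va_list ap;
-   option_t option = (option_t)first;
-   va_start(ap, first);
-   while(option != OPT_END) {
-     /* option = va_arg(ap, option_t);  triggers the promotion warning */
-     option = (option_t)va_arg(ap, int); /* read the promoted int instead */
-   }
-   va_end(ap);
- }
-
- int main(void)
- {
-   walk(OPT_NAME, OPT_VALUE, OPT_END); /* enum args are promoted to int */
-   return 0;
- }
- ```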
-
-Daniel Stenberg (13 Sep 2022)
-
-- CURLOPT_CONNECT_ONLY.3: for ws(s) as well
-
- and correct the version number for when that support comes. Even if it
- is still experimental for WebSocket.
-
- Closes #9487
-
-- tool_operate: avoid a few #ifdefs for disabled-libcurl builds
-
- By providing empty macros in the header file instead, the code gets
- easier to read and yet is disabled on demand.
-
- Closes #9486
-
-a1346054 on github (13 Sep 2022)
-
-- scripts: use `grep -E` instead of `egrep`
-
- egrep is deprecated
-
- Closes #9491
-
-Hayden Roche (13 Sep 2022)
-
-- wolfSSL: fix session management bug.
-
- Prior to this commit, non-persistent pointers were being used to store
- sessions. When a WOLFSSL object was then freed, that freed the session
- it owned, and thus invalidated the pointer held in curl's cache. This
- commit makes it so we get a persistent (deep copied) session pointer
- that we then add to the cache. Accordingly, wolfssl_session_free, which
- was previously a no-op, now needs to actually call SSL_SESSION_free.
-
- This bug was discovered by a wolfSSL customer.
-
- Closes #9492
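-
- The ownership pattern in short, assuming wolfSSL's OpenSSL-compatibility
- calls wolfSSL_get1_session() and wolfSSL_SESSION_free() (illustrative
- only, not the actual curl change):
-
- ```
- #include <wolfssl/options.h>
- #include <wolfssl/ssl.h>
-
- /* cache a copied session instead of the pointer owned by the WOLFSSL
-    object; the copy must later be released with wolfSSL_SESSION_free() */
- static WOLFSSL_SESSION *session_for_cache(WOLFSSL *ssl)
- {
-   return wolfSSL_get1_session(ssl);
- }
- ```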
-
-Daniel Stenberg (13 Sep 2022)
-
-- docs: use "WebSocket" in singular
-
- This is how the RFC calls the protocol. Also rename the file in docs/ to
- WEBSOCKET.md in uppercase to match how we have done it for many other
- protocol docs in similar fashion.
-
- Add the WebSocket docs to the tarball.
-
- Closes #9496
-
-Marcel Raad (12 Sep 2022)
-
-- ws: fix build without `USE_WEBSOCKETS`
-
- The curl.h include is required unconditionally.
-
-- ws: add missing curl.h include
-
- A conflict between commits 664249d0952 and e5839f4ee70 broke the build.
-
-Daniel Stenberg (12 Sep 2022)
-
-- ws: fix an infof() call to use %uz for size_t output
-
- Detected by Coverity, CID 1514665.
-
- Closes #9480
-
-Marcel Raad (12 Sep 2022)
-
-- curl_setup: include only system.h instead of curl.h
-
- As done before commit 9506d01ee50.
-
- Ref: https://github.com/curl/curl/pull/9375#discussion_r957010158
- Closes https://github.com/curl/curl/pull/9453
-
-- lib: add missing limits.h includes
-
- Closes https://github.com/curl/curl/pull/9453
-
-- lib and tests: add missing curl.h includes
-
- Closes https://github.com/curl/curl/pull/9453
-
-- curl_setup: include curl.h after platform setup headers
-
- The platform setup headers might set definitions required for the
- includes in curl.h.
-
- Ref: https://github.com/curl/curl/pull/9375#discussion_r956998269
- Closes https://github.com/curl/curl/pull/9453
-
-Benjamin Loison (12 Sep 2022)
-
-- docs: correct missing uppercase in Markdown files
-
- To detect these typos I used:
-
- ```
- clear && grep -rn '\. [a-z]' . | uniq | grep -v '\. lib' | grep -v '[0-9]\. [a-z]' | grep -v '\.\. [a-z]' | grep -v '\. curl' | grep -v 'e.g. [a-z]' | grep -v 'eg. [a-z]' | grep -v '\etc. [a-z]' | grep -v 'i.e\. [a-z]' | grep --color=always '\. [a-z]' | grep '\.md'
- ```
-
- Closes #9474
-
-Daniel Stenberg (12 Sep 2022)
-
-- tool_setopt: use better English in --libcurl source comments
-
- Like this:
-
- XYZ was set to an object pointer
- ABC was set to a function pointer
-
- Closes #9475
-
-- setopt: make protocol2num use a curl_off_t for the protocol bit
-
- ... since WSS does not fit within 32 bit.
-
- Bug: https://github.com/curl/curl/pull/9467#issuecomment-1243014887
- Closes #9476
-
-- RELEASE-NOTES: synced
-
-- configure: polish the grep -E message a bit further
-
- Suggested-by: Emanuele Torre
- Closes #9473
-
-- GHA: add a gcc-11 -O3 build using OpenSSL
-
- Since -O3 might trigger other warnings
-
- Closes #9454
-
-Patrick Monnerat (11 Sep 2022)
-
-- content_encoding: use writer struct subclasses for different encodings
-
- The variable-sized encoding-specific storage of a struct contenc_writer
- currently relies on void * alignment that may be insufficient with
- regards to the specific storage fields, although having not caused any
- problems yet.
-
- In addition, gcc 11.3 issues a warning on access to fields of partially
- allocated structures that can occur when the specific storage size is 0:
-
- content_encoding.c: In function ‘Curl_build_unencoding_stack’:
- content_encoding.c:980:21: warning: array subscript ‘struct contenc_writer[0]’
- is partly outside array bounds of ‘unsigned char[16]’ [-Warray-bounds]
- 980 | writer->handler = handler;
- | ~~~~~~~~~~~~~~~~^~~~~~~~~
- In file included from content_encoding.c:49:
- memdebug.h:115:29: note: referencing an object of size 16 allocated by
- ‘curl_dbg_calloc’
- 115 | #define calloc(nbelem,size) curl_dbg_calloc(nbelem, size, __LINE__, __FILE__)
- | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- content_encoding.c:977:60: note: in expansion of macro ‘calloc’
- 977 | struct contenc_writer *writer = (struct contenc_writer *)calloc(1, sz);
-
- To solve both these problems, the current commit replaces the
- contenc_writer/params structure pairs by "subclasses" of struct
- contenc_writer. These are structures that contain a contenc_writer at
- offset 0. Proper field alignment is therefore handled by the compiler and
- full structure allocation is performed, silencing the warnings.
-
- Closes #9455
-
-Daniel Stenberg (11 Sep 2022)
-
-- configure: correct the wording when checking grep -E
-
- The check first checks that grep -E works, and only as a fallback tries
- to find and use egrep. egrep is deprecated.
-
- This change only corrects the output wording, not the checks themselves.
-
- Closes #9471
-
-Viktor Szakats (10 Sep 2022)
-
-- websockets: sync prototypes in docs with implementation [ci skip]
-
- Docs for the new send/recv functions synced with the committed versions
- of these.
-
- Closes #9470
-
-Daniel Stenberg (10 Sep 2022)
-
-- setopt: make protocols2num() work with websockets
-
- So that CURLOPT_PROTOCOLS_STR and CURLOPT_REDIR_PROTOCOLS_STR can
- specify those as well.
-
- Reported-by: Patrick Monnerat
- Bug: https://curl.se/mail/lib-2022-09/0016.html
- Closes #9467
-
-- curl/websockets.h: remove leftover bad typedef
-
- Just a leftover trace of a development thing that did not stay like
- that.
-
- Reported-by: Marc Hörsken
- Fixes #9465
- Closes #9466
-
-Orgad Shaneh (10 Sep 2022)
-
-- fix Cygwin/MSYS compilation
-
- _getpid is a Windows API function. On Cygwin variants it should remain getpid.
-
- Fixes #8220
- Closes #9255
-
-Marc Hoersken (10 Sep 2022)
-
-- GHA: prepare workflow merge by aligning structure again
-
- Closes #9413
-
-Daniel Stenberg (9 Sep 2022)
-
-- docs: the websockets symbols are added in 7.86.0
-
- Nothing else
-
- Closes #9459
-
-- tests/libtest/Makefile.inc: fixup merge conflict mistake
-
-- EXPERIMENTAL.md: add WebSockets
-
-- appveyor: enable websockets
-
-- cirrus: enable websockets in the windows builds
-
-- GHA: add websockets to macos, openssl3 and hyper builds
-
-- tests: add websockets tests
-
- - add websockets support to sws
- - 2300: first very basic websockets test
- - 2301: first libcurl test for ws (not working yet)
- - 2302: use the ws callback
- - 2303: test refused upgrade
-
-- curl_ws_meta: initial implementation
-
-- curl_ws_meta.3: added docs
-
-- ws: initial websockets support
-
- Closes #8995
-
-- version: add ws + wss
-
-- libtest/lib1560: test basic websocket URL parsing
-
-- configure: add --enable-websockets
-
-- docs/WebSockets.md: docs
-
-- test415: verify Content-Length parser with control code + negative value
-
-- strtoofft: after space, there cannot be a control code
-
- With the change from ISSPACE() to ISBLANK() this function no longer
- deals with (ignores) control codes the same way, which could lead to
- this function returning unexpected values like in the case of
- "Content-Length: \r-12354".
-
- Follow-up to 6f9fb7ec2d7cb389a0da5
-
- Detected by OSS-fuzz
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=51140
- Assisted-by: Max Dymond
- Closes #9458
-
-- headers: reset the requests counter at transfer start
-
- If not, reusing an easy handle to do a subsequent transfer would
- continue the counter from the previous invoke, which then would make use
- of the header API difficult/impossible as the request counter
- mismatched.
-
- Add libtest 1947 to verify.
-
- Reported-by: Andrew Lambert
- Fixes #9424
- Closes #9447
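-
- An illustration of why the reset matters when a handle is reused (the
- header name is just an example):
-
- ```
- #include <stdio.h>
- #include <curl/curl.h>
-
- /* with the counter reset at transfer start, request number 0 again means
-    the first request of the latest transfer done with this handle */
- static void show_server(CURL *curl)
- {
-   struct curl_header *h;
-   if(curl_easy_header(curl, "Server", 0, CURLH_HEADER, 0, &h) == CURLHE_OK)
-     printf("Server: %s\n", h->value);
- }
- ```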
-
-Jay Satiro (8 Sep 2022)
-
-- header: define public API functions as extern c
-
- Prior to this change linker errors would occur if curl_easy_header or
- curl_easy_nextheader was called from a C++ unit.
-
- Bug: https://github.com/curl/curl/issues/9424#issuecomment-1238818007
- Reported-by: Andrew Lambert
-
- Closes https://github.com/curl/curl/pull/9446
-
-Daniel Stenberg (8 Sep 2022)
-
-- http2: make nghttp2 less picky about field whitespace
-
- In nghttp2 1.49.0 it returns an error on leading and trailing whitespace in
- header fields according to language in the recently shipped RFC 9113.
-
- nghttp2 1.50.0 introduces an option to switch off this strict check and
- this change enables this option by default which should make curl behave
- more similar to how it did with nghttp2 1.48.0 and earlier.
-
- We might want to consider making this an option in the future.
-
- Closes #9448
-
-- RELEASE-NOTES: synced
-
- And bump to 7.86.0 for the pending next release
diff --git a/libs/libcurl/docs/THANKS b/libs/libcurl/docs/THANKS index 699eebabfc..0536b26a5a 100644 --- a/libs/libcurl/docs/THANKS +++ b/libs/libcurl/docs/THANKS @@ -119,6 +119,7 @@ Alexis La Goutte Alexis Vachette
Alfonso Martone
Alfred Gebert
+Ali Khodkar
Ali Utku Selen
ALittleDruid on github
Allen Pulsifer
@@ -145,6 +146,7 @@ Andreas Damm Andreas Falkenhahn
Andreas Farber
Andreas Fischer
+Andreas Huebner
Andreas Kostyrka
Andreas Malzahn
Andreas Ntaflos
@@ -210,6 +212,7 @@ Anthony Shaw Antoine Aubert
Antoine Calando
Antoine Pietri
+Antoine Pitrou
Anton Bychkov
Anton Gerasimov
Anton Kalmykov
@@ -227,6 +230,7 @@ Armel Asselin Arnaud Compan
Arnaud Ebalard
Arnaud Rebillout
+Arne Soete
Aron Bergman
Aron Rotteveel
Artak Galoyan
@@ -275,6 +279,7 @@ bdry on github beckenc on github
Ben Boeckel
Ben Darnell
+Ben Fritz
Ben Greear
Ben Kohler
Ben Madsen
@@ -317,6 +322,7 @@ billionai on github Billyzou0741326 on github
Bin Lan
Bin Meng
+Biswapriyo Nath
Bjarni Ingi Gislason
Bjoern Franke
Bjoern Sikora
@@ -333,6 +339,7 @@ Bob Schader bobmitchell1956 on github
Bodo Bergmann
Bogdan Nicula
+Boris Kuschel
Boris Okunskiy
Boris Rasin
Boris Verkhovskiy
@@ -362,6 +369,7 @@ Brian E. Gallew Brian Green
Brian Inglis
Brian J. Murrell
+Brian Lund
Brian Prodoehl
Brian R Duffy
Brian Ulm
@@ -422,6 +430,7 @@ Cherish98 on github Chester Liu
Chih-Chung Chang
Chih-Hsuan Yen
+Chloe Kudryavtsev
Chris "Bob Bob"
Chris Araman
Chris Carlmar
@@ -483,13 +492,16 @@ COFFEETALES on github coinhubs on github
Colby Ranger
Colin Blair
+Colin Cross
Colin Hogben
Colin Leroy
Colin O'Dell
Colin Watson
Colm Buckley
+Colman Mbuya
Constantine Sapuntzakis
coralw on github
+correctmost on github
Cory Benfield
Cory Nelson
Costya Shulyupin
@@ -527,6 +539,7 @@ Dan Becker Dan Cristian
Dan Donahue
Dan Fandrich
+Dan Frandrich
Dan Johnson
Dan Kenigsberg
Dan Locks
@@ -629,7 +642,9 @@ Davide Cassioli davidedec on github
dbrowndan on github
dEajL3kA on github
+Deal(一线灵)
dekerser on github
+dengjfzh on github
Dengminwen
Denis Baručić
Denis Chaplygin
@@ -662,6 +677,7 @@ Diogo Teles Sant'Anna Dirk Eddelbuettel
Dirk Feytons
Dirk Manske
+Dirk Rosenkranz
Dirk Wetter
Dirkjan Bussink
Diven Qi
@@ -718,9 +734,11 @@ Dustin Howett Dusty Mabe
Duy Phan Thanh
Dwarakanath Yadavalli
+Dylan Anthony
Dylan Ellicott
Dylan Salisbury
Dániel Bakai
+eaglegai on github
Early Ehlinger
Earnestly on github
Eason-Yu on github
@@ -762,6 +780,7 @@ Emil Lerner Emil Romanus
Emil Österlund
Emiliano Ida
+Emilio Cobos Álvarez
Emilio López
Emmanuel Tychon
Enrico Scholz
@@ -832,6 +851,7 @@ Federico Bianchi Federico Pellegrin
Fedor Karpelevitch
Fedor Korotkov
+FeignClaims on github
Feist Josselin
Felipe Gasper
Felix Hädicke
@@ -869,6 +889,7 @@ Frank Ticheler Frank Van Uffelen
František Kučera
François Charlier
+François Michel
François Rigault
Frazer Smith
Fred Machado
@@ -1020,6 +1041,7 @@ Hide Ishikawa Hidemoto Nakada
highmtworks on github
Himanshu Gupta
+Hind Montassif
Hiroki Kurosawa
Hirotaka Tagawa
Ho-chi Chen
@@ -1137,6 +1159,7 @@ Jan Venekamp Jan Verbeek
Jan-Piet Mens
JanB on github
+Janne Blomqvist
Janne Johansson
Jared Jennings
Jared Lundell
@@ -1221,6 +1244,7 @@ Jim Drash Jim Freeman
Jim Fuller
Jim Hollinger
+Jim King
Jim Meyering
Jimmy Gaussen
Jiri Dvorak
@@ -1301,6 +1325,7 @@ Jon Torrey Jon Travis
Jon Turner
Jon Wilkes
+Jonas Bülow
Jonas Forsman
Jonas Haag
Jonas Minnberg
@@ -1326,6 +1351,7 @@ Joseph Chen Josh Bialkowski
Josh Brobst
Josh Kapell
+Josh McCullough
Josh Soref
joshhe on github
Joshua Kwan
@@ -1418,6 +1444,7 @@ Kenny To Kent Boortz
Kerem Kat
Keshav Krity
+Kev Jackson
Kevin Adler
Kevin Baughman
Kevin Burke
@@ -1465,6 +1492,7 @@ Kunal Ekawde Kurt Fankhauser
Kushal Das
Kvarec Lezki
+kwind on github
Kwon-Young Choi
Kyle Abramowitz
Kyle Edwards
@@ -1740,6 +1768,7 @@ Melissa Mears Melroy van den Berg
Mert Yazıcıoğlu
Mettgut Jamalla
+Micah Snyder)
Michael Afanasiev
Michael Anti
Michael Baentsch
@@ -1848,6 +1877,7 @@ Muhammad Herdiansyah Muhammad Hussein Ammari
Muhammed Yavuz Nuzumlalı
Murugan Balraj
+musvaage on github
Muz Dima
Myk Taylor
n0name321 on github
@@ -1918,6 +1948,7 @@ Niranjan Hasabnis Nis Jorgensen
nk
Noam Moshe
+nobedee on github
NobodyXu on github
Nobuhiro Ban
Nodak Sodak
@@ -1942,6 +1973,7 @@ Oleguer Llopart Olen Andoni
olesteban on github
Oli Kingshott
+Oliver Chang
Oliver Gondža
Oliver Graute
Oliver Kuckertz
@@ -1961,6 +1993,8 @@ Orgad Shaneh Ori Avtalion
orycho on github
osabc on github
+Osaila on github
+Osama Albahrani
Oscar Koeroo
Oscar Norlander
Oskar Liljeblad
@@ -1969,6 +2003,7 @@ Oumph on github ovidiu-benea on github
P R Schaffner
Palo Markovic
+pandada8 on github
Paolo Mossino
Paolo Piacentini
Paras Sethia
@@ -2015,7 +2050,9 @@ Paul Vixie Paulo Roberto Tomasi
Pavel Cenek
Pavel Gushchin
+Pavel Kalyugin
Pavel Löbl
+Pavel Mayorov
Pavel Orehov
Pavel Pavlov
Pavel Raiskup
@@ -2282,6 +2319,7 @@ Romulo A. Ceccon Ron Eldor
Ron Parker
Ron Zapp
+Ronan Pigott
Ronnie Mose
Rosen Penev
Rosimildo da Silva
@@ -2408,6 +2446,7 @@ Shiraz Kanga shithappens2016 on github
Shlomi Fish
Shmulik Regev
+Shohei Maeda
Siddhartha Prakash Jain
siddharthchhabrap on github
Sidney San Martín
@@ -2421,8 +2460,10 @@ Simon Josefsson Simon Legner
Simon Liu
Simon Warta
+simplerobot on github
Siva Sivaraman
SLDiggie on github
+Smackd0wn on github
smuellerDD on github
sn on hackerone
sofaboss on github
@@ -2484,6 +2525,7 @@ Sterling Hughes Steve Green
Steve H Truong
Steve Havelka
+Steve Herrell
Steve Holme
Steve Lhomme
Steve Little
@@ -2507,6 +2549,7 @@ SumatraPeter on github Sune Ahlgren
Sunny Bean
Sunny Purushe
+SuperIlu on github
Sven Anders
Sven Blumenstein
Sven Neuhaus
@@ -2558,6 +2601,7 @@ Thomas M. DuBuisson Thomas Petazzoni
Thomas Ruecker
Thomas Schwinge
+Thomas Taylor
Thomas Tonino
Thomas van Hesteren
Thomas Vegas
@@ -2732,12 +2776,14 @@ Vsevolod Novikov vshmuk on hackerone
vvb2060 on github
Vyron Tsingaras
+Vítor Galvão
W. Mark Kubacki
Waldek Kozba
Walter J. Mack
Ward Willats
Warren Menzer
Wayne Haigh
+Wei Chong Tan
Wenchao Li
Wenxiang Qian
Werner Koch
@@ -2810,6 +2856,7 @@ Yuriy Sosov Yusuke Nakamura
Yves Arrouye
Yves Lejeune
+YX Hao
z2-2z on github
z2_ on hackerone
Zachary Seguin
@@ -2842,5 +2889,6 @@ zzq1015 on github ウさん
不确定
加藤郁之
+左潇峰
梦终无痕
積丹尼 Dan Jacobson
diff --git a/libs/libcurl/include/curl/curl.h b/libs/libcurl/include/curl/curl.h index 0ec7223141..608ff6efc5 100644 --- a/libs/libcurl/include/curl/curl.h +++ b/libs/libcurl/include/curl/curl.h @@ -174,8 +174,9 @@ typedef enum { } curl_sslbackend;
/* aliases for library clones and renames */
-#define CURLSSLBACKEND_LIBRESSL CURLSSLBACKEND_OPENSSL
+#define CURLSSLBACKEND_AWSLC CURLSSLBACKEND_OPENSSL
#define CURLSSLBACKEND_BORINGSSL CURLSSLBACKEND_OPENSSL
+#define CURLSSLBACKEND_LIBRESSL CURLSSLBACKEND_OPENSSL
/* deprecated names: */
#define CURLSSLBACKEND_CYASSL CURLSSLBACKEND_WOLFSSL
@@ -331,7 +332,8 @@ struct curl_fileinfo { unsigned int flags;
- /* used internally */
+ /* These are libcurl private struct fields. Previously used by libcurl, so
+ they must never be interfered with. */
char *b_data;
size_t b_size;
size_t b_used;
@@ -778,7 +780,8 @@ typedef enum { CONNECT HTTP/1.1 */
CURLPROXY_HTTP_1_0 = 1, /* added in 7.19.4, force to use CONNECT
HTTP/1.0 */
- CURLPROXY_HTTPS = 2, /* added in 7.52.0 */
+ CURLPROXY_HTTPS = 2, /* HTTPS but stick to HTTP/1 added in 7.52.0 */
+ CURLPROXY_HTTPS2 = 3, /* HTTPS and attempt HTTP/2 added in 8.1.0 */
CURLPROXY_SOCKS4 = 4, /* support added in 7.15.2, enum existed already
in 7.10 */
CURLPROXY_SOCKS5 = 5, /* added in 7.10 */
diff --git a/libs/libcurl/include/curl/curlver.h b/libs/libcurl/include/curl/curlver.h index ac29eb51c7..8b2d773a6c 100644 --- a/libs/libcurl/include/curl/curlver.h +++ b/libs/libcurl/include/curl/curlver.h @@ -32,13 +32,13 @@ /* This is the version number of the libcurl package from which this header
file origins: */
-#define LIBCURL_VERSION "8.0.1"
+#define LIBCURL_VERSION "8.1.2"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 8
-#define LIBCURL_VERSION_MINOR 0
-#define LIBCURL_VERSION_PATCH 1
+#define LIBCURL_VERSION_MINOR 1
+#define LIBCURL_VERSION_PATCH 2
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
@@ -59,7 +59,7 @@ CURL_VERSION_BITS() macro since curl's own configure script greps for it
and needs it to contain the full number.
*/
-#define LIBCURL_VERSION_NUM 0x080001
+#define LIBCURL_VERSION_NUM 0x080102
/*
* This is the date and time when the full source package was created. The
@@ -70,7 +70,7 @@ *
* "2007-11-23"
*/
-#define LIBCURL_TIMESTAMP "2023-03-20"
+#define LIBCURL_TIMESTAMP "2023-05-30"
#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|(z))
#define CURL_AT_LEAST_VERSION(x,y,z) \
diff --git a/libs/libcurl/include/curl/easy.h b/libs/libcurl/include/curl/easy.h index 566668ef3c..e9c46579f0 100644 --- a/libs/libcurl/include/curl/easy.h +++ b/libs/libcurl/include/curl/easy.h @@ -48,13 +48,13 @@ CURL_EXTERN void curl_easy_cleanup(CURL *curl); *
* DESCRIPTION
*
- * Request internal information from the curl session with this function. The
- * third argument MUST be a pointer to a long, a pointer to a char * or a
- * pointer to a double (as the documentation describes elsewhere). The data
- * pointed to will be filled in accordingly and can be relied upon only if the
- * function returns CURLE_OK. This function is intended to get used *AFTER* a
- * performed transfer, all results from this function are undefined until the
- * transfer is completed.
+ * Request internal information from the curl session with this function.
+ * The third argument MUST be pointing to the specific type of the used option
+ * which is documented in each man page of the option. The data pointed to
+ * will be filled in accordingly and can be relied upon only if the function
+ * returns CURLE_OK. This function is intended to get used *AFTER* a performed
+ * transfer, all results from this function are undefined until the transfer
+ * is completed.
*/
CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...);
diff --git a/libs/libcurl/libcurl.vcxproj b/libs/libcurl/libcurl.vcxproj index 42e3a7cd03..ae88c6edb0 100644 --- a/libs/libcurl/libcurl.vcxproj +++ b/libs/libcurl/libcurl.vcxproj @@ -41,6 +41,9 @@ <ClCompile Include="src\base64.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
+ <ClCompile Include="src\bufq.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="src\bufref.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -50,6 +53,15 @@ <ClCompile Include="src\cf-https-connect.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
+ <ClCompile Include="src\cf-h1-proxy.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="src\cf-h2-proxy.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="src\cf-haproxy.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="src\cf-socket.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -131,6 +143,9 @@ <ClCompile Include="src\dynbuf.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
+ <ClCompile Include="src\dynhds.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="src\easy.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -170,9 +185,6 @@ <ClCompile Include="src\gopher.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
- <ClCompile Include="src\h2h3.c">
- <PrecompiledHeader>NotUsing</PrecompiledHeader>
- </ClCompile>
<ClCompile Include="src\hash.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -206,6 +218,9 @@ <ClCompile Include="src\http.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
+ <ClCompile Include="src\http1.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="src\http2.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@@ -468,8 +483,12 @@ <ClInclude Include="src\amigaos.h" />
<ClInclude Include="src\arpa_telnet.h" />
<ClInclude Include="src\asyn.h" />
+ <ClInclude Include="src\bufq.h" />
<ClInclude Include="src\bufref.h" />
<ClInclude Include="src\c-hyper.h" />
+ <ClInclude Include="src\cf-h1-proxy.h" />
+ <ClInclude Include="src\cf-h2-proxy.h" />
+ <ClInclude Include="src\cf-haproxy.h" />
<ClInclude Include="src\cf-https-connect.h" />
<ClInclude Include="src\cf-socket.h" />
<ClInclude Include="src\cfilters.h" />
@@ -520,6 +539,7 @@ <ClInclude Include="src\doh.h" />
<ClInclude Include="src\dotdot.h" />
<ClInclude Include="src\dynbuf.h" />
+ <ClInclude Include="src\dynhds.h" />
<ClInclude Include="src\easyif.h" />
<ClInclude Include="src\easy_lock.h" />
<ClInclude Include="src\easyoptions.h" />
@@ -533,13 +553,13 @@ <ClInclude Include="src\functypes.h" />
<ClInclude Include="src\getinfo.h" />
<ClInclude Include="src\gopher.h" />
- <ClInclude Include="src\h2h3.h" />
<ClInclude Include="src\hash.h" />
<ClInclude Include="src\headers.h" />
<ClInclude Include="src\hostip.h" />
<ClInclude Include="src\hsts.h" />
<ClInclude Include="src\http_aws_sigv4.h" />
<ClInclude Include="src\http.h" />
+ <ClInclude Include="src\http1.h" />
<ClInclude Include="src\http2.h" />
<ClInclude Include="src\http_chunks.h" />
<ClInclude Include="src\http_digest.h" />
diff --git a/libs/libcurl/libcurl.vcxproj.filters b/libs/libcurl/libcurl.vcxproj.filters index c73c53cb99..b7049ee81d 100644 --- a/libs/libcurl/libcurl.vcxproj.filters +++ b/libs/libcurl/libcurl.vcxproj.filters @@ -17,12 +17,24 @@ <ClCompile Include="src\base64.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="src\bufq.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="src\bufref.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\c-hyper.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="src\cf-h1-proxy.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="src\cf-h2-proxy.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="src\cf-haproxy.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="src\cf-https-connect.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -107,6 +119,9 @@ <ClCompile Include="src\dynbuf.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="src\dynhds.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="src\easy.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -146,9 +161,6 @@ <ClCompile Include="src\gopher.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="src\h2h3.c">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="src\hash.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -182,6 +194,9 @@ <ClCompile Include="src\http.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="src\http1.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="src\http2.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -487,12 +502,24 @@ <ClInclude Include="src\asyn.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="src\bufq.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
<ClInclude Include="src\bufref.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="src\c-hyper.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="src\cf-h1-proxy.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="src\cf-h2-proxy.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="src\cf-haproxy.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
<ClInclude Include="src\cf-https-connect.h">
<Filter>Header Files</Filter>
</ClInclude>
@@ -643,6 +670,9 @@ <ClInclude Include="src\dynbuf.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="src\dynhds.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
<ClInclude Include="src\easyif.h">
<Filter>Header Files</Filter>
</ClInclude>
@@ -682,9 +712,6 @@ <ClInclude Include="src\gopher.h">
<Filter>Header Files</Filter>
</ClInclude>
- <ClInclude Include="src\h2h3.h">
- <Filter>Header Files</Filter>
- </ClInclude>
<ClInclude Include="src\hash.h">
<Filter>Header Files</Filter>
</ClInclude>
@@ -703,6 +730,9 @@ <ClInclude Include="src\http.h">
<Filter>Header Files</Filter>
</ClInclude>
+ <ClInclude Include="src\http1.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
<ClInclude Include="src\http2.h">
<Filter>Header Files</Filter>
</ClInclude>
diff --git a/libs/libcurl/src/CMakeLists.txt b/libs/libcurl/src/CMakeLists.txt index d37499846a..7e0be5a1eb 100644 --- a/libs/libcurl/src/CMakeLists.txt +++ b/libs/libcurl/src/CMakeLists.txt @@ -86,6 +86,7 @@ set_target_properties(${LIB_NAME} PROPERTIES if(CMAKE_SYSTEM_NAME STREQUAL "AIX" OR
CMAKE_SYSTEM_NAME STREQUAL "Linux" OR
CMAKE_SYSTEM_NAME STREQUAL "Darwin" OR
+ CMAKE_SYSTEM_NAME STREQUAL "SunOS" OR
CMAKE_SYSTEM_NAME STREQUAL "GNU/kFreeBSD" OR
# FreeBSD comes with the a.out and elf flavours
@@ -130,6 +131,17 @@ if(WIN32) set_target_properties(${LIB_NAME} PROPERTIES IMPORT_SUFFIX "_imp.lib")
endif()
endif()
+elseif(NOT CMAKE_CROSSCOMPILING)
+ # on not-Windows and not-crosscompiling, check for writable argv[]
+ include(CheckCSourceRuns)
+ check_c_source_runs("
+int main(int argc, char **argv)
+{
+ (void)argc;
+ argv[0][0] = ' ';
+ return (argv[0][0] == ' ')?0:1;
+}"
+ HAVE_WRITABLE_ARGV)
endif()
target_include_directories(${LIB_NAME} INTERFACE
diff --git a/libs/libcurl/src/Makefile.in b/libs/libcurl/src/Makefile.in index f1144ae9c3..8ae1bceda7 100644 --- a/libs/libcurl/src/Makefile.in +++ b/libs/libcurl/src/Makefile.in @@ -212,40 +212,41 @@ am__installdirs = "$(DESTDIR)$(libdir)" LTLIBRARIES = $(lib_LTLIBRARIES) $(noinst_LTLIBRARIES)
libcurl_la_LIBADD =
am__libcurl_la_SOURCES_DIST = altsvc.c amigaos.c asyn-ares.c \
- asyn-thread.c base64.c bufref.c c-hyper.c cf-https-connect.c \
- cf-socket.c cfilters.c conncache.c connect.c \
- content_encoding.c cookie.c curl_addrinfo.c curl_des.c \
- curl_endian.c curl_fnmatch.c curl_get_line.c \
- curl_gethostname.c curl_gssapi.c curl_log.c curl_memrchr.c \
- curl_multibyte.c curl_ntlm_core.c curl_ntlm_wb.c curl_path.c \
- curl_range.c curl_rtmp.c curl_sasl.c curl_sspi.c \
- curl_threads.c dict.c doh.c dynbuf.c easy.c easygetopt.c \
- easyoptions.c escape.c file.c fileinfo.c fopen.c formdata.c \
- ftp.c ftplistparser.c getenv.c getinfo.c gopher.c h2h3.c \
- hash.c headers.c hmac.c hostasyn.c hostip.c hostip4.c \
- hostip6.c hostsyn.c hsts.c http.c http2.c http_chunks.c \
- http_digest.c http_negotiate.c http_ntlm.c http_proxy.c \
- http_aws_sigv4.c idn.c if2ip.c imap.c inet_ntop.c inet_pton.c \
- krb5.c ldap.c llist.c md4.c md5.c memdebug.c mime.c mprintf.c \
- mqtt.c multi.c netrc.c nonblock.c noproxy.c openldap.c \
- parsedate.c pingpong.c pop3.c progress.c psl.c rand.c rename.c \
- rtsp.c select.c sendf.c setopt.c sha256.c share.c slist.c \
- smb.c smtp.c socketpair.c socks.c socks_gssapi.c socks_sspi.c \
- speedcheck.c splay.c strcase.c strdup.c strerror.c strtok.c \
- strtoofft.c system_win32.c telnet.c tftp.c timediff.c \
- timeval.c transfer.c url.c urlapi.c version.c version_win32.c \
- warnless.c ws.c vauth/cleartext.c vauth/cram.c vauth/digest.c \
- vauth/digest_sspi.c vauth/gsasl.c vauth/krb5_gssapi.c \
- vauth/krb5_sspi.c vauth/ntlm.c vauth/ntlm_sspi.c \
- vauth/oauth2.c vauth/spnego_gssapi.c vauth/spnego_sspi.c \
- vauth/vauth.c vtls/bearssl.c vtls/gskit.c vtls/gtls.c \
- vtls/hostcheck.c vtls/keylog.c vtls/mbedtls.c \
+ asyn-thread.c base64.c bufq.c bufref.c c-hyper.c cf-h1-proxy.c \
+ cf-h2-proxy.c cf-haproxy.c cf-https-connect.c cf-socket.c \
+ cfilters.c conncache.c connect.c content_encoding.c cookie.c \
+ curl_addrinfo.c curl_des.c curl_endian.c curl_fnmatch.c \
+ curl_get_line.c curl_gethostname.c curl_gssapi.c curl_log.c \
+ curl_memrchr.c curl_multibyte.c curl_ntlm_core.c \
+ curl_ntlm_wb.c curl_path.c curl_range.c curl_rtmp.c \
+ curl_sasl.c curl_sspi.c curl_threads.c dict.c doh.c dynbuf.c \
+ dynhds.c easy.c easygetopt.c easyoptions.c escape.c file.c \
+ fileinfo.c fopen.c formdata.c ftp.c ftplistparser.c getenv.c \
+ getinfo.c gopher.c hash.c headers.c hmac.c hostasyn.c hostip.c \
+ hostip4.c hostip6.c hostsyn.c hsts.c http.c http1.c http2.c \
+ http_chunks.c http_digest.c http_negotiate.c http_ntlm.c \
+ http_proxy.c http_aws_sigv4.c idn.c if2ip.c imap.c inet_ntop.c \
+ inet_pton.c krb5.c ldap.c llist.c md4.c md5.c memdebug.c \
+ mime.c mprintf.c mqtt.c multi.c netrc.c nonblock.c noproxy.c \
+ openldap.c parsedate.c pingpong.c pop3.c progress.c psl.c \
+ rand.c rename.c rtsp.c select.c sendf.c setopt.c sha256.c \
+ share.c slist.c smb.c smtp.c socketpair.c socks.c \
+ socks_gssapi.c socks_sspi.c speedcheck.c splay.c strcase.c \
+ strdup.c strerror.c strtok.c strtoofft.c system_win32.c \
+ telnet.c tftp.c timediff.c timeval.c transfer.c url.c urlapi.c \
+ version.c version_win32.c warnless.c ws.c vauth/cleartext.c \
+ vauth/cram.c vauth/digest.c vauth/digest_sspi.c vauth/gsasl.c \
+ vauth/krb5_gssapi.c vauth/krb5_sspi.c vauth/ntlm.c \
+ vauth/ntlm_sspi.c vauth/oauth2.c vauth/spnego_gssapi.c \
+ vauth/spnego_sspi.c vauth/vauth.c vtls/bearssl.c vtls/gskit.c \
+ vtls/gtls.c vtls/hostcheck.c vtls/keylog.c vtls/mbedtls.c \
vtls/mbedtls_threadlock.c vtls/nss.c vtls/openssl.c \
vtls/rustls.c vtls/schannel.c vtls/schannel_verify.c \
vtls/sectransp.c vtls/vtls.c vtls/wolfssl.c vtls/x509asn1.c \
vquic/curl_msh3.c vquic/curl_ngtcp2.c vquic/curl_quiche.c \
vquic/vquic.c vssh/libssh.c vssh/libssh2.c vssh/wolfssh.c \
- altsvc.h amigaos.h arpa_telnet.h asyn.h bufref.h c-hyper.h \
+ altsvc.h amigaos.h arpa_telnet.h asyn.h bufq.h bufref.h \
+ c-hyper.h cf-h1-proxy.h cf-h2-proxy.h cf-haproxy.h \
cf-https-connect.h cf-socket.h cfilters.h conncache.h \
connect.h content_encoding.h cookie.h curl_addrinfo.h \
curl_base64.h curl_ctype.h curl_des.h curl_endian.h \
@@ -255,10 +256,10 @@ am__libcurl_la_SOURCES_DIST = altsvc.c amigaos.c asyn-ares.c \ curl_multibyte.h curl_ntlm_core.h curl_ntlm_wb.h curl_path.h \
curl_printf.h curl_range.h curl_rtmp.h curl_sasl.h \
curl_setup.h curl_setup_once.h curl_sha256.h curl_sspi.h \
- curl_threads.h curlx.h dict.h doh.h dynbuf.h easy_lock.h \
- easyif.h easyoptions.h escape.h file.h fileinfo.h fopen.h \
- formdata.h functypes.h ftp.h ftplistparser.h getinfo.h \
- gopher.h h2h3.h hash.h headers.h hostip.h hsts.h http.h \
+ curl_threads.h curlx.h dict.h doh.h dynbuf.h dynhds.h \
+ easy_lock.h easyif.h easyoptions.h escape.h file.h fileinfo.h \
+ fopen.h formdata.h functypes.h ftp.h ftplistparser.h getinfo.h \
+ gopher.h hash.h headers.h hostip.h hsts.h http.h http1.h \
http2.h http_chunks.h http_digest.h http_negotiate.h \
http_ntlm.h http_proxy.h http_aws_sigv4.h idn.h if2ip.h imap.h \
inet_ntop.h inet_pton.h llist.h memdebug.h mime.h mqtt.h \
@@ -279,41 +280,43 @@ am__libcurl_la_SOURCES_DIST = altsvc.c amigaos.c asyn-ares.c \ vquic/vquic.h vquic/vquic_int.h vssh/ssh.h libcurl.rc
am__objects_1 = libcurl_la-altsvc.lo libcurl_la-amigaos.lo \
libcurl_la-asyn-ares.lo libcurl_la-asyn-thread.lo \
- libcurl_la-base64.lo libcurl_la-bufref.lo \
- libcurl_la-c-hyper.lo libcurl_la-cf-https-connect.lo \
- libcurl_la-cf-socket.lo libcurl_la-cfilters.lo \
- libcurl_la-conncache.lo libcurl_la-connect.lo \
- libcurl_la-content_encoding.lo libcurl_la-cookie.lo \
- libcurl_la-curl_addrinfo.lo libcurl_la-curl_des.lo \
- libcurl_la-curl_endian.lo libcurl_la-curl_fnmatch.lo \
- libcurl_la-curl_get_line.lo libcurl_la-curl_gethostname.lo \
- libcurl_la-curl_gssapi.lo libcurl_la-curl_log.lo \
- libcurl_la-curl_memrchr.lo libcurl_la-curl_multibyte.lo \
- libcurl_la-curl_ntlm_core.lo libcurl_la-curl_ntlm_wb.lo \
- libcurl_la-curl_path.lo libcurl_la-curl_range.lo \
- libcurl_la-curl_rtmp.lo libcurl_la-curl_sasl.lo \
- libcurl_la-curl_sspi.lo libcurl_la-curl_threads.lo \
- libcurl_la-dict.lo libcurl_la-doh.lo libcurl_la-dynbuf.lo \
+ libcurl_la-base64.lo libcurl_la-bufq.lo libcurl_la-bufref.lo \
+ libcurl_la-c-hyper.lo libcurl_la-cf-h1-proxy.lo \
+ libcurl_la-cf-h2-proxy.lo libcurl_la-cf-haproxy.lo \
+ libcurl_la-cf-https-connect.lo libcurl_la-cf-socket.lo \
+ libcurl_la-cfilters.lo libcurl_la-conncache.lo \
+ libcurl_la-connect.lo libcurl_la-content_encoding.lo \
+ libcurl_la-cookie.lo libcurl_la-curl_addrinfo.lo \
+ libcurl_la-curl_des.lo libcurl_la-curl_endian.lo \
+ libcurl_la-curl_fnmatch.lo libcurl_la-curl_get_line.lo \
+ libcurl_la-curl_gethostname.lo libcurl_la-curl_gssapi.lo \
+ libcurl_la-curl_log.lo libcurl_la-curl_memrchr.lo \
+ libcurl_la-curl_multibyte.lo libcurl_la-curl_ntlm_core.lo \
+ libcurl_la-curl_ntlm_wb.lo libcurl_la-curl_path.lo \
+ libcurl_la-curl_range.lo libcurl_la-curl_rtmp.lo \
+ libcurl_la-curl_sasl.lo libcurl_la-curl_sspi.lo \
+ libcurl_la-curl_threads.lo libcurl_la-dict.lo \
+ libcurl_la-doh.lo libcurl_la-dynbuf.lo libcurl_la-dynhds.lo \
libcurl_la-easy.lo libcurl_la-easygetopt.lo \
libcurl_la-easyoptions.lo libcurl_la-escape.lo \
libcurl_la-file.lo libcurl_la-fileinfo.lo libcurl_la-fopen.lo \
libcurl_la-formdata.lo libcurl_la-ftp.lo \
libcurl_la-ftplistparser.lo libcurl_la-getenv.lo \
- libcurl_la-getinfo.lo libcurl_la-gopher.lo libcurl_la-h2h3.lo \
- libcurl_la-hash.lo libcurl_la-headers.lo libcurl_la-hmac.lo \
+ libcurl_la-getinfo.lo libcurl_la-gopher.lo libcurl_la-hash.lo \
+ libcurl_la-headers.lo libcurl_la-hmac.lo \
libcurl_la-hostasyn.lo libcurl_la-hostip.lo \
libcurl_la-hostip4.lo libcurl_la-hostip6.lo \
libcurl_la-hostsyn.lo libcurl_la-hsts.lo libcurl_la-http.lo \
- libcurl_la-http2.lo libcurl_la-http_chunks.lo \
- libcurl_la-http_digest.lo libcurl_la-http_negotiate.lo \
- libcurl_la-http_ntlm.lo libcurl_la-http_proxy.lo \
- libcurl_la-http_aws_sigv4.lo libcurl_la-idn.lo \
- libcurl_la-if2ip.lo libcurl_la-imap.lo libcurl_la-inet_ntop.lo \
- libcurl_la-inet_pton.lo libcurl_la-krb5.lo libcurl_la-ldap.lo \
- libcurl_la-llist.lo libcurl_la-md4.lo libcurl_la-md5.lo \
- libcurl_la-memdebug.lo libcurl_la-mime.lo \
- libcurl_la-mprintf.lo libcurl_la-mqtt.lo libcurl_la-multi.lo \
- libcurl_la-netrc.lo libcurl_la-nonblock.lo \
+ libcurl_la-http1.lo libcurl_la-http2.lo \
+ libcurl_la-http_chunks.lo libcurl_la-http_digest.lo \
+ libcurl_la-http_negotiate.lo libcurl_la-http_ntlm.lo \
+ libcurl_la-http_proxy.lo libcurl_la-http_aws_sigv4.lo \
+ libcurl_la-idn.lo libcurl_la-if2ip.lo libcurl_la-imap.lo \
+ libcurl_la-inet_ntop.lo libcurl_la-inet_pton.lo \
+ libcurl_la-krb5.lo libcurl_la-ldap.lo libcurl_la-llist.lo \
+ libcurl_la-md4.lo libcurl_la-md5.lo libcurl_la-memdebug.lo \
+ libcurl_la-mime.lo libcurl_la-mprintf.lo libcurl_la-mqtt.lo \
+ libcurl_la-multi.lo libcurl_la-netrc.lo libcurl_la-nonblock.lo \
libcurl_la-noproxy.lo libcurl_la-openldap.lo \
libcurl_la-parsedate.lo libcurl_la-pingpong.lo \
libcurl_la-pop3.lo libcurl_la-progress.lo libcurl_la-psl.lo \
@@ -372,8 +375,10 @@ libcurl_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ libcurlu_la_LIBADD =
am__objects_11 = libcurlu_la-altsvc.lo libcurlu_la-amigaos.lo \
libcurlu_la-asyn-ares.lo libcurlu_la-asyn-thread.lo \
- libcurlu_la-base64.lo libcurlu_la-bufref.lo \
- libcurlu_la-c-hyper.lo libcurlu_la-cf-https-connect.lo \
+ libcurlu_la-base64.lo libcurlu_la-bufq.lo \
+ libcurlu_la-bufref.lo libcurlu_la-c-hyper.lo \
+ libcurlu_la-cf-h1-proxy.lo libcurlu_la-cf-h2-proxy.lo \
+ libcurlu_la-cf-haproxy.lo libcurlu_la-cf-https-connect.lo \
libcurlu_la-cf-socket.lo libcurlu_la-cfilters.lo \
libcurlu_la-conncache.lo libcurlu_la-connect.lo \
libcurlu_la-content_encoding.lo libcurlu_la-cookie.lo \
@@ -387,22 +392,22 @@ am__objects_11 = libcurlu_la-altsvc.lo libcurlu_la-amigaos.lo \ libcurlu_la-curl_rtmp.lo libcurlu_la-curl_sasl.lo \
libcurlu_la-curl_sspi.lo libcurlu_la-curl_threads.lo \
libcurlu_la-dict.lo libcurlu_la-doh.lo libcurlu_la-dynbuf.lo \
- libcurlu_la-easy.lo libcurlu_la-easygetopt.lo \
- libcurlu_la-easyoptions.lo libcurlu_la-escape.lo \
- libcurlu_la-file.lo libcurlu_la-fileinfo.lo \
- libcurlu_la-fopen.lo libcurlu_la-formdata.lo \
- libcurlu_la-ftp.lo libcurlu_la-ftplistparser.lo \
- libcurlu_la-getenv.lo libcurlu_la-getinfo.lo \
- libcurlu_la-gopher.lo libcurlu_la-h2h3.lo libcurlu_la-hash.lo \
- libcurlu_la-headers.lo libcurlu_la-hmac.lo \
+ libcurlu_la-dynhds.lo libcurlu_la-easy.lo \
+ libcurlu_la-easygetopt.lo libcurlu_la-easyoptions.lo \
+ libcurlu_la-escape.lo libcurlu_la-file.lo \
+ libcurlu_la-fileinfo.lo libcurlu_la-fopen.lo \
+ libcurlu_la-formdata.lo libcurlu_la-ftp.lo \
+ libcurlu_la-ftplistparser.lo libcurlu_la-getenv.lo \
+ libcurlu_la-getinfo.lo libcurlu_la-gopher.lo \
+ libcurlu_la-hash.lo libcurlu_la-headers.lo libcurlu_la-hmac.lo \
libcurlu_la-hostasyn.lo libcurlu_la-hostip.lo \
libcurlu_la-hostip4.lo libcurlu_la-hostip6.lo \
libcurlu_la-hostsyn.lo libcurlu_la-hsts.lo libcurlu_la-http.lo \
- libcurlu_la-http2.lo libcurlu_la-http_chunks.lo \
- libcurlu_la-http_digest.lo libcurlu_la-http_negotiate.lo \
- libcurlu_la-http_ntlm.lo libcurlu_la-http_proxy.lo \
- libcurlu_la-http_aws_sigv4.lo libcurlu_la-idn.lo \
- libcurlu_la-if2ip.lo libcurlu_la-imap.lo \
+ libcurlu_la-http1.lo libcurlu_la-http2.lo \
+ libcurlu_la-http_chunks.lo libcurlu_la-http_digest.lo \
+ libcurlu_la-http_negotiate.lo libcurlu_la-http_ntlm.lo \
+ libcurlu_la-http_proxy.lo libcurlu_la-http_aws_sigv4.lo \
+ libcurlu_la-idn.lo libcurlu_la-if2ip.lo libcurlu_la-imap.lo \
libcurlu_la-inet_ntop.lo libcurlu_la-inet_pton.lo \
libcurlu_la-krb5.lo libcurlu_la-ldap.lo libcurlu_la-llist.lo \
libcurlu_la-md4.lo libcurlu_la-md5.lo libcurlu_la-memdebug.lo \
@@ -478,8 +483,12 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurl_la-asyn-ares.Plo \
./$(DEPDIR)/libcurl_la-asyn-thread.Plo \
./$(DEPDIR)/libcurl_la-base64.Plo \
+ ./$(DEPDIR)/libcurl_la-bufq.Plo \
./$(DEPDIR)/libcurl_la-bufref.Plo \
./$(DEPDIR)/libcurl_la-c-hyper.Plo \
+ ./$(DEPDIR)/libcurl_la-cf-h1-proxy.Plo \
+ ./$(DEPDIR)/libcurl_la-cf-h2-proxy.Plo \
+ ./$(DEPDIR)/libcurl_la-cf-haproxy.Plo \
./$(DEPDIR)/libcurl_la-cf-https-connect.Plo \
./$(DEPDIR)/libcurl_la-cf-socket.Plo \
./$(DEPDIR)/libcurl_la-cfilters.Plo \
@@ -507,6 +516,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurl_la-curl_threads.Plo \
./$(DEPDIR)/libcurl_la-dict.Plo ./$(DEPDIR)/libcurl_la-doh.Plo \
./$(DEPDIR)/libcurl_la-dynbuf.Plo \
+ ./$(DEPDIR)/libcurl_la-dynhds.Plo \
./$(DEPDIR)/libcurl_la-easy.Plo \
./$(DEPDIR)/libcurl_la-easygetopt.Plo \
./$(DEPDIR)/libcurl_la-easyoptions.Plo \
@@ -520,7 +530,6 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurl_la-getenv.Plo \
./$(DEPDIR)/libcurl_la-getinfo.Plo \
./$(DEPDIR)/libcurl_la-gopher.Plo \
- ./$(DEPDIR)/libcurl_la-h2h3.Plo \
./$(DEPDIR)/libcurl_la-hash.Plo \
./$(DEPDIR)/libcurl_la-headers.Plo \
./$(DEPDIR)/libcurl_la-hmac.Plo \
@@ -531,6 +540,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurl_la-hostsyn.Plo \
./$(DEPDIR)/libcurl_la-hsts.Plo \
./$(DEPDIR)/libcurl_la-http.Plo \
+ ./$(DEPDIR)/libcurl_la-http1.Plo \
./$(DEPDIR)/libcurl_la-http2.Plo \
./$(DEPDIR)/libcurl_la-http_aws_sigv4.Plo \
./$(DEPDIR)/libcurl_la-http_chunks.Plo \
@@ -598,8 +608,12 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurlu_la-asyn-ares.Plo \
./$(DEPDIR)/libcurlu_la-asyn-thread.Plo \
./$(DEPDIR)/libcurlu_la-base64.Plo \
+ ./$(DEPDIR)/libcurlu_la-bufq.Plo \
./$(DEPDIR)/libcurlu_la-bufref.Plo \
./$(DEPDIR)/libcurlu_la-c-hyper.Plo \
+ ./$(DEPDIR)/libcurlu_la-cf-h1-proxy.Plo \
+ ./$(DEPDIR)/libcurlu_la-cf-h2-proxy.Plo \
+ ./$(DEPDIR)/libcurlu_la-cf-haproxy.Plo \
./$(DEPDIR)/libcurlu_la-cf-https-connect.Plo \
./$(DEPDIR)/libcurlu_la-cf-socket.Plo \
./$(DEPDIR)/libcurlu_la-cfilters.Plo \
@@ -628,6 +642,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurlu_la-dict.Plo \
./$(DEPDIR)/libcurlu_la-doh.Plo \
./$(DEPDIR)/libcurlu_la-dynbuf.Plo \
+ ./$(DEPDIR)/libcurlu_la-dynhds.Plo \
./$(DEPDIR)/libcurlu_la-easy.Plo \
./$(DEPDIR)/libcurlu_la-easygetopt.Plo \
./$(DEPDIR)/libcurlu_la-easyoptions.Plo \
@@ -641,7 +656,6 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurlu_la-getenv.Plo \
./$(DEPDIR)/libcurlu_la-getinfo.Plo \
./$(DEPDIR)/libcurlu_la-gopher.Plo \
- ./$(DEPDIR)/libcurlu_la-h2h3.Plo \
./$(DEPDIR)/libcurlu_la-hash.Plo \
./$(DEPDIR)/libcurlu_la-headers.Plo \
./$(DEPDIR)/libcurlu_la-hmac.Plo \
@@ -652,6 +666,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \ ./$(DEPDIR)/libcurlu_la-hostsyn.Plo \
./$(DEPDIR)/libcurlu_la-hsts.Plo \
./$(DEPDIR)/libcurlu_la-http.Plo \
+ ./$(DEPDIR)/libcurlu_la-http1.Plo \
./$(DEPDIR)/libcurlu_la-http2.Plo \
./$(DEPDIR)/libcurlu_la-http_aws_sigv4.Plo \
./$(DEPDIR)/libcurlu_la-http_chunks.Plo \
@@ -1201,8 +1216,12 @@ LIB_CFILES = \ asyn-ares.c \
asyn-thread.c \
base64.c \
+ bufq.c \
bufref.c \
c-hyper.c \
+ cf-h1-proxy.c \
+ cf-h2-proxy.c \
+ cf-haproxy.c \
cf-https-connect.c \
cf-socket.c \
cfilters.c \
@@ -1231,6 +1250,7 @@ LIB_CFILES = \ dict.c \
doh.c \
dynbuf.c \
+ dynhds.c \
easy.c \
easygetopt.c \
easyoptions.c \
@@ -1244,7 +1264,6 @@ LIB_CFILES = \ getenv.c \
getinfo.c \
gopher.c \
- h2h3.c \
hash.c \
headers.c \
hmac.c \
@@ -1255,6 +1274,7 @@ LIB_CFILES = \ hostsyn.c \
hsts.c \
http.c \
+ http1.c \
http2.c \
http_chunks.c \
http_digest.c \
@@ -1326,8 +1346,12 @@ LIB_HFILES = \ amigaos.h \
arpa_telnet.h \
asyn.h \
+ bufq.h \
bufref.h \
c-hyper.h \
+ cf-h1-proxy.h \
+ cf-h2-proxy.h \
+ cf-haproxy.h \
cf-https-connect.h \
cf-socket.h \
cfilters.h \
@@ -1369,6 +1393,7 @@ LIB_HFILES = \ dict.h \
doh.h \
dynbuf.h \
+ dynhds.h \
easy_lock.h \
easyif.h \
easyoptions.h \
@@ -1382,12 +1407,12 @@ LIB_HFILES = \ ftplistparser.h \
getinfo.h \
gopher.h \
- h2h3.h \
hash.h \
headers.h \
hostip.h \
hsts.h \
http.h \
+ http1.h \
http2.h \
http_chunks.h \
http_digest.h \
@@ -1772,8 +1797,12 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-asyn-ares.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-asyn-thread.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-base64.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-bufq.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-bufref.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-c-hyper.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-cf-h1-proxy.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-cf-h2-proxy.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-cf-haproxy.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-cf-https-connect.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-cf-socket.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-cfilters.Plo@am__quote@ # am--include-marker
@@ -1802,6 +1831,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-dict.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-doh.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-dynbuf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-dynhds.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-easy.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-easygetopt.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-easyoptions.Plo@am__quote@ # am--include-marker
@@ -1815,7 +1845,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-getenv.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-getinfo.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-gopher.Plo@am__quote@ # am--include-marker
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-h2h3.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-hash.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-headers.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-hmac.Plo@am__quote@ # am--include-marker
@@ -1826,6 +1855,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-hostsyn.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-hsts.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-http.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-http1.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-http2.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-http_aws_sigv4.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-http_chunks.Plo@am__quote@ # am--include-marker
@@ -1896,8 +1926,12 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-asyn-ares.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-asyn-thread.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-base64.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-bufq.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-bufref.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-c-hyper.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-cf-h1-proxy.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-cf-h2-proxy.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-cf-haproxy.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-cf-https-connect.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-cf-socket.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-cfilters.Plo@am__quote@ # am--include-marker
@@ -1926,6 +1960,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-dict.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-doh.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-dynbuf.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-dynhds.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-easy.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-easygetopt.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-easyoptions.Plo@am__quote@ # am--include-marker
@@ -1939,7 +1974,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-getenv.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-getinfo.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-gopher.Plo@am__quote@ # am--include-marker
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-h2h3.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-hash.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-headers.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-hmac.Plo@am__quote@ # am--include-marker
@@ -1950,6 +1984,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-hostsyn.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-hsts.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-http.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-http1.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-http2.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-http_aws_sigv4.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-http_chunks.Plo@am__quote@ # am--include-marker
@@ -2153,6 +2188,13 @@ libcurl_la-base64.lo: base64.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-base64.lo `test -f 'base64.c' || echo '$(srcdir)/'`base64.c
+libcurl_la-bufq.lo: bufq.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-bufq.lo -MD -MP -MF $(DEPDIR)/libcurl_la-bufq.Tpo -c -o libcurl_la-bufq.lo `test -f 'bufq.c' || echo '$(srcdir)/'`bufq.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-bufq.Tpo $(DEPDIR)/libcurl_la-bufq.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='bufq.c' object='libcurl_la-bufq.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-bufq.lo `test -f 'bufq.c' || echo '$(srcdir)/'`bufq.c
+
libcurl_la-bufref.lo: bufref.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-bufref.lo -MD -MP -MF $(DEPDIR)/libcurl_la-bufref.Tpo -c -o libcurl_la-bufref.lo `test -f 'bufref.c' || echo '$(srcdir)/'`bufref.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-bufref.Tpo $(DEPDIR)/libcurl_la-bufref.Plo
@@ -2167,6 +2209,27 @@ libcurl_la-c-hyper.lo: c-hyper.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-c-hyper.lo `test -f 'c-hyper.c' || echo '$(srcdir)/'`c-hyper.c
+libcurl_la-cf-h1-proxy.lo: cf-h1-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-cf-h1-proxy.lo -MD -MP -MF $(DEPDIR)/libcurl_la-cf-h1-proxy.Tpo -c -o libcurl_la-cf-h1-proxy.lo `test -f 'cf-h1-proxy.c' || echo '$(srcdir)/'`cf-h1-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-cf-h1-proxy.Tpo $(DEPDIR)/libcurl_la-cf-h1-proxy.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cf-h1-proxy.c' object='libcurl_la-cf-h1-proxy.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-cf-h1-proxy.lo `test -f 'cf-h1-proxy.c' || echo '$(srcdir)/'`cf-h1-proxy.c
+
+libcurl_la-cf-h2-proxy.lo: cf-h2-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-cf-h2-proxy.lo -MD -MP -MF $(DEPDIR)/libcurl_la-cf-h2-proxy.Tpo -c -o libcurl_la-cf-h2-proxy.lo `test -f 'cf-h2-proxy.c' || echo '$(srcdir)/'`cf-h2-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-cf-h2-proxy.Tpo $(DEPDIR)/libcurl_la-cf-h2-proxy.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cf-h2-proxy.c' object='libcurl_la-cf-h2-proxy.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-cf-h2-proxy.lo `test -f 'cf-h2-proxy.c' || echo '$(srcdir)/'`cf-h2-proxy.c
+
+libcurl_la-cf-haproxy.lo: cf-haproxy.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-cf-haproxy.lo -MD -MP -MF $(DEPDIR)/libcurl_la-cf-haproxy.Tpo -c -o libcurl_la-cf-haproxy.lo `test -f 'cf-haproxy.c' || echo '$(srcdir)/'`cf-haproxy.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-cf-haproxy.Tpo $(DEPDIR)/libcurl_la-cf-haproxy.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cf-haproxy.c' object='libcurl_la-cf-haproxy.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-cf-haproxy.lo `test -f 'cf-haproxy.c' || echo '$(srcdir)/'`cf-haproxy.c
+
libcurl_la-cf-https-connect.lo: cf-https-connect.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-cf-https-connect.lo -MD -MP -MF $(DEPDIR)/libcurl_la-cf-https-connect.Tpo -c -o libcurl_la-cf-https-connect.lo `test -f 'cf-https-connect.c' || echo '$(srcdir)/'`cf-https-connect.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-cf-https-connect.Tpo $(DEPDIR)/libcurl_la-cf-https-connect.Plo
@@ -2363,6 +2426,13 @@ libcurl_la-dynbuf.lo: dynbuf.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-dynbuf.lo `test -f 'dynbuf.c' || echo '$(srcdir)/'`dynbuf.c
+libcurl_la-dynhds.lo: dynhds.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-dynhds.lo -MD -MP -MF $(DEPDIR)/libcurl_la-dynhds.Tpo -c -o libcurl_la-dynhds.lo `test -f 'dynhds.c' || echo '$(srcdir)/'`dynhds.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-dynhds.Tpo $(DEPDIR)/libcurl_la-dynhds.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='dynhds.c' object='libcurl_la-dynhds.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-dynhds.lo `test -f 'dynhds.c' || echo '$(srcdir)/'`dynhds.c
+
libcurl_la-easy.lo: easy.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-easy.lo -MD -MP -MF $(DEPDIR)/libcurl_la-easy.Tpo -c -o libcurl_la-easy.lo `test -f 'easy.c' || echo '$(srcdir)/'`easy.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-easy.Tpo $(DEPDIR)/libcurl_la-easy.Plo
@@ -2454,13 +2524,6 @@ libcurl_la-gopher.lo: gopher.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-gopher.lo `test -f 'gopher.c' || echo '$(srcdir)/'`gopher.c
-libcurl_la-h2h3.lo: h2h3.c
-@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-h2h3.lo -MD -MP -MF $(DEPDIR)/libcurl_la-h2h3.Tpo -c -o libcurl_la-h2h3.lo `test -f 'h2h3.c' || echo '$(srcdir)/'`h2h3.c
-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-h2h3.Tpo $(DEPDIR)/libcurl_la-h2h3.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='h2h3.c' object='libcurl_la-h2h3.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-h2h3.lo `test -f 'h2h3.c' || echo '$(srcdir)/'`h2h3.c
-
libcurl_la-hash.lo: hash.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-hash.lo -MD -MP -MF $(DEPDIR)/libcurl_la-hash.Tpo -c -o libcurl_la-hash.lo `test -f 'hash.c' || echo '$(srcdir)/'`hash.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-hash.Tpo $(DEPDIR)/libcurl_la-hash.Plo
@@ -2531,6 +2594,13 @@ libcurl_la-http.lo: http.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-http.lo `test -f 'http.c' || echo '$(srcdir)/'`http.c
+libcurl_la-http1.lo: http1.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-http1.lo -MD -MP -MF $(DEPDIR)/libcurl_la-http1.Tpo -c -o libcurl_la-http1.lo `test -f 'http1.c' || echo '$(srcdir)/'`http1.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-http1.Tpo $(DEPDIR)/libcurl_la-http1.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='http1.c' object='libcurl_la-http1.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-http1.lo `test -f 'http1.c' || echo '$(srcdir)/'`http1.c
+
libcurl_la-http2.lo: http2.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-http2.lo -MD -MP -MF $(DEPDIR)/libcurl_la-http2.Tpo -c -o libcurl_la-http2.lo `test -f 'http2.c' || echo '$(srcdir)/'`http2.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-http2.Tpo $(DEPDIR)/libcurl_la-http2.Plo
@@ -3273,6 +3343,13 @@ libcurlu_la-base64.lo: base64.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-base64.lo `test -f 'base64.c' || echo '$(srcdir)/'`base64.c
+libcurlu_la-bufq.lo: bufq.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-bufq.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-bufq.Tpo -c -o libcurlu_la-bufq.lo `test -f 'bufq.c' || echo '$(srcdir)/'`bufq.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-bufq.Tpo $(DEPDIR)/libcurlu_la-bufq.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='bufq.c' object='libcurlu_la-bufq.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-bufq.lo `test -f 'bufq.c' || echo '$(srcdir)/'`bufq.c
+
libcurlu_la-bufref.lo: bufref.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-bufref.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-bufref.Tpo -c -o libcurlu_la-bufref.lo `test -f 'bufref.c' || echo '$(srcdir)/'`bufref.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-bufref.Tpo $(DEPDIR)/libcurlu_la-bufref.Plo
@@ -3287,6 +3364,27 @@ libcurlu_la-c-hyper.lo: c-hyper.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-c-hyper.lo `test -f 'c-hyper.c' || echo '$(srcdir)/'`c-hyper.c
+libcurlu_la-cf-h1-proxy.lo: cf-h1-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-cf-h1-proxy.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-cf-h1-proxy.Tpo -c -o libcurlu_la-cf-h1-proxy.lo `test -f 'cf-h1-proxy.c' || echo '$(srcdir)/'`cf-h1-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-cf-h1-proxy.Tpo $(DEPDIR)/libcurlu_la-cf-h1-proxy.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cf-h1-proxy.c' object='libcurlu_la-cf-h1-proxy.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-cf-h1-proxy.lo `test -f 'cf-h1-proxy.c' || echo '$(srcdir)/'`cf-h1-proxy.c
+
+libcurlu_la-cf-h2-proxy.lo: cf-h2-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-cf-h2-proxy.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-cf-h2-proxy.Tpo -c -o libcurlu_la-cf-h2-proxy.lo `test -f 'cf-h2-proxy.c' || echo '$(srcdir)/'`cf-h2-proxy.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-cf-h2-proxy.Tpo $(DEPDIR)/libcurlu_la-cf-h2-proxy.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cf-h2-proxy.c' object='libcurlu_la-cf-h2-proxy.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-cf-h2-proxy.lo `test -f 'cf-h2-proxy.c' || echo '$(srcdir)/'`cf-h2-proxy.c
+
+libcurlu_la-cf-haproxy.lo: cf-haproxy.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-cf-haproxy.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-cf-haproxy.Tpo -c -o libcurlu_la-cf-haproxy.lo `test -f 'cf-haproxy.c' || echo '$(srcdir)/'`cf-haproxy.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-cf-haproxy.Tpo $(DEPDIR)/libcurlu_la-cf-haproxy.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='cf-haproxy.c' object='libcurlu_la-cf-haproxy.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-cf-haproxy.lo `test -f 'cf-haproxy.c' || echo '$(srcdir)/'`cf-haproxy.c
+
libcurlu_la-cf-https-connect.lo: cf-https-connect.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-cf-https-connect.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-cf-https-connect.Tpo -c -o libcurlu_la-cf-https-connect.lo `test -f 'cf-https-connect.c' || echo '$(srcdir)/'`cf-https-connect.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-cf-https-connect.Tpo $(DEPDIR)/libcurlu_la-cf-https-connect.Plo
@@ -3483,6 +3581,13 @@ libcurlu_la-dynbuf.lo: dynbuf.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-dynbuf.lo `test -f 'dynbuf.c' || echo '$(srcdir)/'`dynbuf.c
+libcurlu_la-dynhds.lo: dynhds.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-dynhds.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-dynhds.Tpo -c -o libcurlu_la-dynhds.lo `test -f 'dynhds.c' || echo '$(srcdir)/'`dynhds.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-dynhds.Tpo $(DEPDIR)/libcurlu_la-dynhds.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='dynhds.c' object='libcurlu_la-dynhds.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-dynhds.lo `test -f 'dynhds.c' || echo '$(srcdir)/'`dynhds.c
+
libcurlu_la-easy.lo: easy.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-easy.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-easy.Tpo -c -o libcurlu_la-easy.lo `test -f 'easy.c' || echo '$(srcdir)/'`easy.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-easy.Tpo $(DEPDIR)/libcurlu_la-easy.Plo
@@ -3574,13 +3679,6 @@ libcurlu_la-gopher.lo: gopher.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-gopher.lo `test -f 'gopher.c' || echo '$(srcdir)/'`gopher.c
-libcurlu_la-h2h3.lo: h2h3.c
-@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-h2h3.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-h2h3.Tpo -c -o libcurlu_la-h2h3.lo `test -f 'h2h3.c' || echo '$(srcdir)/'`h2h3.c
-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-h2h3.Tpo $(DEPDIR)/libcurlu_la-h2h3.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='h2h3.c' object='libcurlu_la-h2h3.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-h2h3.lo `test -f 'h2h3.c' || echo '$(srcdir)/'`h2h3.c
-
libcurlu_la-hash.lo: hash.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-hash.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-hash.Tpo -c -o libcurlu_la-hash.lo `test -f 'hash.c' || echo '$(srcdir)/'`hash.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-hash.Tpo $(DEPDIR)/libcurlu_la-hash.Plo
@@ -3651,6 +3749,13 @@ libcurlu_la-http.lo: http.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-http.lo `test -f 'http.c' || echo '$(srcdir)/'`http.c
+libcurlu_la-http1.lo: http1.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-http1.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-http1.Tpo -c -o libcurlu_la-http1.lo `test -f 'http1.c' || echo '$(srcdir)/'`http1.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-http1.Tpo $(DEPDIR)/libcurlu_la-http1.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='http1.c' object='libcurlu_la-http1.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-http1.lo `test -f 'http1.c' || echo '$(srcdir)/'`http1.c
+
libcurlu_la-http2.lo: http2.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-http2.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-http2.Tpo -c -o libcurlu_la-http2.lo `test -f 'http2.c' || echo '$(srcdir)/'`http2.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-http2.Tpo $(DEPDIR)/libcurlu_la-http2.Plo
@@ -4509,8 +4614,12 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurl_la-asyn-ares.Plo
-rm -f ./$(DEPDIR)/libcurl_la-asyn-thread.Plo
-rm -f ./$(DEPDIR)/libcurl_la-base64.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-bufq.Plo
-rm -f ./$(DEPDIR)/libcurl_la-bufref.Plo
-rm -f ./$(DEPDIR)/libcurl_la-c-hyper.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cf-h1-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cf-h2-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cf-haproxy.Plo
-rm -f ./$(DEPDIR)/libcurl_la-cf-https-connect.Plo
-rm -f ./$(DEPDIR)/libcurl_la-cf-socket.Plo
-rm -f ./$(DEPDIR)/libcurl_la-cfilters.Plo
@@ -4539,6 +4648,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurl_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurl_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurl_la-dynbuf.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-dynhds.Plo
-rm -f ./$(DEPDIR)/libcurl_la-easy.Plo
-rm -f ./$(DEPDIR)/libcurl_la-easygetopt.Plo
-rm -f ./$(DEPDIR)/libcurl_la-easyoptions.Plo
@@ -4552,7 +4662,6 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurl_la-getenv.Plo
-rm -f ./$(DEPDIR)/libcurl_la-getinfo.Plo
-rm -f ./$(DEPDIR)/libcurl_la-gopher.Plo
- -rm -f ./$(DEPDIR)/libcurl_la-h2h3.Plo
-rm -f ./$(DEPDIR)/libcurl_la-hash.Plo
-rm -f ./$(DEPDIR)/libcurl_la-headers.Plo
-rm -f ./$(DEPDIR)/libcurl_la-hmac.Plo
@@ -4563,6 +4672,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurl_la-hostsyn.Plo
-rm -f ./$(DEPDIR)/libcurl_la-hsts.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-http1.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http2.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http_aws_sigv4.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http_chunks.Plo
@@ -4633,8 +4743,12 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurlu_la-asyn-ares.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-asyn-thread.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-base64.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-bufq.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-bufref.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-c-hyper.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cf-h1-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cf-h2-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cf-haproxy.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-cf-https-connect.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-cf-socket.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-cfilters.Plo
@@ -4663,6 +4777,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurlu_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-dynbuf.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-dynhds.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-easy.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-easygetopt.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-easyoptions.Plo
@@ -4676,7 +4791,6 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurlu_la-getenv.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-getinfo.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-gopher.Plo
- -rm -f ./$(DEPDIR)/libcurlu_la-h2h3.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-hash.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-headers.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-hmac.Plo
@@ -4687,6 +4801,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libcurlu_la-hostsyn.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-hsts.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-http1.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http2.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http_aws_sigv4.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http_chunks.Plo
@@ -4874,8 +4989,12 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurl_la-asyn-ares.Plo
-rm -f ./$(DEPDIR)/libcurl_la-asyn-thread.Plo
-rm -f ./$(DEPDIR)/libcurl_la-base64.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-bufq.Plo
-rm -f ./$(DEPDIR)/libcurl_la-bufref.Plo
-rm -f ./$(DEPDIR)/libcurl_la-c-hyper.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cf-h1-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cf-h2-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-cf-haproxy.Plo
-rm -f ./$(DEPDIR)/libcurl_la-cf-https-connect.Plo
-rm -f ./$(DEPDIR)/libcurl_la-cf-socket.Plo
-rm -f ./$(DEPDIR)/libcurl_la-cfilters.Plo
@@ -4904,6 +5023,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurl_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurl_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurl_la-dynbuf.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-dynhds.Plo
-rm -f ./$(DEPDIR)/libcurl_la-easy.Plo
-rm -f ./$(DEPDIR)/libcurl_la-easygetopt.Plo
-rm -f ./$(DEPDIR)/libcurl_la-easyoptions.Plo
@@ -4917,7 +5037,6 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurl_la-getenv.Plo
-rm -f ./$(DEPDIR)/libcurl_la-getinfo.Plo
-rm -f ./$(DEPDIR)/libcurl_la-gopher.Plo
- -rm -f ./$(DEPDIR)/libcurl_la-h2h3.Plo
-rm -f ./$(DEPDIR)/libcurl_la-hash.Plo
-rm -f ./$(DEPDIR)/libcurl_la-headers.Plo
-rm -f ./$(DEPDIR)/libcurl_la-hmac.Plo
@@ -4928,6 +5047,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurl_la-hostsyn.Plo
-rm -f ./$(DEPDIR)/libcurl_la-hsts.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-http1.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http2.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http_aws_sigv4.Plo
-rm -f ./$(DEPDIR)/libcurl_la-http_chunks.Plo
@@ -4998,8 +5118,12 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurlu_la-asyn-ares.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-asyn-thread.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-base64.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-bufq.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-bufref.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-c-hyper.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cf-h1-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cf-h2-proxy.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-cf-haproxy.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-cf-https-connect.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-cf-socket.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-cfilters.Plo
@@ -5028,6 +5152,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurlu_la-dict.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-doh.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-dynbuf.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-dynhds.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-easy.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-easygetopt.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-easyoptions.Plo
@@ -5041,7 +5166,6 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurlu_la-getenv.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-getinfo.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-gopher.Plo
- -rm -f ./$(DEPDIR)/libcurlu_la-h2h3.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-hash.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-headers.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-hmac.Plo
@@ -5052,6 +5176,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcurlu_la-hostsyn.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-hsts.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-http1.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http2.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http_aws_sigv4.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-http_chunks.Plo
diff --git a/libs/libcurl/src/Makefile.inc b/libs/libcurl/src/Makefile.inc
index 1a24ff461d..c0f3c662c9 100644
--- a/libs/libcurl/src/Makefile.inc
+++ b/libs/libcurl/src/Makefile.inc
@@ -105,8 +105,12 @@ LIB_CFILES = \
asyn-ares.c \
asyn-thread.c \
base64.c \
+ bufq.c \
bufref.c \
c-hyper.c \
+ cf-h1-proxy.c \
+ cf-h2-proxy.c \
+ cf-haproxy.c \
cf-https-connect.c \
cf-socket.c \
cfilters.c \
@@ -135,6 +139,7 @@ LIB_CFILES = \ dict.c \
doh.c \
dynbuf.c \
+ dynhds.c \
easy.c \
easygetopt.c \
easyoptions.c \
@@ -148,7 +153,6 @@ LIB_CFILES = \ getenv.c \
getinfo.c \
gopher.c \
- h2h3.c \
hash.c \
headers.c \
hmac.c \
@@ -159,6 +163,7 @@ LIB_CFILES = \ hostsyn.c \
hsts.c \
http.c \
+ http1.c \
http2.c \
http_chunks.c \
http_digest.c \
@@ -230,8 +235,12 @@ LIB_HFILES = \ amigaos.h \
arpa_telnet.h \
asyn.h \
+ bufq.h \
bufref.h \
c-hyper.h \
+ cf-h1-proxy.h \
+ cf-h2-proxy.h \
+ cf-haproxy.h \
cf-https-connect.h \
cf-socket.h \
cfilters.h \
@@ -273,6 +282,7 @@ LIB_HFILES = \ dict.h \
doh.h \
dynbuf.h \
+ dynhds.h \
easy_lock.h \
easyif.h \
easyoptions.h \
@@ -286,12 +296,12 @@ LIB_HFILES = \ ftplistparser.h \
getinfo.h \
gopher.h \
- h2h3.h \
hash.h \
headers.h \
hostip.h \
hsts.h \
http.h \
+ http1.h \
http2.h \
http_chunks.h \
http_digest.h \
diff --git a/libs/libcurl/src/altsvc.c b/libs/libcurl/src/altsvc.c
index 22c445b53e..ca9f670b29 100644
--- a/libs/libcurl/src/altsvc.c
+++ b/libs/libcurl/src/altsvc.c
@@ -117,7 +117,7 @@ static struct altsvc *altsvc_createid(const char *srchost,
as->dst.port = curlx_ultous(dstport);
return as;
- error:
+error:
altsvc_free(as);
return NULL;
}
@@ -217,7 +217,7 @@ static CURLcode altsvc_load(struct altsvcinfo *asi, const char *file) }
return result;
- fail:
+fail:
Curl_safefree(asi->filename);
free(line);
fclose(fp);
diff --git a/libs/libcurl/src/asyn-thread.c b/libs/libcurl/src/asyn-thread.c
index cfd6c9f171..8a2628f6b7 100644
--- a/libs/libcurl/src/asyn-thread.c
+++ b/libs/libcurl/src/asyn-thread.c
@@ -251,7 +251,7 @@ int init_thread_sync_data(struct thread_data *td,
return 1;
- err_exit:
+err_exit:
#ifndef CURL_DISABLE_SOCKETPAIR
if(tsd->sock_pair[0] != CURL_SOCKET_BAD) {
sclose(tsd->sock_pair[0]);
@@ -469,10 +469,10 @@ static bool init_resolve_thread(struct Curl_easy *data, return TRUE;
- err_exit:
+err_exit:
destroy_async_data(asp);
- errno_exit:
+errno_exit:
errno = err;
return FALSE;
}
diff --git a/libs/libcurl/src/base64.c b/libs/libcurl/src/base64.c
index a11811f891..5d52992485 100644
--- a/libs/libcurl/src/base64.c
+++ b/libs/libcurl/src/base64.c
@@ -178,7 +178,7 @@ CURLcode Curl_base64_decode(const char *src,
*outlen = rawlen;
return CURLE_OK;
- bad:
+bad:
free(newstr);
return CURLE_BAD_CONTENT_ENCODING;
}
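The altsvc.c, asyn-thread.c and base64.c hunks above are whitespace-only: each moves an error-path goto label (error:, fail:, bad:, err_exit:, errno_exit:) from a one-space indent to column 0 without changing the cleanup logic. For reference, a minimal hypothetical sketch of that idiom with the label at column 0, as in the updated sources (the function, file name and sizes below are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* hypothetical helper: acquire resources, jump to one cleanup label on
   failure, release everything in a single place */
static int load_copy(const char *path, char **out, size_t len)
{
  char *buf = NULL;
  FILE *fp = fopen(path, "rb");
  if(!fp)
    goto fail;
  buf = malloc(len);
  if(!buf)
    goto fail;
  if(fread(buf, 1, len, fp) != len)
    goto fail;
  fclose(fp);
  *out = buf;
  return 0;

fail:          /* label at column 0, matching the reindented curl sources */
  free(buf);   /* free(NULL) is a harmless no-op */
  if(fp)
    fclose(fp);
  return -1;
}

int main(void)
{
  char *data;
  if(!load_copy("example.bin", &data, 16)) {
    puts("loaded");
    free(data);
  }
  return 0;
}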
diff --git a/libs/libcurl/src/bufq.c b/libs/libcurl/src/bufq.c
new file mode 100644
index 0000000000..de86b7136e
--- /dev/null
+++ b/libs/libcurl/src/bufq.c
@@ -0,0 +1,659 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+#include "bufq.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+static bool chunk_is_empty(const struct buf_chunk *chunk)
+{
+ return chunk->r_offset >= chunk->w_offset;
+}
+
+static bool chunk_is_full(const struct buf_chunk *chunk)
+{
+ return chunk->w_offset >= chunk->dlen;
+}
+
+static size_t chunk_len(const struct buf_chunk *chunk)
+{
+ return chunk->w_offset - chunk->r_offset;
+}
+
+static size_t chunk_space(const struct buf_chunk *chunk)
+{
+ return chunk->dlen - chunk->w_offset;
+}
+
+static void chunk_reset(struct buf_chunk *chunk)
+{
+ chunk->next = NULL;
+ chunk->r_offset = chunk->w_offset = 0;
+}
+
+static size_t chunk_append(struct buf_chunk *chunk,
+ const unsigned char *buf, size_t len)
+{
+ unsigned char *p = &chunk->x.data[chunk->w_offset];
+ size_t n = chunk->dlen - chunk->w_offset;
+ DEBUGASSERT(chunk->dlen >= chunk->w_offset);
+ if(n) {
+ n = CURLMIN(n, len);
+ memcpy(p, buf, n);
+ chunk->w_offset += n;
+ }
+ return n;
+}
+
+static size_t chunk_read(struct buf_chunk *chunk,
+ unsigned char *buf, size_t len)
+{
+ unsigned char *p = &chunk->x.data[chunk->r_offset];
+ size_t n = chunk->w_offset - chunk->r_offset;
+ DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
+ if(!n) {
+ return 0;
+ }
+ else if(n <= len) {
+ memcpy(buf, p, n);
+ chunk->r_offset = chunk->w_offset = 0;
+ return n;
+ }
+ else {
+ memcpy(buf, p, len);
+ chunk->r_offset += len;
+ return len;
+ }
+}
+
+static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
+ Curl_bufq_reader *reader,
+ void *reader_ctx, CURLcode *err)
+{
+ unsigned char *p = &chunk->x.data[chunk->w_offset];
+ size_t n = chunk->dlen - chunk->w_offset; /* free amount */
+ ssize_t nread;
+
+ DEBUGASSERT(chunk->dlen >= chunk->w_offset);
+ if(!n) {
+ *err = CURLE_AGAIN;
+ return -1;
+ }
+ if(max_len && n > max_len)
+ n = max_len;
+ nread = reader(reader_ctx, p, n, err);
+ if(nread > 0) {
+ DEBUGASSERT((size_t)nread <= n);
+ chunk->w_offset += nread;
+ }
+ return nread;
+}
+
+static void chunk_peek(const struct buf_chunk *chunk,
+ const unsigned char **pbuf, size_t *plen)
+{
+ DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
+ *pbuf = &chunk->x.data[chunk->r_offset];
+ *plen = chunk->w_offset - chunk->r_offset;
+}
+
+static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
+ const unsigned char **pbuf, size_t *plen)
+{
+ offset += chunk->r_offset;
+ DEBUGASSERT(chunk->w_offset >= offset);
+ *pbuf = &chunk->x.data[offset];
+ *plen = chunk->w_offset - offset;
+}
+
+static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
+{
+ size_t n = chunk->w_offset - chunk->r_offset;
+ DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
+ if(n) {
+ n = CURLMIN(n, amount);
+ chunk->r_offset += n;
+ if(chunk->r_offset == chunk->w_offset)
+ chunk->r_offset = chunk->w_offset = 0;
+ }
+ return n;
+}
+
+static void chunk_shift(struct buf_chunk *chunk)
+{
+ if(chunk->r_offset) {
+ if(!chunk_is_empty(chunk)) {
+ size_t n = chunk->w_offset - chunk->r_offset;
+ memmove(chunk->x.data, chunk->x.data + chunk->r_offset, n);
+ chunk->w_offset -= chunk->r_offset;
+ chunk->r_offset = 0;
+ }
+ else {
+ chunk->r_offset = chunk->w_offset = 0;
+ }
+ }
+}
+
+static void chunk_list_free(struct buf_chunk **anchor)
+{
+ struct buf_chunk *chunk;
+ while(*anchor) {
+ chunk = *anchor;
+ *anchor = chunk->next;
+ free(chunk);
+ }
+}
+
+
+
+void Curl_bufcp_init(struct bufc_pool *pool,
+ size_t chunk_size, size_t spare_max)
+{
+ DEBUGASSERT(chunk_size > 0);
+ DEBUGASSERT(spare_max > 0);
+ memset(pool, 0, sizeof(*pool));
+ pool->chunk_size = chunk_size;
+ pool->spare_max = spare_max;
+}
+
+static CURLcode bufcp_take(struct bufc_pool *pool,
+ struct buf_chunk **pchunk)
+{
+ struct buf_chunk *chunk = NULL;
+
+ if(pool->spare) {
+ chunk = pool->spare;
+ pool->spare = chunk->next;
+ --pool->spare_count;
+ chunk_reset(chunk);
+ *pchunk = chunk;
+ return CURLE_OK;
+ }
+
+ chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
+ if(!chunk) {
+ *pchunk = NULL;
+ return CURLE_OUT_OF_MEMORY;
+ }
+ chunk->dlen = pool->chunk_size;
+ *pchunk = chunk;
+ return CURLE_OK;
+}
+
+static void bufcp_put(struct bufc_pool *pool,
+ struct buf_chunk *chunk)
+{
+ if(pool->spare_count >= pool->spare_max) {
+ free(chunk);
+ }
+ else {
+ chunk_reset(chunk);
+ chunk->next = pool->spare;
+ pool->spare = chunk;
+ ++pool->spare_count;
+ }
+}
+
+void Curl_bufcp_free(struct bufc_pool *pool)
+{
+ chunk_list_free(&pool->spare);
+ pool->spare_count = 0;
+}
+
+static void bufq_init(struct bufq *q, struct bufc_pool *pool,
+ size_t chunk_size, size_t max_chunks, int opts)
+{
+ DEBUGASSERT(chunk_size > 0);
+ DEBUGASSERT(max_chunks > 0);
+ memset(q, 0, sizeof(*q));
+ q->chunk_size = chunk_size;
+ q->max_chunks = max_chunks;
+ q->pool = pool;
+ q->opts = opts;
+}
+
+void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
+ int opts)
+{
+ bufq_init(q, NULL, chunk_size, max_chunks, opts);
+}
+
+void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
+{
+ bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
+}
+
+void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
+ size_t max_chunks, int opts)
+{
+ bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
+}
+
+void Curl_bufq_free(struct bufq *q)
+{
+ chunk_list_free(&q->head);
+ chunk_list_free(&q->spare);
+ q->tail = NULL;
+ q->chunk_count = 0;
+}
+
+void Curl_bufq_reset(struct bufq *q)
+{
+ struct buf_chunk *chunk;
+ while(q->head) {
+ chunk = q->head;
+ q->head = chunk->next;
+ chunk->next = q->spare;
+ q->spare = chunk;
+ }
+ q->tail = NULL;
+}
+
+size_t Curl_bufq_len(const struct bufq *q)
+{
+ const struct buf_chunk *chunk = q->head;
+ size_t len = 0;
+ while(chunk) {
+ len += chunk_len(chunk);
+ chunk = chunk->next;
+ }
+ return len;
+}
+
+size_t Curl_bufq_space(const struct bufq *q)
+{
+ size_t space = 0;
+ if(q->tail)
+ space += chunk_space(q->tail);
+ if(q->spare) {
+ struct buf_chunk *chunk = q->spare;
+ while(chunk) {
+ space += chunk->dlen;
+ chunk = chunk->next;
+ }
+ }
+ if(q->chunk_count < q->max_chunks) {
+ space += (q->max_chunks - q->chunk_count) * q->chunk_size;
+ }
+ return space;
+}
+
+bool Curl_bufq_is_empty(const struct bufq *q)
+{
+ return !q->head || chunk_is_empty(q->head);
+}
+
+bool Curl_bufq_is_full(const struct bufq *q)
+{
+ if(!q->tail || q->spare)
+ return FALSE;
+ if(q->chunk_count < q->max_chunks)
+ return FALSE;
+ if(q->chunk_count > q->max_chunks)
+ return TRUE;
+ /* we have no spares and cannot make more, is the tail full? */
+ return chunk_is_full(q->tail);
+}
+
+static struct buf_chunk *get_spare(struct bufq *q)
+{
+ struct buf_chunk *chunk = NULL;
+
+ if(q->spare) {
+ chunk = q->spare;
+ q->spare = chunk->next;
+ chunk_reset(chunk);
+ return chunk;
+ }
+
+ if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
+ return NULL;
+
+ if(q->pool) {
+ if(bufcp_take(q->pool, &chunk))
+ return NULL;
+ ++q->chunk_count;
+ return chunk;
+ }
+ else {
+ chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
+ if(!chunk)
+ return NULL;
+ chunk->dlen = q->chunk_size;
+ ++q->chunk_count;
+ return chunk;
+ }
+}
+
+static void prune_head(struct bufq *q)
+{
+ struct buf_chunk *chunk;
+
+ while(q->head && chunk_is_empty(q->head)) {
+ chunk = q->head;
+ q->head = chunk->next;
+ if(q->tail == chunk)
+ q->tail = q->head;
+ if(q->pool) {
+ bufcp_put(q->pool, chunk);
+ --q->chunk_count;
+ }
+ else if((q->chunk_count > q->max_chunks) ||
+ (q->opts & BUFQ_OPT_NO_SPARES)) {
+ /* SOFT_LIMIT allowed us more than max. Free spares until
+ * we are at max again, or free them if we are configured
+ * not to use spares. */
+ free(chunk);
+ --q->chunk_count;
+ }
+ else {
+ chunk->next = q->spare;
+ q->spare = chunk;
+ }
+ }
+}
+
+static struct buf_chunk *get_non_full_tail(struct bufq *q)
+{
+ struct buf_chunk *chunk;
+
+ if(q->tail && !chunk_is_full(q->tail))
+ return q->tail;
+ chunk = get_spare(q);
+ if(chunk) {
+ /* new tail, and possibly new head */
+ if(q->tail) {
+ q->tail->next = chunk;
+ q->tail = chunk;
+ }
+ else {
+ DEBUGASSERT(!q->head);
+ q->head = q->tail = chunk;
+ }
+ }
+ return chunk;
+}
+
+ssize_t Curl_bufq_write(struct bufq *q,
+ const unsigned char *buf, size_t len,
+ CURLcode *err)
+{
+ struct buf_chunk *tail;
+ ssize_t nwritten = 0;
+ size_t n;
+
+ DEBUGASSERT(q->max_chunks > 0);
+ while(len) {
+ tail = get_non_full_tail(q);
+ if(!tail) {
+ if(q->chunk_count < q->max_chunks) {
+ *err = CURLE_OUT_OF_MEMORY;
+ return -1;
+ }
+ break;
+ }
+ n = chunk_append(tail, buf, len);
+ DEBUGASSERT(n);
+ nwritten += n;
+ buf += n;
+ len -= n;
+ }
+ if(nwritten == 0 && len) {
+ *err = CURLE_AGAIN;
+ return -1;
+ }
+ *err = CURLE_OK;
+ return nwritten;
+}
+
+ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
+ CURLcode *err)
+{
+ ssize_t nread = 0;
+ size_t n;
+
+ *err = CURLE_OK;
+ while(len && q->head) {
+ n = chunk_read(q->head, buf, len);
+ if(n) {
+ nread += n;
+ buf += n;
+ len -= n;
+ }
+ prune_head(q);
+ }
+ if(nread == 0) {
+ *err = CURLE_AGAIN;
+ return -1;
+ }
+ return nread;
+}
+
+bool Curl_bufq_peek(struct bufq *q,
+ const unsigned char **pbuf, size_t *plen)
+{
+ if(q->head && chunk_is_empty(q->head)) {
+ prune_head(q);
+ }
+ if(q->head && !chunk_is_empty(q->head)) {
+ chunk_peek(q->head, pbuf, plen);
+ return TRUE;
+ }
+ *pbuf = NULL;
+ *plen = 0;
+ return FALSE;
+}
+
+bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
+ const unsigned char **pbuf, size_t *plen)
+{
+ struct buf_chunk *c = q->head;
+ size_t clen;
+
+ while(c) {
+ clen = chunk_len(c);
+ if(!clen)
+ break;
+ if(offset >= clen) {
+ offset -= clen;
+ c = c->next;
+ continue;
+ }
+ chunk_peek_at(c, offset, pbuf, plen);
+ return TRUE;
+ }
+ *pbuf = NULL;
+ *plen = 0;
+ return FALSE;
+}
+
+void Curl_bufq_skip(struct bufq *q, size_t amount)
+{
+ size_t n;
+
+ while(amount && q->head) {
+ n = chunk_skip(q->head, amount);
+ amount -= n;
+ prune_head(q);
+ }
+}
+
+void Curl_bufq_skip_and_shift(struct bufq *q, size_t amount)
+{
+ Curl_bufq_skip(q, amount);
+ if(q->tail)
+ chunk_shift(q->tail);
+}
+
+ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
+ void *writer_ctx, CURLcode *err)
+{
+ const unsigned char *buf;
+ size_t blen;
+ ssize_t nwritten = 0;
+
+ while(Curl_bufq_peek(q, &buf, &blen)) {
+ ssize_t chunk_written;
+
+ chunk_written = writer(writer_ctx, buf, blen, err);
+ if(chunk_written < 0) {
+ if(!nwritten || *err != CURLE_AGAIN) {
+ /* blocked on first write or real error, fail */
+ nwritten = -1;
+ }
+ break;
+ }
+ Curl_bufq_skip(q, (size_t)chunk_written);
+ nwritten += chunk_written;
+ }
+ return nwritten;
+}
+
+ssize_t Curl_bufq_write_pass(struct bufq *q,
+ const unsigned char *buf, size_t len,
+ Curl_bufq_writer *writer, void *writer_ctx,
+ CURLcode *err)
+{
+ ssize_t nwritten = 0, n;
+
+ *err = CURLE_OK;
+ while(len) {
+ if(Curl_bufq_is_full(q)) {
+ /* try to make room in case we are full */
+ n = Curl_bufq_pass(q, writer, writer_ctx, err);
+ if(n < 0) {
+ if(*err != CURLE_AGAIN) {
+ /* real error, fail */
+ return -1;
+ }
+ /* would block */
+ }
+ }
+
+ /* Add whatever is remaining now to bufq */
+ n = Curl_bufq_write(q, buf, len, err);
+ if(n < 0) {
+ if(*err != CURLE_AGAIN) {
+ /* real error, fail */
+ return -1;
+ }
+ /* no room in bufq, bail out */
+ goto out;
+ }
+ /* Maybe only part of `data` has been added, continue to loop */
+ buf += (size_t)n;
+ len -= (size_t)n;
+ nwritten += (size_t)n;
+ }
+
+out:
+ return nwritten;
+}
+
+ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
+ Curl_bufq_reader *reader, void *reader_ctx,
+ CURLcode *err)
+{
+ struct buf_chunk *tail = NULL;
+ ssize_t nread;
+
+ *err = CURLE_AGAIN;
+ tail = get_non_full_tail(q);
+ if(!tail) {
+ if(q->chunk_count < q->max_chunks) {
+ *err = CURLE_OUT_OF_MEMORY;
+ return -1;
+ }
+ /* full, blocked */
+ *err = CURLE_AGAIN;
+ return -1;
+ }
+
+ nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
+ if(nread < 0) {
+ return -1;
+ }
+ else if(nread == 0) {
+ /* eof */
+ *err = CURLE_OK;
+ }
+ return nread;
+}
+
+/**
+ * Read up to `max_len` bytes and append them to the end of the buffer
+ * queue. If `max_len` is 0, no limit is imposed and the call behaves
+ * exactly like `Curl_bufq_slurp()`.
+ * Returns the total amount of data read (may be 0) or -1 on other
+ * reader errors.
+ * Note that even in the case of -1, chunks may have been read and
+ * the buffer queue may have a different length than before.
+ */
+static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
+ Curl_bufq_reader *reader, void *reader_ctx,
+ CURLcode *err)
+{
+ ssize_t nread = 0, n;
+
+ *err = CURLE_AGAIN;
+ while(1) {
+
+ n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
+ if(n < 0) {
+ if(!nread || *err != CURLE_AGAIN) {
+ /* blocked on first read or real error, fail */
+ nread = -1;
+ }
+ else
+ *err = CURLE_OK;
+ break;
+ }
+ else if(n == 0) {
+ /* eof */
+ *err = CURLE_OK;
+ break;
+ }
+ nread += (size_t)n;
+ if(max_len) {
+ DEBUGASSERT((size_t)n <= max_len);
+ max_len -= (size_t)n;
+ if(!max_len)
+ break;
+ }
+ /* give up slurping when we get less bytes than we asked for */
+ if(q->tail && !chunk_is_full(q->tail))
+ break;
+ }
+ return nread;
+}
+
+ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
+ void *reader_ctx, CURLcode *err)
+{
+ return bufq_slurpn(q, 0, reader, reader_ctx, err);
+}
diff --git a/libs/libcurl/src/bufq.h b/libs/libcurl/src/bufq.h new file mode 100644 index 0000000000..8a6ea2b52f --- /dev/null +++ b/libs/libcurl/src/bufq.h @@ -0,0 +1,271 @@ +#ifndef HEADER_CURL_BUFQ_H
+#define HEADER_CURL_BUFQ_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+#include "curl_setup.h"
+
+#include <curl/curl.h>
+
+/**
+ * A chunk of bytes for reading and writing.
+ * The size is fixed at creation, with read and write offsets
+ * marking where the unread content is.
+ */
+struct buf_chunk {
+ struct buf_chunk *next; /* to keep it in a list */
+ size_t dlen; /* the amount of allocated x.data[] */
+ size_t r_offset; /* first unread bytes */
+ size_t w_offset; /* one after last written byte */
+ union {
+ unsigned char data[1]; /* the buffer for `dlen` bytes */
+ void *dummy; /* alignment */
+ } x;
+};
+
+/**
+ * A pool for providing/keeping a number of chunks of the same size
+ *
+ * The same pool can be shared by many `bufq` instances. However, a pool
+ * is not thread safe. All bufqs using it are supposed to operate in the
+ * same thread.
+ */
+struct bufc_pool {
+ struct buf_chunk *spare; /* list of available spare chunks */
+ size_t chunk_size; /* the size of chunks in this pool */
+ size_t spare_count; /* current number of spare chunks in list */
+ size_t spare_max; /* max number of spares to keep */
+};
+
+void Curl_bufcp_init(struct bufc_pool *pool,
+ size_t chunk_size, size_t spare_max);
+
+void Curl_bufcp_free(struct bufc_pool *pool);
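
A minimal sketch of sharing one pool between several bufq instances via
`Curl_bufq_initp()`; the variable names and sizes below are illustrative
assumptions, not taken from the patch:

    struct bufc_pool pool;
    struct bufq inq, outq;

    Curl_bufcp_init(&pool, 1024, 4);   /* 1 KB chunks, keep up to 4 spares */
    Curl_bufq_initp(&inq, &pool, 8, BUFQ_OPT_NONE);
    Curl_bufq_initp(&outq, &pool, 8, BUFQ_OPT_NONE);
    /* ... use inq and outq from the same thread ... */
    Curl_bufq_free(&inq);
    Curl_bufq_free(&outq);
    Curl_bufcp_free(&pool);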
+
+/**
+ * A queue of byte chunks for reading and writing.
+ * Reading is done from `head`, writing is done to `tail`.
+ *
+ * A `bufq` can be empty, full or neither. Its `len` is the number
+ * of bytes that can be read. For an empty bufq, `len` will be 0.
+ *
+ * By default, a bufq can hold up to `max_chunks * chunk_size` number
+ * of bytes. When `max_chunks` are used (in the `head` list) and the
+ * `tail` chunk is full, the bufq will report that it is full.
+ *
+ * On a full bufq, `len` may be less than the maximum number of bytes,
+ * e.g. when the head chunk is partially read. `len` may also become
+ * larger than the max when option `BUFQ_OPT_SOFT_LIMIT` is used.
+ *
+ * By default, writing to a full bufq will return (-1, CURLE_AGAIN). Same
+ * as reading from an empty bufq.
+ * With `BUFQ_OPT_SOFT_LIMIT` set, a bufq will allow writing beyond this
+ * limit and use more than `max_chunks`. However, it will still report
+ * that it is full. This is provided for situations where writes should
+ * preferably never fail (except for memory exhaustion).
+ *
+ * By default and without a pool, a bufq will keep chunks that have been
+ * read empty in its `spare` list. Option `BUFQ_OPT_NO_SPARES` will
+ * disable that and free chunks once they become empty.
+ *
+ * When providing a pool to a bufq, all chunk creation and spare handling
+ * will be delegated to that pool.
+ */
+struct bufq {
+ struct buf_chunk *head; /* chunk with bytes to read from */
+ struct buf_chunk *tail; /* chunk to write to */
+ struct buf_chunk *spare; /* list of free chunks, unless `pool` */
+ struct bufc_pool *pool; /* optional pool for free chunks */
+ size_t chunk_count; /* current number of chunks in `head+spare` */
+ size_t max_chunks; /* max `head` chunks to use */
+ size_t chunk_size; /* size of chunks to manage */
+ int opts; /* options for handling queue, see below */
+};
+
+/**
+ * Default behaviour: chunk limit is "hard", meaning attempts to write
+ * more bytes than can be held in `max_chunks` are refused and will return
+ * -1, CURLE_AGAIN. */
+#define BUFQ_OPT_NONE (0)
+/**
+ * Make `max_chunks` a "soft" limit. A bufq will report that it is "full"
+ * when `max_chunks` are used, but allows writing beyond this limit.
+ */
+#define BUFQ_OPT_SOFT_LIMIT (1 << 0)
+/**
+ * Do not keep spare chunks.
+ */
+#define BUFQ_OPT_NO_SPARES (1 << 1)
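
For example (illustrative values only), a queue whose writes should never be
refused can be created with the soft limit option:

    struct bufq q;
    Curl_bufq_init2(&q, 4096, 8, BUFQ_OPT_SOFT_LIMIT); /* may exceed 8 chunks */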
+
+/**
+ * Initialize a buffer queue that can hold up to `max_chunks` buffers
+ * each of size `chunk_size`. The bufq will not allow writing of
+ * more bytes than can be held in `max_chunks`.
+ */
+void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks);
+
+/**
+ * Initialize a buffer queue that can hold up to `max_chunks` buffers
+ * each of size `chunk_size` with the given options. See `BUFQ_OPT_*`.
+ */
+void Curl_bufq_init2(struct bufq *q, size_t chunk_size,
+ size_t max_chunks, int opts);
+
+void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
+ size_t max_chunks, int opts);
+
+/**
+ * Reset the buffer queue to be empty. Will keep any allocated buffer
+ * chunks around.
+ */
+void Curl_bufq_reset(struct bufq *q);
+
+/**
+ * Free all resources held by the buffer queue.
+ */
+void Curl_bufq_free(struct bufq *q);
+
+/**
+ * Return the total amount of data in the queue.
+ */
+size_t Curl_bufq_len(const struct bufq *q);
+
+/**
+ * Return the total amount of free space in the queue.
+ * The returned length is the number of bytes that can
+ * be expected to be written successfully to the bufq,
+ * providing no memory allocations fail.
+ */
+size_t Curl_bufq_space(const struct bufq *q);
+
+/**
+ * Returns TRUE iff there is no data in the buffer queue.
+ */
+bool Curl_bufq_is_empty(const struct bufq *q);
+
+/**
+ * Returns TRUE iff there is no space left in the buffer queue.
+ */
+bool Curl_bufq_is_full(const struct bufq *q);
+
+/**
+ * Write buf to the end of the buffer queue. The data is copied
+ * and the number of copied bytes is returned.
+ * A return code of -1 indicates an error, setting `err` to the
+ * cause. An err of CURLE_AGAIN is returned if the buffer queue is full.
+ */
+ssize_t Curl_bufq_write(struct bufq *q,
+ const unsigned char *buf, size_t len,
+ CURLcode *err);
+
+/**
+ * Read data from the start of the buffer queue into buf. The data is
+ * copied and the number of copied bytes is returned.
+ * A return code of -1 indicates an error, setting `err` to the
+ * cause. An err of CURLE_AGAIN is returned if the buffer queue is empty.
+ */
+ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
+ CURLcode *err);
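
A minimal usage sketch of the write/read API above; the chunk sizes and
variable names are illustrative assumptions:

    struct bufq q;
    unsigned char out[32];
    CURLcode err = CURLE_OK;
    ssize_t n;

    Curl_bufq_init(&q, 16, 4);   /* up to 4 chunks of 16 bytes each */
    n = Curl_bufq_write(&q, (const unsigned char *)"hello", 5, &err);
    /* n == 5 on success; -1 with err == CURLE_AGAIN when the bufq is full */
    n = Curl_bufq_read(&q, out, sizeof(out), &err);
    /* n == 5 here; -1 with err == CURLE_AGAIN when the bufq is empty */
    Curl_bufq_free(&q);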
+
+/**
+ * Peek at the head chunk in the buffer queue. Returns a pointer to
+ * the chunk buf (at the current offset) and its length. Does not
+ * modify the buffer queue.
+ * Returns TRUE iff bytes are available. Sets `pbuf` to NULL and `plen`
+ * to 0 when no bytes are available.
+ * Repeated calls return the same information until the buffer queue
+ * is modified, see `Curl_bufq_skip()`.
+ */
+bool Curl_bufq_peek(struct bufq *q,
+ const unsigned char **pbuf, size_t *plen);
+
+bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
+ const unsigned char **pbuf, size_t *plen);
+
+/**
+ * Tell the buffer queue to discard `amount` bytes at the head
+ * of the queue. Skipping more bytes than are currently buffered will
+ * just empty the queue.
+ */
+void Curl_bufq_skip(struct bufq *q, size_t amount);
+
+/**
+ * Same as `skip` but shift tail data to the start afterwards,
+ * so that further writes will find room in tail.
+ */
+void Curl_bufq_skip_and_shift(struct bufq *q, size_t amount);
+
+typedef ssize_t Curl_bufq_writer(void *writer_ctx,
+ const unsigned char *buf, size_t len,
+ CURLcode *err);
+/**
+ * Passes the chunks in the buffer queue to the writer and returns
+ * the amount of data written. A writer may return -1 and CURLE_AGAIN
+ * to indicate blocking, at which point the queue will stop and return
+ * the amount of data passed so far.
+ * -1 is returned on any other error reported by the writer.
+ * Note that in the case of -1, chunks may have been written and
+ * the buffer queue may have a different length than before.
+ */
+ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
+ void *writer_ctx, CURLcode *err);
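
A sketch of a conforming writer callback, assuming a non-blocking socket is
passed as the context; the function name and the simplified socket/error
handling are assumptions for illustration:

    static ssize_t sock_writer(void *writer_ctx, const unsigned char *buf,
                               size_t len, CURLcode *err)
    {
      curl_socket_t fd = *(curl_socket_t *)writer_ctx;
      ssize_t n = send(fd, (const char *)buf, len, 0);
      if(n < 0) {
        *err = CURLE_AGAIN;   /* treat any failure as "would block" here */
        return -1;
      }
      *err = CURLE_OK;
      return n;
    }

    /* e.g. nwritten = Curl_bufq_pass(&q, sock_writer, &fd, &err); */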
+
+typedef ssize_t Curl_bufq_reader(void *reader_ctx,
+ unsigned char *buf, size_t len,
+ CURLcode *err);
+
+/**
+ * Read data and append it to the end of the buffer queue until the
+ * reader returns blocking or the queue is full. A reader returns
+ * -1 and CURLE_AGAIN to indicate blocking.
+ * Returns the total amount of data read (may be 0) or -1 on other
+ * reader errors.
+ * Note that in the case of -1, chunks may have been read and
+ * the buffer queue may have a different length than before.
+ */
+ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
+ void *reader_ctx, CURLcode *err);
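
Correspondingly, a sketch of a reader callback that fills the queue from a
non-blocking socket; again the name and socket handling are illustrative
assumptions:

    static ssize_t sock_reader(void *reader_ctx, unsigned char *buf,
                               size_t len, CURLcode *err)
    {
      curl_socket_t fd = *(curl_socket_t *)reader_ctx;
      ssize_t n = recv(fd, (char *)buf, len, 0);
      if(n < 0) {
        *err = CURLE_AGAIN;   /* treat any failure as "would block" here */
        return -1;
      }
      *err = CURLE_OK;
      return n;               /* 0 means EOF and stops the slurp */
    }

    /* e.g. nread = Curl_bufq_slurp(&q, sock_reader, &fd, &err); */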
+
+/**
+ * Read *once* up to `max_len` bytes and append them to the buffer.
+ * If `max_len` is 0, no limit is imposed besides the chunk space.
+ * Returns the total amount of data read (may be 0) or -1 on other
+ * reader errors.
+ */
+ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
+ Curl_bufq_reader *reader, void *reader_ctx,
+ CURLcode *err);
+
+/**
+ * Write buf to the end of the buffer queue.
+ * Will write already buffered content, or the passed `buf` directly,
+ * using the `writer` callback when it sees fit. `buf` may be passed
+ * straight on or placed into the buffer, depending on `len`, the
+ * amount currently buffered, the chunk size, etc.
+ */
+ssize_t Curl_bufq_write_pass(struct bufq *q,
+ const unsigned char *buf, size_t len,
+ Curl_bufq_writer *writer, void *writer_ctx,
+ CURLcode *err);
+
+#endif /* HEADER_CURL_BUFQ_H */
diff --git a/libs/libcurl/src/c-hyper.c b/libs/libcurl/src/c-hyper.c index 0b2074bb8d..b36341e2f3 100644 --- a/libs/libcurl/src/c-hyper.c +++ b/libs/libcurl/src/c-hyper.c @@ -1212,7 +1212,7 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) Curl_safefree(data->state.aptr.userpwd);
Curl_safefree(data->state.aptr.proxyuserpwd);
return CURLE_OK;
- error:
+error:
DEBUGASSERT(result);
if(io)
hyper_io_free(io);
diff --git a/libs/libcurl/src/cf-h1-proxy.c b/libs/libcurl/src/cf-h1-proxy.c new file mode 100644 index 0000000000..3d886e1ea9 --- /dev/null +++ b/libs/libcurl/src/cf-h1-proxy.c @@ -0,0 +1,1184 @@ +/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP)
+
+#include <curl/curl.h>
+#ifdef USE_HYPER
+#include <hyper.h>
+#endif
+#include "urldata.h"
+#include "dynbuf.h"
+#include "sendf.h"
+#include "http.h"
+#include "http_proxy.h"
+#include "url.h"
+#include "select.h"
+#include "progress.h"
+#include "cfilters.h"
+#include "cf-h1-proxy.h"
+#include "connect.h"
+#include "curl_log.h"
+#include "curlx.h"
+#include "vtls/vtls.h"
+#include "transfer.h"
+#include "multiif.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+
+typedef enum {
+ TUNNEL_INIT, /* init/default/no tunnel state */
+ TUNNEL_CONNECT, /* CONNECT request is being sent */
+ TUNNEL_RECEIVE, /* CONNECT answer is being received */
+ TUNNEL_RESPONSE, /* CONNECT response received completely */
+ TUNNEL_ESTABLISHED,
+ TUNNEL_FAILED
+} tunnel_state;
+
+/* struct for HTTP CONNECT tunneling */
+struct tunnel_state {
+ int sockindex;
+ const char *hostname;
+ int remote_port;
+ struct HTTP CONNECT;
+ struct dynbuf rcvbuf;
+ struct dynbuf req;
+ size_t nsend;
+ size_t headerlines;
+ enum keeponval {
+ KEEPON_DONE,
+ KEEPON_CONNECT,
+ KEEPON_IGNORE
+ } keepon;
+ curl_off_t cl; /* size of content to read and ignore */
+ tunnel_state tunnel_state;
+ BIT(chunked_encoding);
+ BIT(close_connection);
+};
+
+
+static bool tunnel_is_established(struct tunnel_state *ts)
+{
+ return ts && (ts->tunnel_state == TUNNEL_ESTABLISHED);
+}
+
+static bool tunnel_is_failed(struct tunnel_state *ts)
+{
+ return ts && (ts->tunnel_state == TUNNEL_FAILED);
+}
+
+static CURLcode tunnel_reinit(struct tunnel_state *ts,
+ struct connectdata *conn,
+ struct Curl_easy *data)
+{
+ (void)data;
+ DEBUGASSERT(ts);
+ Curl_dyn_reset(&ts->rcvbuf);
+ Curl_dyn_reset(&ts->req);
+ ts->tunnel_state = TUNNEL_INIT;
+ ts->keepon = KEEPON_CONNECT;
+ ts->cl = 0;
+ ts->close_connection = FALSE;
+
+ if(conn->bits.conn_to_host)
+ ts->hostname = conn->conn_to_host.name;
+ else if(ts->sockindex == SECONDARYSOCKET)
+ ts->hostname = conn->secondaryhostname;
+ else
+ ts->hostname = conn->host.name;
+
+ if(ts->sockindex == SECONDARYSOCKET)
+ ts->remote_port = conn->secondary_port;
+ else if(conn->bits.conn_to_port)
+ ts->remote_port = conn->conn_to_port;
+ else
+ ts->remote_port = conn->remote_port;
+
+ return CURLE_OK;
+}
+
+static CURLcode tunnel_init(struct tunnel_state **pts,
+ struct Curl_easy *data,
+ struct connectdata *conn,
+ int sockindex)
+{
+ struct tunnel_state *ts;
+ CURLcode result;
+
+ if(conn->handler->flags & PROTOPT_NOTCPPROXY) {
+ failf(data, "%s cannot be done over CONNECT", conn->handler->scheme);
+ return CURLE_UNSUPPORTED_PROTOCOL;
+ }
+
+ /* we might need the upload buffer for streaming a partial request */
+ result = Curl_get_upload_buffer(data);
+ if(result)
+ return result;
+
+ ts = calloc(1, sizeof(*ts));
+ if(!ts)
+ return CURLE_OUT_OF_MEMORY;
+
+ ts->sockindex = sockindex;
+ infof(data, "allocate connect buffer");
+
+ Curl_dyn_init(&ts->rcvbuf, DYN_PROXY_CONNECT_HEADERS);
+ Curl_dyn_init(&ts->req, DYN_HTTP_REQUEST);
+
+ *pts = ts;
+ connkeep(conn, "HTTP proxy CONNECT");
+ return tunnel_reinit(ts, conn, data);
+}
+
+static void tunnel_go_state(struct Curl_cfilter *cf,
+ struct tunnel_state *ts,
+ tunnel_state new_state,
+ struct Curl_easy *data)
+{
+ if(ts->tunnel_state == new_state)
+ return;
+ /* leaving this one */
+ switch(ts->tunnel_state) {
+ case TUNNEL_CONNECT:
+ data->req.ignorebody = FALSE;
+ break;
+ default:
+ break;
+ }
+ /* entering this one */
+ switch(new_state) {
+ case TUNNEL_INIT:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'init'"));
+ tunnel_reinit(ts, cf->conn, data);
+ break;
+
+ case TUNNEL_CONNECT:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'connect'"));
+ ts->tunnel_state = TUNNEL_CONNECT;
+ ts->keepon = KEEPON_CONNECT;
+ Curl_dyn_reset(&ts->rcvbuf);
+ break;
+
+ case TUNNEL_RECEIVE:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'receive'"));
+ ts->tunnel_state = TUNNEL_RECEIVE;
+ break;
+
+ case TUNNEL_RESPONSE:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'response'"));
+ ts->tunnel_state = TUNNEL_RESPONSE;
+ break;
+
+ case TUNNEL_ESTABLISHED:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'established'"));
+ infof(data, "CONNECT phase completed");
+ data->state.authproxy.done = TRUE;
+ data->state.authproxy.multipass = FALSE;
+ /* FALLTHROUGH */
+ case TUNNEL_FAILED:
+ if(new_state == TUNNEL_FAILED)
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'failed'"));
+ ts->tunnel_state = new_state;
+ Curl_dyn_reset(&ts->rcvbuf);
+ Curl_dyn_reset(&ts->req);
+ /* restore the protocol pointer */
+ data->info.httpcode = 0; /* clear it as it might've been used for the
+ proxy */
+ /* If a proxy-authorization header was used for the proxy, then we should
+ make sure that it isn't accidentally used for the document request
+ after we've connected. So let's free and clear it here. */
+ Curl_safefree(data->state.aptr.proxyuserpwd);
+#ifdef USE_HYPER
+ data->state.hconnect = FALSE;
+#endif
+ break;
+ }
+}
+
+static void tunnel_free(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct tunnel_state *ts = cf->ctx;
+ if(ts) {
+ tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
+ Curl_dyn_free(&ts->rcvbuf);
+ Curl_dyn_free(&ts->req);
+ free(ts);
+ cf->ctx = NULL;
+ }
+}
+
+static CURLcode CONNECT_host(struct Curl_easy *data,
+ struct connectdata *conn,
+ const char *hostname,
+ int remote_port,
+ char **connecthostp,
+ char **hostp)
+{
+ char *hostheader; /* for CONNECT */
+ char *host = NULL; /* Host: */
+ bool ipv6_ip = conn->bits.ipv6_ip;
+
+ /* the hostname may be different */
+ if(hostname != conn->host.name)
+ ipv6_ip = (strchr(hostname, ':') != NULL);
+ hostheader = /* host:port with IPv6 support */
+ aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"",
+ remote_port);
+ if(!hostheader)
+ return CURLE_OUT_OF_MEMORY;
+
+ if(!Curl_checkProxyheaders(data, conn, STRCONST("Host"))) {
+ host = aprintf("Host: %s\r\n", hostheader);
+ if(!host) {
+ free(hostheader);
+ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+ *connecthostp = hostheader;
+ *hostp = host;
+ return CURLE_OK;
+}
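
For illustration: with hostname "example.com" and remote_port 443 this
produces the CONNECT target "example.com:443" and, unless a Host header was
already supplied among the proxy headers, the string "Host: example.com:443\r\n";
an IPv6 literal such as "2001:db8::1" becomes "[2001:db8::1]:443".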
+
+#ifndef USE_HYPER
+static CURLcode start_CONNECT(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_state *ts)
+{
+ struct connectdata *conn = cf->conn;
+ char *hostheader = NULL;
+ char *host = NULL;
+ const char *httpv;
+ CURLcode result;
+
+ infof(data, "Establish HTTP proxy tunnel to %s:%d",
+ ts->hostname, ts->remote_port);
+
+ /* This only happens if we've looped here due to authentication
+ reasons, in which case we don't really use the newly cloned URL.
+ Just free() it. */
+ Curl_safefree(data->req.newurl);
+
+ result = CONNECT_host(data, conn,
+ ts->hostname, ts->remote_port,
+ &hostheader, &host);
+ if(result)
+ goto out;
+
+ /* Setup the proxy-authorization header, if any */
+ result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET,
+ hostheader, TRUE);
+ if(result)
+ goto out;
+
+ httpv = (conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) ? "1.0" : "1.1";
+
+ result =
+ Curl_dyn_addf(&ts->req,
+ "CONNECT %s HTTP/%s\r\n"
+ "%s" /* Host: */
+ "%s", /* Proxy-Authorization */
+ hostheader,
+ httpv,
+ host?host:"",
+ data->state.aptr.proxyuserpwd?
+ data->state.aptr.proxyuserpwd:"");
+ if(result)
+ goto out;
+
+ if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent"))
+ && data->set.str[STRING_USERAGENT])
+ result = Curl_dyn_addf(&ts->req, "User-Agent: %s\r\n",
+ data->set.str[STRING_USERAGENT]);
+ if(result)
+ goto out;
+
+ if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection")))
+ result = Curl_dyn_addn(&ts->req,
+ STRCONST("Proxy-Connection: Keep-Alive\r\n"));
+ if(result)
+ goto out;
+
+ result = Curl_add_custom_headers(data, TRUE, &ts->req);
+ if(result)
+ goto out;
+
+ /* CRLF terminate the request */
+ result = Curl_dyn_addn(&ts->req, STRCONST("\r\n"));
+ if(result)
+ goto out;
+
+ /* Send the connect request to the proxy */
+ result = Curl_buffer_send(&ts->req, data, &ts->CONNECT,
+ &data->info.request_size, 0,
+ ts->sockindex);
+ ts->headerlines = 0;
+
+out:
+ if(result)
+ failf(data, "Failed sending CONNECT to proxy");
+ free(host);
+ free(hostheader);
+ return result;
+}
+
+static CURLcode send_CONNECT(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct tunnel_state *ts,
+ bool *done)
+{
+ struct SingleRequest *k = &data->req;
+ struct HTTP *http = &ts->CONNECT;
+ CURLcode result = CURLE_OK;
+
+ if(http->sending != HTTPSEND_REQUEST)
+ goto out;
+
+ if(!ts->nsend) {
+ size_t fillcount;
+ k->upload_fromhere = data->state.ulbuf;
+ result = Curl_fillreadbuffer(data, data->set.upload_buffer_size,
+ &fillcount);
+ if(result)
+ goto out;
+ ts->nsend = fillcount;
+ }
+ if(ts->nsend) {
+ ssize_t bytes_written;
+ /* write to socket (send away data) */
+ result = Curl_write(data,
+ conn->writesockfd, /* socket to send to */
+ k->upload_fromhere, /* buffer pointer */
+ ts->nsend, /* buffer size */
+ &bytes_written); /* actually sent */
+ if(result)
+ goto out;
+ /* send to debug callback! */
+ Curl_debug(data, CURLINFO_HEADER_OUT,
+ k->upload_fromhere, bytes_written);
+
+ ts->nsend -= bytes_written;
+ k->upload_fromhere += bytes_written;
+ }
+ if(!ts->nsend)
+ http->sending = HTTPSEND_NADA;
+
+out:
+ if(result)
+ failf(data, "Failed sending CONNECT to proxy");
+ *done = (http->sending != HTTPSEND_REQUEST);
+ return result;
+}
+
+static CURLcode on_resp_header(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_state *ts,
+ const char *header)
+{
+ CURLcode result = CURLE_OK;
+ struct SingleRequest *k = &data->req;
+ (void)cf;
+
+ if((checkprefix("WWW-Authenticate:", header) &&
+ (401 == k->httpcode)) ||
+ (checkprefix("Proxy-authenticate:", header) &&
+ (407 == k->httpcode))) {
+
+ bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
+ char *auth = Curl_copy_header_value(header);
+ if(!auth)
+ return CURLE_OUT_OF_MEMORY;
+
+ DEBUGF(LOG_CF(data, cf, "CONNECT: fwd auth header '%s'", header));
+ result = Curl_http_input_auth(data, proxy, auth);
+
+ free(auth);
+
+ if(result)
+ return result;
+ }
+ else if(checkprefix("Content-Length:", header)) {
+ if(k->httpcode/100 == 2) {
+ /* A client MUST ignore any Content-Length or Transfer-Encoding
+ header fields received in a successful response to CONNECT.
+ "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */
+ infof(data, "Ignoring Content-Length in CONNECT %03d response",
+ k->httpcode);
+ }
+ else {
+ (void)curlx_strtoofft(header + strlen("Content-Length:"),
+ NULL, 10, &ts->cl);
+ }
+ }
+ else if(Curl_compareheader(header,
+ STRCONST("Connection:"), STRCONST("close")))
+ ts->close_connection = TRUE;
+ else if(checkprefix("Transfer-Encoding:", header)) {
+ if(k->httpcode/100 == 2) {
+ /* A client MUST ignore any Content-Length or Transfer-Encoding
+ header fields received in a successful response to CONNECT.
+ "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */
+ infof(data, "Ignoring Transfer-Encoding in "
+ "CONNECT %03d response", k->httpcode);
+ }
+ else if(Curl_compareheader(header,
+ STRCONST("Transfer-Encoding:"),
+ STRCONST("chunked"))) {
+ infof(data, "CONNECT responded chunked");
+ ts->chunked_encoding = TRUE;
+ /* init our chunky engine */
+ Curl_httpchunk_init(data);
+ }
+ }
+ else if(Curl_compareheader(header,
+ STRCONST("Proxy-Connection:"),
+ STRCONST("close")))
+ ts->close_connection = TRUE;
+ else if(!strncmp(header, "HTTP/1.", 7) &&
+ ((header[7] == '0') || (header[7] == '1')) &&
+ (header[8] == ' ') &&
+ ISDIGIT(header[9]) && ISDIGIT(header[10]) && ISDIGIT(header[11]) &&
+ !ISDIGIT(header[12])) {
+ /* store the HTTP code from the proxy */
+ data->info.httpproxycode = k->httpcode = (header[9] - '0') * 100 +
+ (header[10] - '0') * 10 + (header[11] - '0');
+ }
+ return result;
+}
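
As a worked example, the proxy status line "HTTP/1.1 407 Proxy Authentication
Required" has '4', '0' and '7' at header[9..11], so the expression above stores
4*100 + 0*10 + 7 = 407 in both k->httpcode and data->info.httpproxycode.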
+
+static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_state *ts,
+ bool *done)
+{
+ CURLcode result = CURLE_OK;
+ struct SingleRequest *k = &data->req;
+ curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data);
+ char *linep;
+ size_t perline;
+ int error;
+
+#define SELECT_OK 0
+#define SELECT_ERROR 1
+
+ error = SELECT_OK;
+ *done = FALSE;
+
+ if(!Curl_conn_data_pending(data, ts->sockindex))
+ return CURLE_OK;
+
+ while(ts->keepon) {
+ ssize_t gotbytes;
+ char byte;
+
+ /* Read one byte at a time to avoid a race condition. Wait at most one
+ second before looping to ensure continuous pgrsUpdates. */
+ result = Curl_read(data, tunnelsocket, &byte, 1, &gotbytes);
+ if(result == CURLE_AGAIN)
+ /* socket buffer drained, return */
+ return CURLE_OK;
+
+ if(Curl_pgrsUpdate(data))
+ return CURLE_ABORTED_BY_CALLBACK;
+
+ if(result) {
+ ts->keepon = KEEPON_DONE;
+ break;
+ }
+
+ if(gotbytes <= 0) {
+ if(data->set.proxyauth && data->state.authproxy.avail &&
+ data->state.aptr.proxyuserpwd) {
+ /* proxy auth was requested and proxy auth was available, so deem
+ this a "mere" proxy disconnect */
+ ts->close_connection = TRUE;
+ infof(data, "Proxy CONNECT connection closed");
+ }
+ else {
+ error = SELECT_ERROR;
+ failf(data, "Proxy CONNECT aborted");
+ }
+ ts->keepon = KEEPON_DONE;
+ break;
+ }
+
+ if(ts->keepon == KEEPON_IGNORE) {
+ /* This means we are currently ignoring a response-body */
+
+ if(ts->cl) {
+ /* A Content-Length based body: simply count down the counter
+ and make sure to break out of the loop when we're done! */
+ ts->cl--;
+ if(ts->cl <= 0) {
+ ts->keepon = KEEPON_DONE;
+ break;
+ }
+ }
+ else {
+ /* chunked-encoded body, so we need to do the chunked dance
+ properly to know when the end of the body is reached */
+ CHUNKcode r;
+ CURLcode extra;
+ ssize_t tookcareof = 0;
+
+ /* now parse the chunked piece of data so that we can
+ properly tell when the stream ends */
+ r = Curl_httpchunk_read(data, &byte, 1, &tookcareof, &extra);
+ if(r == CHUNKE_STOP) {
+ /* we're done reading chunks! */
+ infof(data, "chunk reading DONE");
+ ts->keepon = KEEPON_DONE;
+ }
+ }
+ continue;
+ }
+
+ if(Curl_dyn_addn(&ts->rcvbuf, &byte, 1)) {
+ failf(data, "CONNECT response too large");
+ return CURLE_RECV_ERROR;
+ }
+
+ /* if this is not the end of a header line then continue */
+ if(byte != 0x0a)
+ continue;
+
+ ts->headerlines++;
+ linep = Curl_dyn_ptr(&ts->rcvbuf);
+ perline = Curl_dyn_len(&ts->rcvbuf); /* amount of bytes in this line */
+
+ /* output debug if that is requested */
+ Curl_debug(data, CURLINFO_HEADER_IN, linep, perline);
+
+ if(!data->set.suppress_connect_headers) {
+ /* send the header to the callback */
+ int writetype = CLIENTWRITE_HEADER | CLIENTWRITE_CONNECT |
+ (data->set.include_header ? CLIENTWRITE_BODY : 0) |
+ (ts->headerlines == 1 ? CLIENTWRITE_STATUS : 0);
+
+ result = Curl_client_write(data, writetype, linep, perline);
+ if(result)
+ return result;
+ }
+
+ data->info.header_size += (long)perline;
+
+ /* Newlines are CRLF, so the CR is ignored as the line isn't
+ really terminated until the LF comes. Treat a following CR
+ as end-of-headers as well. */
+
+ if(('\r' == linep[0]) ||
+ ('\n' == linep[0])) {
+ /* end of response-headers from the proxy */
+
+ if((407 == k->httpcode) && !data->state.authproblem) {
+ /* If we get a 407 response code with content length
+ when we have no auth problem, we must ignore the
+ whole response-body */
+ ts->keepon = KEEPON_IGNORE;
+
+ if(ts->cl) {
+ infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T
+ " bytes of response-body", ts->cl);
+ }
+ else if(ts->chunked_encoding) {
+ CHUNKcode r;
+ CURLcode extra;
+
+ infof(data, "Ignore chunked response-body");
+
+ /* We set ignorebody true here since the chunked decoder
+ function will acknowledge that. Pay attention so that this is
+ cleared again when this function returns! */
+ k->ignorebody = TRUE;
+
+ if(linep[1] == '\n')
+ /* this can only be a LF if the letter at index 0 was a CR */
+ linep++;
+
+ /* now parse the chunked piece of data so that we can properly
+ tell when the stream ends */
+ r = Curl_httpchunk_read(data, linep + 1, 1, &gotbytes,
+ &extra);
+ if(r == CHUNKE_STOP) {
+ /* we're done reading chunks! */
+ infof(data, "chunk reading DONE");
+ ts->keepon = KEEPON_DONE;
+ }
+ }
+ else {
+ /* without content-length or chunked encoding, we
+ can't keep the connection alive since the close is
+ the end signal so we bail out at once instead */
+ DEBUGF(LOG_CF(data, cf, "CONNECT: no content-length or chunked"));
+ ts->keepon = KEEPON_DONE;
+ }
+ }
+ else {
+ ts->keepon = KEEPON_DONE;
+ }
+
+ DEBUGASSERT(ts->keepon == KEEPON_IGNORE
+ || ts->keepon == KEEPON_DONE);
+ continue;
+ }
+
+ result = on_resp_header(cf, data, ts, linep);
+ if(result)
+ return result;
+
+ Curl_dyn_reset(&ts->rcvbuf);
+ } /* while there's buffer left and loop is requested */
+
+ if(error)
+ result = CURLE_RECV_ERROR;
+ *done = (ts->keepon == KEEPON_DONE);
+ if(!result && *done && data->info.httpproxycode/100 != 2) {
+ /* Deal with the possibly already received authenticate
+ headers. 'newurl' is set to a new URL if we must loop. */
+ result = Curl_http_auth_act(data);
+ }
+ return result;
+}
+
+#else /* USE_HYPER */
+/* The Hyper version of CONNECT */
+static CURLcode start_CONNECT(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_state *ts)
+{
+ struct connectdata *conn = cf->conn;
+ struct hyptransfer *h = &data->hyp;
+ curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data);
+ hyper_io *io = NULL;
+ hyper_request *req = NULL;
+ hyper_headers *headers = NULL;
+ hyper_clientconn_options *options = NULL;
+ hyper_task *handshake = NULL;
+ hyper_task *task = NULL; /* for the handshake */
+ hyper_clientconn *client = NULL;
+ hyper_task *sendtask = NULL; /* for the send */
+ char *hostheader = NULL; /* for CONNECT */
+ char *host = NULL; /* Host: */
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+
+ io = hyper_io_new();
+ if(!io) {
+ failf(data, "Couldn't create hyper IO");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ /* tell Hyper how to read/write network data */
+ hyper_io_set_userdata(io, data);
+ hyper_io_set_read(io, Curl_hyper_recv);
+ hyper_io_set_write(io, Curl_hyper_send);
+ conn->sockfd = tunnelsocket;
+
+ data->state.hconnect = TRUE;
+
+ /* create an executor to poll futures */
+ if(!h->exec) {
+ h->exec = hyper_executor_new();
+ if(!h->exec) {
+ failf(data, "Couldn't create hyper executor");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ }
+
+ options = hyper_clientconn_options_new();
+ hyper_clientconn_options_set_preserve_header_case(options, 1);
+ hyper_clientconn_options_set_preserve_header_order(options, 1);
+
+ if(!options) {
+ failf(data, "Couldn't create hyper client options");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ hyper_clientconn_options_exec(options, h->exec);
+
+ /* "Both the `io` and the `options` are consumed in this function
+ call" */
+ handshake = hyper_clientconn_handshake(io, options);
+ if(!handshake) {
+ failf(data, "Couldn't create hyper client handshake");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ io = NULL;
+ options = NULL;
+
+ if(HYPERE_OK != hyper_executor_push(h->exec, handshake)) {
+ failf(data, "Couldn't hyper_executor_push the handshake");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ handshake = NULL; /* ownership passed on */
+
+ task = hyper_executor_poll(h->exec);
+ if(!task) {
+ failf(data, "Couldn't hyper_executor_poll the handshake");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ client = hyper_task_value(task);
+ hyper_task_free(task);
+ req = hyper_request_new();
+ if(!req) {
+ failf(data, "Couldn't hyper_request_new");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ if(hyper_request_set_method(req, (uint8_t *)"CONNECT",
+ strlen("CONNECT"))) {
+ failf(data, "error setting method");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ infof(data, "Establish HTTP proxy tunnel to %s:%d",
+ ts->hostname, ts->remote_port);
+
+ /* This only happens if we've looped here due to authentication
+ reasons, in which case we don't really use the newly cloned URL.
+ Just free() it. */
+ Curl_safefree(data->req.newurl);
+
+ result = CONNECT_host(data, conn, ts->hostname, ts->remote_port,
+ &hostheader, &host);
+ if(result)
+ goto error;
+
+ if(hyper_request_set_uri(req, (uint8_t *)hostheader,
+ strlen(hostheader))) {
+ failf(data, "error setting path");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ if(data->set.verbose) {
+ char *se = aprintf("CONNECT %s HTTP/1.1\r\n", hostheader);
+ if(!se) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ Curl_debug(data, CURLINFO_HEADER_OUT, se, strlen(se));
+ free(se);
+ }
+ /* Setup the proxy-authorization header, if any */
+ result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET,
+ hostheader, TRUE);
+ if(result)
+ goto error;
+ Curl_safefree(hostheader);
+
+ /* default is 1.1 */
+ if((conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) &&
+ (HYPERE_OK != hyper_request_set_version(req,
+ HYPER_HTTP_VERSION_1_0))) {
+ failf(data, "error setting HTTP version");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ headers = hyper_request_headers(req);
+ if(!headers) {
+ failf(data, "hyper_request_headers");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ if(host) {
+ result = Curl_hyper_header(data, headers, host);
+ if(result)
+ goto error;
+ Curl_safefree(host);
+ }
+
+ if(data->state.aptr.proxyuserpwd) {
+ result = Curl_hyper_header(data, headers,
+ data->state.aptr.proxyuserpwd);
+ if(result)
+ goto error;
+ }
+
+ if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent")) &&
+ data->set.str[STRING_USERAGENT]) {
+ struct dynbuf ua;
+ Curl_dyn_init(&ua, DYN_HTTP_REQUEST);
+ result = Curl_dyn_addf(&ua, "User-Agent: %s\r\n",
+ data->set.str[STRING_USERAGENT]);
+ if(result)
+ goto error;
+ result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&ua));
+ if(result)
+ goto error;
+ Curl_dyn_free(&ua);
+ }
+
+ if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection"))) {
+ result = Curl_hyper_header(data, headers,
+ "Proxy-Connection: Keep-Alive");
+ if(result)
+ goto error;
+ }
+
+ result = Curl_add_custom_headers(data, TRUE, headers);
+ if(result)
+ goto error;
+
+ sendtask = hyper_clientconn_send(client, req);
+ if(!sendtask) {
+ failf(data, "hyper_clientconn_send");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ if(HYPERE_OK != hyper_executor_push(h->exec, sendtask)) {
+ failf(data, "Couldn't hyper_executor_push the send");
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+
+error:
+ free(host);
+ free(hostheader);
+ if(io)
+ hyper_io_free(io);
+ if(options)
+ hyper_clientconn_options_free(options);
+ if(handshake)
+ hyper_task_free(handshake);
+ if(client)
+ hyper_clientconn_free(client);
+ return result;
+}
+
+static CURLcode send_CONNECT(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct tunnel_state *ts,
+ bool *done)
+{
+ struct hyptransfer *h = &data->hyp;
+ hyper_task *task = NULL;
+ hyper_error *hypererr = NULL;
+ CURLcode result = CURLE_OK;
+
+ (void)ts;
+ (void)conn;
+ do {
+ task = hyper_executor_poll(h->exec);
+ if(task) {
+ bool error = hyper_task_type(task) == HYPER_TASK_ERROR;
+ if(error)
+ hypererr = hyper_task_value(task);
+ hyper_task_free(task);
+ if(error) {
+ /* this could probably use a better error code? */
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
+ }
+ }
+ } while(task);
+error:
+ *done = (result == CURLE_OK);
+ if(hypererr) {
+ uint8_t errbuf[256];
+ size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf));
+ failf(data, "Hyper: %.*s", (int)errlen, errbuf);
+ hyper_error_free(hypererr);
+ }
+ return result;
+}
+
+static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_state *ts,
+ bool *done)
+{
+ struct hyptransfer *h = &data->hyp;
+ CURLcode result;
+ int didwhat;
+
+ (void)ts;
+ *done = FALSE;
+ result = Curl_hyper_stream(data, cf->conn, &didwhat, done,
+ CURL_CSELECT_IN | CURL_CSELECT_OUT);
+ if(result || !*done)
+ return result;
+ if(h->exec) {
+ hyper_executor_free(h->exec);
+ h->exec = NULL;
+ }
+ if(h->read_waker) {
+ hyper_waker_free(h->read_waker);
+ h->read_waker = NULL;
+ }
+ if(h->write_waker) {
+ hyper_waker_free(h->write_waker);
+ h->write_waker = NULL;
+ }
+ return result;
+}
+
+#endif /* USE_HYPER */
+
+static CURLcode CONNECT(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_state *ts)
+{
+ struct connectdata *conn = cf->conn;
+ CURLcode result;
+ bool done;
+
+ if(tunnel_is_established(ts))
+ return CURLE_OK;
+ if(tunnel_is_failed(ts))
+ return CURLE_RECV_ERROR; /* Need a cfilter close and new bootstrap */
+
+ do {
+ timediff_t check;
+
+ check = Curl_timeleft(data, NULL, TRUE);
+ if(check <= 0) {
+ failf(data, "Proxy CONNECT aborted due to timeout");
+ result = CURLE_OPERATION_TIMEDOUT;
+ goto out;
+ }
+
+ switch(ts->tunnel_state) {
+ case TUNNEL_INIT:
+ /* Prepare the CONNECT request and make a first attempt to send. */
+ DEBUGF(LOG_CF(data, cf, "CONNECT start"));
+ result = start_CONNECT(cf, data, ts);
+ if(result)
+ goto out;
+ tunnel_go_state(cf, ts, TUNNEL_CONNECT, data);
+ /* FALLTHROUGH */
+
+ case TUNNEL_CONNECT:
+ /* see that the request is completely sent */
+ DEBUGF(LOG_CF(data, cf, "CONNECT send"));
+ result = send_CONNECT(data, cf->conn, ts, &done);
+ if(result || !done)
+ goto out;
+ tunnel_go_state(cf, ts, TUNNEL_RECEIVE, data);
+ /* FALLTHROUGH */
+
+ case TUNNEL_RECEIVE:
+ /* read what is there */
+ DEBUGF(LOG_CF(data, cf, "CONNECT receive"));
+ result = recv_CONNECT_resp(cf, data, ts, &done);
+ if(Curl_pgrsUpdate(data)) {
+ result = CURLE_ABORTED_BY_CALLBACK;
+ goto out;
+ }
+ /* error or not complete yet. return for more multi-multi */
+ if(result || !done)
+ goto out;
+ /* got it */
+ tunnel_go_state(cf, ts, TUNNEL_RESPONSE, data);
+ /* FALLTHROUGH */
+
+ case TUNNEL_RESPONSE:
+ DEBUGF(LOG_CF(data, cf, "CONNECT response"));
+ if(data->req.newurl) {
+ /* not the "final" response, we need to do a follow up request.
+ * If the other side indicated a connection close, or if someone
+ * else told us to close this connection, do so now.
+ */
+ if(ts->close_connection || conn->bits.close) {
+ /* Close this filter and the sub-chain, re-connect the
+ * sub-chain and continue. Closing this filter will
+ * reset our tunnel state. To avoid recursion, we return
+ * and expect to be called again.
+ */
+ DEBUGF(LOG_CF(data, cf, "CONNECT need to close+open"));
+ infof(data, "Connect me again please");
+ Curl_conn_cf_close(cf, data);
+ connkeep(conn, "HTTP proxy CONNECT");
+ result = Curl_conn_cf_connect(cf->next, data, FALSE, &done);
+ goto out;
+ }
+ else {
+ /* staying on this connection, reset state */
+ tunnel_go_state(cf, ts, TUNNEL_INIT, data);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ } while(data->req.newurl);
+
+ DEBUGASSERT(ts->tunnel_state == TUNNEL_RESPONSE);
+ if(data->info.httpproxycode/100 != 2) {
+ /* a non-2xx response and we have no next url to try. */
+ Curl_safefree(data->req.newurl);
+ /* failure, close this connection to avoid re-use */
+ streamclose(conn, "proxy CONNECT failure");
+ tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
+ failf(data, "CONNECT tunnel failed, response %d", data->req.httpcode);
+ return CURLE_RECV_ERROR;
+ }
+ /* 2xx response, SUCCESS! */
+ tunnel_go_state(cf, ts, TUNNEL_ESTABLISHED, data);
+ infof(data, "CONNECT tunnel established, response %d",
+ data->info.httpproxycode);
+ result = CURLE_OK;
+
+out:
+ if(result)
+ tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
+ return result;
+}
+
+static CURLcode cf_h1_proxy_connect(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool blocking, bool *done)
+{
+ CURLcode result;
+ struct tunnel_state *ts = cf->ctx;
+
+ if(cf->connected) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ DEBUGF(LOG_CF(data, cf, "connect"));
+ result = cf->next->cft->connect(cf->next, data, blocking, done);
+ if(result || !*done)
+ return result;
+
+ *done = FALSE;
+ if(!ts) {
+ result = tunnel_init(&ts, data, cf->conn, cf->sockindex);
+ if(result)
+ return result;
+ cf->ctx = ts;
+ }
+
+ /* TODO: can we do blocking? */
+ /* We want "seamless" operations through HTTP proxy tunnel */
+
+ result = CONNECT(cf, data, ts);
+ if(result)
+ goto out;
+ Curl_safefree(data->state.aptr.proxyuserpwd);
+
+out:
+ *done = (result == CURLE_OK) && tunnel_is_established(cf->ctx);
+ if(*done) {
+ cf->connected = TRUE;
+ tunnel_free(cf, data);
+ }
+ return result;
+}
+
+static int cf_h1_proxy_get_select_socks(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ curl_socket_t *socks)
+{
+ struct tunnel_state *ts = cf->ctx;
+ int fds;
+
+ fds = cf->next->cft->get_select_socks(cf->next, data, socks);
+ if(!fds && cf->next->connected && !cf->connected) {
+ /* If we are not connected, but the filter "below" is
+ * and not waiting on something, we are tunneling. */
+ socks[0] = Curl_conn_cf_get_socket(cf, data);
+ if(ts) {
+ /* when we've sent a CONNECT to a proxy, we should rather wait for
+ the socket to become readable so we can get the response headers,
+ or, if we're still sending the request, wait for the socket to
+ become writable. */
+ if(ts->CONNECT.sending == HTTPSEND_REQUEST) {
+ return GETSOCK_WRITESOCK(0);
+ }
+ return GETSOCK_READSOCK(0);
+ }
+ return GETSOCK_WRITESOCK(0);
+ }
+ return fds;
+}
+
+static void cf_h1_proxy_destroy(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ DEBUGF(LOG_CF(data, cf, "destroy"));
+ tunnel_free(cf, data);
+}
+
+static void cf_h1_proxy_close(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ DEBUGF(LOG_CF(data, cf, "close"));
+ cf->connected = FALSE;
+ if(cf->ctx) {
+ tunnel_go_state(cf, cf->ctx, TUNNEL_INIT, data);
+ }
+ if(cf->next)
+ cf->next->cft->close(cf->next, data);
+}
+
+
+struct Curl_cftype Curl_cft_h1_proxy = {
+ "H1-PROXY",
+ CF_TYPE_IP_CONNECT,
+ 0,
+ cf_h1_proxy_destroy,
+ cf_h1_proxy_connect,
+ cf_h1_proxy_close,
+ Curl_cf_http_proxy_get_host,
+ cf_h1_proxy_get_select_socks,
+ Curl_cf_def_data_pending,
+ Curl_cf_def_send,
+ Curl_cf_def_recv,
+ Curl_cf_def_cntrl,
+ Curl_cf_def_conn_is_alive,
+ Curl_cf_def_conn_keep_alive,
+ Curl_cf_def_query,
+};
+
+CURLcode Curl_cf_h1_proxy_insert_after(struct Curl_cfilter *cf_at,
+ struct Curl_easy *data)
+{
+ struct Curl_cfilter *cf;
+ CURLcode result;
+
+ (void)data;
+ result = Curl_cf_create(&cf, &Curl_cft_h1_proxy, NULL);
+ if(!result)
+ Curl_conn_cf_insert_after(cf_at, cf);
+ return result;
+}
+
+#endif /* !CURL_DISABLE_PROXY && ! CURL_DISABLE_HTTP */
diff --git a/libs/libcurl/src/cf-h1-proxy.h b/libs/libcurl/src/cf-h1-proxy.h new file mode 100644 index 0000000000..e8786f265b --- /dev/null +++ b/libs/libcurl/src/cf-h1-proxy.h @@ -0,0 +1,39 @@ +#ifndef HEADER_CURL_H1_PROXY_H
+#define HEADER_CURL_H1_PROXY_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP)
+
+CURLcode Curl_cf_h1_proxy_insert_after(struct Curl_cfilter *cf,
+ struct Curl_easy *data);
+
+extern struct Curl_cftype Curl_cft_h1_proxy;
+
+
+#endif /* !CURL_DISABLE_PROXY && !CURL_DISABLE_HTTP */
+
+#endif /* HEADER_CURL_H1_PROXY_H */
diff --git a/libs/libcurl/src/cf-h2-proxy.c b/libs/libcurl/src/cf-h2-proxy.c new file mode 100644 index 0000000000..b504504e89 --- /dev/null +++ b/libs/libcurl/src/cf-h2-proxy.c @@ -0,0 +1,1356 @@ +/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY)
+
+#include <nghttp2/nghttp2.h>
+#include "urldata.h"
+#include "cfilters.h"
+#include "connect.h"
+#include "curl_log.h"
+#include "bufq.h"
+#include "dynbuf.h"
+#include "dynhds.h"
+#include "http1.h"
+#include "http_proxy.h"
+#include "multiif.h"
+#include "cf-h2-proxy.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+#define H2_NW_CHUNK_SIZE (128*1024)
+#define H2_NW_RECV_CHUNKS 1
+#define H2_NW_SEND_CHUNKS 1
+
+#define HTTP2_HUGE_WINDOW_SIZE (32 * 1024 * 1024) /* 32 MB */
+
+#define H2_TUNNEL_WINDOW_SIZE (1024 * 1024)
+#define H2_TUNNEL_CHUNK_SIZE (32 * 1024)
+#define H2_TUNNEL_RECV_CHUNKS \
+ (H2_TUNNEL_WINDOW_SIZE / H2_TUNNEL_CHUNK_SIZE)
+#define H2_TUNNEL_SEND_CHUNKS \
+ (H2_TUNNEL_WINDOW_SIZE / H2_TUNNEL_CHUNK_SIZE)
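These sizes imply that each tunnel bufq holds 32 chunks per direction; a quick standalone check of that arithmetic (illustration only, not part of the patch):

#include <assert.h>

int main(void)
{
  /* 1 MiB window split into 32 KiB chunks gives 32 chunks per bufq */
  assert((1024 * 1024) / (32 * 1024) == 32);
  return 0;
}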
+
+typedef enum {
+ TUNNEL_INIT, /* init/default/no tunnel state */
+ TUNNEL_CONNECT, /* CONNECT request is being sent */
+ TUNNEL_RESPONSE, /* CONNECT response received completely */
+ TUNNEL_ESTABLISHED,
+ TUNNEL_FAILED
+} tunnel_state;
+
+struct tunnel_stream {
+ struct http_resp *resp;
+ struct bufq recvbuf;
+ struct bufq sendbuf;
+ char *authority;
+ int32_t stream_id;
+ uint32_t error;
+ tunnel_state state;
+ bool has_final_response;
+ bool closed;
+ bool reset;
+};
+
+static CURLcode tunnel_stream_init(struct Curl_cfilter *cf,
+ struct tunnel_stream *ts)
+{
+ const char *hostname;
+ int port;
+ bool ipv6_ip = cf->conn->bits.ipv6_ip;
+
+ ts->state = TUNNEL_INIT;
+ ts->stream_id = -1;
+ Curl_bufq_init2(&ts->recvbuf, H2_TUNNEL_CHUNK_SIZE, H2_TUNNEL_RECV_CHUNKS,
+ BUFQ_OPT_SOFT_LIMIT);
+ Curl_bufq_init(&ts->sendbuf, H2_TUNNEL_CHUNK_SIZE, H2_TUNNEL_SEND_CHUNKS);
+
+ if(cf->conn->bits.conn_to_host)
+ hostname = cf->conn->conn_to_host.name;
+ else if(cf->sockindex == SECONDARYSOCKET)
+ hostname = cf->conn->secondaryhostname;
+ else
+ hostname = cf->conn->host.name;
+
+ if(cf->sockindex == SECONDARYSOCKET)
+ port = cf->conn->secondary_port;
+ else if(cf->conn->bits.conn_to_port)
+ port = cf->conn->conn_to_port;
+ else
+ port = cf->conn->remote_port;
+
+ if(hostname != cf->conn->host.name)
+ ipv6_ip = (strchr(hostname, ':') != NULL);
+
+ ts->authority = /* host:port with IPv6 support */
+ aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"", port);
+ if(!ts->authority)
+ return CURLE_OUT_OF_MEMORY;
+
+ return CURLE_OK;
+}
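The authority string built above is the host:port form used in the CONNECT request, with IPv6 literals wrapped in brackets. A minimal standalone sketch of that formatting (make_authority is a hypothetical helper, not a libcurl function):

#include <stdio.h>
#include <string.h>

static void make_authority(char *out, size_t len, const char *host, int port)
{
  int ipv6 = (strchr(host, ':') != NULL);  /* crude IPv6-literal detection */
  snprintf(out, len, "%s%s%s:%d", ipv6 ? "[" : "", host, ipv6 ? "]" : "", port);
}

int main(void)
{
  char buf[128];
  make_authority(buf, sizeof(buf), "example.com", 8080);
  puts(buf);                       /* example.com:8080 */
  make_authority(buf, sizeof(buf), "2001:db8::1", 443);
  puts(buf);                       /* [2001:db8::1]:443 */
  return 0;
}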
+
+static void tunnel_stream_clear(struct tunnel_stream *ts)
+{
+ Curl_http_resp_free(ts->resp);
+ Curl_bufq_free(&ts->recvbuf);
+ Curl_bufq_free(&ts->sendbuf);
+ Curl_safefree(ts->authority);
+ memset(ts, 0, sizeof(*ts));
+ ts->state = TUNNEL_INIT;
+}
+
+static void tunnel_go_state(struct Curl_cfilter *cf,
+ struct tunnel_stream *ts,
+ tunnel_state new_state,
+ struct Curl_easy *data)
+{
+ (void)cf;
+
+ if(ts->state == new_state)
+ return;
+ /* leaving this one */
+ switch(ts->state) {
+ case TUNNEL_CONNECT:
+ data->req.ignorebody = FALSE;
+ break;
+ default:
+ break;
+ }
+ /* entering this one */
+ switch(new_state) {
+ case TUNNEL_INIT:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'init'"));
+ tunnel_stream_clear(ts);
+ break;
+
+ case TUNNEL_CONNECT:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'connect'"));
+ ts->state = TUNNEL_CONNECT;
+ break;
+
+ case TUNNEL_RESPONSE:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'response'"));
+ ts->state = TUNNEL_RESPONSE;
+ break;
+
+ case TUNNEL_ESTABLISHED:
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'established'"));
+ infof(data, "CONNECT phase completed");
+ data->state.authproxy.done = TRUE;
+ data->state.authproxy.multipass = FALSE;
+ /* FALLTHROUGH */
+ case TUNNEL_FAILED:
+ if(new_state == TUNNEL_FAILED)
+ DEBUGF(LOG_CF(data, cf, "new tunnel state 'failed'"));
+ ts->state = new_state;
+ /* If a proxy-authorization header was used for the proxy, then we should
+ make sure that it isn't accidentally used for the document request
+ after we've connected. So let's free and clear it here. */
+ Curl_safefree(data->state.aptr.proxyuserpwd);
+ break;
+ }
+}
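Judging from the CONNECT() state machine and inspect_response() further down, the intended flow is TUNNEL_INIT -> TUNNEL_CONNECT -> TUNNEL_RESPONSE -> TUNNEL_ESTABLISHED, with any error dropping the stream to TUNNEL_FAILED; a 401/407 response that yields a usable authentication scheme goes back to TUNNEL_INIT so the CONNECT can be resubmitted.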
+
+struct cf_h2_proxy_ctx {
+ nghttp2_session *h2;
+ /* The easy handle used in the current filter call, cleared at return */
+ struct cf_call_data call_data;
+
+ struct bufq inbufq; /* network receive buffer */
+ struct bufq outbufq; /* network send buffer */
+
+ struct tunnel_stream tunnel; /* our tunnel CONNECT stream */
+ int32_t goaway_error;
+ int32_t last_stream_id;
+ BIT(conn_closed);
+ BIT(goaway);
+};
+
+/* How to access `call_data` from a cf_h2_proxy filter */
+#define CF_CTX_CALL_DATA(cf) \
+ ((struct cf_h2_proxy_ctx *)(cf)->ctx)->call_data
+
+static void cf_h2_proxy_ctx_clear(struct cf_h2_proxy_ctx *ctx)
+{
+ struct cf_call_data save = ctx->call_data;
+
+ if(ctx->h2) {
+ nghttp2_session_del(ctx->h2);
+ }
+ Curl_bufq_free(&ctx->inbufq);
+ Curl_bufq_free(&ctx->outbufq);
+ tunnel_stream_clear(&ctx->tunnel);
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->call_data = save;
+}
+
+static void cf_h2_proxy_ctx_free(struct cf_h2_proxy_ctx *ctx)
+{
+ if(ctx) {
+ cf_h2_proxy_ctx_clear(ctx);
+ free(ctx);
+ }
+}
+
+static ssize_t nw_in_reader(void *reader_ctx,
+ unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct Curl_cfilter *cf = reader_ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+ ssize_t nread;
+
+ nread = Curl_conn_cf_recv(cf->next, data, (char *)buf, buflen, err);
+ DEBUGF(LOG_CF(data, cf, "nw_in recv(len=%zu) -> %zd, %d",
+ buflen, nread, *err));
+ return nread;
+}
+
+static ssize_t nw_out_writer(void *writer_ctx,
+ const unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct Curl_cfilter *cf = writer_ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+ ssize_t nwritten;
+
+ nwritten = Curl_conn_cf_send(cf->next, data, (const char *)buf, buflen, err);
+ DEBUGF(LOG_CF(data, cf, "nw_out send(len=%zu) -> %zd", buflen, nwritten));
+ return nwritten;
+}
+
+static int h2_client_new(struct Curl_cfilter *cf,
+ nghttp2_session_callbacks *cbs)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ nghttp2_option *o;
+
+ int rc = nghttp2_option_new(&o);
+ if(rc)
+ return rc;
+ /* We handle window updates ourselves to enforce buffer limits */
+ nghttp2_option_set_no_auto_window_update(o, 1);
+#if NGHTTP2_VERSION_NUM >= 0x013200
+ /* with 1.50.0 */
+ /* turn off RFC 9113 leading and trailing white spaces validation against
+ HTTP field value. */
+ nghttp2_option_set_no_rfc9113_leading_and_trailing_ws_validation(o, 1);
+#endif
+ rc = nghttp2_session_client_new2(&ctx->h2, cbs, cf, o);
+ nghttp2_option_del(o);
+ return rc;
+}
+
+static ssize_t on_session_send(nghttp2_session *h2,
+ const uint8_t *buf, size_t blen,
+ int flags, void *userp);
+static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
+ void *userp);
+static int on_stream_close(nghttp2_session *session, int32_t stream_id,
+ uint32_t error_code, void *userp);
+static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags,
+ void *userp);
+static int tunnel_recv_callback(nghttp2_session *session, uint8_t flags,
+ int32_t stream_id,
+ const uint8_t *mem, size_t len, void *userp);
+
+/*
+ * Initialize the cfilter context
+ */
+static CURLcode cf_h2_proxy_ctx_init(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+ nghttp2_session_callbacks *cbs = NULL;
+ int rc;
+
+ DEBUGASSERT(!ctx->h2);
+ memset(&ctx->tunnel, 0, sizeof(ctx->tunnel));
+
+ Curl_bufq_init(&ctx->inbufq, H2_NW_CHUNK_SIZE, H2_NW_RECV_CHUNKS);
+ Curl_bufq_init(&ctx->outbufq, H2_NW_CHUNK_SIZE, H2_NW_SEND_CHUNKS);
+
+ if(tunnel_stream_init(cf, &ctx->tunnel))
+ goto out;
+
+ rc = nghttp2_session_callbacks_new(&cbs);
+ if(rc) {
+ failf(data, "Couldn't initialize nghttp2 callbacks");
+ goto out;
+ }
+
+ nghttp2_session_callbacks_set_send_callback(cbs, on_session_send);
+ nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
+ nghttp2_session_callbacks_set_on_data_chunk_recv_callback(
+ cbs, tunnel_recv_callback);
+ nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close);
+ nghttp2_session_callbacks_set_on_header_callback(cbs, on_header);
+
+ /* The nghttp2 session is not yet set up, do it */
+ rc = h2_client_new(cf, cbs);
+ if(rc) {
+ failf(data, "Couldn't initialize nghttp2");
+ goto out;
+ }
+
+ {
+ nghttp2_settings_entry iv[3];
+
+ iv[0].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
+ iv[0].value = Curl_multi_max_concurrent_streams(data->multi);
+ iv[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ iv[1].value = H2_TUNNEL_WINDOW_SIZE;
+ iv[2].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH;
+ iv[2].value = 0;
+ rc = nghttp2_submit_settings(ctx->h2, NGHTTP2_FLAG_NONE, iv, 3);
+ if(rc) {
+ failf(data, "nghttp2_submit_settings() failed: %s(%d)",
+ nghttp2_strerror(rc), rc);
+ result = CURLE_HTTP2;
+ goto out;
+ }
+ }
+
+ rc = nghttp2_session_set_local_window_size(ctx->h2, NGHTTP2_FLAG_NONE, 0,
+ HTTP2_HUGE_WINDOW_SIZE);
+ if(rc) {
+ failf(data, "nghttp2_session_set_local_window_size() failed: %s(%d)",
+ nghttp2_strerror(rc), rc);
+ result = CURLE_HTTP2;
+ goto out;
+ }
+
+
+ /* all set, traffic will be sent on connect */
+ result = CURLE_OK;
+
+out:
+ if(cbs)
+ nghttp2_session_callbacks_del(cbs);
+ DEBUGF(LOG_CF(data, cf, "init proxy ctx -> %d", result));
+ return result;
+}
+
+static CURLcode nw_out_flush(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ size_t buflen = Curl_bufq_len(&ctx->outbufq);
+ ssize_t nwritten;
+ CURLcode result;
+
+ (void)data;
+ if(!buflen)
+ return CURLE_OK;
+
+ DEBUGF(LOG_CF(data, cf, "h2 conn flush %zu bytes", buflen));
+ nwritten = Curl_bufq_pass(&ctx->outbufq, nw_out_writer, cf, &result);
+ if(nwritten < 0) {
+ return result;
+ }
+ if((size_t)nwritten < buflen) {
+ return CURLE_AGAIN;
+ }
+ return CURLE_OK;
+}
+
+/*
+ * Process pending input left in the network input buffer.
+ * Returns 0 on success, or -1 with the error code assigned to *err.
+ */
+static int h2_process_pending_input(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ CURLcode *err)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ const unsigned char *buf;
+ size_t blen;
+ ssize_t rv;
+
+ while(Curl_bufq_peek(&ctx->inbufq, &buf, &blen)) {
+
+ rv = nghttp2_session_mem_recv(ctx->h2, (const uint8_t *)buf, blen);
+ DEBUGF(LOG_CF(data, cf,
+ "fed %zu bytes from nw to nghttp2 -> %zd", blen, rv));
+ if(rv < 0) {
+ failf(data,
+ "process_pending_input: nghttp2_session_mem_recv() returned "
+ "%zd:%s", rv, nghttp2_strerror((int)rv));
+ *err = CURLE_RECV_ERROR;
+ return -1;
+ }
+ Curl_bufq_skip(&ctx->inbufq, (size_t)rv);
+ if(Curl_bufq_is_empty(&ctx->inbufq)) {
+ DEBUGF(LOG_CF(data, cf, "all data in connection buffer processed"));
+ break;
+ }
+ else {
+ DEBUGF(LOG_CF(data, cf, "process_pending_input: %zu bytes left "
+ "in connection buffer", Curl_bufq_len(&ctx->inbufq)));
+ }
+ }
+
+ if(nghttp2_session_check_request_allowed(ctx->h2) == 0) {
+ /* No more requests are allowed in the current session, so
+ the connection may not be reused. This is set when a
+ GOAWAY frame has been received or when the limit of stream
+ identifiers has been reached. */
+ connclose(cf->conn, "http/2: No new requests allowed");
+ }
+
+ return 0;
+}
+
+static CURLcode h2_progress_ingress(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ CURLcode result = CURLE_OK;
+ ssize_t nread;
+
+ /* Process the network input buffer first */
+ if(!Curl_bufq_is_empty(&ctx->inbufq)) {
+ DEBUGF(LOG_CF(data, cf, "Process %zd bytes in connection buffer",
+ Curl_bufq_len(&ctx->inbufq)));
+ if(h2_process_pending_input(cf, data, &result) < 0)
+ return result;
+ }
+
+ /* Receive data from the "lower" filters, e.g. the network, until
+ * it is time to stop or we have enough data for this stream */
+ while(!ctx->conn_closed && /* not closed the connection */
+ !ctx->tunnel.closed && /* nor the tunnel */
+ Curl_bufq_is_empty(&ctx->inbufq) && /* and we consumed our input */
+ !Curl_bufq_is_full(&ctx->tunnel.recvbuf)) {
+
+ nread = Curl_bufq_slurp(&ctx->inbufq, nw_in_reader, cf, &result);
+ DEBUGF(LOG_CF(data, cf, "read %zd bytes nw data -> %zd, %d",
+ Curl_bufq_len(&ctx->inbufq), nread, result));
+ if(nread < 0) {
+ if(result != CURLE_AGAIN) {
+ failf(data, "Failed receiving HTTP2 data");
+ return result;
+ }
+ break;
+ }
+ else if(nread == 0) {
+ ctx->conn_closed = TRUE;
+ break;
+ }
+
+ if(h2_process_pending_input(cf, data, &result))
+ return result;
+ }
+
+ if(ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) {
+ connclose(cf->conn, "GOAWAY received");
+ }
+
+ return CURLE_OK;
+}
+
+/*
+ * Let the nghttp2 session send any frames it has pending and
+ * flush any outgoing data still held in the network buffer.
+ */
+static CURLcode h2_progress_egress(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ int rv = 0;
+
+ rv = nghttp2_session_send(ctx->h2);
+ if(nghttp2_is_fatal(rv)) {
+ DEBUGF(LOG_CF(data, cf, "nghttp2_session_send error (%s)%d",
+ nghttp2_strerror(rv), rv));
+ return CURLE_SEND_ERROR;
+ }
+ return nw_out_flush(cf, data);
+}
+
+static ssize_t on_session_send(nghttp2_session *h2,
+ const uint8_t *buf, size_t blen, int flags,
+ void *userp)
+{
+ struct Curl_cfilter *cf = userp;
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+ ssize_t nwritten;
+ CURLcode result = CURLE_OK;
+
+ (void)h2;
+ (void)flags;
+ DEBUGASSERT(data);
+
+ nwritten = Curl_bufq_write_pass(&ctx->outbufq, buf, blen,
+ nw_out_writer, cf, &result);
+ if(nwritten < 0) {
+ if(result == CURLE_AGAIN) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+ failf(data, "Failed sending HTTP2 data");
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if(!nwritten)
+ return NGHTTP2_ERR_WOULDBLOCK;
+
+ return nwritten;
+}
+
+static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
+ void *userp)
+{
+ struct Curl_cfilter *cf = userp;
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+ int32_t stream_id = frame->hd.stream_id;
+
+ (void)session;
+ DEBUGASSERT(data);
+ if(!stream_id) {
+ /* stream ID zero is for connection-oriented stuff */
+ DEBUGASSERT(data);
+ switch(frame->hd.type) {
+ case NGHTTP2_SETTINGS:
+ /* we do not do anything with this for now */
+ break;
+ case NGHTTP2_GOAWAY:
+ infof(data, "recveived GOAWAY, error=%d, last_stream=%u",
+ frame->goaway.error_code, frame->goaway.last_stream_id);
+ ctx->goaway = TRUE;
+ break;
+ case NGHTTP2_WINDOW_UPDATE:
+ DEBUGF(LOG_CF(data, cf, "recv frame WINDOW_UPDATE"));
+ break;
+ default:
+ DEBUGF(LOG_CF(data, cf, "recv frame %x on 0", frame->hd.type));
+ }
+ return 0;
+ }
+
+ if(stream_id != ctx->tunnel.stream_id) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] rcvd FRAME not for tunnel",
+ stream_id));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ switch(frame->hd.type) {
+ case NGHTTP2_DATA:
+ /* If body started on this stream, then receiving DATA is illegal. */
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv frame DATA", stream_id));
+ break;
+ case NGHTTP2_HEADERS:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv frame HEADERS", stream_id));
+
+ /* nghttp2 guarantees that :status is received first and we store the
+ response in ctx->tunnel.resp. Fuzzing has proven this can still be
+ reached without a response having been set. */
+ if(!ctx->tunnel.resp)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ /* Only final status code signals the end of header */
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] got http status: %d",
+ stream_id, ctx->tunnel.resp->status));
+ if(!ctx->tunnel.has_final_response) {
+ if(ctx->tunnel.resp->status / 100 != 1) {
+ ctx->tunnel.has_final_response = TRUE;
+ }
+ }
+ break;
+ case NGHTTP2_PUSH_PROMISE:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv PUSH_PROMISE", stream_id));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ case NGHTTP2_RST_STREAM:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv RST", stream_id));
+ ctx->tunnel.reset = TRUE;
+ break;
+ case NGHTTP2_WINDOW_UPDATE:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv WINDOW_UPDATE", stream_id));
+ if((data->req.keepon & KEEP_SEND_HOLD) &&
+ (data->req.keepon & KEEP_SEND)) {
+ data->req.keepon &= ~KEEP_SEND_HOLD;
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] unpausing after win update",
+ stream_id));
+ }
+ break;
+ default:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv frame %x",
+ stream_id, frame->hd.type));
+ break;
+ }
+ return 0;
+}
+
+static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags,
+ void *userp)
+{
+ struct Curl_cfilter *cf = userp;
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+ int32_t stream_id = frame->hd.stream_id;
+ CURLcode result;
+
+ (void)flags;
+ (void)data;
+ (void)session;
+ DEBUGASSERT(stream_id); /* should never be a zero stream ID here */
+ if(stream_id != ctx->tunnel.stream_id) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] header for non-tunnel stream: "
+ "%.*s: %.*s", stream_id,
+ (int)namelen, name,
+ (int)valuelen, value));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+
+ if(frame->hd.type == NGHTTP2_PUSH_PROMISE)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+
+ if(ctx->tunnel.has_final_response) {
+ /* we do not do anything with trailers for tunnel streams */
+ return 0;
+ }
+
+ if(namelen == sizeof(HTTP_PSEUDO_STATUS) - 1 &&
+ memcmp(HTTP_PSEUDO_STATUS, name, namelen) == 0) {
+ int http_status;
+ struct http_resp *resp;
+
+ /* status: always comes first, we might get more than one response,
+ * link the previous ones for keepers */
+ result = Curl_http_decode_status(&http_status,
+ (const char *)value, valuelen);
+ if(result)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ result = Curl_http_resp_make(&resp, http_status, NULL);
+ if(result)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ resp->prev = ctx->tunnel.resp;
+ ctx->tunnel.resp = resp;
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] status: HTTP/2 %03d",
+ stream_id, ctx->tunnel.resp->status));
+ return 0;
+ }
+
+ if(!ctx->tunnel.resp)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+
+ result = Curl_dynhds_add(&ctx->tunnel.resp->headers,
+ (const char *)name, namelen,
+ (const char *)value, valuelen);
+ if(result)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] header: %.*s: %.*s",
+ stream_id,
+ (int)namelen, name,
+ (int)valuelen, value));
+
+ return 0; /* 0 is successful */
+}
+
+static ssize_t tunnel_send_callback(nghttp2_session *session,
+ int32_t stream_id,
+ uint8_t *buf, size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *userp)
+{
+ struct Curl_cfilter *cf = userp;
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+ struct tunnel_stream *ts;
+ CURLcode result;
+ ssize_t nread;
+
+ (void)source;
+ (void)data;
+ (void)ctx;
+
+ if(!stream_id)
+ return NGHTTP2_ERR_INVALID_ARGUMENT;
+
+ ts = nghttp2_session_get_stream_user_data(session, stream_id);
+ if(!ts)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ DEBUGASSERT(ts == &ctx->tunnel);
+
+ nread = Curl_bufq_read(&ts->sendbuf, buf, length, &result);
+ if(nread < 0) {
+ if(result != CURLE_AGAIN)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ if(ts->closed && Curl_bufq_is_empty(&ts->sendbuf))
+ *data_flags = NGHTTP2_DATA_FLAG_EOF;
+
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] tunnel_send_callback -> %zd",
+ ts->stream_id, nread));
+ return nread;
+}
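Returning NGHTTP2_ERR_DEFERRED when sendbuf runs dry tells nghttp2 to park the stream; cf_h2_proxy_send() below wakes it again with nghttp2_session_resume_data() once new bytes have been buffered, which is what triggers this callback to be invoked anew.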
+
+static int tunnel_recv_callback(nghttp2_session *session, uint8_t flags,
+ int32_t stream_id,
+ const uint8_t *mem, size_t len, void *userp)
+{
+ struct Curl_cfilter *cf = userp;
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ ssize_t nwritten;
+ CURLcode result;
+
+ (void)flags;
+ (void)session;
+ DEBUGASSERT(stream_id); /* should never be a zero stream ID here */
+
+ if(stream_id != ctx->tunnel.stream_id)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+
+ nwritten = Curl_bufq_write(&ctx->tunnel.recvbuf, mem, len, &result);
+ if(nwritten < 0) {
+ if(result != CURLE_AGAIN)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ nwritten = 0;
+ }
+ DEBUGASSERT((size_t)nwritten == len);
+ return 0;
+}
+
+static int on_stream_close(nghttp2_session *session, int32_t stream_id,
+ uint32_t error_code, void *userp)
+{
+ struct Curl_cfilter *cf = userp;
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+
+ (void)session;
+ (void)data;
+
+ if(stream_id != ctx->tunnel.stream_id)
+ return 0;
+
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] on_stream_close, %s (err %d)",
+ stream_id, nghttp2_http2_strerror(error_code), error_code));
+ ctx->tunnel.closed = TRUE;
+ ctx->tunnel.error = error_code;
+
+ return 0;
+}
+
+static CURLcode h2_submit(int32_t *pstream_id,
+ struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ nghttp2_session *h2,
+ struct httpreq *req,
+ const nghttp2_priority_spec *pri_spec,
+ void *stream_user_data,
+ nghttp2_data_source_read_callback read_callback,
+ void *read_ctx)
+{
+ struct dynhds h2_headers;
+ nghttp2_nv *nva = NULL;
+ unsigned int i;
+ int32_t stream_id = -1;
+ size_t nheader;
+ CURLcode result;
+
+ (void)cf;
+ Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
+ result = Curl_http_req_to_h2(&h2_headers, req, data);
+ if(result)
+ goto out;
+
+ nheader = Curl_dynhds_count(&h2_headers);
+ nva = malloc(sizeof(nghttp2_nv) * nheader);
+ if(!nva) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ for(i = 0; i < nheader; ++i) {
+ struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
+ nva[i].name = (unsigned char *)e->name;
+ nva[i].namelen = e->namelen;
+ nva[i].value = (unsigned char *)e->value;
+ nva[i].valuelen = e->valuelen;
+ nva[i].flags = NGHTTP2_NV_FLAG_NONE;
+ }
+
+ if(read_callback) {
+ nghttp2_data_provider data_prd;
+
+ data_prd.read_callback = read_callback;
+ data_prd.source.ptr = read_ctx;
+ stream_id = nghttp2_submit_request(h2, pri_spec, nva, nheader,
+ &data_prd, stream_user_data);
+ }
+ else {
+ stream_id = nghttp2_submit_request(h2, pri_spec, nva, nheader,
+ NULL, stream_user_data);
+ }
+
+ if(stream_id < 0) {
+ failf(data, "nghttp2_session_upgrade2() failed: %s(%d)",
+ nghttp2_strerror(stream_id), stream_id);
+ result = CURLE_SEND_ERROR;
+ goto out;
+ }
+ result = CURLE_OK;
+
+out:
+ free(nva);
+ Curl_dynhds_free(&h2_headers);
+ *pstream_id = stream_id;
+ return result;
+}
+
+static CURLcode submit_CONNECT(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_stream *ts)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ CURLcode result;
+ struct httpreq *req = NULL;
+
+ infof(data, "Establish HTTP/2 proxy tunnel to %s", ts->authority);
+
+ result = Curl_http_req_make(&req, "CONNECT", sizeof("CONNECT")-1,
+ NULL, 0, ts->authority, strlen(ts->authority),
+ NULL, 0);
+ if(result)
+ goto out;
+
+ /* Setup the proxy-authorization header, if any */
+ result = Curl_http_output_auth(data, cf->conn, req->method, HTTPREQ_GET,
+ req->authority, TRUE);
+ if(result)
+ goto out;
+
+ if(data->state.aptr.proxyuserpwd) {
+ result = Curl_dynhds_h1_cadd_line(&req->headers,
+ data->state.aptr.proxyuserpwd);
+ if(result)
+ goto out;
+ }
+
+ if(!Curl_checkProxyheaders(data, cf->conn, STRCONST("User-Agent"))
+ && data->set.str[STRING_USERAGENT]) {
+ result = Curl_dynhds_cadd(&req->headers, "User-Agent",
+ data->set.str[STRING_USERAGENT]);
+ if(result)
+ goto out;
+ }
+
+ result = Curl_dynhds_add_custom(data, TRUE, &req->headers);
+ if(result)
+ goto out;
+
+ result = h2_submit(&ts->stream_id, cf, data, ctx->h2, req,
+ NULL, ts, tunnel_send_callback, cf);
+ if(result) {
+ DEBUGF(LOG_CF(data, cf, "send: nghttp2_submit_request error (%s)%u",
+ nghttp2_strerror(ts->stream_id), ts->stream_id));
+ }
+
+out:
+ if(req)
+ Curl_http_req_free(req);
+ if(result)
+ failf(data, "Failed sending CONNECT to proxy");
+ return result;
+}
+
+static CURLcode inspect_response(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_stream *ts)
+{
+ CURLcode result = CURLE_OK;
+ struct dynhds_entry *auth_reply = NULL;
+ (void)cf;
+
+ DEBUGASSERT(ts->resp);
+ if(ts->resp->status/100 == 2) {
+ infof(data, "CONNECT tunnel established, response %d", ts->resp->status);
+ tunnel_go_state(cf, ts, TUNNEL_ESTABLISHED, data);
+ return CURLE_OK;
+ }
+
+ if(ts->resp->status == 401) {
+ auth_reply = Curl_dynhds_cget(&ts->resp->headers, "WWW-Authenticate");
+ }
+ else if(ts->resp->status == 407) {
+ auth_reply = Curl_dynhds_cget(&ts->resp->headers, "Proxy-Authenticate");
+ }
+
+ if(auth_reply) {
+ DEBUGF(LOG_CF(data, cf, "CONNECT: fwd auth header '%s'",
+ auth_reply->value));
+ result = Curl_http_input_auth(data, ts->resp->status == 407,
+ auth_reply->value);
+ if(result)
+ return result;
+ if(data->req.newurl) {
+ /* Indicator that we should try again */
+ Curl_safefree(data->req.newurl);
+ tunnel_go_state(cf, ts, TUNNEL_INIT, data);
+ return CURLE_OK;
+ }
+ }
+
+ /* Seems to have failed */
+ return CURLE_RECV_ERROR;
+}
+
+static CURLcode CONNECT(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct tunnel_stream *ts)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(ts);
+ DEBUGASSERT(ts->authority);
+ do {
+ switch(ts->state) {
+ case TUNNEL_INIT:
+ /* Prepare the CONNECT request and make a first attempt to send. */
+ DEBUGF(LOG_CF(data, cf, "CONNECT start for %s", ts->authority));
+ result = submit_CONNECT(cf, data, ts);
+ if(result)
+ goto out;
+ tunnel_go_state(cf, ts, TUNNEL_CONNECT, data);
+ /* FALLTHROUGH */
+
+ case TUNNEL_CONNECT:
+ /* see that the request is completely sent */
+ result = h2_progress_ingress(cf, data);
+ if(!result)
+ result = h2_progress_egress(cf, data);
+ if(result) {
+ tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
+ break;
+ }
+
+ if(ts->has_final_response) {
+ tunnel_go_state(cf, ts, TUNNEL_RESPONSE, data);
+ }
+ else {
+ result = CURLE_OK;
+ goto out;
+ }
+ /* FALLTHROUGH */
+
+ case TUNNEL_RESPONSE:
+ DEBUGASSERT(ts->has_final_response);
+ result = inspect_response(cf, data, ts);
+ if(result)
+ goto out;
+ break;
+
+ case TUNNEL_ESTABLISHED:
+ return CURLE_OK;
+
+ case TUNNEL_FAILED:
+ return CURLE_RECV_ERROR;
+
+ default:
+ break;
+ }
+
+ } while(ts->state == TUNNEL_INIT);
+
+out:
+ if(result || ctx->tunnel.closed)
+ tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
+ return result;
+}
+
+static CURLcode cf_h2_proxy_connect(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool blocking, bool *done)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ CURLcode result = CURLE_OK;
+ struct cf_call_data save;
+ timediff_t check;
+ struct tunnel_stream *ts = &ctx->tunnel;
+
+ if(cf->connected) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ /* Connect the lower filters first */
+ if(!cf->next->connected) {
+ result = Curl_conn_cf_connect(cf->next, data, blocking, done);
+ if(result || !*done)
+ return result;
+ }
+
+ *done = FALSE;
+
+ CF_DATA_SAVE(save, cf, data);
+ if(!ctx->h2) {
+ result = cf_h2_proxy_ctx_init(cf, data);
+ if(result)
+ goto out;
+ }
+ DEBUGASSERT(ts->authority);
+
+ check = Curl_timeleft(data, NULL, TRUE);
+ if(check <= 0) {
+ failf(data, "Proxy CONNECT aborted due to timeout");
+ result = CURLE_OPERATION_TIMEDOUT;
+ goto out;
+ }
+
+ /* for the secondary socket (FTP), use the "connect to host"
+ * but ignore the "connect to port" (use the secondary port)
+ */
+ result = CONNECT(cf, data, ts);
+
+out:
+ *done = (result == CURLE_OK) && (ts->state == TUNNEL_ESTABLISHED);
+ cf->connected = *done;
+ CF_DATA_RESTORE(cf, save);
+ return result;
+}
+
+static void cf_h2_proxy_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+
+ if(ctx) {
+ struct cf_call_data save;
+
+ CF_DATA_SAVE(save, cf, data);
+ cf_h2_proxy_ctx_clear(ctx);
+ CF_DATA_RESTORE(cf, save);
+ }
+}
+
+static void cf_h2_proxy_destroy(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+
+ (void)data;
+ if(ctx) {
+ cf_h2_proxy_ctx_free(ctx);
+ cf->ctx = NULL;
+ }
+}
+
+static bool cf_h2_proxy_data_pending(struct Curl_cfilter *cf,
+ const struct Curl_easy *data)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ if((ctx && !Curl_bufq_is_empty(&ctx->inbufq)) ||
+ (ctx && ctx->tunnel.state == TUNNEL_ESTABLISHED &&
+ !Curl_bufq_is_empty(&ctx->tunnel.recvbuf)))
+ return TRUE;
+ return cf->next? cf->next->cft->has_data_pending(cf->next, data) : FALSE;
+}
+
+static int cf_h2_proxy_get_select_socks(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ curl_socket_t *sock)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ int bitmap = GETSOCK_BLANK;
+ struct cf_call_data save;
+
+ CF_DATA_SAVE(save, cf, data);
+ sock[0] = Curl_conn_cf_get_socket(cf, data);
+ bitmap |= GETSOCK_READSOCK(0);
+
+ /* HTTP/2 layer wants to send data AND there's a window to send data in */
+ if(nghttp2_session_want_write(ctx->h2) &&
+ nghttp2_session_get_remote_window_size(ctx->h2))
+ bitmap |= GETSOCK_WRITESOCK(0);
+
+ CF_DATA_RESTORE(cf, save);
+ return bitmap;
+}
+
+static ssize_t h2_handle_tunnel_close(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ CURLcode *err)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ ssize_t rv = 0;
+
+ if(ctx->tunnel.error == NGHTTP2_REFUSED_STREAM) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] REFUSED_STREAM, try again on a new "
+ "connection", ctx->tunnel.stream_id));
+ connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */
+ *err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */
+ return -1;
+ }
+ else if(ctx->tunnel.error != NGHTTP2_NO_ERROR) {
+ failf(data, "HTTP/2 stream %u was not closed cleanly: %s (err %u)",
+ ctx->tunnel.stream_id, nghttp2_http2_strerror(ctx->tunnel.error),
+ ctx->tunnel.error);
+ *err = CURLE_HTTP2_STREAM;
+ return -1;
+ }
+ else if(ctx->tunnel.reset) {
+ failf(data, "HTTP/2 stream %u was reset", ctx->tunnel.stream_id);
+ *err = CURLE_RECV_ERROR;
+ return -1;
+ }
+
+ *err = CURLE_OK;
+ rv = 0;
+ DEBUGF(LOG_CF(data, cf, "handle_tunnel_close -> %zd, %d", rv, *err));
+ return rv;
+}
+
+static ssize_t tunnel_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
+ char *buf, size_t len, CURLcode *err)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ ssize_t nread = -1;
+
+ *err = CURLE_AGAIN;
+ if(!Curl_bufq_is_empty(&ctx->tunnel.recvbuf)) {
+ nread = Curl_bufq_read(&ctx->tunnel.recvbuf,
+ (unsigned char *)buf, len, err);
+ if(nread < 0)
+ goto out;
+ DEBUGASSERT(nread > 0);
+ }
+
+ if(nread < 0) {
+ if(ctx->tunnel.closed) {
+ nread = h2_handle_tunnel_close(cf, data, err);
+ }
+ else if(ctx->tunnel.reset ||
+ (ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) ||
+ (ctx->goaway && ctx->last_stream_id < ctx->tunnel.stream_id)) {
+ *err = CURLE_RECV_ERROR;
+ nread = -1;
+ }
+ }
+ else if(nread == 0) {
+ *err = CURLE_AGAIN;
+ nread = -1;
+ }
+
+out:
+ DEBUGF(LOG_CF(data, cf, "tunnel_recv(len=%zu) -> %zd, %d",
+ len, nread, *err));
+ return nread;
+}
+
+static ssize_t cf_h2_proxy_recv(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ char *buf, size_t len, CURLcode *err)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ ssize_t nread = -1;
+ struct cf_call_data save;
+ CURLcode result;
+
+ if(ctx->tunnel.state != TUNNEL_ESTABLISHED) {
+ *err = CURLE_RECV_ERROR;
+ return -1;
+ }
+ CF_DATA_SAVE(save, cf, data);
+
+ if(Curl_bufq_is_empty(&ctx->tunnel.recvbuf)) {
+ *err = h2_progress_ingress(cf, data);
+ if(*err)
+ goto out;
+ }
+
+ nread = tunnel_recv(cf, data, buf, len, err);
+
+ if(nread > 0) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] increase window by %zd",
+ ctx->tunnel.stream_id, nread));
+ nghttp2_session_consume(ctx->h2, ctx->tunnel.stream_id, (size_t)nread);
+ }
+
+ result = h2_progress_egress(cf, data);
+ if(result) {
+ *err = result;
+ nread = -1;
+ }
+
+out:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] cf_recv(len=%zu) -> %zd %d",
+ ctx->tunnel.stream_id, len, nread, *err));
+ CF_DATA_RESTORE(cf, save);
+ return nread;
+}
+
+static ssize_t cf_h2_proxy_send(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const void *mem, size_t len, CURLcode *err)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
+ ssize_t nwritten = -1;
+ const unsigned char *buf = mem;
+ size_t start_len = len;
+ int rv;
+
+ if(ctx->tunnel.state != TUNNEL_ESTABLISHED) {
+ *err = CURLE_SEND_ERROR;
+ return -1;
+ }
+ CF_DATA_SAVE(save, cf, data);
+
+ while(len) {
+ nwritten = Curl_bufq_write(&ctx->tunnel.sendbuf, buf, len, err);
+ if(nwritten <= 0) {
+ if(*err && *err != CURLE_AGAIN) {
+ DEBUGF(LOG_CF(data, cf, "error adding data to tunnel sendbuf: %d",
+ *err));
+ nwritten = -1;
+ goto out;
+ }
+ /* blocked */
+ nwritten = 0;
+ }
+ else {
+ DEBUGASSERT((size_t)nwritten <= len);
+ buf += (size_t)nwritten;
+ len -= (size_t)nwritten;
+ }
+
+ /* resume the tunnel stream and let the h2 session send, which
+ * triggers reading from tunnel.sendbuf */
+ rv = nghttp2_session_resume_data(ctx->h2, ctx->tunnel.stream_id);
+ if(nghttp2_is_fatal(rv)) {
+ *err = CURLE_SEND_ERROR;
+ nwritten = -1;
+ goto out;
+ }
+ *err = h2_progress_egress(cf, data);
+ if(*err) {
+ nwritten = -1;
+ goto out;
+ }
+
+ if(!nwritten && Curl_bufq_is_full(&ctx->tunnel.sendbuf)) {
+ size_t rwin;
+ /* we could not add to the buffer and after session processing,
+ * it is still full. */
+ rwin = nghttp2_session_get_stream_remote_window_size(
+ ctx->h2, ctx->tunnel.stream_id);
+ DEBUGF(LOG_CF(data, cf, "cf_send: tunnel win %u/%zu",
+ nghttp2_session_get_remote_window_size(ctx->h2), rwin));
+ if(rwin == 0) {
+ /* We cannot upload more as the stream's remote window size
+ * is 0. We need to receive WIN_UPDATEs before we can continue.
+ */
+ data->req.keepon |= KEEP_SEND_HOLD;
+ DEBUGF(LOG_CF(data, cf, "pausing send as remote flow "
+ "window is exhausted"));
+ }
+ break;
+ }
+ }
+
+ nwritten = start_len - len;
+ if(nwritten > 0) {
+ *err = CURLE_OK;
+ }
+ else if(ctx->tunnel.closed) {
+ nwritten = -1;
+ *err = CURLE_SEND_ERROR;
+ }
+ else {
+ nwritten = -1;
+ *err = CURLE_AGAIN;
+ }
+
+out:
+ DEBUGF(LOG_CF(data, cf, "cf_send(len=%zu) -> %zd, %d ",
+ start_len, nwritten, *err));
+ CF_DATA_RESTORE(cf, save);
+ return nwritten;
+}
+
+struct Curl_cftype Curl_cft_h2_proxy = {
+ "H2-PROXY",
+ CF_TYPE_IP_CONNECT,
+ CURL_LOG_DEFAULT,
+ cf_h2_proxy_destroy,
+ cf_h2_proxy_connect,
+ cf_h2_proxy_close,
+ Curl_cf_http_proxy_get_host,
+ cf_h2_proxy_get_select_socks,
+ cf_h2_proxy_data_pending,
+ cf_h2_proxy_send,
+ cf_h2_proxy_recv,
+ Curl_cf_def_cntrl,
+ Curl_cf_def_conn_is_alive,
+ Curl_cf_def_conn_keep_alive,
+ Curl_cf_def_query,
+};
+
+CURLcode Curl_cf_h2_proxy_insert_after(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct Curl_cfilter *cf_h2_proxy = NULL;
+ struct cf_h2_proxy_ctx *ctx;
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+
+ (void)data;
+ ctx = calloc(sizeof(*ctx), 1);
+ if(!ctx)
+ goto out;
+
+ result = Curl_cf_create(&cf_h2_proxy, &Curl_cft_h2_proxy, ctx);
+ if(result)
+ goto out;
+
+ Curl_conn_cf_insert_after(cf, cf_h2_proxy);
+ result = CURLE_OK;
+
+out:
+ if(result)
+ cf_h2_proxy_ctx_free(ctx);
+ return result;
+}
+
+#endif /* defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY) */
diff --git a/libs/libcurl/src/cf-h2-proxy.h b/libs/libcurl/src/cf-h2-proxy.h new file mode 100644 index 0000000000..b531028e7e --- /dev/null +++ b/libs/libcurl/src/cf-h2-proxy.h @@ -0,0 +1,39 @@ +#ifndef HEADER_CURL_H2_PROXY_H
+#define HEADER_CURL_H2_PROXY_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY)
+
+CURLcode Curl_cf_h2_proxy_insert_after(struct Curl_cfilter *cf,
+ struct Curl_easy *data);
+
+extern struct Curl_cftype Curl_cft_h2_proxy;
+
+
+#endif /* defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY) */
+
+#endif /* HEADER_CURL_H2_PROXY_H */
diff --git a/libs/libcurl/src/cf-haproxy.c b/libs/libcurl/src/cf-haproxy.c new file mode 100644 index 0000000000..0d58261ada --- /dev/null +++ b/libs/libcurl/src/cf-haproxy.c @@ -0,0 +1,246 @@ +/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if !defined(CURL_DISABLE_PROXY)
+
+#include <curl/curl.h>
+#include "urldata.h"
+#include "cfilters.h"
+#include "cf-haproxy.h"
+#include "curl_log.h"
+#include "multiif.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+
+typedef enum {
+ HAPROXY_INIT, /* init/default/no tunnel state */
+ HAPROXY_SEND, /* data_out being sent */
+ HAPROXY_DONE /* all work done */
+} haproxy_state;
+
+struct cf_haproxy_ctx {
+ int state;
+ struct dynbuf data_out;
+};
+
+static void cf_haproxy_ctx_reset(struct cf_haproxy_ctx *ctx)
+{
+ DEBUGASSERT(ctx);
+ ctx->state = HAPROXY_INIT;
+ Curl_dyn_reset(&ctx->data_out);
+}
+
+static void cf_haproxy_ctx_free(struct cf_haproxy_ctx *ctx)
+{
+ if(ctx) {
+ Curl_dyn_free(&ctx->data_out);
+ free(ctx);
+ }
+}
+
+static CURLcode cf_haproxy_date_out_set(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_haproxy_ctx *ctx = cf->ctx;
+ CURLcode result;
+ const char *tcp_version;
+
+ DEBUGASSERT(ctx);
+ DEBUGASSERT(ctx->state == HAPROXY_INIT);
+#ifdef USE_UNIX_SOCKETS
+ if(cf->conn->unix_domain_socket)
+ /* the buffer is large enough to hold this! */
+ result = Curl_dyn_addn(&ctx->data_out, STRCONST("PROXY UNKNOWN\r\n"));
+ else {
+#endif /* USE_UNIX_SOCKETS */
+ /* Emit the correct prefix for IPv6 */
+ tcp_version = cf->conn->bits.ipv6 ? "TCP6" : "TCP4";
+
+ result = Curl_dyn_addf(&ctx->data_out, "PROXY %s %s %s %i %i\r\n",
+ tcp_version,
+ data->info.conn_local_ip,
+ data->info.conn_primary_ip,
+ data->info.conn_local_port,
+ data->info.conn_primary_port);
+
+#ifdef USE_UNIX_SOCKETS
+ }
+#endif /* USE_UNIX_SOCKETS */
+ return result;
+}
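For reference, the line built here follows the human-readable PROXY protocol v1 format, e.g. "PROXY TCP4 192.0.2.10 198.51.100.1 56324 443\r\n" (addresses and ports are made-up example values). A self-contained sketch of the same formatting:

#include <stdio.h>

int main(void)
{
  char line[108]; /* v1 lines are at most 107 bytes including CRLF */
  snprintf(line, sizeof(line), "PROXY %s %s %s %i %i\r\n",
           "TCP4", "192.0.2.10", "198.51.100.1", 56324, 443);
  fputs(line, stdout);
  return 0;
}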
+
+static CURLcode cf_haproxy_connect(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool blocking, bool *done)
+{
+ struct cf_haproxy_ctx *ctx = cf->ctx;
+ CURLcode result;
+ size_t len;
+
+ DEBUGASSERT(ctx);
+ if(cf->connected) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ result = cf->next->cft->connect(cf->next, data, blocking, done);
+ if(result || !*done)
+ return result;
+
+ switch(ctx->state) {
+ case HAPROXY_INIT:
+ result = cf_haproxy_date_out_set(cf, data);
+ if(result)
+ goto out;
+ ctx->state = HAPROXY_SEND;
+ /* FALLTHROUGH */
+ case HAPROXY_SEND:
+ len = Curl_dyn_len(&ctx->data_out);
+ if(len > 0) {
+ ssize_t written = Curl_conn_send(data, cf->sockindex,
+ Curl_dyn_ptr(&ctx->data_out),
+ len, &result);
+ if(written < 0)
+ goto out;
+ Curl_dyn_tail(&ctx->data_out, len - (size_t)written);
+ if(Curl_dyn_len(&ctx->data_out) > 0) {
+ result = CURLE_OK;
+ goto out;
+ }
+ }
+ ctx->state = HAPROXY_DONE;
+ /* FALLTHROUGH */
+ default:
+ Curl_dyn_free(&ctx->data_out);
+ break;
+ }
+
+out:
+ *done = (!result) && (ctx->state == HAPROXY_DONE);
+ cf->connected = *done;
+ return result;
+}
+
+static void cf_haproxy_destroy(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ (void)data;
+ DEBUGF(LOG_CF(data, cf, "destroy"));
+ cf_haproxy_ctx_free(cf->ctx);
+}
+
+static void cf_haproxy_close(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ DEBUGF(LOG_CF(data, cf, "close"));
+ cf->connected = FALSE;
+ cf_haproxy_ctx_reset(cf->ctx);
+ if(cf->next)
+ cf->next->cft->close(cf->next, data);
+}
+
+static int cf_haproxy_get_select_socks(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ curl_socket_t *socks)
+{
+ int fds;
+
+ fds = cf->next->cft->get_select_socks(cf->next, data, socks);
+ if(!fds && cf->next->connected && !cf->connected) {
+ /* If we are not connected, but the filter "below" is
+ * and not waiting on something, we are sending. */
+ socks[0] = Curl_conn_cf_get_socket(cf, data);
+ return GETSOCK_WRITESOCK(0);
+ }
+ return fds;
+}
+
+
+struct Curl_cftype Curl_cft_haproxy = {
+ "HAPROXY",
+ 0,
+ 0,
+ cf_haproxy_destroy,
+ cf_haproxy_connect,
+ cf_haproxy_close,
+ Curl_cf_def_get_host,
+ cf_haproxy_get_select_socks,
+ Curl_cf_def_data_pending,
+ Curl_cf_def_send,
+ Curl_cf_def_recv,
+ Curl_cf_def_cntrl,
+ Curl_cf_def_conn_is_alive,
+ Curl_cf_def_conn_keep_alive,
+ Curl_cf_def_query,
+};
+
+static CURLcode cf_haproxy_create(struct Curl_cfilter **pcf,
+ struct Curl_easy *data)
+{
+ struct Curl_cfilter *cf = NULL;
+ struct cf_haproxy_ctx *ctx;
+ CURLcode result;
+
+ (void)data;
+ ctx = calloc(sizeof(*ctx), 1);
+ if(!ctx) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
+ }
+ ctx->state = HAPROXY_INIT;
+ Curl_dyn_init(&ctx->data_out, DYN_HAXPROXY);
+
+ result = Curl_cf_create(&cf, &Curl_cft_haproxy, ctx);
+ if(result)
+ goto out;
+ ctx = NULL;
+
+out:
+ cf_haproxy_ctx_free(ctx);
+ *pcf = result? NULL : cf;
+ return result;
+}
+
+CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at,
+ struct Curl_easy *data)
+{
+ struct Curl_cfilter *cf;
+ CURLcode result;
+
+ result = cf_haproxy_create(&cf, data);
+ if(result)
+ goto out;
+ Curl_conn_cf_insert_after(cf_at, cf);
+
+out:
+ return result;
+}
+
+#endif /* !CURL_DISABLE_PROXY */
diff --git a/libs/libcurl/src/cf-haproxy.h b/libs/libcurl/src/cf-haproxy.h new file mode 100644 index 0000000000..92f78a5481 --- /dev/null +++ b/libs/libcurl/src/cf-haproxy.h @@ -0,0 +1,39 @@ +#ifndef HEADER_CURL_CF_HAPROXY_H
+#define HEADER_CURL_CF_HAPROXY_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+#include "urldata.h"
+
+#if !defined(CURL_DISABLE_PROXY)
+
+CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at,
+ struct Curl_easy *data);
+
+extern struct Curl_cftype Curl_cft_haproxy;
+
+#endif /* !CURL_DISABLE_PROXY */
+
+#endif /* HEADER_CURL_CF_HAPROXY_H */
diff --git a/libs/libcurl/src/cf-https-connect.c b/libs/libcurl/src/cf-https-connect.c index ba5c00e965..6cdd0ae3f8 100644 --- a/libs/libcurl/src/cf-https-connect.c +++ b/libs/libcurl/src/cf-https-connect.c @@ -496,11 +496,11 @@ out: return result;
}
-CURLcode Curl_cf_http_connect_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex,
- const struct Curl_dns_entry *remotehost,
- bool try_h3, bool try_h21)
+static CURLcode cf_http_connect_add(struct Curl_easy *data,
+ struct connectdata *conn,
+ int sockindex,
+ const struct Curl_dns_entry *remotehost,
+ bool try_h3, bool try_h21)
{
struct Curl_cfilter *cf;
CURLcode result = CURLE_OK;
@@ -514,24 +514,6 @@ out: return result;
}
-CURLcode
-Curl_cf_http_connect_insert_after(struct Curl_cfilter *cf_at,
- struct Curl_easy *data,
- const struct Curl_dns_entry *remotehost,
- bool try_h3, bool try_h21)
-{
- struct Curl_cfilter *cf;
- CURLcode result;
-
- DEBUGASSERT(data);
- result = cf_hc_create(&cf, data, remotehost, try_h3, try_h21);
- if(result)
- goto out;
- Curl_conn_cf_insert_after(cf_at, cf);
-out:
- return result;
-}
-
CURLcode Curl_cf_https_setup(struct Curl_easy *data,
struct connectdata *conn,
int sockindex,
@@ -560,8 +542,8 @@ CURLcode Curl_cf_https_setup(struct Curl_easy *data, try_h21 = TRUE;
}
- result = Curl_cf_http_connect_add(data, conn, sockindex, remotehost,
- try_h3, try_h21);
+ result = cf_http_connect_add(data, conn, sockindex, remotehost,
+ try_h3, try_h21);
out:
return result;
}
diff --git a/libs/libcurl/src/cf-socket.c b/libs/libcurl/src/cf-socket.c index 69d44369fe..73de784fe1 100644 --- a/libs/libcurl/src/cf-socket.c +++ b/libs/libcurl/src/cf-socket.c @@ -54,6 +54,7 @@ #endif
#include "urldata.h"
+#include "bufq.h"
#include "sendf.h"
#include "if2ip.h"
#include "strerror.h"
@@ -79,6 +80,22 @@ #include "memdebug.h"
+#if defined(ENABLE_IPV6) && defined(IPV6_V6ONLY) && defined(WIN32)
+/* This enables support for IPv4-mapped IPv6 addresses.
+ * Linux kernel, NetBSD, FreeBSD and Darwin: default is off;
+ * Windows Vista and later: default is on;
+ * DragonFly BSD: acts like off, and dummy setting;
+ * OpenBSD and earlier Windows: unsupported.
+ * Linux: controlled by /proc/sys/net/ipv6/bindv6only.
+ */
+static void set_ipv6_v6only(curl_socket_t sockfd, int on)
+{
+ (void)setsockopt(sockfd, IPPROTO_IPV6, IPV6_V6ONLY, (void *)&on, sizeof(on));
+}
+#else
+#define set_ipv6_v6only(x,y)
+#endif
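A hedged standalone example of what clearing IPV6_V6ONLY buys on platforms where it defaults to on: an AF_INET6 socket can then also reach IPv4 peers via IPv4-mapped addresses (sketch only, error handling trimmed):

#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

static int open_dual_stack_tcp_socket(void)
{
  int off = 0;
  int fd = socket(AF_INET6, SOCK_STREAM, 0);
  if(fd >= 0)
    (void)setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
  return fd;
}

int main(void)
{
  int fd = open_dual_stack_tcp_socket();
  if(fd >= 0)
    close(fd);
  return 0;
}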
+
static void tcpnodelay(struct Curl_easy *data, curl_socket_t sockfd)
{
#if defined(TCP_NODELAY)
@@ -195,6 +212,10 @@ tcpkeepalive(struct Curl_easy *data, }
}
+/**
+ * Assign the address `ai` to the Curl_sockaddr_ex `dest` and
+ * set the transport used.
+ */
void Curl_sock_assign_addr(struct Curl_sockaddr_ex *dest,
const struct Curl_addrinfo *ai,
int transport)
@@ -224,7 +245,7 @@ void Curl_sock_assign_addr(struct Curl_sockaddr_ex *dest, dest->addrlen = ai->ai_addrlen;
if(dest->addrlen > sizeof(struct Curl_sockaddr_storage))
- dest->addrlen = sizeof(struct Curl_sockaddr_storage);
+ dest->addrlen = sizeof(struct Curl_sockaddr_storage);
memcpy(&dest->sa_addr, ai->ai_addr, dest->addrlen);
}
@@ -700,8 +721,11 @@ static bool verifyconnect(curl_socket_t sockfd, int *error) return rc;
}
-CURLcode Curl_socket_connect_result(struct Curl_easy *data,
- const char *ipaddress, int error)
+/**
+ * Determine the curl code for a socket connect() == -1 with errno.
+ */
+static CURLcode socket_connect_result(struct Curl_easy *data,
+ const char *ipaddress, int error)
{
char buffer[STRERROR_LEN];
@@ -729,29 +753,20 @@ CURLcode Curl_socket_connect_result(struct Curl_easy *data, }
}
-#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
-struct io_buffer {
- char *bufr;
- size_t allc; /* size of the current allocation */
- size_t head; /* bufr index for next read */
- size_t tail; /* bufr index for next write */
-};
-
-static void io_buffer_reset(struct io_buffer *iob)
-{
- if(iob->bufr)
- free(iob->bufr);
- memset(iob, 0, sizeof(*iob));
-}
-#endif /* USE_RECV_BEFORE_SEND_WORKAROUND */
+/* We have a recv buffer to enhance reads with len < NW_SMALL_READS.
+ * This happens often on TLS connections where the TLS implementation
+ * tries to read the head of a TLS record, determine the length of the
+ * full record and then make a subsequent read for that.
+ * On large reads, we will not fill the buffer to avoid the double copy. */
+#define NW_RECV_CHUNK_SIZE (64 * 1024)
+#define NW_RECV_CHUNKS 1
+#define NW_SMALL_READS (1024)
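In other words: a read request smaller than 1 KiB first slurps up to 64 KiB from the socket into recvbuf and then serves the caller from that buffer, while larger reads keep going straight to the socket. A trivial standalone illustration of that decision (thresholds copied from the macros above):

#include <stdio.h>

#define SMALL_READS 1024

static const char *read_path(size_t len, int buffering)
{
  return (buffering && len < SMALL_READS)
         ? "fill recv buffer, copy out"
         : "read directly from socket";
}

int main(void)
{
  printf("5 byte TLS header read: %s\n", read_path(5, 1));
  printf("16 KiB body read:       %s\n", read_path(16 * 1024, 1));
  return 0;
}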
struct cf_socket_ctx {
int transport;
struct Curl_sockaddr_ex addr; /* address to connect to */
curl_socket_t sock; /* current attempt socket */
-#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
- struct io_buffer recv_buffer;
-#endif
+ struct bufq recvbuf; /* used when `buffer_recv` is set */
char r_ip[MAX_IPADR_LEN]; /* remote IP as string */
int r_port; /* remote port number */
char l_ip[MAX_IPADR_LEN]; /* local IP as string */
@@ -763,6 +778,7 @@ struct cf_socket_ctx { BIT(got_first_byte); /* if first byte was received */
BIT(accepted); /* socket was accepted, not connected */
BIT(active);
+ BIT(buffer_recv);
};
static void cf_socket_ctx_init(struct cf_socket_ctx *ctx,
@@ -773,6 +789,56 @@ static void cf_socket_ctx_init(struct cf_socket_ctx *ctx, ctx->sock = CURL_SOCKET_BAD;
ctx->transport = transport;
Curl_sock_assign_addr(&ctx->addr, ai, transport);
+ Curl_bufq_init(&ctx->recvbuf, NW_RECV_CHUNK_SIZE, NW_RECV_CHUNKS);
+}
+
+struct reader_ctx {
+ struct Curl_cfilter *cf;
+ struct Curl_easy *data;
+};
+
+static ssize_t nw_in_read(void *reader_ctx,
+ unsigned char *buf, size_t len,
+ CURLcode *err)
+{
+ struct reader_ctx *rctx = reader_ctx;
+ struct cf_socket_ctx *ctx = rctx->cf->ctx;
+ ssize_t nread;
+
+ *err = CURLE_OK;
+ nread = sread(ctx->sock, buf, len);
+
+ if(-1 == nread) {
+ int sockerr = SOCKERRNO;
+
+ if(
+#ifdef WSAEWOULDBLOCK
+ /* This is how Windows does it */
+ (WSAEWOULDBLOCK == sockerr)
+#else
+ /* errno may be EWOULDBLOCK or on some systems EAGAIN when it returned
+ due to there being no data to read without blocking. We therefore
+ treat both error codes the same here */
+ (EWOULDBLOCK == sockerr) || (EAGAIN == sockerr) || (EINTR == sockerr)
+#endif
+ ) {
+ /* this is just a case of EWOULDBLOCK */
+ *err = CURLE_AGAIN;
+ nread = -1;
+ }
+ else {
+ char buffer[STRERROR_LEN];
+
+ failf(rctx->data, "Recv failure: %s",
+ Curl_strerror(sockerr, buffer, sizeof(buffer)));
+ rctx->data->state.os_errno = sockerr;
+ *err = CURLE_RECV_ERROR;
+ nread = -1;
+ }
+ }
+ DEBUGF(LOG_CF(rctx->data, rctx->cf, "nw_in_read(len=%zu) -> %d, err=%d",
+ len, (int)nread, *err));
+ return nread;
}
static void cf_socket_close(struct Curl_cfilter *cf, struct Curl_easy *data)
@@ -786,14 +852,14 @@ static void cf_socket_close(struct Curl_cfilter *cf, struct Curl_easy *data) * closed it) and we just forget about it.
*/
if(ctx->sock == cf->conn->sock[cf->sockindex]) {
- DEBUGF(LOG_CF(data, cf, "cf_socket_close(%d, active)",
- (int)ctx->sock));
+ DEBUGF(LOG_CF(data, cf, "cf_socket_close(%" CURL_FORMAT_SOCKET_T
+ ", active)", ctx->sock));
socket_close(data, cf->conn, !ctx->accepted, ctx->sock);
cf->conn->sock[cf->sockindex] = CURL_SOCKET_BAD;
}
else {
- DEBUGF(LOG_CF(data, cf, "cf_socket_close(%d) no longer at "
- "conn->sock[], discarding", (int)ctx->sock));
+ DEBUGF(LOG_CF(data, cf, "cf_socket_close(%" CURL_FORMAT_SOCKET_T
+ ") no longer at conn->sock[], discarding", ctx->sock));
/* TODO: we do not want this to happen. Need to check which
* code is messing with conn->sock[cf->sockindex] */
}
@@ -803,15 +869,14 @@ static void cf_socket_close(struct Curl_cfilter *cf, struct Curl_easy *data) }
else {
/* this is our local socket, we did never publish it */
- DEBUGF(LOG_CF(data, cf, "cf_socket_close(%d, not active)",
- (int)ctx->sock));
+ DEBUGF(LOG_CF(data, cf, "cf_socket_close(%" CURL_FORMAT_SOCKET_T
+ ", not active)", ctx->sock));
sclose(ctx->sock);
ctx->sock = CURL_SOCKET_BAD;
}
-#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
- io_buffer_reset(&ctx->recv_buffer);
-#endif
+ Curl_bufq_reset(&ctx->recvbuf);
ctx->active = FALSE;
+ ctx->buffer_recv = FALSE;
memset(&ctx->started_at, 0, sizeof(ctx->started_at));
memset(&ctx->connected_at, 0, sizeof(ctx->connected_at));
}
@@ -825,6 +890,7 @@ static void cf_socket_destroy(struct Curl_cfilter *cf, struct Curl_easy *data) cf_socket_close(cf, data);
DEBUGF(LOG_CF(data, cf, "destroy"));
+ Curl_bufq_free(&ctx->recvbuf);
free(ctx);
cf->ctx = NULL;
}
@@ -901,8 +967,10 @@ static CURLcode cf_socket_open(struct Curl_cfilter *cf, goto out;
#ifdef ENABLE_IPV6
- if(ctx->addr.family == AF_INET6)
+ if(ctx->addr.family == AF_INET6) {
+ set_ipv6_v6only(ctx->sock, 0);
ipmsg = " Trying [%s]:%d...";
+ }
else
#endif
ipmsg = " Trying %s:%d...";
@@ -975,7 +1043,8 @@ out: ctx->connected_at = Curl_now();
cf->connected = TRUE;
}
- DEBUGF(LOG_CF(data, cf, "cf_socket_open() -> %d, fd=%d", result, ctx->sock));
+ DEBUGF(LOG_CF(data, cf, "cf_socket_open() -> %d, fd=%" CURL_FORMAT_SOCKET_T,
+ result, ctx->sock));
return result;
}
@@ -1016,7 +1085,8 @@ static int do_connect(struct Curl_cfilter *cf, struct Curl_easy *data, #elif defined(TCP_FASTOPEN_CONNECT) /* Linux >= 4.11 */
if(setsockopt(ctx->sock, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
(void *)&optval, sizeof(optval)) < 0)
- infof(data, "Failed to enable TCP Fast Open on fd %d", ctx->sock);
+ infof(data, "Failed to enable TCP Fast Open on fd %"
+ CURL_FORMAT_SOCKET_T, ctx->sock);
rc = connect(ctx->sock, &ctx->addr.sa_addr, ctx->addr.addrlen);
#elif defined(MSG_FASTOPEN) /* old Linux */
@@ -1065,7 +1135,7 @@ static CURLcode cf_tcp_connect(struct Curl_cfilter *cf, /* Connect TCP socket */
rc = do_connect(cf, data, cf->conn->bits.tcp_fastopen);
if(-1 == rc) {
- result = Curl_socket_connect_result(data, ctx->r_ip, SOCKERRNO);
+ result = socket_connect_result(data, ctx->r_ip, SOCKERRNO);
goto out;
}
}
@@ -1151,89 +1221,16 @@ static int cf_socket_get_select_socks(struct Curl_cfilter *cf, return rc;
}
-#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
-
-static CURLcode pre_receive_plain(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct cf_socket_ctx *ctx = cf->ctx;
- struct io_buffer * const iob = &ctx->recv_buffer;
-
- /* WinSock will destroy unread received data if send() is
- failed.
- To avoid lossage of received data, recv() must be
- performed before every send() if any incoming data is
- available. However, skip this, if buffer is already full. */
- if((cf->conn->handler->protocol&PROTO_FAMILY_HTTP) != 0 &&
- cf->conn->recv[cf->sockindex] == Curl_conn_recv &&
- (!iob->bufr || (iob->allc > iob->tail))) {
- const int readymask = Curl_socket_check(ctx->sock, CURL_SOCKET_BAD,
- CURL_SOCKET_BAD, 0);
- if(readymask != -1 && (readymask & CURL_CSELECT_IN) != 0) {
- size_t bytestorecv = iob->allc - iob->tail;
- ssize_t nread;
- /* Have some incoming data */
- if(!iob->bufr) {
- /* Use buffer double default size for intermediate buffer */
- iob->allc = 2 * data->set.buffer_size;
- iob->bufr = malloc(iob->allc);
- if(!iob->bufr)
- return CURLE_OUT_OF_MEMORY;
- iob->tail = 0;
- iob->head = 0;
- bytestorecv = iob->allc;
- }
-
- nread = sread(ctx->sock, iob->bufr + iob->tail, bytestorecv);
- if(nread > 0)
- iob->tail += (size_t)nread;
- }
- }
- return CURLE_OK;
-}
-
-static ssize_t get_pre_recved(struct Curl_cfilter *cf, char *buf, size_t len)
-{
- struct cf_socket_ctx *ctx = cf->ctx;
- struct io_buffer * const iob = &ctx->recv_buffer;
- size_t copysize;
- if(!iob->bufr)
- return 0;
-
- DEBUGASSERT(iob->allc > 0);
- DEBUGASSERT(iob->tail <= iob->allc);
- DEBUGASSERT(iob->head <= iob->tail);
- /* Check and process data that already received and storied in internal
- intermediate buffer */
- if(iob->tail > iob->head) {
- copysize = CURLMIN(len, iob->tail - iob->head);
- memcpy(buf, iob->bufr + iob->head, copysize);
- iob->head += copysize;
- }
- else
- copysize = 0; /* buffer was allocated, but nothing was received */
-
- /* Free intermediate buffer if it has no unprocessed data */
- if(iob->head == iob->tail)
- io_buffer_reset(iob);
-
- return (ssize_t)copysize;
-}
-#endif /* USE_RECV_BEFORE_SEND_WORKAROUND */
-
static bool cf_socket_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
struct cf_socket_ctx *ctx = cf->ctx;
int readable;
-#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
- if(ctx->recv_buffer.bufr && ctx->recv_buffer.allc &&
- ctx->recv_buffer.tail > ctx->recv_buffer.head)
- return TRUE;
-#endif
-
(void)data;
+ if(!Curl_bufq_is_empty(&ctx->recvbuf))
+ return TRUE;
+
readable = SOCKET_READABLE(ctx->sock, 0);
return (readable > 0 && (readable & CURL_CSELECT_IN));
}
@@ -1246,19 +1243,6 @@ static ssize_t cf_socket_send(struct Curl_cfilter *cf, struct Curl_easy *data, ssize_t nwritten;
*err = CURLE_OK;
-
-#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
- /* WinSock will destroy unread received data if send() is
- failed.
- To avoid lossage of received data, recv() must be
- performed before every send() if any incoming data is
- available. */
- if(pre_receive_plain(cf, data)) {
- *err = CURLE_OUT_OF_MEMORY;
- return -1;
- }
-#endif
-
fdsave = cf->conn->sock[cf->sockindex];
cf->conn->sock[cf->sockindex] = ctx->sock;
@@ -1315,47 +1299,50 @@ static ssize_t cf_socket_recv(struct Curl_cfilter *cf, struct Curl_easy *data, *err = CURLE_OK;
-#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
- /* Check and return data that already received and storied in internal
- intermediate buffer */
- nread = get_pre_recved(cf, buf, len);
- if(nread > 0) {
- *err = CURLE_OK;
- return nread;
- }
-#endif
-
fdsave = cf->conn->sock[cf->sockindex];
cf->conn->sock[cf->sockindex] = ctx->sock;
- nread = sread(ctx->sock, buf, len);
-
- if(-1 == nread) {
- int sockerr = SOCKERRNO;
-
- if(
-#ifdef WSAEWOULDBLOCK
- /* This is how Windows does it */
- (WSAEWOULDBLOCK == sockerr)
-#else
- /* errno may be EWOULDBLOCK or on some systems EAGAIN when it returned
- due to its inability to send off data without blocking. We therefore
- treat both error codes the same here */
- (EWOULDBLOCK == sockerr) || (EAGAIN == sockerr) || (EINTR == sockerr)
-#endif
- ) {
- /* this is just a case of EWOULDBLOCK */
- *err = CURLE_AGAIN;
+ if(ctx->buffer_recv && !Curl_bufq_is_empty(&ctx->recvbuf)) {
+ DEBUGF(LOG_CF(data, cf, "recv from buffer"));
+ nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
+ }
+ else {
+ struct reader_ctx rctx;
+
+ rctx.cf = cf;
+ rctx.data = data;
+
+ /* "small" reads may trigger filling our buffer, "large" reads
+ * are probably not worth the additional copy */
+ if(ctx->buffer_recv && len < NW_SMALL_READS) {
+ ssize_t nwritten;
+ nwritten = Curl_bufq_slurp(&ctx->recvbuf, nw_in_read, &rctx, err);
+ if(nwritten < 0 && !Curl_bufq_is_empty(&ctx->recvbuf)) {
+ /* we have a partial read with an error. need to deliver
+ * what we got, return the error later. */
+ DEBUGF(LOG_CF(data, cf, "partial read: empty buffer first"));
+ nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
+ }
+ else if(nwritten < 0) {
+ nread = -1;
+ goto out;
+ }
+ else if(nwritten == 0) {
+ /* eof */
+ *err = CURLE_OK;
+ nread = 0;
+ }
+ else {
+ DEBUGF(LOG_CF(data, cf, "buffered %zd additional bytes", nwritten));
+ nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
+ }
}
else {
- char buffer[STRERROR_LEN];
- failf(data, "Recv failure: %s",
- Curl_strerror(sockerr, buffer, sizeof(buffer)));
- data->state.os_errno = sockerr;
- *err = CURLE_RECV_ERROR;
+ nread = nw_in_read(&rctx, (unsigned char *)buf, len, err);
}
}
+out:
DEBUGF(LOG_CF(data, cf, "recv(len=%zu) -> %d, err=%d", len, (int)nread,
*err));
if(nread > 0 && !ctx->got_first_byte) {
@@ -1411,6 +1398,11 @@ static void cf_socket_active(struct Curl_cfilter *cf, struct Curl_easy *data) conn_set_primary_ip(cf, data);
set_local_ip(cf, data);
Curl_persistconninfo(data, cf->conn, ctx->l_ip, ctx->l_port);
+ /* buffering is currently disabled by default because we have stalls
+ * in parallel transfers where not all buffered data is consumed and no
+ * socket events happen.
+ */
+ ctx->buffer_recv = FALSE;
}
ctx->active = TRUE;
}
@@ -1577,12 +1569,13 @@ static CURLcode cf_udp_setup_quic(struct Curl_cfilter *cf, rc = connect(ctx->sock, &ctx->addr.sa_addr, ctx->addr.addrlen);
if(-1 == rc) {
- return Curl_socket_connect_result(data, ctx->r_ip, SOCKERRNO);
+ return socket_connect_result(data, ctx->r_ip, SOCKERRNO);
}
set_local_ip(cf, data);
- DEBUGF(LOG_CF(data, cf, "%s socket %d connected: [%s:%d] -> [%s:%d]",
- (ctx->transport == TRNSPRT_QUIC)? "QUIC" : "UDP",
- ctx->sock, ctx->l_ip, ctx->l_port, ctx->r_ip, ctx->r_port));
+ DEBUGF(LOG_CF(data, cf, "%s socket %" CURL_FORMAT_SOCKET_T
+ " connected: [%s:%d] -> [%s:%d]",
+ (ctx->transport == TRNSPRT_QUIC)? "QUIC" : "UDP",
+ ctx->sock, ctx->l_ip, ctx->l_port, ctx->r_ip, ctx->r_port));
(void)curlx_nonblock(ctx->sock, TRUE);
switch(ctx->addr.family) {
@@ -1623,10 +1616,6 @@ static CURLcode cf_udp_connect(struct Curl_cfilter *cf, result = cf_socket_open(cf, data);
if(result) {
DEBUGF(LOG_CF(data, cf, "cf_udp_connect(), open failed -> %d", result));
- if(ctx->sock != CURL_SOCKET_BAD) {
- socket_close(data, cf->conn, TRUE, ctx->sock);
- ctx->sock = CURL_SOCKET_BAD;
- }
goto out;
}
@@ -1634,12 +1623,13 @@ static CURLcode cf_udp_connect(struct Curl_cfilter *cf, result = cf_udp_setup_quic(cf, data);
if(result)
goto out;
- DEBUGF(LOG_CF(data, cf, "cf_udp_connect(), opened socket=%d (%s:%d)",
+ DEBUGF(LOG_CF(data, cf, "cf_udp_connect(), opened socket=%"
+ CURL_FORMAT_SOCKET_T " (%s:%d)",
ctx->sock, ctx->l_ip, ctx->l_port));
}
else {
- DEBUGF(LOG_CF(data, cf, "cf_udp_connect(), opened socket=%d "
- "(unconnected)", ctx->sock));
+ DEBUGF(LOG_CF(data, cf, "cf_udp_connect(), opened socket=%"
+ CURL_FORMAT_SOCKET_T " (unconnected)", ctx->sock));
}
*done = TRUE;
cf->connected = TRUE;
@@ -1811,7 +1801,8 @@ CURLcode Curl_conn_tcp_listen_set(struct Curl_easy *data, ctx->active = TRUE;
ctx->connected_at = Curl_now();
cf->connected = TRUE;
- DEBUGF(LOG_CF(data, cf, "Curl_conn_tcp_listen_set(%d)", (int)ctx->sock));
+ DEBUGF(LOG_CF(data, cf, "Curl_conn_tcp_listen_set(%"
+ CURL_FORMAT_SOCKET_T ")", ctx->sock));
out:
if(result) {
@@ -1875,13 +1866,17 @@ CURLcode Curl_conn_tcp_accepted_set(struct Curl_easy *data, ctx->accepted = TRUE;
ctx->connected_at = Curl_now();
cf->connected = TRUE;
- DEBUGF(LOG_CF(data, cf, "accepted_set(sock=%d, remote=%s port=%d)",
- (int)ctx->sock, ctx->r_ip, ctx->r_port));
+ DEBUGF(LOG_CF(data, cf, "accepted_set(sock=%" CURL_FORMAT_SOCKET_T
+ ", remote=%s port=%d)",
+ ctx->sock, ctx->r_ip, ctx->r_port));
return CURLE_OK;
}
-bool Curl_cf_is_socket(struct Curl_cfilter *cf)
+/**
+ * Return TRUE iff `cf` is a socket filter.
+ */
+static bool cf_is_socket(struct Curl_cfilter *cf)
{
return cf && (cf->cft == &Curl_cft_tcp ||
cf->cft == &Curl_cft_udp ||
@@ -1896,7 +1891,7 @@ CURLcode Curl_cf_socket_peek(struct Curl_cfilter *cf, const char **pr_ip_str, int *pr_port,
const char **pl_ip_str, int *pl_port)
{
- if(Curl_cf_is_socket(cf) && cf->ctx) {
+ if(cf_is_socket(cf) && cf->ctx) {
struct cf_socket_ctx *ctx = cf->ctx;
if(psock)
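Illustration (not part of the patch): the reworked cf_socket_recv() above drains the bufq receive buffer before touching the socket and only fills that buffer for "small" reads. A condensed sketch of the same read order, assuming the names used in the hunk (ctx->recvbuf, NW_SMALL_READS, nw_in_read, rctx); EOF handling is simplified here:

  /* sketch only: buffer-first read order of cf_socket_recv() */
  if(ctx->buffer_recv && !Curl_bufq_is_empty(&ctx->recvbuf))
    nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
  else if(ctx->buffer_recv && len < NW_SMALL_READS) {
    ssize_t n = Curl_bufq_slurp(&ctx->recvbuf, nw_in_read, &rctx, err);
    if(n < 0 && Curl_bufq_is_empty(&ctx->recvbuf))
      nread = -1;           /* nothing buffered, report the error */
    else                    /* data buffered (or EOF): deliver what is there */
      nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
  }
  else
    nread = nw_in_read(&rctx, (unsigned char *)buf, len, err);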
diff --git a/libs/libcurl/src/cf-socket.h b/libs/libcurl/src/cf-socket.h index 897a46e580..805e612bd4 100644 --- a/libs/libcurl/src/cf-socket.h +++ b/libs/libcurl/src/cf-socket.h @@ -34,6 +34,23 @@ struct Curl_easy; struct connectdata;
struct Curl_sockaddr_ex;
+#ifndef SIZEOF_CURL_SOCKET_T
+/* configure and cmake check and set the define */
+# ifdef _WIN64
+# define SIZEOF_CURL_SOCKET_T 8
+# else
+/* default guess */
+# define SIZEOF_CURL_SOCKET_T 4
+# endif
+#endif
+
+#if SIZEOF_CURL_SOCKET_T < 8
+# define CURL_FORMAT_SOCKET_T "d"
+#else
+# define CURL_FORMAT_SOCKET_T "qd"
+#endif
+
+
/*
* The Curl_sockaddr_ex structure is basically libcurl's external API
* curl_sockaddr structure with enough space available to directly hold any
@@ -70,12 +87,6 @@ CURLcode Curl_socket_open(struct Curl_easy *data, int Curl_socket_close(struct Curl_easy *data, struct connectdata *conn,
curl_socket_t sock);
-/**
- * Determine the curl code for a socket connect() == -1 with errno.
- */
-CURLcode Curl_socket_connect_result(struct Curl_easy *data,
- const char *ipaddress, int error);
-
#ifdef USE_WINSOCK
/* When you run a program that uses the Windows Sockets API, you may
experience slow performance when you copy data to a TCP server.
@@ -155,11 +166,6 @@ CURLcode Curl_conn_tcp_accepted_set(struct Curl_easy *data, curl_socket_t *s);
/**
- * Return TRUE iff `cf` is a socket filter.
- */
-bool Curl_cf_is_socket(struct Curl_cfilter *cf);
-
-/**
* Peek at the socket and remote ip/port the socket filter is using.
* The filter owns all returned values.
* @param psock pointer to hold socket descriptor or NULL
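A short illustration (not part of the patch) of how the CURL_FORMAT_SOCKET_T macro above is used: it supplies the printf conversion for curl_socket_t so the logging call sites changed in cf-socket.c stay correct whether the socket type is 32 or 64 bits wide.

  /* sketch: log a curl_socket_t portably */
  infof(data, "failed on socket %" CURL_FORMAT_SOCKET_T, ctx->sock);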
diff --git a/libs/libcurl/src/cfilters.c b/libs/libcurl/src/cfilters.c index ffd0dbc883..c6ddd47a4e 100644 --- a/libs/libcurl/src/cfilters.c +++ b/libs/libcurl/src/cfilters.c @@ -44,40 +44,18 @@ #define ARRAYSIZE(A) (sizeof(A)/sizeof((A)[0]))
#endif
-
-void Curl_cf_def_destroy_this(struct Curl_cfilter *cf, struct Curl_easy *data)
-{
- (void)cf;
- (void)data;
-}
-
+#ifdef DEBUGBUILD
+/* used by unit2600.c */
void Curl_cf_def_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
cf->connected = FALSE;
if(cf->next)
cf->next->cft->close(cf->next, data);
}
+#endif
-CURLcode Curl_cf_def_connect(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- bool blocking, bool *done)
-{
- CURLcode result;
-
- if(cf->connected) {
- *done = TRUE;
- return CURLE_OK;
- }
- if(cf->next) {
- result = cf->next->cft->connect(cf->next, data, blocking, done);
- if(!result && *done) {
- cf->connected = TRUE;
- }
- return result;
- }
- *done = FALSE;
- return CURLE_FAILED_INIT;
-}
+static void conn_report_connect_stats(struct Curl_easy *data,
+ struct connectdata *conn);
void Curl_cf_def_get_host(struct Curl_cfilter *cf, struct Curl_easy *data,
const char **phost, const char **pdisplay_host,
@@ -283,21 +261,31 @@ void Curl_conn_cf_insert_after(struct Curl_cfilter *cf_at, *pnext = tail;
}
-void Curl_conn_cf_discard(struct Curl_cfilter *cf, struct Curl_easy *data)
+bool Curl_conn_cf_discard_sub(struct Curl_cfilter *cf,
+ struct Curl_cfilter *discard,
+ struct Curl_easy *data,
+ bool destroy_always)
{
- struct Curl_cfilter **pprev = &cf->conn->cfilter[cf->sockindex];
+ struct Curl_cfilter **pprev = &cf->next;
+ bool found = FALSE;
- /* remove from chain if still in there */
+ /* remove from sub-chain and destroy */
DEBUGASSERT(cf);
- while (*pprev) {
- if (*pprev == cf) {
- *pprev = cf->next;
+ while(*pprev) {
+ if(*pprev == cf) {
+ *pprev = discard->next;
+ discard->next = NULL;
+ found = TRUE;
break;
}
pprev = &((*pprev)->next);
}
- cf->cft->destroy(cf, data);
- free(cf);
+ if(found || destroy_always) {
+ discard->next = NULL;
+ discard->cft->destroy(discard, data);
+ free(discard);
+ }
+ return found;
}
CURLcode Curl_conn_cf_connect(struct Curl_cfilter *cf,
@@ -324,14 +312,6 @@ int Curl_conn_cf_get_select_socks(struct Curl_cfilter *cf, return 0;
}
-bool Curl_conn_cf_data_pending(struct Curl_cfilter *cf,
- const struct Curl_easy *data)
-{
- if(cf)
- return cf->cft->has_data_pending(cf, data);
- return FALSE;
-}
-
ssize_t Curl_conn_cf_send(struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err)
{
@@ -371,11 +351,11 @@ CURLcode Curl_conn_connect(struct Curl_easy *data, result = cf->cft->connect(cf, data, blocking, done);
if(!result && *done) {
Curl_conn_ev_update_info(data, data->conn);
- Curl_conn_report_connect_stats(data, data->conn);
+ conn_report_connect_stats(data, data->conn);
data->conn->keepalive = Curl_now();
}
else if(result) {
- Curl_conn_report_connect_stats(data, data->conn);
+ conn_report_connect_stats(data, data->conn);
}
}
@@ -405,10 +385,8 @@ bool Curl_conn_is_ip_connected(struct Curl_easy *data, int sockindex) return FALSE;
}
-bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex)
+bool Curl_conn_cf_is_ssl(struct Curl_cfilter *cf)
{
- struct Curl_cfilter *cf = conn? conn->cfilter[sockindex] : NULL;
-
for(; cf; cf = cf->next) {
if(cf->cft->flags & CF_TYPE_SSL)
return TRUE;
@@ -418,6 +396,11 @@ bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex) return FALSE;
}
+bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex)
+{
+ return conn? Curl_conn_cf_is_ssl(conn->cfilter[sockindex]) : FALSE;
+}
+
bool Curl_conn_is_multiplex(struct connectdata *conn, int sockindex)
{
struct Curl_cfilter *cf = conn? conn->cfilter[sockindex] : NULL;
@@ -612,8 +595,11 @@ void Curl_conn_ev_update_info(struct Curl_easy *data, cf_cntrl_all(conn, data, TRUE, CF_CTRL_CONN_INFO_UPDATE, 0, NULL);
}
-void Curl_conn_report_connect_stats(struct Curl_easy *data,
- struct connectdata *conn)
+/**
+ * Update connection statistics
+ */
+static void conn_report_connect_stats(struct Curl_easy *data,
+ struct connectdata *conn)
{
struct Curl_cfilter *cf = conn->cfilter[FIRSTSOCKET];
if(cf) {
diff --git a/libs/libcurl/src/cfilters.h b/libs/libcurl/src/cfilters.h index 3a50fadcd8..384cdd3c9b 100644 --- a/libs/libcurl/src/cfilters.h +++ b/libs/libcurl/src/cfilters.h @@ -197,10 +197,6 @@ void Curl_cf_def_destroy_this(struct Curl_cfilter *cf, /* Default implementations for the type functions, implementing pass-through
* the filter chain. */
-void Curl_cf_def_close(struct Curl_cfilter *cf, struct Curl_easy *data);
-CURLcode Curl_cf_def_connect(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- bool blocking, bool *done);
void Curl_cf_def_get_host(struct Curl_cfilter *cf, struct Curl_easy *data,
const char **phost, const char **pdisplay_host,
int *pport);
@@ -254,11 +250,16 @@ void Curl_conn_cf_insert_after(struct Curl_cfilter *cf_at, struct Curl_cfilter *cf_new);
/**
- * Discard, e.g. remove and destroy a specific filter instance.
- * If the filter is attached to a connection, it will be removed before
- * it is destroyed.
+ * Discard, i.e. remove and destroy, `discard` iff
+ * it is still in the filter chain below `cf`. If `discard`
+ * is no longer found beneath `cf`, return FALSE.
+ * If `destroy_always` is TRUE, call `discard`'s destroy
+ * function and free it even if it was not found in the subchain.
*/
-void Curl_conn_cf_discard(struct Curl_cfilter *cf, struct Curl_easy *data);
+bool Curl_conn_cf_discard_sub(struct Curl_cfilter *cf,
+ struct Curl_cfilter *discard,
+ struct Curl_easy *data,
+ bool destroy_always);
/**
* Discard all cfilters starting with `*pcf` and clearing it afterwards.
@@ -281,8 +282,6 @@ void Curl_conn_cf_close(struct Curl_cfilter *cf, struct Curl_easy *data); int Curl_conn_cf_get_select_socks(struct Curl_cfilter *cf,
struct Curl_easy *data,
curl_socket_t *socks);
-bool Curl_conn_cf_data_pending(struct Curl_cfilter *cf,
- const struct Curl_easy *data);
ssize_t Curl_conn_cf_send(struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err);
ssize_t Curl_conn_cf_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
@@ -293,6 +292,12 @@ CURLcode Curl_conn_cf_cntrl(struct Curl_cfilter *cf, int event, int arg1, void *arg2);
/**
+ * Determine if the connection filter chain is using SSL to the remote host
+ * (or will be once connected).
+ */
+bool Curl_conn_cf_is_ssl(struct Curl_cfilter *cf);
+
+/**
* Get the socket used by the filter chain starting at `cf`.
* Returns CURL_SOCKET_BAD if not available.
*/
@@ -437,12 +442,6 @@ void Curl_conn_ev_update_info(struct Curl_easy *data, struct connectdata *conn);
/**
- * Update connection statistics
- */
-void Curl_conn_report_connect_stats(struct Curl_easy *data,
- struct connectdata *conn);
-
-/**
* Check if FIRSTSOCKET's cfilter chain deems connection alive.
*/
bool Curl_conn_is_alive(struct Curl_easy *data, struct connectdata *conn,
@@ -455,6 +454,7 @@ CURLcode Curl_conn_keep_alive(struct Curl_easy *data, struct connectdata *conn,
int sockindex);
+void Curl_cf_def_close(struct Curl_cfilter *cf, struct Curl_easy *data);
void Curl_conn_get_host(struct Curl_easy *data, int sockindex,
const char **phost, const char **pdisplay_host,
int *pport);
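Illustration (not part of the patch), using a hypothetical sub-filter pointer sub_cf: Curl_conn_cf_discard_sub() declared above lets a filter drop a helper it spawned without having to track whether the helper is still linked beneath it.

  /* sketch: destroy sub_cf only if it is still in cf's sub-chain */
  if(!Curl_conn_cf_discard_sub(cf, sub_cf, data, FALSE))
    DEBUGF(LOG_CF(data, cf, "sub filter was already removed"));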
diff --git a/libs/libcurl/src/config-amigaos.h b/libs/libcurl/src/config-amigaos.h index 839752800c..72ee0c0f9c 100644 --- a/libs/libcurl/src/config-amigaos.h +++ b/libs/libcurl/src/config-amigaos.h @@ -87,7 +87,15 @@ #define PACKAGE_STRING "curl -"
#define PACKAGE_TARNAME "curl"
#define PACKAGE_VERSION "-"
+
+#if defined(USE_AMISSL)
+#define CURL_CA_PATH "AmiSSL:Certs"
+#elif defined(__MORPHOS__)
+#define CURL_CA_BUNDLE "MOSSYS:Data/SSL/curl-ca-bundle.crt"
+#else
#define CURL_CA_BUNDLE "s:curl-ca-bundle.crt"
+#endif
+
#define STDC_HEADERS 1
#define TIME_WITH_SYS_TIME 1
diff --git a/libs/libcurl/src/config-dos.h b/libs/libcurl/src/config-dos.h index cb6db6a3d0..1313183f8c 100644 --- a/libs/libcurl/src/config-dos.h +++ b/libs/libcurl/src/config-dos.h @@ -82,7 +82,7 @@ #define SIZEOF_INT 4
#define SIZEOF_LONG 4
#define SIZEOF_SIZE_T 4
-#define SIZEOF_CURL_OFF_T 4
+#define SIZEOF_CURL_OFF_T 8
#define STDC_HEADERS 1
#define TIME_WITH_SYS_TIME 1
diff --git a/libs/libcurl/src/config-win32.h b/libs/libcurl/src/config-win32.h index 5536ebfa7a..e12ab552fd 100644 --- a/libs/libcurl/src/config-win32.h +++ b/libs/libcurl/src/config-win32.h @@ -205,10 +205,6 @@ /* Define if you have the socket function. */
#define HAVE_SOCKET 1
-/* Define if libSSH2 is in use */
-#define USE_LIBSSH2 1
-#define HAVE_LIBSSH2_H 1
-
/* Define if you have the strcasecmp function. */
#ifdef __MINGW32__
#define HAVE_STRCASECMP 1
@@ -627,9 +623,6 @@ Vista # define CURL_DISABLE_LDAP 1
#endif
-/* if SSL is enabled */
-#define USE_OPENSSL 1
-
/* Define to use the Windows crypto library. */
#if !defined(CURL_WINDOWS_APP)
#define USE_WIN32_CRYPTO
diff --git a/libs/libcurl/src/conncache.c b/libs/libcurl/src/conncache.c index 5b17a1b7b1..f9258090d2 100644 --- a/libs/libcurl/src/conncache.c +++ b/libs/libcurl/src/conncache.c @@ -246,7 +246,7 @@ CURLcode Curl_conncache_add_conn(struct Curl_easy *data) "The cache now contains %zu members",
conn->connection_id, connc->num_conn));
- unlock:
+unlock:
CONNCACHE_UNLOCK(data);
return result;
diff --git a/libs/libcurl/src/connect.c b/libs/libcurl/src/connect.c index 5b715acf58..70bb7717e6 100644 --- a/libs/libcurl/src/connect.c +++ b/libs/libcurl/src/connect.c @@ -59,6 +59,7 @@ #include "strerror.h"
#include "cfilters.h"
#include "connect.h"
+#include "cf-haproxy.h"
#include "cf-https-connect.h"
#include "cf-socket.h"
#include "select.h"
@@ -547,7 +548,7 @@ static CURLcode baller_connect(struct Curl_cfilter *cf, baller->result = Curl_conn_cf_connect(baller->cf, data, 0, connected);
if(!baller->result) {
- if (*connected) {
+ if(*connected) {
baller->connected = TRUE;
baller->is_done = TRUE;
}
@@ -663,7 +664,8 @@ evaluate: DEBUGF(LOG_CF(data, cf, "%s done", baller->name));
}
else {
- DEBUGF(LOG_CF(data, cf, "%s starting (timeout=%ldms)",
+ DEBUGF(LOG_CF(data, cf, "%s starting (timeout=%"
+ CURL_FORMAT_TIMEDIFF_T "ms)",
baller->name, baller->timeoutms));
++ongoing;
++added;
@@ -801,7 +803,8 @@ static CURLcode start_connect(struct Curl_cfilter *cf, timeout_ms, EXPIRE_DNS_PER_NAME);
if(result)
return result;
- DEBUGF(LOG_CF(data, cf, "created %s (timeout %ldms)",
+ DEBUGF(LOG_CF(data, cf, "created %s (timeout %"
+ CURL_FORMAT_TIMEDIFF_T "ms)",
ctx->baller[0]->name, ctx->baller[0]->timeoutms));
if(addr1) {
/* second one gets a delayed start */
@@ -812,7 +815,8 @@ static CURLcode start_connect(struct Curl_cfilter *cf, timeout_ms, EXPIRE_DNS_PER_NAME2);
if(result)
return result;
- DEBUGF(LOG_CF(data, cf, "created %s (timeout %ldms)",
+ DEBUGF(LOG_CF(data, cf, "created %s (timeout %"
+ CURL_FORMAT_TIMEDIFF_T "ms)",
ctx->baller[1]->name, ctx->baller[1]->timeoutms));
}
@@ -1056,12 +1060,23 @@ struct Curl_cftype Curl_cft_happy_eyeballs = { cf_he_query,
};
-CURLcode Curl_cf_happy_eyeballs_create(struct Curl_cfilter **pcf,
- struct Curl_easy *data,
- struct connectdata *conn,
- cf_ip_connect_create *cf_create,
- const struct Curl_dns_entry *remotehost,
- int transport)
+/**
+ * Create a happy eyeball connection filter that uses the, once resolved,
+ * address information to connect on ip families based on connection
+ * configuration.
+ * @param pcf output, the created cfilter
+ * @param data easy handle used in creation
+ * @param conn connection the filter is created for
+ * @param cf_create method to create the sub-filters performing the
+ * actual connects.
+ */
+static CURLcode
+cf_happy_eyeballs_create(struct Curl_cfilter **pcf,
+ struct Curl_easy *data,
+ struct connectdata *conn,
+ cf_ip_connect_create *cf_create,
+ const struct Curl_dns_entry *remotehost,
+ int transport)
{
struct cf_he_ctx *ctx = NULL;
CURLcode result;
@@ -1120,20 +1135,6 @@ static cf_ip_connect_create *get_cf_create(int transport) return NULL;
}
-#ifdef DEBUGBUILD
-void Curl_debug_set_transport_provider(int transport,
- cf_ip_connect_create *cf_create)
-{
- size_t i;
- for(i = 0; i < ARRAYSIZE(transport_providers); ++i) {
- if(transport == transport_providers[i].transport) {
- transport_providers[i].cf_create = cf_create;
- return;
- }
- }
-}
-#endif /* DEBUGBUILD */
-
static CURLcode cf_he_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data,
const struct Curl_dns_entry *remotehost,
@@ -1150,9 +1151,9 @@ static CURLcode cf_he_insert_after(struct Curl_cfilter *cf_at, DEBUGF(LOG_CF(data, cf_at, "unsupported transport type %d", transport));
return CURLE_UNSUPPORTED_PROTOCOL;
}
- result = Curl_cf_happy_eyeballs_create(&cf, data, cf_at->conn,
- cf_create, remotehost,
- transport);
+ result = cf_happy_eyeballs_create(&cf, data, cf_at->conn,
+ cf_create, remotehost,
+ transport);
if(result)
return result;
@@ -1219,7 +1220,7 @@ connect_sub_chain: if(ctx->state < CF_SETUP_CNNCT_HTTP_PROXY && cf->conn->bits.httpproxy) {
#ifdef USE_SSL
- if(cf->conn->http_proxy.proxytype == CURLPROXY_HTTPS
+ if(IS_HTTPS_PROXY(cf->conn->http_proxy.proxytype)
&& !Curl_conn_is_ssl(cf->conn, cf->sockindex)) {
result = Curl_cf_ssl_proxy_insert_after(cf, data);
if(result)
@@ -1355,12 +1356,12 @@ out: return result;
}
-CURLcode Curl_cf_setup_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex,
- const struct Curl_dns_entry *remotehost,
- int transport,
- int ssl_mode)
+static CURLcode cf_setup_add(struct Curl_easy *data,
+ struct connectdata *conn,
+ int sockindex,
+ const struct Curl_dns_entry *remotehost,
+ int transport,
+ int ssl_mode)
{
struct Curl_cfilter *cf;
CURLcode result = CURLE_OK;
@@ -1374,6 +1375,21 @@ out: return result;
}
+#ifdef DEBUGBUILD
+/* used by unit2600.c */
+void Curl_debug_set_transport_provider(int transport,
+ cf_ip_connect_create *cf_create)
+{
+ size_t i;
+ for(i = 0; i < ARRAYSIZE(transport_providers); ++i) {
+ if(transport == transport_providers[i].transport) {
+ transport_providers[i].cf_create = cf_create;
+ return;
+ }
+ }
+}
+#endif /* DEBUGBUILD */
+
CURLcode Curl_cf_setup_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data,
const struct Curl_dns_entry *remotehost,
@@ -1405,9 +1421,8 @@ CURLcode Curl_conn_setup(struct Curl_easy *data, #if !defined(CURL_DISABLE_HTTP) && !defined(USE_HYPER)
if(!conn->cfilter[sockindex] &&
- conn->handler->protocol == CURLPROTO_HTTPS &&
- (ssl_mode == CURL_CF_SSL_ENABLE || ssl_mode != CURL_CF_SSL_DISABLE)) {
-
+ conn->handler->protocol == CURLPROTO_HTTPS) {
+ DEBUGASSERT(ssl_mode != CURL_CF_SSL_DISABLE);
result = Curl_cf_https_setup(data, conn, sockindex, remotehost);
if(result)
goto out;
@@ -1416,8 +1431,8 @@ CURLcode Curl_conn_setup(struct Curl_easy *data, /* Still no cfilter set, apply default. */
if(!conn->cfilter[sockindex]) {
- result = Curl_cf_setup_add(data, conn, sockindex, remotehost,
- conn->transport, ssl_mode);
+ result = cf_setup_add(data, conn, sockindex, remotehost,
+ conn->transport, ssl_mode);
if(result)
goto out;
}
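Illustration (not part of the patch): with the relocated Curl_debug_set_transport_provider() above, a debug-build test (the comment mentions unit2600.c) can reroute connects for one transport through its own factory. The callback name below is hypothetical; its signature follows cf_ip_connect_create.

  #ifdef DEBUGBUILD
  static CURLcode my_cf_create(struct Curl_cfilter **pcf,
                               struct Curl_easy *data,
                               struct connectdata *conn,
                               const struct Curl_addrinfo *ai, int transport);

  static void test_setup(void)
  {
    /* sketch: route TCP connects through the test factory */
    Curl_debug_set_transport_provider(TRNSPRT_TCP, my_cf_create);
  }
  #endif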
diff --git a/libs/libcurl/src/connect.h b/libs/libcurl/src/connect.h index d1002edbd1..f9961085a6 100644 --- a/libs/libcurl/src/connect.h +++ b/libs/libcurl/src/connect.h @@ -104,31 +104,6 @@ typedef CURLcode cf_ip_connect_create(struct Curl_cfilter **pcf, const struct Curl_addrinfo *ai,
int transport);
-/**
- * Create a happy eyeball connection filter that uses the, once resolved,
- * address information to connect on ip families based on connection
- * configuration.
- * @param pcf output, the created cfilter
- * @param data easy handle used in creation
- * @param conn connection the filter is created for
- * @param cf_create method to create the sub-filters performing the
- * actual connects.
- */
-CURLcode
-Curl_cf_happy_eyeballs_create(struct Curl_cfilter **pcf,
- struct Curl_easy *data,
- struct connectdata *conn,
- cf_ip_connect_create *cf_create,
- const struct Curl_dns_entry *remotehost,
- int transport);
-
-CURLcode Curl_cf_setup_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex,
- const struct Curl_dns_entry *remotehost,
- int transport,
- int ssl_mode);
-
CURLcode Curl_cf_setup_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data,
const struct Curl_dns_entry *remotehost,
diff --git a/libs/libcurl/src/content_encoding.c b/libs/libcurl/src/content_encoding.c index 6858b4547f..0793e21a91 100644 --- a/libs/libcurl/src/content_encoding.c +++ b/libs/libcurl/src/content_encoding.c @@ -53,6 +53,9 @@ #include "content_encoding.h"
#include "strdup.h"
#include "strcase.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
@@ -1077,8 +1080,12 @@ CURLcode Curl_build_unencoding_stack(struct Curl_easy *data, Curl_httpchunk_init(data); /* init our chunky engine. */
}
else if(namelen) {
- const struct content_encoding *encoding = find_encoding(name, namelen);
+ const struct content_encoding *encoding;
struct contenc_writer *writer;
+ if(is_transfer && !data->set.http_transfer_encoding)
+ /* not requested, ignore */
+ return CURLE_OK;
+ encoding = find_encoding(name, namelen);
if(!k->writer_stack) {
k->writer_stack = new_unencoding_writer(data, &client_encoding,
diff --git a/libs/libcurl/src/cookie.c b/libs/libcurl/src/cookie.c index 77e202b6c6..61cd0948a8 100644 --- a/libs/libcurl/src/cookie.c +++ b/libs/libcurl/src/cookie.c @@ -483,11 +483,6 @@ static int invalid_octets(const char *p) */
struct Cookie *
Curl_cookie_add(struct Curl_easy *data,
- /*
- * The 'data' pointer here may be NULL at times, and thus
- * must only be used very carefully for things that can deal
- * with data being NULL. Such as infof() and similar
- */
struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
bool noexpire, /* if TRUE, skip remove_expired() */
@@ -508,10 +503,7 @@ Curl_cookie_add(struct Curl_easy *data, bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */
size_t myhash;
-#ifdef CURL_DISABLE_VERBOSE_STRINGS
- (void)data;
-#endif
-
+ DEBUGASSERT(data);
DEBUGASSERT(MAX_SET_COOKIE_AMOUNT <= 255); /* counter is an unsigned char */
if(data->req.setcookies >= MAX_SET_COOKIE_AMOUNT)
return NULL;
@@ -523,8 +515,6 @@ Curl_cookie_add(struct Curl_easy *data, if(httpheader) {
/* This line was read off an HTTP-header */
- const char *namep;
- const char *valuep;
const char *ptr;
size_t linelength = strlen(lineptr);
@@ -547,8 +537,9 @@ Curl_cookie_add(struct Curl_easy *data, if(nlen) {
bool done = FALSE;
bool sep = FALSE;
+ const char *namep = ptr;
+ const char *valuep;
- namep = ptr;
ptr += nlen;
/* trim trailing spaces and tabs after name */
@@ -1128,17 +1119,11 @@ Curl_cookie_add(struct Curl_easy *data, if(replace_old) {
/* the domains were identical */
- if(clist->spath && co->spath) {
- if(strcasecompare(clist->spath, co->spath))
- replace_old = TRUE;
- else
- replace_old = FALSE;
- }
- else if(!clist->spath && !co->spath)
- replace_old = TRUE;
- else
+ if(clist->spath && co->spath &&
+ !strcasecompare(clist->spath, co->spath))
+ replace_old = FALSE;
+ else if(!clist->spath != !co->spath)
replace_old = FALSE;
-
}
if(replace_old && !co->livecookie && clist->livecookie) {
@@ -1219,7 +1204,8 @@ Curl_cookie_add(struct Curl_easy *data, *
* If 'newsession' is TRUE, discard all "session cookies" on read from file.
*
- * Note that 'data' might be called as NULL pointer.
+ * Note that 'data' might be passed in as a NULL pointer. If data is NULL,
+ * 'file' will be ignored.
*
* Returns NULL on out of memory. Invalid cookies are ignored.
*/
@@ -1229,9 +1215,8 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data, bool newsession)
{
struct CookieInfo *c;
- FILE *fp = NULL;
- bool fromfile = TRUE;
char *line = NULL;
+ FILE *handle = NULL;
if(!inc) {
/* we didn't get a struct, create one */
@@ -1251,61 +1236,59 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data, /* we got an already existing one, use that */
c = inc;
}
- c->running = FALSE; /* this is not running, this is init */
-
- if(file && !strcmp(file, "-")) {
- fp = stdin;
- fromfile = FALSE;
- }
- else if(!file || !*file) {
- /* points to an empty string or NULL */
- fp = NULL;
- }
- else {
- fp = fopen(file, "rb");
- if(!fp)
- infof(data, "WARNING: failed to open cookie file \"%s\"", file);
- }
-
c->newsession = newsession; /* new session? */
- if(fp) {
- char *lineptr;
- bool headerline;
-
- line = malloc(MAX_COOKIE_LINE);
- if(!line)
- goto fail;
- while(Curl_get_line(line, MAX_COOKIE_LINE, fp)) {
- if(checkprefix("Set-Cookie:", line)) {
- /* This is a cookie line, get it! */
- lineptr = &line[11];
- headerline = TRUE;
- }
+ if(data) {
+ FILE *fp = NULL;
+ if(file) {
+ if(!strcmp(file, "-"))
+ fp = stdin;
else {
- lineptr = line;
- headerline = FALSE;
+ fp = fopen(file, "rb");
+ if(!fp)
+ infof(data, "WARNING: failed to open cookie file \"%s\"", file);
+ else
+ handle = fp;
}
- while(*lineptr && ISBLANK(*lineptr))
- lineptr++;
-
- Curl_cookie_add(data, c, headerline, TRUE, lineptr, NULL, NULL, TRUE);
}
- free(line); /* free the line buffer */
- /*
- * Remove expired cookies from the hash. We must make sure to run this
- * after reading the file, and not on every cookie.
- */
- remove_expired(c);
+ c->running = FALSE; /* this is not running, this is init */
+ if(fp) {
+ char *lineptr;
+ bool headerline;
+
+ line = malloc(MAX_COOKIE_LINE);
+ if(!line)
+ goto fail;
+ while(Curl_get_line(line, MAX_COOKIE_LINE, fp)) {
+ if(checkprefix("Set-Cookie:", line)) {
+ /* This is a cookie line, get it! */
+ lineptr = &line[11];
+ headerline = TRUE;
+ }
+ else {
+ lineptr = line;
+ headerline = FALSE;
+ }
+ while(*lineptr && ISBLANK(*lineptr))
+ lineptr++;
- if(fromfile)
- fclose(fp);
- }
+ Curl_cookie_add(data, c, headerline, TRUE, lineptr, NULL, NULL, TRUE);
+ }
+ free(line); /* free the line buffer */
+
+ /*
+ * Remove expired cookies from the hash. We must make sure to run this
+ * after reading the file, and not on every cookie.
+ */
+ remove_expired(c);
- c->running = TRUE; /* now, we're running */
- if(data)
+ if(handle)
+ fclose(handle);
+ }
data->state.cookie_engine = TRUE;
+ c->running = TRUE; /* now, we're running */
+ }
return c;
@@ -1317,8 +1300,8 @@ fail: */
if(!inc)
Curl_cookie_cleanup(c);
- if(fromfile && fp)
- fclose(fp);
+ if(handle)
+ fclose(handle);
return NULL; /* out of memory */
}
@@ -1404,7 +1387,7 @@ static struct Cookie *dup_cookie(struct Cookie *src) }
return d;
- fail:
+fail:
freecookie(d);
return NULL;
}
@@ -1448,7 +1431,7 @@ struct Cookie *Curl_cookie_getlist(struct Curl_easy *data, /* now check if the domain is correct */
if(!co->domain ||
(co->tailmatch && !is_ip &&
- tailmatch(co->domain, co->domain? strlen(co->domain):0, host)) ||
+ tailmatch(co->domain, strlen(co->domain), host)) ||
((!co->tailmatch || is_ip) && strcasecompare(host, co->domain)) ) {
/*
* the right part of the host matches the domain stuff in the
@@ -1738,7 +1721,7 @@ static CURLcode cookie_output(struct Curl_easy *data, }
/*
- * If we reach here we have successfully written a cookie file so theree is
+ * If we reach here we have successfully written a cookie file so there is
* no need to inspect the error, any error case should have jumped into the
* error block below.
*/
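The condensed spath comparison above keeps replace_old TRUE only when both cookies either have no path or carry the same path (compared case-insensitively). A small illustration (not part of the patch):

  /* clist->spath  co->spath   outcome
   *  NULL          NULL       replace (both without a path)
   *  "/a"          "/a"       replace (same path)
   *  "/a"          "/b"       keep both (paths differ)
   *  "/a"          NULL       keep both (!clist->spath != !co->spath)
   *  NULL          "/b"       keep both (!clist->spath != !co->spath)  */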
diff --git a/libs/libcurl/src/cookie.h b/libs/libcurl/src/cookie.h index 7e5bef37a6..5a28a75c4a 100644 --- a/libs/libcurl/src/cookie.h +++ b/libs/libcurl/src/cookie.h @@ -61,7 +61,6 @@ struct Cookie { struct CookieInfo {
/* linked list of cookies we know of */
struct Cookie *cookies[COOKIE_HASH_SIZE];
-
char *filename; /* file we read from/write to */
long numcookies; /* number of cookies in the "jar" */
bool running; /* state info, for cookie adding information */
@@ -70,23 +69,34 @@ struct CookieInfo { curl_off_t next_expiration; /* the next time at which expiration happens */
};
-/* This is the maximum line length we accept for a cookie line. RFC 2109
- section 6.3 says:
-
- "at least 4096 bytes per cookie (as measured by the size of the characters
- that comprise the cookie non-terminal in the syntax description of the
- Set-Cookie header)"
+/* The maximum sizes we accept for cookies. RFC 6265 section 6.1 says
+ "general-use user agents SHOULD provide each of the following minimum
+ capabilities":
- We allow max 5000 bytes cookie header. Max 4095 bytes length per cookie
- name and value. Name + value may not exceed 4096 bytes.
+ - At least 4096 bytes per cookie (as measured by the sum of the length of
+ the cookie's name, value, and attributes).
+ In the 6265bis draft document section 5.4 it is phrased even stronger: "If
+ the sum of the lengths of the name string and the value string is more than
+ 4096 octets, abort these steps and ignore the set-cookie-string entirely."
*/
+
+/** Limits for INCOMING cookies **/
+
+/* The longest we allow a line to be when reading a cookie from an HTTP header
+ or from a cookie jar */
#define MAX_COOKIE_LINE 5000
/* Maximum length of an incoming cookie name or content we deal with. Longer
cookies are ignored. */
#define MAX_NAME 4096
-#define MAX_NAME_TXT "4095"
+
+/* Maximum number of Set-Cookie: lines accepted in a single response. If more
+ such header lines are received, they are ignored. This value must be less
+ than 256 since an unsigned char is used to count. */
+#define MAX_SET_COOKIE_AMOUNT 50
+
+/** Limits for OUTGOING cookies **/
/* Maximum size for an outgoing cookie line libcurl will use in an http
request. This is the default maximum length used in some versions of Apache
@@ -98,11 +108,6 @@ struct CookieInfo { keep the maximum HTTP request within the maximum allowed size. */
#define MAX_COOKIE_SEND_AMOUNT 150
-/* Maximum number of Set-Cookie: lines accepted in a single response. If more
- such header lines are received, they are ignored. This value must be less
- than 256 since an unsigned char is used to count. */
-#define MAX_SET_COOKIE_AMOUNT 50
-
struct Curl_easy;
/*
* Add a cookie to the internal list of cookies. The domain and path arguments
diff --git a/libs/libcurl/src/curl_addrinfo.c b/libs/libcurl/src/curl_addrinfo.c index 3c31267487..2dd0edcede 100644 --- a/libs/libcurl/src/curl_addrinfo.c +++ b/libs/libcurl/src/curl_addrinfo.c @@ -274,7 +274,7 @@ Curl_he2ai(const struct hostent *he, int port) for(i = 0; (curr = he->h_addr_list[i]) != NULL; i++) {
size_t ss_size;
- size_t namelen = strlen(he->h_name) + 1; /* include null-terminatior */
+ size_t namelen = strlen(he->h_name) + 1; /* include null-terminator */
#ifdef ENABLE_IPV6
if(he->h_addrtype == AF_INET6)
ss_size = sizeof(struct sockaddr_in6);
diff --git a/libs/libcurl/src/curl_config.h.in b/libs/libcurl/src/curl_config.h.in index 2aa6ef4803..d94099b443 100644 --- a/libs/libcurl/src/curl_config.h.in +++ b/libs/libcurl/src/curl_config.h.in @@ -153,6 +153,9 @@ /* Define to 1 if you have _Atomic support. */
#undef HAVE_ATOMIC
+/* Define to 1 if using AWS-LC. */
+#undef HAVE_AWSLC
+
/* Define to 1 if you have the basename function. */
#undef HAVE_BASENAME
@@ -802,6 +805,9 @@ /* Size of curl_off_t in number of bytes */
#undef SIZEOF_CURL_OFF_T
+/* Size of curl_socket_t in number of bytes */
+#undef SIZEOF_CURL_SOCKET_T
+
/* Size of int in number of bytes */
#undef SIZEOF_INT
diff --git a/libs/libcurl/src/curl_log.c b/libs/libcurl/src/curl_log.c index ab1cf29ed6..ecde0b3e02 100644 --- a/libs/libcurl/src/curl_log.c +++ b/libs/libcurl/src/curl_log.c @@ -38,6 +38,9 @@ #include "connect.h"
#include "http2.h"
#include "http_proxy.h"
+#include "cf-h1-proxy.h"
+#include "cf-h2-proxy.h"
+#include "cf-haproxy.h"
#include "cf-https-connect.h"
#include "socks.h"
#include "strtok.h"
@@ -160,6 +163,10 @@ static struct Curl_cftype *cf_types[] = { #endif
#if !defined(CURL_DISABLE_PROXY)
#if !defined(CURL_DISABLE_HTTP)
+ &Curl_cft_h1_proxy,
+#ifdef USE_NGHTTP2
+ &Curl_cft_h2_proxy,
+#endif
&Curl_cft_http_proxy,
#endif /* !CURL_DISABLE_HTTP */
&Curl_cft_haproxy,
diff --git a/libs/libcurl/src/curl_memory.h b/libs/libcurl/src/curl_memory.h index 64e31defa2..404ad93886 100644 --- a/libs/libcurl/src/curl_memory.h +++ b/libs/libcurl/src/curl_memory.h @@ -52,39 +52,12 @@ * mentioned above will compile without any indication, but it will
* trigger weird memory related issues at runtime.
*
- * OTOH some source files from 'lib' subdirectory may additionally be
- * used directly as source code when using some curlx_ functions by
- * third party programs that don't even use libcurl at all. When using
- * these source files in this way it is necessary these are compiled
- * with CURLX_NO_MEMORY_CALLBACKS defined, in order to ensure that no
- * attempt of calling libcurl's memory callbacks is done from code
- * which can not use this machinery.
- *
- * Notice that libcurl's 'memory tracking' system works chaining into
- * the memory callback machinery. This implies that when compiling
- * 'lib' source files with CURLX_NO_MEMORY_CALLBACKS defined this file
- * disengages usage of libcurl's 'memory tracking' system, defining
- * MEMDEBUG_NODEFINES and overriding CURLDEBUG purpose.
- *
- * CURLX_NO_MEMORY_CALLBACKS takes precedence over CURLDEBUG. This is
- * done in order to allow building a 'memory tracking' enabled libcurl
- * and at the same time allow building programs which do not use it.
- *
- * Programs and libraries in 'tests' subdirectories have specific
- * purposes and needs, and as such each one will use whatever fits
- * best, depending additionally whether it links with libcurl or not.
- *
- * Caveat emptor. Proper curlx_* separation is a work in progress
- * the same as CURLX_NO_MEMORY_CALLBACKS usage, some adjustments may
- * still be required. IOW don't use them yet, there are sharp edges.
*/
#ifdef HEADER_CURL_MEMDEBUG_H
#error "Header memdebug.h shall not be included before curl_memory.h"
#endif
-#ifndef CURLX_NO_MEMORY_CALLBACKS
-
#ifndef CURL_DID_MEMORY_FUNC_TYPEDEFS /* only if not already done */
/*
* The following memory function replacement typedef's are COPIED from
@@ -146,13 +119,4 @@ extern curl_wcsdup_callback Curl_cwcsdup; #endif
#endif /* CURLDEBUG */
-
-#else /* CURLX_NO_MEMORY_CALLBACKS */
-
-#ifndef MEMDEBUG_NODEFINES
-#define MEMDEBUG_NODEFINES
-#endif
-
-#endif /* CURLX_NO_MEMORY_CALLBACKS */
-
#endif /* HEADER_CURL_MEMORY_H */
diff --git a/libs/libcurl/src/curl_ntlm_core.c b/libs/libcurl/src/curl_ntlm_core.c index 1bf1705cc4..368b0e429c 100644 --- a/libs/libcurl/src/curl_ntlm_core.c +++ b/libs/libcurl/src/curl_ntlm_core.c @@ -83,6 +83,10 @@ # define DES_ecb_encrypt des_ecb_encrypt
# define DESKEY(x) x
# define DESKEYARG(x) x
+# elif defined(OPENSSL_IS_AWSLC)
+# define DES_set_key_unchecked (void)DES_set_key
+# define DESKEYARG(x) *x
+# define DESKEY(x) &x
# else
# define DESKEYARG(x) *x
# define DESKEY(x) &x
diff --git a/libs/libcurl/src/curl_path.c b/libs/libcurl/src/curl_path.c index 0b2f796d1f..7adc040a22 100644 --- a/libs/libcurl/src/curl_path.c +++ b/libs/libcurl/src/curl_path.c @@ -62,24 +62,27 @@ CURLcode Curl_getworkingpath(struct Curl_easy *data, }
}
else if((data->conn->handler->protocol & CURLPROTO_SFTP) &&
- (working_path_len > 2) && !memcmp(working_path, "/~/", 3)) {
- size_t len;
- const char *p;
- int copyfrom = 3;
+ (!strcmp("/~", working_path) ||
+ ((working_path_len > 2) && !memcmp(working_path, "/~/", 3)))) {
if(Curl_dyn_add(&npath, homedir)) {
free(working_path);
return CURLE_OUT_OF_MEMORY;
}
- /* Copy a separating '/' if homedir does not end with one */
- len = Curl_dyn_len(&npath);
- p = Curl_dyn_ptr(&npath);
- if(len && (p[len-1] != '/'))
- copyfrom = 2;
-
- if(Curl_dyn_addn(&npath,
- &working_path[copyfrom], working_path_len - copyfrom)) {
- free(working_path);
- return CURLE_OUT_OF_MEMORY;
+ if(working_path_len > 2) {
+ size_t len;
+ const char *p;
+ int copyfrom = 3;
+ /* Copy a separating '/' if homedir does not end with one */
+ len = Curl_dyn_len(&npath);
+ p = Curl_dyn_ptr(&npath);
+ if(len && (p[len-1] != '/'))
+ copyfrom = 2;
+
+ if(Curl_dyn_addn(&npath,
+ &working_path[copyfrom], working_path_len - copyfrom)) {
+ free(working_path);
+ return CURLE_OUT_OF_MEMORY;
+ }
}
}
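Worked example (not part of the patch) of the SFTP path handling above, assuming homedir is "/home/user":

  /*  "/~"       -> "/home/user"        (bare "/~" is now accepted)
   *  "/~/file"  -> "/home/user/file"   (the separating '/' is copied only
   *                                     when homedir does not end in '/') */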
@@ -188,7 +191,7 @@ CURLcode Curl_get_pathname(const char **cpp, char **path, char *homedir) }
return CURLE_OK;
- fail:
+fail:
Curl_safefree(*path);
return CURLE_QUOTE_ERROR;
}
diff --git a/libs/libcurl/src/curl_rtmp.c b/libs/libcurl/src/curl_rtmp.c index a65ac898e1..4b675d6f82 100644 --- a/libs/libcurl/src/curl_rtmp.c +++ b/libs/libcurl/src/curl_rtmp.c @@ -231,7 +231,7 @@ static CURLcode rtmp_connect(struct Curl_easy *data, bool *done) /* We have to know if it's a write before we send the
* connect request packet
*/
- if(data->set.upload)
+ if(data->state.upload)
r->Link.protocol |= RTMP_FEATURE_WRITE;
/* For plain streams, use the buffer toggle trick to keep data flowing */
@@ -263,7 +263,7 @@ static CURLcode rtmp_do(struct Curl_easy *data, bool *done) if(!RTMP_ConnectStream(r, 0))
return CURLE_FAILED_INIT;
- if(data->set.upload) {
+ if(data->state.upload) {
Curl_pgrsSetUploadSize(data, data->state.infilesize);
Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
}
diff --git a/libs/libcurl/src/curl_setup.h b/libs/libcurl/src/curl_setup.h index 85e2162c3c..4124e7288a 100644 --- a/libs/libcurl/src/curl_setup.h +++ b/libs/libcurl/src/curl_setup.h @@ -61,6 +61,16 @@ # ifndef NOGDI
# define NOGDI
# endif
+/* Detect Windows App environment which has restricted access
+ * to the Win32 APIs. */
+# if (defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0602)) || \
+ defined(WINAPI_FAMILY)
+# include <winapifamily.h>
+# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+# define CURL_WINDOWS_APP
+# endif
+# endif
#endif
/*
@@ -767,23 +777,6 @@ endings either CRLF or LF so 't' is appropriate. #define FOPEN_APPENDTEXT "a"
#endif
-/* Windows workaround to recv before every send, because apparently Winsock
- * destroys destroys recv() buffer when send() failed.
- * This workaround is now disabled by default since it caused hard to fix bugs.
- * Define USE_RECV_BEFORE_SEND_WORKAROUND to enable it.
- * https://github.com/curl/curl/issues/657
- * https://github.com/curl/curl/pull/10409
- */
-#if !defined(DONT_USE_RECV_BEFORE_SEND_WORKAROUND)
-# if defined(WIN32) || defined(__CYGWIN__)
-/* # define USE_RECV_BEFORE_SEND_WORKAROUND */
-# endif
-#else /* DONT_USE_RECV_BEFORE_SEND_WORKAROUND */
-# ifdef USE_RECV_BEFORE_SEND_WORKAROUND
-# undef USE_RECV_BEFORE_SEND_WORKAROUND
-# endif
-#endif /* DONT_USE_RECV_BEFORE_SEND_WORKAROUND */
-
/* for systems that don't detect this in configure */
#ifndef CURL_SA_FAMILY_T
# if defined(HAVE_SA_FAMILY_T)
@@ -823,7 +816,8 @@ int getpwuid_r(uid_t uid, struct passwd *pwd, char *buf, #define USE_HTTP2
#endif
-#if defined(USE_NGTCP2) || defined(USE_QUICHE) || defined(USE_MSH3)
+#if (defined(USE_NGTCP2) && defined(USE_NGHTTP3)) || \
+ defined(USE_QUICHE) || defined(USE_MSH3)
#define ENABLE_QUIC
#define USE_HTTP3
#endif
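For context (not part of the patch): the CURL_WINDOWS_APP define introduced above is consumed by other headers to avoid desktop-only Win32 APIs; the config-win32.h hunk earlier in this diff shows the typical pattern.

  /* sketch of a typical consumer of CURL_WINDOWS_APP */
  #if !defined(CURL_WINDOWS_APP)
  #define USE_WIN32_CRYPTO  /* desktop builds may use the Win32 crypto API */
  #endif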
diff --git a/libs/libcurl/src/dict.c b/libs/libcurl/src/dict.c index fb0f12107a..0d1208bc58 100644 --- a/libs/libcurl/src/dict.c +++ b/libs/libcurl/src/dict.c @@ -312,7 +312,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done) }
}
- error:
+error:
free(eword);
free(path);
return result;
diff --git a/libs/libcurl/src/doh.c b/libs/libcurl/src/doh.c index c0173d8160..59cb71d7e4 100644 --- a/libs/libcurl/src/doh.c +++ b/libs/libcurl/src/doh.c @@ -347,7 +347,7 @@ static CURLcode dohprobe(struct Curl_easy *data, free(nurl);
return CURLE_OK;
- error:
+error:
free(nurl);
Curl_close(&doh);
return result;
@@ -409,7 +409,7 @@ struct Curl_addrinfo *Curl_doh(struct Curl_easy *data, #endif
return NULL;
- error:
+error:
curl_slist_free_all(dohp->headers);
data->req.doh->headers = NULL;
for(slot = 0; slot < DOH_PROBE_SLOTS; slot++) {
diff --git a/libs/libcurl/src/dynbuf.c b/libs/libcurl/src/dynbuf.c index 124377b367..fc376a4e4b 100644 --- a/libs/libcurl/src/dynbuf.c +++ b/libs/libcurl/src/dynbuf.c @@ -76,6 +76,7 @@ static CURLcode dyn_nappend(struct dynbuf *s, DEBUGASSERT(s->toobig);
DEBUGASSERT(indx < s->toobig);
DEBUGASSERT(!s->leng || s->bufr);
+ DEBUGASSERT(a <= s->toobig);
if(fit > s->toobig) {
Curl_dyn_free(s);
@@ -84,7 +85,9 @@ static CURLcode dyn_nappend(struct dynbuf *s, else if(!a) {
DEBUGASSERT(!indx);
/* first invoke */
- if(fit < MIN_FIRST_ALLOC)
+ if(MIN_FIRST_ALLOC > s->toobig)
+ a = s->toobig;
+ else if(fit < MIN_FIRST_ALLOC)
a = MIN_FIRST_ALLOC;
else
a = fit;
@@ -92,6 +95,9 @@ static CURLcode dyn_nappend(struct dynbuf *s, else {
while(a < fit)
a *= 2;
+ if(a > s->toobig)
+ /* no point in allocating a larger buffer than this is allowed to use */
+ a = s->toobig;
}
if(a != s->allc) {
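A hypothetical walk-through (not part of the patch) of the capped growth above, assuming toobig = 1000 and a MIN_FIRST_ALLOC of 32:

  /* append 10 bytes to an empty dynbuf : a = 32 (MIN_FIRST_ALLOC <= toobig)
   * append 800 more bytes (fit = 811)  : a doubles 32 -> ... -> 1024,
   *                                      then is clamped to toobig (1000)
   * append past toobig (fit > 1000)    : dynbuf is freed, append fails */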
diff --git a/libs/libcurl/src/dynhds.c b/libs/libcurl/src/dynhds.c new file mode 100644 index 0000000000..346426e427 --- /dev/null +++ b/libs/libcurl/src/dynhds.c @@ -0,0 +1,366 @@ +/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+#include "dynhds.h"
+#include "strcase.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+
+static struct dynhds_entry *
+entry_new(const char *name, size_t namelen,
+ const char *value, size_t valuelen, int opts)
+{
+ struct dynhds_entry *e;
+ char *p;
+
+ DEBUGASSERT(name);
+ DEBUGASSERT(value);
+ e = calloc(1, sizeof(*e) + namelen + valuelen + 2);
+ if(!e)
+ return NULL;
+ e->name = p = ((char *)e) + sizeof(*e);
+ memcpy(p, name, namelen);
+ e->namelen = namelen;
+ e->value = p += namelen + 1; /* leave a \0 at the end of name */
+ memcpy(p, value, valuelen);
+ e->valuelen = valuelen;
+ if(opts & DYNHDS_OPT_LOWERCASE)
+ Curl_strntolower(e->name, e->name, e->namelen);
+ return e;
+}
+
+static struct dynhds_entry *
+entry_append(struct dynhds_entry *e,
+ const char *value, size_t valuelen)
+{
+ struct dynhds_entry *e2;
+ size_t valuelen2 = e->valuelen + 1 + valuelen;
+ char *p;
+
+ DEBUGASSERT(value);
+ e2 = calloc(1, sizeof(*e) + e->namelen + valuelen2 + 2);
+ if(!e2)
+ return NULL;
+ e2->name = p = ((char *)e2) + sizeof(*e2);
+ memcpy(p, e->name, e->namelen);
+ e2->namelen = e->namelen;
+ e2->value = p += e->namelen + 1; /* leave a \0 at the end of name */
+ memcpy(p, e->value, e->valuelen);
+ p += e->valuelen;
+ p[0] = ' ';
+ memcpy(p + 1, value, valuelen);
+ e2->valuelen = valuelen2;
+ return e2;
+}
+
+static void entry_free(struct dynhds_entry *e)
+{
+ free(e);
+}
+
+void Curl_dynhds_init(struct dynhds *dynhds, size_t max_entries,
+ size_t max_strs_size)
+{
+ DEBUGASSERT(dynhds);
+ DEBUGASSERT(max_strs_size);
+ dynhds->hds = NULL;
+ dynhds->hds_len = dynhds->hds_allc = dynhds->strs_len = 0;
+ dynhds->max_entries = max_entries;
+ dynhds->max_strs_size = max_strs_size;
+ dynhds->opts = 0;
+}
+
+void Curl_dynhds_free(struct dynhds *dynhds)
+{
+ DEBUGASSERT(dynhds);
+ if(dynhds->hds && dynhds->hds_len) {
+ size_t i;
+ DEBUGASSERT(dynhds->hds);
+ for(i = 0; i < dynhds->hds_len; ++i) {
+ entry_free(dynhds->hds[i]);
+ }
+ }
+ Curl_safefree(dynhds->hds);
+ dynhds->hds_len = dynhds->hds_allc = dynhds->strs_len = 0;
+}
+
+void Curl_dynhds_reset(struct dynhds *dynhds)
+{
+ DEBUGASSERT(dynhds);
+ if(dynhds->hds_len) {
+ size_t i;
+ DEBUGASSERT(dynhds->hds);
+ for(i = 0; i < dynhds->hds_len; ++i) {
+ entry_free(dynhds->hds[i]);
+ dynhds->hds[i] = NULL;
+ }
+ }
+ dynhds->hds_len = dynhds->strs_len = 0;
+}
+
+size_t Curl_dynhds_count(struct dynhds *dynhds)
+{
+ return dynhds->hds_len;
+}
+
+void Curl_dynhds_set_opts(struct dynhds *dynhds, int opts)
+{
+ dynhds->opts = opts;
+}
+
+struct dynhds_entry *Curl_dynhds_getn(struct dynhds *dynhds, size_t n)
+{
+ DEBUGASSERT(dynhds);
+ return (n < dynhds->hds_len)? dynhds->hds[n] : NULL;
+}
+
+struct dynhds_entry *Curl_dynhds_get(struct dynhds *dynhds, const char *name,
+ size_t namelen)
+{
+ size_t i;
+ for(i = 0; i < dynhds->hds_len; ++i) {
+ if(dynhds->hds[i]->namelen == namelen &&
+ strncasecompare(dynhds->hds[i]->name, name, namelen)) {
+ return dynhds->hds[i];
+ }
+ }
+ return NULL;
+}
+
+struct dynhds_entry *Curl_dynhds_cget(struct dynhds *dynhds, const char *name)
+{
+ return Curl_dynhds_get(dynhds, name, strlen(name));
+}
+
+CURLcode Curl_dynhds_add(struct dynhds *dynhds,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen)
+{
+ struct dynhds_entry *entry = NULL;
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+
+ DEBUGASSERT(dynhds);
+ if(dynhds->max_entries && dynhds->hds_len >= dynhds->max_entries)
+ return CURLE_OUT_OF_MEMORY;
+ if(dynhds->strs_len + namelen + valuelen > dynhds->max_strs_size)
+ return CURLE_OUT_OF_MEMORY;
+
+ entry = entry_new(name, namelen, value, valuelen, dynhds->opts);
+ if(!entry)
+ goto out;
+
+ if(dynhds->hds_len + 1 >= dynhds->hds_allc) {
+ size_t nallc = dynhds->hds_len + 16;
+ struct dynhds_entry **nhds;
+
+ if(dynhds->max_entries && nallc > dynhds->max_entries)
+ nallc = dynhds->max_entries;
+
+ nhds = calloc(nallc, sizeof(struct dynhds_entry *));
+ if(!nhds)
+ goto out;
+ if(dynhds->hds) {
+ memcpy(nhds, dynhds->hds,
+ dynhds->hds_len * sizeof(struct dynhds_entry *));
+ Curl_safefree(dynhds->hds);
+ }
+ dynhds->hds = nhds;
+ dynhds->hds_allc = nallc;
+ }
+ dynhds->hds[dynhds->hds_len++] = entry;
+ entry = NULL;
+ dynhds->strs_len += namelen + valuelen;
+ result = CURLE_OK;
+
+out:
+ if(entry)
+ entry_free(entry);
+ return result;
+}
+
+CURLcode Curl_dynhds_cadd(struct dynhds *dynhds,
+ const char *name, const char *value)
+{
+ return Curl_dynhds_add(dynhds, name, strlen(name), value, strlen(value));
+}
+
+CURLcode Curl_dynhds_h1_add_line(struct dynhds *dynhds,
+ const char *line, size_t line_len)
+{
+ const char *p;
+ const char *name;
+ size_t namelen;
+ const char *value;
+ size_t valuelen, i;
+
+ if(!line || !line_len)
+ return CURLE_OK;
+
+ if((line[0] == ' ') || (line[0] == '\t')) {
+ struct dynhds_entry *e, *e2;
+ /* header continuation, yikes! */
+ if(!dynhds->hds_len)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+
+ while(line_len && ISBLANK(line[0])) {
+ ++line;
+ --line_len;
+ }
+ if(!line_len)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ e = dynhds->hds[dynhds->hds_len-1];
+ e2 = entry_append(e, line, line_len);
+ if(!e2)
+ return CURLE_OUT_OF_MEMORY;
+ dynhds->hds[dynhds->hds_len-1] = e2;
+ entry_free(e);
+ return CURLE_OK;
+ }
+ else {
+ p = memchr(line, ':', line_len);
+ if(!p)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ name = line;
+ namelen = p - line;
+ p++; /* move past the colon */
+ for(i = namelen + 1; i < line_len; ++i, ++p) {
+ if(!ISBLANK(*p))
+ break;
+ }
+ value = p;
+ valuelen = line_len - i;
+
+ p = memchr(value, '\r', valuelen);
+ if(!p)
+ p = memchr(value, '\n', valuelen);
+ if(p)
+ valuelen = (size_t)(p - value);
+
+ return Curl_dynhds_add(dynhds, name, namelen, value, valuelen);
+ }
+}
+
+CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line)
+{
+ return Curl_dynhds_h1_add_line(dynhds, line, line? strlen(line) : 0);
+}
+
+#ifdef DEBUGBUILD
+/* used by unit2602.c */
+
+bool Curl_dynhds_contains(struct dynhds *dynhds,
+ const char *name, size_t namelen)
+{
+ return !!Curl_dynhds_get(dynhds, name, namelen);
+}
+
+bool Curl_dynhds_ccontains(struct dynhds *dynhds, const char *name)
+{
+ return Curl_dynhds_contains(dynhds, name, strlen(name));
+}
+
+size_t Curl_dynhds_count_name(struct dynhds *dynhds,
+ const char *name, size_t namelen)
+{
+ size_t n = 0;
+ if(dynhds->hds_len) {
+ size_t i;
+ for(i = 0; i < dynhds->hds_len; ++i) {
+ if((namelen == dynhds->hds[i]->namelen) &&
+ strncasecompare(name, dynhds->hds[i]->name, namelen))
+ ++n;
+ }
+ }
+ return n;
+}
+
+size_t Curl_dynhds_ccount_name(struct dynhds *dynhds, const char *name)
+{
+ return Curl_dynhds_count_name(dynhds, name, strlen(name));
+}
+
+CURLcode Curl_dynhds_set(struct dynhds *dynhds,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen)
+{
+ Curl_dynhds_remove(dynhds, name, namelen);
+ return Curl_dynhds_add(dynhds, name, namelen, value, valuelen);
+}
+
+size_t Curl_dynhds_remove(struct dynhds *dynhds,
+ const char *name, size_t namelen)
+{
+ size_t n = 0;
+ if(dynhds->hds_len) {
+ size_t i, len;
+ for(i = 0; i < dynhds->hds_len; ++i) {
+ if((namelen == dynhds->hds[i]->namelen) &&
+ strncasecompare(name, dynhds->hds[i]->name, namelen)) {
+ ++n;
+ --dynhds->hds_len;
+ dynhds->strs_len -= (dynhds->hds[i]->namelen +
+ dynhds->hds[i]->valuelen);
+ entry_free(dynhds->hds[i]);
+ len = dynhds->hds_len - i; /* remaining entries */
+ if(len) {
+ memmove(&dynhds->hds[i], &dynhds->hds[i + 1],
+ len * sizeof(dynhds->hds[i]));
+ }
+ --i; /* do this index again */
+ }
+ }
+ }
+ return n;
+}
+
+size_t Curl_dynhds_cremove(struct dynhds *dynhds, const char *name)
+{
+ return Curl_dynhds_remove(dynhds, name, strlen(name));
+}
+
+CURLcode Curl_dynhds_h1_dprint(struct dynhds *dynhds, struct dynbuf *dbuf)
+{
+ CURLcode result = CURLE_OK;
+ size_t i;
+
+ if(!dynhds->hds_len)
+ return result;
+
+ for(i = 0; i < dynhds->hds_len; ++i) {
+ result = Curl_dyn_addf(dbuf, "%.*s: %.*s\r\n",
+ (int)dynhds->hds[i]->namelen, dynhds->hds[i]->name,
+ (int)dynhds->hds[i]->valuelen, dynhds->hds[i]->value);
+ if(result)
+ break;
+ }
+
+ return result;
+}
+
+#endif
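Illustration (not part of the patch) of the single-allocation layout used by entry_new() above: one calloc() carries the struct plus the NUL-terminated name and value, so entry_free() is a single free().

  /* layout of entry_new("Host", 4, "x", 1, 0):
   *   [ struct dynhds_entry ][ 'H' 'o' 's' 't' '\0' ][ 'x' '\0' ]
   *     ^ e                    ^ e->name               ^ e->value
   * total size: sizeof(*e) + namelen + valuelen + 2 */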
diff --git a/libs/libcurl/src/dynhds.h b/libs/libcurl/src/dynhds.h new file mode 100644 index 0000000000..8cb5e6bc56 --- /dev/null +++ b/libs/libcurl/src/dynhds.h @@ -0,0 +1,174 @@ +#ifndef HEADER_CURL_DYNHDS_H
+#define HEADER_CURL_DYNHDS_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+#include "curl_setup.h"
+
+#include <curl/curl.h>
+#include "dynbuf.h"
+
+struct dynbuf;
+
+/**
+ * A single header entry.
+ * `name` and `value` are non-NULL and always NUL terminated.
+ */
+struct dynhds_entry {
+ char *name;
+ char *value;
+ size_t namelen;
+ size_t valuelen;
+};
+
+struct dynhds {
+ struct dynhds_entry **hds;
+ size_t hds_len; /* number of entries in hds */
+ size_t hds_allc; /* size of hds allocation */
+ size_t max_entries; /* size limit number of entries */
+ size_t strs_len; /* length of all strings */
+ size_t max_strs_size; /* max length of all strings */
+ int opts;
+};
+
+#define DYNHDS_OPT_NONE (0)
+#define DYNHDS_OPT_LOWERCASE (1 << 0)
+
+/**
+ * Init for use on first time or after a reset.
+ * Allow `max_entries` headers to be added, 0 for unlimited.
+ * Allow the total size of all names and values added to not exceed
+ * `max_strs_size`.
+ */
+void Curl_dynhds_init(struct dynhds *dynhds, size_t max_entries,
+ size_t max_strs_size);
+/**
+ * Frees all data held in `dynhds`, but not the struct itself.
+ */
+void Curl_dynhds_free(struct dynhds *dynhds);
+
+/**
+ * Reset `dynhds` to the initial init state. May keep allocations
+ * around.
+ */
+void Curl_dynhds_reset(struct dynhds *dynhds);
+
+/**
+ * Return the number of header entries.
+ */
+size_t Curl_dynhds_count(struct dynhds *dynhds);
+
+/**
+ * Set the options to use, replacing any existing ones.
+ * This will not have an effect on already existing headers.
+ */
+void Curl_dynhds_set_opts(struct dynhds *dynhds, int opts);
+
+/**
+ * Return the n-th header entry or NULL if it does not exist.
+ */
+struct dynhds_entry *Curl_dynhds_getn(struct dynhds *dynhds, size_t n);
+
+/**
+ * Return the 1st header entry of the name or NULL if none exists.
+ */
+struct dynhds_entry *Curl_dynhds_get(struct dynhds *dynhds,
+ const char *name, size_t namelen);
+struct dynhds_entry *Curl_dynhds_cget(struct dynhds *dynhds, const char *name);
+
+/**
+ * Return TRUE iff one or more headers with the given name exist.
+ */
+bool Curl_dynhds_contains(struct dynhds *dynhds,
+ const char *name, size_t namelen);
+bool Curl_dynhds_ccontains(struct dynhds *dynhds, const char *name);
+
+/**
+ * Return how often the given name appears in `dynhds`.
+ * Names are case-insensitive.
+ */
+size_t Curl_dynhds_count_name(struct dynhds *dynhds,
+ const char *name, size_t namelen);
+
+/**
+ * Return how often the given 0-terminated name appears in `dynhds`.
+ * Names are case-insensitive.
+ */
+size_t Curl_dynhds_ccount_name(struct dynhds *dynhds, const char *name);
+
+/**
+ * Add a header, name + value, to `dynhds` at the end. Does *not*
+ * check for duplicate names.
+ */
+CURLcode Curl_dynhds_add(struct dynhds *dynhds,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen);
+
+/**
+ * Add a header, c-string name + value, to `dynhds` at the end.
+ */
+CURLcode Curl_dynhds_cadd(struct dynhds *dynhds,
+ const char *name, const char *value);
+
+/**
+ * Remove all entries with the given name.
+ * Returns number of entries removed.
+ */
+size_t Curl_dynhds_remove(struct dynhds *dynhds,
+ const char *name, size_t namelen);
+size_t Curl_dynhds_cremove(struct dynhds *dynhds, const char *name);
+
+
+/**
+ * Set the given header name and value, replacing any entries with
+ * the same name. The header is added at the end of all (remaining)
+ * entries.
+ */
+CURLcode Curl_dynhds_set(struct dynhds *dynhds,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen);
+
+CURLcode Curl_dynhds_cset(struct dynhds *dynhds,
+ const char *name, const char *value);
+
+/**
+ * Add a single header from an HTTP/1.1 formatted line at the end. The line
+ * may contain a delimiting \r\n or just \n. Any characters after
+ * that will be ignored.
+ */
+CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line);
+
+/**
+ * Add a single header from an HTTP/1.1 formatted line at the end. The line
+ * may contain a delimiting \r\n or just \n. Any characters after
+ * that will be ignored.
+ */
+CURLcode Curl_dynhds_h1_add_line(struct dynhds *dynhds,
+ const char *line, size_t line_len);
+
+/**
+ * Add the headers to the given `dynbuf` in HTTP/1.1 format with
+ * CRLF line endings. Does NOT output a trailing empty line.
+ */
+CURLcode Curl_dynhds_h1_dprint(struct dynhds *dynhds, struct dynbuf *dbuf);
+
+#endif /* HEADER_CURL_DYNHDS_H */
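For orientation only (not part of the diff): a minimal, hypothetical sketch of how the dynhds API declared above fits together; the size limits and header values are invented illustration values.

  /* assumed context: libcurl-internal code with dynhds.h available */
  static CURLcode dynhds_demo(struct dynbuf *out)
  {
    struct dynhds hds;
    CURLcode result;

    Curl_dynhds_init(&hds, 0 /* unlimited entries */, 4096 /* max string bytes */);

    result = Curl_dynhds_cadd(&hds, "Accept", "text/html");
    if(!result)
      /* parses a "name: value" line with a trailing CRLF or LF */
      result = Curl_dynhds_h1_cadd_line(&hds, "User-Agent: demo/1.0\r\n");
    if(!result)
      /* writes each entry as "name: value" followed by CRLF into `out` */
      result = Curl_dynhds_h1_dprint(&hds, out);

    Curl_dynhds_free(&hds);
    return result;
  }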
diff --git a/libs/libcurl/src/easy.c b/libs/libcurl/src/easy.c index 497a3570d3..919b83de83 100644 --- a/libs/libcurl/src/easy.c +++ b/libs/libcurl/src/easy.c @@ -24,14 +24,6 @@ #include "curl_setup.h"
-/*
- * See comment in curl_memory.h for the explanation of this sanity check.
- */
-
-#ifdef CURLX_NO_MEMORY_CALLBACKS
-#error "libcurl shall not ever be built with CURLX_NO_MEMORY_CALLBACKS defined"
-#endif
-
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
@@ -217,7 +209,7 @@ static CURLcode global_init(long flags, bool memoryfuncs) return CURLE_OK;
- fail:
+fail:
initialized--; /* undo the increase */
return CURLE_FAILED_INIT;
}
@@ -795,14 +787,12 @@ CURLcode curl_easy_perform_ev(struct Curl_easy *data) */
void curl_easy_cleanup(struct Curl_easy *data)
{
- SIGPIPE_VARIABLE(pipe_st);
-
- if(!data)
- return;
-
- sigpipe_ignore(data, &pipe_st);
- Curl_close(&data);
- sigpipe_restore(&pipe_st);
+ if(GOOD_EASY_HANDLE(data)) {
+ SIGPIPE_VARIABLE(pipe_st);
+ sigpipe_ignore(data, &pipe_st);
+ Curl_close(&data);
+ sigpipe_restore(&pipe_st);
+ }
}
/*
@@ -1003,7 +993,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data) return outcurl;
- fail:
+fail:
if(outcurl) {
#ifndef CURL_DISABLE_COOKIES
@@ -1231,6 +1221,26 @@ CURLcode curl_easy_recv(struct Curl_easy *data, void *buffer, size_t buflen, return CURLE_OK;
}
+#ifdef USE_WEBSOCKETS
+CURLcode Curl_connect_only_attach(struct Curl_easy *data)
+{
+ curl_socket_t sfd;
+ CURLcode result;
+ struct connectdata *c = NULL;
+
+ result = easy_connection(data, &sfd, &c);
+ if(result)
+ return result;
+
+ if(!data->conn)
+ /* on the first invocation, the transfer has been detached from the
+ connection and needs to be reattached */
+ Curl_attach_connection(data, c);
+
+ return CURLE_OK;
+}
+#endif /* USE_WEBSOCKETS */
+
/*
* Sends data over the connected socket.
*
diff --git a/libs/libcurl/src/easyif.h b/libs/libcurl/src/easyif.h index fa403f7f09..b51ddc6693 100644 --- a/libs/libcurl/src/easyif.h +++ b/libs/libcurl/src/easyif.h @@ -30,6 +30,10 @@ CURLcode Curl_senddata(struct Curl_easy *data, const void *buffer,
size_t buflen, ssize_t *n);
+#ifdef USE_WEBSOCKETS
+CURLcode Curl_connect_only_attach(struct Curl_easy *data);
+#endif
+
#ifdef CURLDEBUG
CURL_EXTERN CURLcode curl_easy_perform_ev(struct Curl_easy *easy);
#endif
diff --git a/libs/libcurl/src/file.c b/libs/libcurl/src/file.c index 36d419cc68..39b9e80059 100644 --- a/libs/libcurl/src/file.c +++ b/libs/libcurl/src/file.c @@ -240,7 +240,7 @@ static CURLcode file_connect(struct Curl_easy *data, bool *done) file->freepath = real_path; /* free this when done */
file->fd = fd;
- if(!data->set.upload && (fd == -1)) {
+ if(!data->state.upload && (fd == -1)) {
failf(data, "Couldn't open file %s", data->state.up.path);
file_done(data, CURLE_FILE_COULDNT_READ_FILE, FALSE);
return CURLE_FILE_COULDNT_READ_FILE;
@@ -422,7 +422,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done) Curl_pgrsStartNow(data);
- if(data->set.upload)
+ if(data->state.upload)
return file_upload(data);
file = data->req.p.file;
diff --git a/libs/libcurl/src/fileinfo.c b/libs/libcurl/src/fileinfo.c index 5ded488f77..1c313e5f2c 100644 --- a/libs/libcurl/src/fileinfo.c +++ b/libs/libcurl/src/fileinfo.c @@ -40,7 +40,7 @@ void Curl_fileinfo_cleanup(struct fileinfo *finfo) if(!finfo)
return;
- Curl_safefree(finfo->info.b_data);
+ Curl_dyn_free(&finfo->buf);
free(finfo);
}
#endif
diff --git a/libs/libcurl/src/fileinfo.h b/libs/libcurl/src/fileinfo.h index d2daece49f..4868bacecb 100644 --- a/libs/libcurl/src/fileinfo.h +++ b/libs/libcurl/src/fileinfo.h @@ -26,10 +26,12 @@ #include <curl/curl.h>
#include "llist.h"
+#include "dynbuf.h"
struct fileinfo {
struct curl_fileinfo info;
struct Curl_llist_element list;
+ struct dynbuf buf;
};
struct fileinfo *Curl_fileinfo_alloc(void);
diff --git a/libs/libcurl/src/ftp.c b/libs/libcurl/src/ftp.c index f785a7db7f..17eba04e4e 100644 --- a/libs/libcurl/src/ftp.c +++ b/libs/libcurl/src/ftp.c @@ -1085,8 +1085,6 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data, host = NULL;
/* step 2, create a socket for the requested address */
-
- portsock = CURL_SOCKET_BAD;
error = 0;
for(ai = res; ai; ai = ai->ai_next) {
if(Curl_socket_open(data, ai, NULL, conn->transport, &portsock)) {
@@ -1350,7 +1348,7 @@ static CURLcode ftp_state_prepare_transfer(struct Curl_easy *data) data->set.str[STRING_CUSTOMREQUEST]?
data->set.str[STRING_CUSTOMREQUEST]:
(data->state.list_only?"NLST":"LIST"));
- else if(data->set.upload)
+ else if(data->state.upload)
result = Curl_pp_sendf(data, &ftpc->pp, "PRET STOR %s",
conn->proto.ftpc.file);
else
@@ -3386,7 +3384,7 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status, /* the response code from the transfer showed an error already so no
use checking further */
;
- else if(data->set.upload) {
+ else if(data->state.upload) {
if((-1 != data->state.infilesize) &&
(data->state.infilesize != data->req.writebytecount) &&
!data->set.crlf &&
@@ -3621,7 +3619,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep) /* a transfer is about to take place, or if not a file name was given
so we'll do a SIZE on it later and then we need the right TYPE first */
- if(ftpc->wait_data_conn == TRUE) {
+ if(ftpc->wait_data_conn) {
bool serv_conned;
result = ReceivedServerConnect(data, &serv_conned);
@@ -3642,20 +3640,14 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep) connected back to us */
}
}
- else if(data->set.upload) {
+ else if(data->state.upload) {
result = ftp_nb_type(data, conn, data->state.prefer_ascii,
FTP_STOR_TYPE);
if(result)
return result;
result = ftp_multi_statemach(data, &complete);
- if(ftpc->wait_data_conn)
- /* if we reach the end of the FTP state machine here, *complete will be
- TRUE but so is ftpc->wait_data_conn, which says we need to wait for
- the data connection and therefore we're not actually complete */
- *completep = 0;
- else
- *completep = (int)complete;
+ *completep = (int)complete;
}
else {
/* download */
@@ -3846,7 +3838,7 @@ static CURLcode init_wc_data(struct Curl_easy *data) infof(data, "Wildcard - Parsing started");
return CURLE_OK;
- fail:
+fail:
if(ftpwc) {
Curl_ftp_parselist_data_free(&ftpwc->parser);
free(ftpwc);
@@ -3977,8 +3969,10 @@ static CURLcode wc_statemach(struct Curl_easy *data) case CURLWC_DONE:
case CURLWC_ERROR:
case CURLWC_CLEAR:
- if(wildcard->dtor)
+ if(wildcard->dtor) {
wildcard->dtor(wildcard->ftpwc);
+ wildcard->ftpwc = NULL;
+ }
return result;
}
}
@@ -4140,7 +4134,7 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data) case FTPFILE_NOCWD: /* fastest, but less standard-compliant */
if((pathLen > 0) && (rawPath[pathLen - 1] != '/'))
- fileName = rawPath; /* this is a full file path */
+ fileName = rawPath; /* this is a full file path */
/*
else: ftpc->file is not used anywhere other than for operations on
a file. In other words, never for directory operations.
@@ -4186,7 +4180,7 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data) size_t dirAlloc = 0;
const char *str = rawPath;
for(; *str != 0; ++str)
- if (*str == '/')
+ if(*str == '/')
++dirAlloc;
if(dirAlloc) {
@@ -4231,7 +4225,7 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data) ftpc->file = NULL; /* instead of point to a zero byte,
we make it a NULL pointer */
- if(data->set.upload && !ftpc->file && (ftp->transfer == PPTRANSFER_BODY)) {
+ if(data->state.upload && !ftpc->file && (ftp->transfer == PPTRANSFER_BODY)) {
/* We need a file name when uploading. Return error! */
failf(data, "Uploading to a URL without a file name");
free(rawPath);
diff --git a/libs/libcurl/src/ftplistparser.c b/libs/libcurl/src/ftplistparser.c index 6ad56b9601..7c88595335 100644 --- a/libs/libcurl/src/ftplistparser.c +++ b/libs/libcurl/src/ftplistparser.c @@ -318,8 +318,8 @@ static CURLcode ftp_pl_insert_finfo(struct Curl_easy *data, bool add = TRUE;
struct curl_fileinfo *finfo = &infop->info;
- /* move finfo pointers to b_data */
- char *str = finfo->b_data;
+ /* set the finfo pointers */
+ char *str = Curl_dyn_ptr(&infop->buf);
finfo->filename = str + parser->offsets.filename;
finfo->strings.group = parser->offsets.group ?
str + parser->offsets.group : NULL;
@@ -362,6 +362,8 @@ static CURLcode ftp_pl_insert_finfo(struct Curl_easy *data, return CURLE_OK;
}
+#define MAX_FTPLIST_BUFFER 10000 /* arbitrarily set */
+
size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb,
void *connptr)
{
@@ -369,8 +371,6 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, struct Curl_easy *data = (struct Curl_easy *)connptr;
struct ftp_wc *ftpwc = data->wildcard->ftpwc;
struct ftp_parselist_data *parser = ftpwc->parser;
- struct fileinfo *infop;
- struct curl_fileinfo *finfo;
size_t i = 0;
CURLcode result;
size_t retsize = bufflen;
@@ -387,48 +387,35 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, if(parser->os_type == OS_TYPE_UNKNOWN && bufflen > 0) {
/* considering info about FILE response format */
- parser->os_type = (buffer[0] >= '0' && buffer[0] <= '9') ?
- OS_TYPE_WIN_NT : OS_TYPE_UNIX;
+ parser->os_type = ISDIGIT(buffer[0]) ? OS_TYPE_WIN_NT : OS_TYPE_UNIX;
}
while(i < bufflen) { /* FSM */
-
+ char *mem;
+ size_t len; /* number of bytes of data in the dynbuf */
char c = buffer[i];
+ struct fileinfo *infop;
+ struct curl_fileinfo *finfo;
if(!parser->file_data) { /* tmp file data is not allocated yet */
parser->file_data = Curl_fileinfo_alloc();
if(!parser->file_data) {
parser->error = CURLE_OUT_OF_MEMORY;
goto fail;
}
- parser->file_data->info.b_data = malloc(FTP_BUFFER_ALLOCSIZE);
- if(!parser->file_data->info.b_data) {
- parser->error = CURLE_OUT_OF_MEMORY;
- goto fail;
- }
- parser->file_data->info.b_size = FTP_BUFFER_ALLOCSIZE;
parser->item_offset = 0;
parser->item_length = 0;
+ Curl_dyn_init(&parser->file_data->buf, MAX_FTPLIST_BUFFER);
}
infop = parser->file_data;
finfo = &infop->info;
- finfo->b_data[finfo->b_used++] = c;
- if(finfo->b_used >= finfo->b_size - 1) {
- /* if it is important, extend buffer space for file data */
- char *tmp = realloc(finfo->b_data,
- finfo->b_size + FTP_BUFFER_ALLOCSIZE);
- if(tmp) {
- finfo->b_size += FTP_BUFFER_ALLOCSIZE;
- finfo->b_data = tmp;
- }
- else {
- Curl_fileinfo_cleanup(parser->file_data);
- parser->file_data = NULL;
- parser->error = CURLE_OUT_OF_MEMORY;
- goto fail;
- }
+ if(Curl_dyn_addn(&infop->buf, &c, 1)) {
+ parser->error = CURLE_OUT_OF_MEMORY;
+ goto fail;
}
+ len = Curl_dyn_len(&infop->buf);
+ mem = Curl_dyn_ptr(&infop->buf);
switch(parser->os_type) {
case OS_TYPE_UNIX:
@@ -443,7 +430,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, else {
parser->state.UNIX.main = PL_UNIX_FILETYPE;
/* start FSM again not considering size of directory */
- finfo->b_used = 0;
+ Curl_dyn_reset(&infop->buf);
continue;
}
break;
@@ -451,12 +438,12 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, parser->item_length++;
if(c == '\r') {
parser->item_length--;
- finfo->b_used--;
+ Curl_dyn_setlen(&infop->buf, --len);
}
else if(c == '\n') {
- finfo->b_data[parser->item_length - 1] = 0;
- if(strncmp("total ", finfo->b_data, 6) == 0) {
- char *endptr = finfo->b_data + 6;
+ mem[parser->item_length - 1] = 0;
+ if(!strncmp("total ", mem, 6)) {
+ char *endptr = mem + 6;
/* here we can deal with directory size, pass the leading
whitespace and then the digits */
while(ISBLANK(*endptr))
@@ -468,7 +455,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, goto fail;
}
parser->state.UNIX.main = PL_UNIX_FILETYPE;
- finfo->b_used = 0;
+ Curl_dyn_reset(&infop->buf);
}
else {
parser->error = CURLE_FTP_BAD_FILE_LIST;
@@ -526,8 +513,8 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, parser->error = CURLE_FTP_BAD_FILE_LIST;
goto fail;
}
- finfo->b_data[10] = 0; /* terminate permissions */
- perm = ftp_pl_get_permission(finfo->b_data + parser->item_offset);
+ mem[10] = 0; /* terminate permissions */
+ perm = ftp_pl_get_permission(mem + parser->item_offset);
if(perm & FTP_LP_MALFORMATED_PERM) {
parser->error = CURLE_FTP_BAD_FILE_LIST;
goto fail;
@@ -545,8 +532,8 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.UNIX.sub.hlinks) {
case PL_UNIX_HLINKS_PRESPACE:
if(c != ' ') {
- if(c >= '0' && c <= '9') {
- parser->item_offset = finfo->b_used - 1;
+ if(ISDIGIT(c)) {
+ parser->item_offset = len - 1;
parser->item_length = 1;
parser->state.UNIX.sub.hlinks = PL_UNIX_HLINKS_NUMBER;
}
@@ -561,8 +548,8 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, if(c == ' ') {
char *p;
long int hlinks;
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
- hlinks = strtol(finfo->b_data + parser->item_offset, &p, 10);
+ mem[parser->item_offset + parser->item_length - 1] = 0;
+ hlinks = strtol(mem + parser->item_offset, &p, 10);
if(p[0] == '\0' && hlinks != LONG_MAX && hlinks != LONG_MIN) {
parser->file_data->info.flags |= CURLFINFOFLAG_KNOWN_HLINKCOUNT;
parser->file_data->info.hardlinks = hlinks;
@@ -572,7 +559,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, parser->state.UNIX.main = PL_UNIX_USER;
parser->state.UNIX.sub.user = PL_UNIX_USER_PRESPACE;
}
- else if(c < '0' || c > '9') {
+ else if(!ISDIGIT(c)) {
parser->error = CURLE_FTP_BAD_FILE_LIST;
goto fail;
}
@@ -583,7 +570,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.UNIX.sub.user) {
case PL_UNIX_USER_PRESPACE:
if(c != ' ') {
- parser->item_offset = finfo->b_used - 1;
+ parser->item_offset = len - 1;
parser->item_length = 1;
parser->state.UNIX.sub.user = PL_UNIX_USER_PARSING;
}
@@ -591,7 +578,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, case PL_UNIX_USER_PARSING:
parser->item_length++;
if(c == ' ') {
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
+ mem[parser->item_offset + parser->item_length - 1] = 0;
parser->offsets.user = parser->item_offset;
parser->state.UNIX.main = PL_UNIX_GROUP;
parser->state.UNIX.sub.group = PL_UNIX_GROUP_PRESPACE;
@@ -605,7 +592,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.UNIX.sub.group) {
case PL_UNIX_GROUP_PRESPACE:
if(c != ' ') {
- parser->item_offset = finfo->b_used - 1;
+ parser->item_offset = len - 1;
parser->item_length = 1;
parser->state.UNIX.sub.group = PL_UNIX_GROUP_NAME;
}
@@ -613,7 +600,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, case PL_UNIX_GROUP_NAME:
parser->item_length++;
if(c == ' ') {
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
+ mem[parser->item_offset + parser->item_length - 1] = 0;
parser->offsets.group = parser->item_offset;
parser->state.UNIX.main = PL_UNIX_SIZE;
parser->state.UNIX.sub.size = PL_UNIX_SIZE_PRESPACE;
@@ -627,8 +614,8 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.UNIX.sub.size) {
case PL_UNIX_SIZE_PRESPACE:
if(c != ' ') {
- if(c >= '0' && c <= '9') {
- parser->item_offset = finfo->b_used - 1;
+ if(ISDIGIT(c)) {
+ parser->item_offset = len - 1;
parser->item_length = 1;
parser->state.UNIX.sub.size = PL_UNIX_SIZE_NUMBER;
}
@@ -643,8 +630,8 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, if(c == ' ') {
char *p;
curl_off_t fsize;
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
- if(!curlx_strtoofft(finfo->b_data + parser->item_offset,
+ mem[parser->item_offset + parser->item_length - 1] = 0;
+ if(!curlx_strtoofft(mem + parser->item_offset,
&p, 10, &fsize)) {
if(p[0] == '\0' && fsize != CURL_OFF_T_MAX &&
fsize != CURL_OFF_T_MIN) {
@@ -669,7 +656,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, case PL_UNIX_TIME_PREPART1:
if(c != ' ') {
if(ISALNUM(c)) {
- parser->item_offset = finfo->b_used -1;
+ parser->item_offset = len -1;
parser->item_length = 1;
parser->state.UNIX.sub.time = PL_UNIX_TIME_PART1;
}
@@ -726,10 +713,10 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, case PL_UNIX_TIME_PART3:
parser->item_length++;
if(c == ' ') {
- finfo->b_data[parser->item_offset + parser->item_length -1] = 0;
+ mem[parser->item_offset + parser->item_length -1] = 0;
parser->offsets.time = parser->item_offset;
/*
- if(ftp_pl_gettime(parser, finfo->b_data + parser->item_offset)) {
+ if(ftp_pl_gettime(parser, finfo->mem + parser->item_offset)) {
parser->file_data->flags |= CURLFINFOFLAG_KNOWN_TIME;
}
*/
@@ -753,7 +740,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.UNIX.sub.filename) {
case PL_UNIX_FILENAME_PRESPACE:
if(c != ' ') {
- parser->item_offset = finfo->b_used - 1;
+ parser->item_offset = len - 1;
parser->item_length = 1;
parser->state.UNIX.sub.filename = PL_UNIX_FILENAME_NAME;
}
@@ -764,7 +751,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, parser->state.UNIX.sub.filename = PL_UNIX_FILENAME_WINDOWSEOL;
}
else if(c == '\n') {
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
+ mem[parser->item_offset + parser->item_length - 1] = 0;
parser->offsets.filename = parser->item_offset;
parser->state.UNIX.main = PL_UNIX_FILETYPE;
result = ftp_pl_insert_finfo(data, infop);
@@ -776,7 +763,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, break;
case PL_UNIX_FILENAME_WINDOWSEOL:
if(c == '\n') {
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
+ mem[parser->item_offset + parser->item_length - 1] = 0;
parser->offsets.filename = parser->item_offset;
parser->state.UNIX.main = PL_UNIX_FILETYPE;
result = ftp_pl_insert_finfo(data, infop);
@@ -796,7 +783,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.UNIX.sub.symlink) {
case PL_UNIX_SYMLINK_PRESPACE:
if(c != ' ') {
- parser->item_offset = finfo->b_used - 1;
+ parser->item_offset = len - 1;
parser->item_length = 1;
parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_NAME;
}
@@ -842,7 +829,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, if(c == ' ') {
parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_PRETARGET4;
/* now place where is symlink following */
- finfo->b_data[parser->item_offset + parser->item_length - 4] = 0;
+ mem[parser->item_offset + parser->item_length - 4] = 0;
parser->offsets.filename = parser->item_offset;
parser->item_length = 0;
parser->item_offset = 0;
@@ -858,7 +845,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, case PL_UNIX_SYMLINK_PRETARGET4:
if(c != '\r' && c != '\n') {
parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_TARGET;
- parser->item_offset = finfo->b_used - 1;
+ parser->item_offset = len - 1;
parser->item_length = 1;
}
else {
@@ -872,7 +859,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, parser->state.UNIX.sub.symlink = PL_UNIX_SYMLINK_WINDOWSEOL;
}
else if(c == '\n') {
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
+ mem[parser->item_offset + parser->item_length - 1] = 0;
parser->offsets.symlink_target = parser->item_offset;
result = ftp_pl_insert_finfo(data, infop);
if(result) {
@@ -884,7 +871,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, break;
case PL_UNIX_SYMLINK_WINDOWSEOL:
if(c == '\n') {
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
+ mem[parser->item_offset + parser->item_length - 1] = 0;
parser->offsets.symlink_target = parser->item_offset;
result = ftp_pl_insert_finfo(data, infop);
if(result) {
@@ -938,7 +925,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, case PL_WINNT_TIME_TIME:
if(c == ' ') {
parser->offsets.time = parser->item_offset;
- finfo->b_data[parser->item_offset + parser->item_length -1] = 0;
+ mem[parser->item_offset + parser->item_length -1] = 0;
parser->state.NT.main = PL_WINNT_DIRORSIZE;
parser->state.NT.sub.dirorsize = PL_WINNT_DIRORSIZE_PRESPACE;
parser->item_length = 0;
@@ -954,7 +941,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.NT.sub.dirorsize) {
case PL_WINNT_DIRORSIZE_PRESPACE:
if(c != ' ') {
- parser->item_offset = finfo->b_used - 1;
+ parser->item_offset = len - 1;
parser->item_length = 1;
parser->state.NT.sub.dirorsize = PL_WINNT_DIRORSIZE_CONTENT;
}
@@ -962,14 +949,14 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, case PL_WINNT_DIRORSIZE_CONTENT:
parser->item_length ++;
if(c == ' ') {
- finfo->b_data[parser->item_offset + parser->item_length - 1] = 0;
- if(strcmp("<DIR>", finfo->b_data + parser->item_offset) == 0) {
+ mem[parser->item_offset + parser->item_length - 1] = 0;
+ if(strcmp("<DIR>", mem + parser->item_offset) == 0) {
finfo->filetype = CURLFILETYPE_DIRECTORY;
finfo->size = 0;
}
else {
char *endptr;
- if(curlx_strtoofft(finfo->b_data +
+ if(curlx_strtoofft(mem +
parser->item_offset,
&endptr, 10, &finfo->size)) {
parser->error = CURLE_FTP_BAD_FILE_LIST;
@@ -991,7 +978,7 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, switch(parser->state.NT.sub.filename) {
case PL_WINNT_FILENAME_PRESPACE:
if(c != ' ') {
- parser->item_offset = finfo->b_used -1;
+ parser->item_offset = len -1;
parser->item_length = 1;
parser->state.NT.sub.filename = PL_WINNT_FILENAME_CONTENT;
}
@@ -1000,11 +987,11 @@ size_t Curl_ftp_parselist(char *buffer, size_t size, size_t nmemb, parser->item_length++;
if(c == '\r') {
parser->state.NT.sub.filename = PL_WINNT_FILENAME_WINEOL;
- finfo->b_data[finfo->b_used - 1] = 0;
+ mem[len - 1] = 0;
}
else if(c == '\n') {
parser->offsets.filename = parser->item_offset;
- finfo->b_data[finfo->b_used - 1] = 0;
+ mem[len - 1] = 0;
result = ftp_pl_insert_finfo(data, infop);
if(result) {
parser->error = result;
diff --git a/libs/libcurl/src/h2h3.c b/libs/libcurl/src/h2h3.c deleted file mode 100644 index 6a4c7bc1b8..0000000000 --- a/libs/libcurl/src/h2h3.c +++ /dev/null @@ -1,316 +0,0 @@ -/***************************************************************************
- * _ _ ____ _
- * Project ___| | | | _ \| |
- * / __| | | | |_) | |
- * | (__| |_| | _ <| |___
- * \___|\___/|_| \_\_____|
- *
- * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- * SPDX-License-Identifier: curl
- *
- ***************************************************************************/
-
-#include "curl_setup.h"
-#include "urldata.h"
-#include "h2h3.h"
-#include "transfer.h"
-#include "sendf.h"
-#include "strcase.h"
-
-/* The last 3 #include files should be in this order */
-#include "curl_printf.h"
-#include "curl_memory.h"
-#include "memdebug.h"
-
-/*
- * Curl_pseudo_headers() creates the array with pseudo headers to be
- * used in an HTTP/2 or HTTP/3 request.
- */
-
-#if defined(USE_NGHTTP2) || defined(ENABLE_QUIC)
-
-/* Index where :authority header field will appear in request header
- field list. */
-#define AUTHORITY_DST_IDX 3
-
-/* USHRT_MAX is 65535 == 0xffff */
-#define HEADER_OVERFLOW(x) \
- (x.namelen > 0xffff || x.valuelen > 0xffff - x.namelen)
-
-/*
- * Check header memory for the token "trailers".
- * Parse the tokens as separated by comma and surrounded by whitespace.
- * Returns TRUE if found or FALSE if not.
- */
-static bool contains_trailers(const char *p, size_t len)
-{
- const char *end = p + len;
- for(;;) {
- for(; p != end && (*p == ' ' || *p == '\t'); ++p)
- ;
- if(p == end || (size_t)(end - p) < sizeof("trailers") - 1)
- return FALSE;
- if(strncasecompare("trailers", p, sizeof("trailers") - 1)) {
- p += sizeof("trailers") - 1;
- for(; p != end && (*p == ' ' || *p == '\t'); ++p)
- ;
- if(p == end || *p == ',')
- return TRUE;
- }
- /* skip to next token */
- for(; p != end && *p != ','; ++p)
- ;
- if(p == end)
- return FALSE;
- ++p;
- }
-}
-
-typedef enum {
- /* Send header to server */
- HEADERINST_FORWARD,
- /* Don't send header to server */
- HEADERINST_IGNORE,
- /* Discard header, and replace it with "te: trailers" */
- HEADERINST_TE_TRAILERS
-} header_instruction;
-
-/* Decides how to treat given header field. */
-static header_instruction inspect_header(const char *name, size_t namelen,
- const char *value, size_t valuelen) {
- switch(namelen) {
- case 2:
- if(!strncasecompare("te", name, namelen))
- return HEADERINST_FORWARD;
-
- return contains_trailers(value, valuelen) ?
- HEADERINST_TE_TRAILERS : HEADERINST_IGNORE;
- case 7:
- return strncasecompare("upgrade", name, namelen) ?
- HEADERINST_IGNORE : HEADERINST_FORWARD;
- case 10:
- return (strncasecompare("connection", name, namelen) ||
- strncasecompare("keep-alive", name, namelen)) ?
- HEADERINST_IGNORE : HEADERINST_FORWARD;
- case 16:
- return strncasecompare("proxy-connection", name, namelen) ?
- HEADERINST_IGNORE : HEADERINST_FORWARD;
- case 17:
- return strncasecompare("transfer-encoding", name, namelen) ?
- HEADERINST_IGNORE : HEADERINST_FORWARD;
- default:
- return HEADERINST_FORWARD;
- }
-}
-
-CURLcode Curl_pseudo_headers(struct Curl_easy *data,
- const char *mem, /* the request */
- const size_t len /* size of request */,
- size_t* hdrlen /* opt size of headers read */,
- struct h2h3req **hp)
-{
- struct connectdata *conn = data->conn;
- size_t nheader = 0;
- size_t i;
- size_t authority_idx;
- char *hdbuf = (char *)mem;
- char *end, *line_end;
- struct h2h3pseudo *nva = NULL;
- struct h2h3req *hreq = NULL;
- char *vptr;
-
- /* Calculate number of headers contained in [mem, mem + len). Assumes a
- correctly generated HTTP header field block. */
- for(i = 1; i < len; ++i) {
- if(hdbuf[i] == '\n' && hdbuf[i - 1] == '\r') {
- ++nheader;
- ++i;
- }
- }
- if(nheader < 2) {
- goto fail;
- }
- /* We counted additional 2 \r\n in the first and last line. We need 3
- new headers: :method, :path and :scheme. Therefore we need one
- more space. */
- nheader += 1;
- hreq = malloc(sizeof(struct h2h3req) +
- sizeof(struct h2h3pseudo) * (nheader - 1));
- if(!hreq) {
- goto fail;
- }
-
- nva = &hreq->header[0];
-
- /* Extract :method, :path from request line
- We do line endings with CRLF so checking for CR is enough */
- line_end = memchr(hdbuf, '\r', len);
- if(!line_end) {
- goto fail;
- }
-
- /* Method does not contain spaces */
- end = memchr(hdbuf, ' ', line_end - hdbuf);
- if(!end || end == hdbuf)
- goto fail;
- nva[0].name = H2H3_PSEUDO_METHOD;
- nva[0].namelen = sizeof(H2H3_PSEUDO_METHOD) - 1;
- nva[0].value = hdbuf;
- nva[0].valuelen = (size_t)(end - hdbuf);
-
- hdbuf = end + 1;
-
- /* Path may contain spaces so scan backwards */
- end = NULL;
- for(i = (size_t)(line_end - hdbuf); i; --i) {
- if(hdbuf[i - 1] == ' ') {
- end = &hdbuf[i - 1];
- break;
- }
- }
- if(!end || end == hdbuf)
- goto fail;
- nva[1].name = H2H3_PSEUDO_PATH;
- nva[1].namelen = sizeof(H2H3_PSEUDO_PATH) - 1;
- nva[1].value = hdbuf;
- nva[1].valuelen = (end - hdbuf);
-
- nva[2].name = H2H3_PSEUDO_SCHEME;
- nva[2].namelen = sizeof(H2H3_PSEUDO_SCHEME) - 1;
- vptr = Curl_checkheaders(data, STRCONST(H2H3_PSEUDO_SCHEME));
- if(vptr) {
- vptr += sizeof(H2H3_PSEUDO_SCHEME);
- while(*vptr && ISBLANK(*vptr))
- vptr++;
- nva[2].value = vptr;
- infof(data, "set pseudo header %s to %s", H2H3_PSEUDO_SCHEME, vptr);
- }
- else {
- if(conn->handler->flags & PROTOPT_SSL)
- nva[2].value = "https";
- else
- nva[2].value = "http";
- }
- nva[2].valuelen = strlen((char *)nva[2].value);
-
- authority_idx = 0;
- i = 3;
- while(i < nheader) {
- size_t hlen;
-
- hdbuf = line_end + 2;
-
- /* check for next CR, but only within the piece of data left in the given
- buffer */
- line_end = memchr(hdbuf, '\r', len - (hdbuf - (char *)mem));
- if(!line_end || (line_end == hdbuf))
- goto fail;
-
- /* header continuation lines are not supported */
- if(*hdbuf == ' ' || *hdbuf == '\t')
- goto fail;
-
- for(end = hdbuf; end < line_end && *end != ':'; ++end)
- ;
- if(end == hdbuf || end == line_end)
- goto fail;
- hlen = end - hdbuf;
-
- if(hlen == 4 && strncasecompare("host", hdbuf, 4)) {
- authority_idx = i;
- nva[i].name = H2H3_PSEUDO_AUTHORITY;
- nva[i].namelen = sizeof(H2H3_PSEUDO_AUTHORITY) - 1;
- }
- else {
- nva[i].namelen = (size_t)(end - hdbuf);
- /* Lower case the header name for HTTP/3 */
- Curl_strntolower((char *)hdbuf, hdbuf, nva[i].namelen);
- nva[i].name = hdbuf;
- }
- hdbuf = end + 1;
- while(*hdbuf == ' ' || *hdbuf == '\t')
- ++hdbuf;
- end = line_end;
-
- switch(inspect_header((const char *)nva[i].name, nva[i].namelen, hdbuf,
- end - hdbuf)) {
- case HEADERINST_IGNORE:
- /* skip header fields prohibited by HTTP/2 specification. */
- --nheader;
- continue;
- case HEADERINST_TE_TRAILERS:
- nva[i].value = "trailers";
- nva[i].valuelen = sizeof("trailers") - 1;
- break;
- default:
- nva[i].value = hdbuf;
- nva[i].valuelen = (end - hdbuf);
- }
-
- ++i;
- }
-
- /* :authority must come before non-pseudo header fields */
- if(authority_idx && authority_idx != AUTHORITY_DST_IDX) {
- struct h2h3pseudo authority = nva[authority_idx];
- for(i = authority_idx; i > AUTHORITY_DST_IDX; --i) {
- nva[i] = nva[i - 1];
- }
- nva[i] = authority;
- }
-
- /* Warn stream may be rejected if cumulative length of headers is too
- large. */
-#define MAX_ACC 60000 /* <64KB to account for some overhead */
- {
- size_t acc = 0;
-
- for(i = 0; i < nheader; ++i) {
- acc += nva[i].namelen + nva[i].valuelen;
-
- infof(data, "h2h3 [%.*s: %.*s]",
- (int)nva[i].namelen, nva[i].name,
- (int)nva[i].valuelen, nva[i].value);
- }
-
- if(acc > MAX_ACC) {
- infof(data, "http_request: Warning: The cumulative length of all "
- "headers exceeds %d bytes and that could cause the "
- "stream to be rejected.", MAX_ACC);
- }
- }
-
- if(hdrlen) {
- /* Skip trailing CRLF */
- end += 4;
- *hdrlen = end - mem;
- }
-
- hreq->entries = nheader;
- *hp = hreq;
-
- return CURLE_OK;
-
- fail:
- free(hreq);
- return CURLE_OUT_OF_MEMORY;
-}
-
-void Curl_pseudo_free(struct h2h3req *hp)
-{
- free(hp);
-}
-
-#endif /* USE_NGHTTP2 or HTTP/3 enabled */
diff --git a/libs/libcurl/src/h2h3.h b/libs/libcurl/src/h2h3.h deleted file mode 100644 index 2e456f6a10..0000000000 --- a/libs/libcurl/src/h2h3.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef HEADER_CURL_H2H3_H
-#define HEADER_CURL_H2H3_H
-/***************************************************************************
- * _ _ ____ _
- * Project ___| | | | _ \| |
- * / __| | | | |_) | |
- * | (__| |_| | _ <| |___
- * \___|\___/|_| \_\_____|
- *
- * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
- *
- * This software is licensed as described in the file COPYING, which
- * you should have received as part of this distribution. The terms
- * are also available at https://curl.se/docs/copyright.html.
- *
- * You may opt to use, copy, modify, merge, publish, distribute and/or sell
- * copies of the Software, and permit persons to whom the Software is
- * furnished to do so, under the terms of the COPYING file.
- *
- * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
- * KIND, either express or implied.
- *
- * SPDX-License-Identifier: curl
- *
- ***************************************************************************/
-#include "curl_setup.h"
-
-#define H2H3_PSEUDO_METHOD ":method"
-#define H2H3_PSEUDO_SCHEME ":scheme"
-#define H2H3_PSEUDO_AUTHORITY ":authority"
-#define H2H3_PSEUDO_PATH ":path"
-#define H2H3_PSEUDO_STATUS ":status"
-
-struct h2h3pseudo {
- const char *name;
- size_t namelen;
- const char *value;
- size_t valuelen;
-};
-
-struct h2h3req {
- size_t entries;
- struct h2h3pseudo header[1]; /* the array is allocated to contain entries */
-};
-
-/*
- * Curl_pseudo_headers() creates the array with pseudo headers to be
- * used in an HTTP/2 or HTTP/3 request. Returns an allocated struct.
- * Free it with Curl_pseudo_free().
- */
-CURLcode Curl_pseudo_headers(struct Curl_easy *data,
- const char *request,
- const size_t len,
- size_t* hdrlen /* optional */,
- struct h2h3req **hp);
-
-/*
- * Curl_pseudo_free() frees a h2h3req struct.
- */
-void Curl_pseudo_free(struct h2h3req *hp);
-
-#endif /* HEADER_CURL_H2H3_H */
diff --git a/libs/libcurl/src/hash.c b/libs/libcurl/src/hash.c index f44c8035e5..8a1aadfda4 100644 --- a/libs/libcurl/src/hash.c +++ b/libs/libcurl/src/hash.c @@ -330,7 +330,6 @@ Curl_hash_next_element(struct Curl_hash_iterator *iter) struct Curl_hash_element *he = iter->current_element->ptr;
return he;
}
- iter->current_element = NULL;
return NULL;
}
diff --git a/libs/libcurl/src/headers.c b/libs/libcurl/src/headers.c index e05552dd3b..1272979677 100644 --- a/libs/libcurl/src/headers.c +++ b/libs/libcurl/src/headers.c @@ -325,7 +325,7 @@ CURLcode Curl_headers_push(struct Curl_easy *data, const char *header, hs, &hs->node);
data->state.prevhead = hs;
return CURLE_OK;
- fail:
+fail:
free(hs);
return result;
}
@@ -336,6 +336,7 @@ CURLcode Curl_headers_push(struct Curl_easy *data, const char *header, static void headers_init(struct Curl_easy *data)
{
Curl_llist_init(&data->state.httphdrs, NULL);
+ data->state.prevhead = NULL;
}
/*
diff --git a/libs/libcurl/src/hostip.c b/libs/libcurl/src/hostip.c index 0731075ba6..57479b1428 100644 --- a/libs/libcurl/src/hostip.c +++ b/libs/libcurl/src/hostip.c @@ -61,6 +61,7 @@ #include "doh.h"
#include "warnless.h"
#include "strcase.h"
+#include "easy_lock.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
@@ -70,14 +71,19 @@ #include <SystemConfiguration/SCDynamicStoreCopySpecific.h>
#endif
-#if defined(CURLRES_SYNCH) && \
- defined(HAVE_ALARM) && defined(SIGALRM) && defined(HAVE_SIGSETJMP)
+#if defined(CURLRES_SYNCH) && \
+ defined(HAVE_ALARM) && \
+ defined(SIGALRM) && \
+ defined(HAVE_SIGSETJMP) && \
+ defined(GLOBAL_INIT_IS_THREADSAFE)
/* alarm-based timeouts can only be used with all the dependencies satisfied */
#define USE_ALARM_TIMEOUT
#endif
#define MAX_HOSTCACHE_LEN (255 + 7) /* max FQDN + colon + port number + zero */
+#define MAX_DNS_CACHE_SIZE 29999
+
/*
* hostip.c explained
* ==================
@@ -122,7 +128,7 @@ static void freednsentry(void *freethis); /*
* Return # of addresses in a Curl_addrinfo struct
*/
-int Curl_num_addresses(const struct Curl_addrinfo *addr)
+static int num_addresses(const struct Curl_addrinfo *addr)
{
int i = 0;
while(addr) {
@@ -189,8 +195,9 @@ create_hostcache_id(const char *name, }
struct hostcache_prune_data {
- long cache_timeout;
time_t now;
+ time_t oldest; /* oldest time in cache not pruned. */
+ int cache_timeout;
};
/*
@@ -203,28 +210,40 @@ struct hostcache_prune_data { static int
hostcache_timestamp_remove(void *datap, void *hc)
{
- struct hostcache_prune_data *data =
+ struct hostcache_prune_data *prune =
(struct hostcache_prune_data *) datap;
struct Curl_dns_entry *c = (struct Curl_dns_entry *) hc;
- return (0 != c->timestamp)
- && (data->now - c->timestamp >= data->cache_timeout);
+ if(c->timestamp) {
+ /* age in seconds */
+ time_t age = prune->now - c->timestamp;
+ if(age >= prune->cache_timeout)
+ return TRUE;
+ if(age > prune->oldest)
+ prune->oldest = age;
+ }
+ return FALSE;
}
/*
* Prune the DNS cache. This assumes that a lock has already been taken.
+ * Returns the 'age' of the oldest still kept entry.
*/
-static void
-hostcache_prune(struct Curl_hash *hostcache, long cache_timeout, time_t now)
+static time_t
+hostcache_prune(struct Curl_hash *hostcache, int cache_timeout,
+ time_t now)
{
struct hostcache_prune_data user;
user.cache_timeout = cache_timeout;
user.now = now;
+ user.oldest = 0;
Curl_hash_clean_with_criterium(hostcache,
(void *) &user,
hostcache_timestamp_remove);
+
+ return user.oldest;
}
/*
@@ -234,10 +253,11 @@ hostcache_prune(struct Curl_hash *hostcache, long cache_timeout, time_t now) void Curl_hostcache_prune(struct Curl_easy *data)
{
time_t now;
+ /* the timeout may be set to -1 (forever) */
+ int timeout = data->set.dns_cache_timeout;
- if((data->set.dns_cache_timeout == -1) || !data->dns.hostcache)
- /* cache forever means never prune, and NULL hostcache means
- we can't do it */
+ if(!data->dns.hostcache)
+ /* NULL hostcache means we can't do it */
return;
if(data->share)
@@ -245,20 +265,29 @@ void Curl_hostcache_prune(struct Curl_easy *data) time(&now);
- /* Remove outdated and unused entries from the hostcache */
- hostcache_prune(data->dns.hostcache,
- data->set.dns_cache_timeout,
- now);
+ do {
+ /* Remove outdated and unused entries from the hostcache */
+ time_t oldest = hostcache_prune(data->dns.hostcache, timeout, now);
+
+ if(oldest < INT_MAX)
+ timeout = (int)oldest; /* we know it fits */
+ else
+ timeout = INT_MAX - 1;
+
+ /* if the cache size is still too big, use the oldest age as new
+ prune limit */
+ } while(timeout && (data->dns.hostcache->size > MAX_DNS_CACHE_SIZE));
if(data->share)
Curl_share_unlock(data, CURL_LOCK_DATA_DNS);
}
-#ifdef HAVE_SIGSETJMP
+#ifdef USE_ALARM_TIMEOUT
/* Beware this is a global and unique instance. This is used to store the
return address that we can jump back to from inside a signal handler. This
is not thread-safe stuff. */
-sigjmp_buf curl_jmpenv;
+static sigjmp_buf curl_jmpenv;
+static curl_simple_lock curl_jmpenv_lock;
#endif
/* lookup address, returns entry if found and not stale */
@@ -290,6 +319,7 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data, time(&user.now);
user.cache_timeout = data->set.dns_cache_timeout;
+ user.oldest = 0;
if(hostcache_timestamp_remove(&user, dns)) {
infof(data, "Hostname in DNS cache was stale, zapped");
@@ -380,7 +410,7 @@ UNITTEST CURLcode Curl_shuffle_addr(struct Curl_easy *data, struct Curl_addrinfo **addr)
{
CURLcode result = CURLE_OK;
- const int num_addrs = Curl_num_addresses(*addr);
+ const int num_addrs = num_addresses(*addr);
if(num_addrs > 1) {
struct Curl_addrinfo **nodes;
@@ -652,6 +682,14 @@ enum resolve_t Curl_resolv(struct Curl_easy *data, CURLcode result;
enum resolve_t rc = CURLRESOLV_ERROR; /* default to failure */
struct connectdata *conn = data->conn;
+ /* Intentionally refuse to resolve .onion TLDs (RFC 7686) */
+ size_t hostname_len = strlen(hostname);
+ if(hostname_len >= 7 &&
+ (curl_strequal(&hostname[hostname_len - 6], ".onion") ||
+ curl_strequal(&hostname[hostname_len - 7], ".onion."))) {
+ failf(data, "Not resolving .onion address (RFC 7686)");
+ return CURLRESOLV_ERROR;
+ }
*entry = NULL;
#ifndef CURL_DISABLE_DOH
conn->bits.doh = FALSE; /* default is not */
@@ -824,7 +862,6 @@ enum resolve_t Curl_resolv(struct Curl_easy *data, static
void alarmfunc(int sig)
{
- /* this is for "-ansi -Wall -pedantic" to stop complaining! (rabe) */
(void)sig;
siglongjmp(curl_jmpenv, 1);
}
@@ -904,6 +941,8 @@ enum resolve_t Curl_resolv_timeout(struct Curl_easy *data, This should be the last thing we do before calling Curl_resolv(),
as otherwise we'd have to worry about variables that get modified
before we invoke Curl_resolv() (and thus use "volatile"). */
+ curl_simple_lock_lock(&curl_jmpenv_lock);
+
if(sigsetjmp(curl_jmpenv, 1)) {
/* this is coming from a siglongjmp() after an alarm signal */
failf(data, "name lookup timed out");
@@ -972,6 +1011,8 @@ clean_up: #endif
#endif /* HAVE_SIGACTION */
+ curl_simple_lock_unlock(&curl_jmpenv_lock);
+
/* switch back the alarm() to either zero or to what it was before minus
the time we spent until now! */
if(prev_alarm) {
@@ -1196,7 +1237,7 @@ CURLcode Curl_loadhostpairs(struct Curl_easy *data) goto err;
error = false;
- err:
+err:
if(error) {
failf(data, "Couldn't parse CURLOPT_RESOLVE entry '%s'",
hostp->data);
diff --git a/libs/libcurl/src/hostip.h b/libs/libcurl/src/hostip.h index 018af6b5c7..28381dc37a 100644 --- a/libs/libcurl/src/hostip.h +++ b/libs/libcurl/src/hostip.h @@ -132,9 +132,6 @@ void Curl_init_dnscache(struct Curl_hash *hash, int hashsize); /* prune old entries from the DNS cache */
void Curl_hostcache_prune(struct Curl_easy *data);
-/* Return # of addresses in a Curl_addrinfo struct */
-int Curl_num_addresses(const struct Curl_addrinfo *addr);
-
/* IPv4 threadsafe resolve function used for synch and asynch builds */
struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname, int port);
@@ -186,15 +183,6 @@ Curl_cache_addr(struct Curl_easy *data, struct Curl_addrinfo *addr, #define CURL_INADDR_NONE INADDR_NONE
#endif
-#ifdef HAVE_SIGSETJMP
-/* Forward-declaration of variable defined in hostip.c. Beware this
- * is a global and unique instance. This is used to store the return
- * address that we can jump back to from inside a signal handler.
- * This is not thread-safe stuff.
- */
-extern sigjmp_buf curl_jmpenv;
-#endif
-
/*
* Function provided by the resolver backend to set DNS servers to use.
*/
diff --git a/libs/libcurl/src/hsts.c b/libs/libcurl/src/hsts.c index 2d8a791071..535465ce79 100644 --- a/libs/libcurl/src/hsts.c +++ b/libs/libcurl/src/hsts.c @@ -204,7 +204,7 @@ CURLcode Curl_hsts_parse(struct hsts *h, const char *hostname, p++;
if(*p == ';')
p++;
- } while (*p);
+ } while(*p);
if(!gotma)
/* max-age is mandatory */
@@ -390,7 +390,7 @@ CURLcode Curl_hsts_save(struct Curl_easy *data, struct hsts *h, unlink(tempstore);
}
free(tempstore);
- skipsave:
+skipsave:
if(data->set.hsts_write) {
/* if there's a write callback */
struct curl_index i; /* count */
@@ -534,7 +534,7 @@ static CURLcode hsts_load(struct hsts *h, const char *file) }
return result;
- fail:
+fail:
Curl_safefree(h->filename);
fclose(fp);
return CURLE_OUT_OF_MEMORY;
diff --git a/libs/libcurl/src/http.c b/libs/libcurl/src/http.c index 055e250e2d..c335f872e0 100644 --- a/libs/libcurl/src/http.c +++ b/libs/libcurl/src/http.c @@ -71,6 +71,7 @@ #include "url.h"
#include "share.h"
#include "hostip.h"
+#include "dynhds.h"
#include "http.h"
#include "select.h"
#include "parsedate.h" /* for the week day and month names */
@@ -397,7 +398,7 @@ static CURLcode http_output_basic(struct Curl_easy *data, bool proxy) goto fail;
}
- fail:
+fail:
free(out);
return result;
}
@@ -423,7 +424,7 @@ static CURLcode http_output_bearer(struct Curl_easy *data) goto fail;
}
- fail:
+fail:
return result;
}
@@ -1009,7 +1010,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy, if(authp->picked == CURLAUTH_NEGOTIATE) {
CURLcode result = Curl_input_negotiate(data, conn, proxy, auth);
if(!result) {
- DEBUGASSERT(!data->req.newurl);
+ free(data->req.newurl);
data->req.newurl = strdup(data->state.url);
if(!data->req.newurl)
return CURLE_OUT_OF_MEMORY;
@@ -1304,7 +1305,7 @@ CURLcode Curl_buffer_send(struct dynbuf *in, if((conn->handler->flags & PROTOPT_SSL
#ifndef CURL_DISABLE_PROXY
- || conn->http_proxy.proxytype == CURLPROXY_HTTPS
+ || IS_HTTPS_PROXY(conn->http_proxy.proxytype)
#endif
)
&& conn->httpversion != 20) {
@@ -1713,6 +1714,157 @@ CURLcode Curl_http_compile_trailers(struct curl_slist *trailers, return result;
}
+static bool hd_name_eq(const char *n1, size_t n1len,
+ const char *n2, size_t n2len)
+{
+ if(n1len == n2len) {
+ return strncasecompare(n1, n2, n1len);
+ }
+ return FALSE;
+}
+
+CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
+ bool is_connect,
+ struct dynhds *hds)
+{
+ struct connectdata *conn = data->conn;
+ char *ptr;
+ struct curl_slist *h[2];
+ struct curl_slist *headers;
+ int numlists = 1; /* by default */
+ int i;
+
+#ifndef CURL_DISABLE_PROXY
+ enum proxy_use proxy;
+
+ if(is_connect)
+ proxy = HEADER_CONNECT;
+ else
+ proxy = conn->bits.httpproxy && !conn->bits.tunnel_proxy?
+ HEADER_PROXY:HEADER_SERVER;
+
+ switch(proxy) {
+ case HEADER_SERVER:
+ h[0] = data->set.headers;
+ break;
+ case HEADER_PROXY:
+ h[0] = data->set.headers;
+ if(data->set.sep_headers) {
+ h[1] = data->set.proxyheaders;
+ numlists++;
+ }
+ break;
+ case HEADER_CONNECT:
+ if(data->set.sep_headers)
+ h[0] = data->set.proxyheaders;
+ else
+ h[0] = data->set.headers;
+ break;
+ }
+#else
+ (void)is_connect;
+ h[0] = data->set.headers;
+#endif
+
+ /* loop through one or two lists */
+ for(i = 0; i < numlists; i++) {
+ for(headers = h[i]; headers; headers = headers->next) {
+ const char *name, *value;
+ size_t namelen, valuelen;
+
+ /* There are 2 quirks in place for custom headers:
+ * 1. setting only 'name:' to suppress a header from being sent
+ * 2. setting only 'name;' to send an empty (illegal) header
+ */
+ ptr = strchr(headers->data, ':');
+ if(ptr) {
+ name = headers->data;
+ namelen = ptr - headers->data;
+ ptr++; /* pass the colon */
+ while(*ptr && ISSPACE(*ptr))
+ ptr++;
+ if(*ptr) {
+ value = ptr;
+ valuelen = strlen(value);
+ }
+ else {
+ /* quirk #1, suppress this header */
+ continue;
+ }
+ }
+ else {
+ ptr = strchr(headers->data, ';');
+
+ if(!ptr) {
+ /* neither ':' nor ';' in the provided header value;
+ * ignore it silently */
+ continue;
+ }
+
+ name = headers->data;
+ namelen = ptr - headers->data;
+ ptr++; /* pass the semicolon */
+ while(*ptr && ISSPACE(*ptr))
+ ptr++;
+ if(!*ptr) {
+ /* quirk #2, send an empty header */
+ value = "";
+ valuelen = 0;
+ }
+ else {
+ /* this may be used for something else in the future,
+ * ignore this for now */
+ continue;
+ }
+ }
+
+ DEBUGASSERT(name && value);
+ if(data->state.aptr.host &&
+ /* a Host: header was sent already, don't pass on any custom Host:
+ header as that will produce *two* in the same request! */
+ hd_name_eq(name, namelen, STRCONST("Host:")))
+ ;
+ else if(data->state.httpreq == HTTPREQ_POST_FORM &&
+ /* this header (extended by formdata.c) is sent later */
+ hd_name_eq(name, namelen, STRCONST("Content-Type:")))
+ ;
+ else if(data->state.httpreq == HTTPREQ_POST_MIME &&
+ /* this header is sent later */
+ hd_name_eq(name, namelen, STRCONST("Content-Type:")))
+ ;
+ else if(conn->bits.authneg &&
+ /* while doing auth neg, don't allow the custom length since
+ we will force length zero then */
+ hd_name_eq(name, namelen, STRCONST("Content-Length:")))
+ ;
+ else if(data->state.aptr.te &&
+ /* when asking for Transfer-Encoding, don't pass on a custom
+ Connection: */
+ hd_name_eq(name, namelen, STRCONST("Connection:")))
+ ;
+ else if((conn->httpversion >= 20) &&
+ hd_name_eq(name, namelen, STRCONST("Transfer-Encoding:")))
+ /* HTTP/2 doesn't support chunked requests */
+ ;
+ else if((hd_name_eq(name, namelen, STRCONST("Authorization:")) ||
+ hd_name_eq(name, namelen, STRCONST("Cookie:"))) &&
+ /* be careful of sending this potentially sensitive header to
+ other hosts */
+ !Curl_auth_allowed_to_host(data))
+ ;
+ else {
+ CURLcode result;
+
+ result = Curl_dynhds_add(hds, name, namelen, value, valuelen);
+ if(result)
+ return result;
+ }
+ }
+ }
+
+ return CURLE_OK;
+}
+
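Application-side illustration of the two quirks the loop above honors, using the public libcurl API (a sketch; the custom header names are invented and `easy` is assumed to be an existing CURL handle):

  struct curl_slist *list = NULL;
  list = curl_slist_append(list, "X-Example: yes"); /* regular custom header */
  list = curl_slist_append(list, "Accept:");        /* quirk #1: bare colon suppresses the header */
  list = curl_slist_append(list, "X-Empty;");       /* quirk #2: semicolon sends it with an empty value */
  curl_easy_setopt(easy, CURLOPT_HTTPHEADER, list);
  /* the list must outlive the transfer; release it afterwards with
     curl_slist_free_all(list) */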
CURLcode Curl_add_custom_headers(struct Curl_easy *data,
bool is_connect,
#ifndef USE_HYPER
@@ -1960,7 +2112,7 @@ void Curl_http_method(struct Curl_easy *data, struct connectdata *conn, Curl_HttpReq httpreq = (Curl_HttpReq)data->state.httpreq;
const char *request;
if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
- data->set.upload)
+ data->state.upload)
httpreq = HTTPREQ_PUT;
/* Now set the 'request' pointer to the proper request string */
@@ -2011,6 +2163,7 @@ CURLcode Curl_http_useragent(struct Curl_easy *data) CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
{
const char *ptr;
+ struct dynamically_allocated_data *aptr = &data->state.aptr;
if(!data->state.this_is_a_follow) {
/* Free to avoid leaking memory on multiple requests */
free(data->state.first_host);
@@ -2022,7 +2175,7 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn) data->state.first_remote_port = conn->remote_port;
data->state.first_remote_protocol = conn->handler->protocol;
}
- Curl_safefree(data->state.aptr.host);
+ Curl_safefree(aptr->host);
ptr = Curl_checkheaders(data, STRCONST("Host"));
if(ptr && (!data->state.this_is_a_follow ||
@@ -2057,19 +2210,16 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn) if(colon)
*colon = 0; /* The host must not include an embedded port number */
}
- Curl_safefree(data->state.aptr.cookiehost);
- data->state.aptr.cookiehost = cookiehost;
+ Curl_safefree(aptr->cookiehost);
+ aptr->cookiehost = cookiehost;
}
#endif
if(strcmp("Host:", ptr)) {
- data->state.aptr.host = aprintf("Host:%s\r\n", &ptr[5]);
- if(!data->state.aptr.host)
+ aptr->host = aprintf("Host:%s\r\n", &ptr[5]);
+ if(!aptr->host)
return CURLE_OUT_OF_MEMORY;
}
- else
- /* when clearing the header */
- data->state.aptr.host = NULL;
}
else {
/* When building Host: headers, we must put the host name within
@@ -2082,18 +2232,14 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn) (conn->remote_port == PORT_HTTP)) )
/* if(HTTPS on port 443) OR (HTTP on port 80) then don't include
the port number in the host string */
- data->state.aptr.host = aprintf("Host: %s%s%s\r\n",
- conn->bits.ipv6_ip?"[":"",
- host,
- conn->bits.ipv6_ip?"]":"");
+ aptr->host = aprintf("Host: %s%s%s\r\n", conn->bits.ipv6_ip?"[":"",
+ host, conn->bits.ipv6_ip?"]":"");
else
- data->state.aptr.host = aprintf("Host: %s%s%s:%d\r\n",
- conn->bits.ipv6_ip?"[":"",
- host,
- conn->bits.ipv6_ip?"]":"",
- conn->remote_port);
+ aptr->host = aprintf("Host: %s%s%s:%d\r\n", conn->bits.ipv6_ip?"[":"",
+ host, conn->bits.ipv6_ip?"]":"",
+ conn->remote_port);
- if(!data->state.aptr.host)
+ if(!aptr->host)
/* without Host: we can't make a nice request */
return CURLE_OUT_OF_MEMORY;
}
@@ -2277,7 +2423,7 @@ CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn, if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
(((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
http->postsize < 0) ||
- ((data->set.upload || httpreq == HTTPREQ_POST) &&
+ ((data->state.upload || httpreq == HTTPREQ_POST) &&
data->state.infilesize == -1))) {
if(conn->bits.authneg)
/* don't enable chunked during auth neg */
@@ -2990,7 +3136,17 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done) DEBUGASSERT(Curl_conn_is_http3(data, conn, FIRSTSOCKET));
break;
case CURL_HTTP_VERSION_2:
- DEBUGASSERT(Curl_conn_is_http2(data, conn, FIRSTSOCKET));
+#ifndef CURL_DISABLE_PROXY
+ if(!Curl_conn_is_http2(data, conn, FIRSTSOCKET) &&
+ conn->bits.proxy && !conn->bits.tunnel_proxy
+ ) {
+ result = Curl_http2_switch(data, conn, FIRSTSOCKET);
+ if(result)
+ return result;
+ }
+ else
+#endif
+ DEBUGASSERT(Curl_conn_is_http2(data, conn, FIRSTSOCKET));
break;
case CURL_HTTP_VERSION_1_1:
/* continue with HTTP/1.1 when explicitly requested */
@@ -3420,7 +3576,7 @@ CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn, TRUE);
if(result)
return result;
- if(!k->chunk) {
+ if(!k->chunk && data->set.http_transfer_encoding) {
/* if this isn't chunked, only close can signal the end of this transfer
as Content-Length is said not to be trusted for transfer-encoding! */
connclose(conn, "HTTP/1.1 transfer-encoding without chunks");
@@ -4344,4 +4500,385 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data, return CURLE_OK;
}
+
+/* Decode HTTP status code string. */
+CURLcode Curl_http_decode_status(int *pstatus, const char *s, size_t len)
+{
+ CURLcode result = CURLE_BAD_FUNCTION_ARGUMENT;
+ int status = 0;
+ int i;
+
+ if(len != 3)
+ goto out;
+
+ for(i = 0; i < 3; ++i) {
+ char c = s[i];
+
+ if(c < '0' || c > '9')
+ goto out;
+
+ status *= 10;
+ status += c - '0';
+ }
+ result = CURLE_OK;
+out:
+ *pstatus = result? -1 : status;
+ return result;
+}
+
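A quick illustration of the helper above (sketch, not part of the diff):

  int status;
  if(!Curl_http_decode_status(&status, "304", 3)) {
    /* status is now 304; for any non-3-digit input the function stores -1
       and returns CURLE_BAD_FUNCTION_ARGUMENT */
  }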
+/* simple local implementation of strndup(), since strndup() isn't portable */
+static char *my_strndup(const char *ptr, size_t len)
+{
+ char *copy = malloc(len + 1);
+ if(!copy)
+ return NULL;
+ memcpy(copy, ptr, len);
+ copy[len] = '\0';
+ return copy;
+}
+
+CURLcode Curl_http_req_make(struct httpreq **preq,
+ const char *method, size_t m_len,
+ const char *scheme, size_t s_len,
+ const char *authority, size_t a_len,
+ const char *path, size_t p_len)
+{
+ struct httpreq *req;
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+
+ DEBUGASSERT(method);
+ if(m_len + 1 >= sizeof(req->method))
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+
+ req = calloc(1, sizeof(*req));
+ if(!req)
+ goto out;
+ memcpy(req->method, method, m_len);
+ if(scheme) {
+ req->scheme = my_strndup(scheme, s_len);
+ if(!req->scheme)
+ goto out;
+ }
+ if(authority) {
+ req->authority = my_strndup(authority, a_len);
+ if(!req->authority)
+ goto out;
+ }
+ if(path) {
+ req->path = my_strndup(path, p_len);
+ if(!req->path)
+ goto out;
+ }
+ Curl_dynhds_init(&req->headers, 0, DYN_H2_HEADERS);
+ Curl_dynhds_init(&req->trailers, 0, DYN_H2_TRAILERS);
+ result = CURLE_OK;
+
+out:
+ if(result && req)
+ Curl_http_req_free(req);
+ *preq = result? NULL : req;
+ return result;
+}
+
+static CURLcode req_assign_url_authority(struct httpreq *req, CURLU *url)
+{
+ char *user, *pass, *host, *port;
+ struct dynbuf buf;
+ CURLUcode uc;
+ CURLcode result = CURLE_URL_MALFORMAT;
+
+ user = pass = host = port = NULL;
+ Curl_dyn_init(&buf, DYN_HTTP_REQUEST);
+
+ uc = curl_url_get(url, CURLUPART_HOST, &host, 0);
+ if(uc && uc != CURLUE_NO_HOST)
+ goto out;
+ if(!host) {
+ req->authority = NULL;
+ result = CURLE_OK;
+ goto out;
+ }
+
+ uc = curl_url_get(url, CURLUPART_PORT, &port, CURLU_NO_DEFAULT_PORT);
+ if(uc && uc != CURLUE_NO_PORT)
+ goto out;
+ uc = curl_url_get(url, CURLUPART_USER, &user, 0);
+ if(uc && uc != CURLUE_NO_USER)
+ goto out;
+ if(user) {
+ uc = curl_url_get(url, CURLUPART_PASSWORD, &pass, 0);
+ if(uc && uc != CURLUE_NO_PASSWORD)
+ goto out;
+ }
+
+ if(user) {
+ result = Curl_dyn_add(&buf, user);
+ if(result)
+ goto out;
+ if(pass) {
+ result = Curl_dyn_addf(&buf, ":%s", pass);
+ if(result)
+ goto out;
+ }
+ result = Curl_dyn_add(&buf, "@");
+ if(result)
+ goto out;
+ }
+ result = Curl_dyn_add(&buf, host);
+ if(result)
+ goto out;
+ if(port) {
+ result = Curl_dyn_addf(&buf, ":%s", port);
+ if(result)
+ goto out;
+ }
+ req->authority = strdup(Curl_dyn_ptr(&buf));
+ if(!req->authority)
+ goto out;
+ result = CURLE_OK;
+
+out:
+ free(user);
+ free(pass);
+ free(host);
+ free(port);
+ Curl_dyn_free(&buf);
+ return result;
+}
+
+static CURLcode req_assign_url_path(struct httpreq *req, CURLU *url)
+{
+ char *path, *query;
+ struct dynbuf buf;
+ CURLUcode uc;
+ CURLcode result = CURLE_URL_MALFORMAT;
+
+ path = query = NULL;
+ Curl_dyn_init(&buf, DYN_HTTP_REQUEST);
+
+ uc = curl_url_get(url, CURLUPART_PATH, &path, CURLU_PATH_AS_IS);
+ if(uc)
+ goto out;
+ uc = curl_url_get(url, CURLUPART_QUERY, &query, 0);
+ if(uc && uc != CURLUE_NO_QUERY)
+ goto out;
+
+ if(!path && !query) {
+ req->path = NULL;
+ }
+ else if(path && !query) {
+ req->path = path;
+ path = NULL;
+ }
+ else {
+ if(path) {
+ result = Curl_dyn_add(&buf, path);
+ if(result)
+ goto out;
+ }
+ if(query) {
+ result = Curl_dyn_addf(&buf, "?%s", query);
+ if(result)
+ goto out;
+ }
+ req->path = strdup(Curl_dyn_ptr(&buf));
+ if(!req->path)
+ goto out;
+ }
+ result = CURLE_OK;
+
+out:
+ free(path);
+ free(query);
+ Curl_dyn_free(&buf);
+ return result;
+}
+
+CURLcode Curl_http_req_make2(struct httpreq **preq,
+ const char *method, size_t m_len,
+ CURLU *url, const char *scheme_default)
+{
+ struct httpreq *req;
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+ CURLUcode uc;
+
+ DEBUGASSERT(method);
+ if(m_len + 1 >= sizeof(req->method))
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+
+ req = calloc(1, sizeof(*req));
+ if(!req)
+ goto out;
+ memcpy(req->method, method, m_len);
+
+ uc = curl_url_get(url, CURLUPART_SCHEME, &req->scheme, 0);
+ if(uc && uc != CURLUE_NO_SCHEME)
+ goto out;
+ if(!req->scheme && scheme_default) {
+ req->scheme = strdup(scheme_default);
+ if(!req->scheme)
+ goto out;
+ }
+
+ result = req_assign_url_authority(req, url);
+ if(result)
+ goto out;
+ result = req_assign_url_path(req, url);
+ if(result)
+ goto out;
+
+ Curl_dynhds_init(&req->headers, 0, DYN_H2_HEADERS);
+ Curl_dynhds_init(&req->trailers, 0, DYN_H2_TRAILERS);
+ result = CURLE_OK;
+
+out:
+ if(result && req)
+ Curl_http_req_free(req);
+ *preq = result? NULL : req;
+ return result;
+}
+
+void Curl_http_req_free(struct httpreq *req)
+{
+ if(req) {
+ free(req->scheme);
+ free(req->authority);
+ free(req->path);
+ Curl_dynhds_free(&req->headers);
+ Curl_dynhds_free(&req->trailers);
+ free(req);
+ }
+}
+
+struct name_const {
+ const char *name;
+ size_t namelen;
+};
+
+static struct name_const H2_NON_FIELD[] = {
+ { STRCONST("Host") },
+ { STRCONST("Upgrade") },
+ { STRCONST("Connection") },
+ { STRCONST("Keep-Alive") },
+ { STRCONST("Proxy-Connection") },
+ { STRCONST("Transfer-Encoding") },
+};
+
+static bool h2_non_field(const char *name, size_t namelen)
+{
+ size_t i;
+ for(i = 0; i < sizeof(H2_NON_FIELD)/sizeof(H2_NON_FIELD[0]); ++i) {
+ if(namelen < H2_NON_FIELD[i].namelen)
+ return FALSE;
+ if(namelen == H2_NON_FIELD[i].namelen &&
+ strcasecompare(H2_NON_FIELD[i].name, name))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+CURLcode Curl_http_req_to_h2(struct dynhds *h2_headers,
+ struct httpreq *req, struct Curl_easy *data)
+{
+ const char *scheme = NULL, *authority = NULL;
+ struct dynhds_entry *e;
+ size_t i;
+ CURLcode result;
+
+ DEBUGASSERT(req);
+ DEBUGASSERT(h2_headers);
+
+ if(req->scheme) {
+ scheme = req->scheme;
+ }
+ else if(strcmp("CONNECT", req->method)) {
+ scheme = Curl_checkheaders(data, STRCONST(HTTP_PSEUDO_SCHEME));
+ if(scheme) {
+ scheme += sizeof(HTTP_PSEUDO_SCHEME);
+ while(*scheme && ISBLANK(*scheme))
+ scheme++;
+ infof(data, "set pseudo header %s to %s", HTTP_PSEUDO_SCHEME, scheme);
+ }
+ else {
+ scheme = (data->conn && data->conn->handler->flags & PROTOPT_SSL)?
+ "https" : "http";
+ }
+ }
+
+ if(req->authority) {
+ authority = req->authority;
+ }
+ else {
+ e = Curl_dynhds_get(&req->headers, STRCONST("Host"));
+ if(e)
+ authority = e->value;
+ }
+
+ Curl_dynhds_reset(h2_headers);
+ Curl_dynhds_set_opts(h2_headers, DYNHDS_OPT_LOWERCASE);
+ result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_METHOD),
+ req->method, strlen(req->method));
+ if(!result && scheme) {
+ result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_SCHEME),
+ scheme, strlen(scheme));
+ }
+ if(!result && authority) {
+ result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_AUTHORITY),
+ authority, strlen(authority));
+ }
+ if(!result && req->path) {
+ result = Curl_dynhds_add(h2_headers, STRCONST(HTTP_PSEUDO_PATH),
+ req->path, strlen(req->path));
+ }
+ for(i = 0; !result && i < Curl_dynhds_count(&req->headers); ++i) {
+ e = Curl_dynhds_getn(&req->headers, i);
+ if(!h2_non_field(e->name, e->namelen)) {
+ result = Curl_dynhds_add(h2_headers, e->name, e->namelen,
+ e->value, e->valuelen);
+ }
+ }
+
+ return result;
+}
+
+CURLcode Curl_http_resp_make(struct http_resp **presp,
+ int status,
+ const char *description)
+{
+ struct http_resp *resp;
+ CURLcode result = CURLE_OUT_OF_MEMORY;
+
+ resp = calloc(1, sizeof(*resp));
+ if(!resp)
+ goto out;
+
+ resp->status = status;
+ if(description) {
+ resp->description = strdup(description);
+ if(!resp->description)
+ goto out;
+ }
+ Curl_dynhds_init(&resp->headers, 0, DYN_H2_HEADERS);
+ Curl_dynhds_init(&resp->trailers, 0, DYN_H2_TRAILERS);
+ result = CURLE_OK;
+
+out:
+ if(result && resp)
+ Curl_http_resp_free(resp);
+ *presp = result? NULL : resp;
+ return result;
+}
+
+void Curl_http_resp_free(struct http_resp *resp)
+{
+ if(resp) {
+ free(resp->description);
+ Curl_dynhds_free(&resp->headers);
+ Curl_dynhds_free(&resp->trailers);
+ if(resp->prev)
+ Curl_http_resp_free(resp->prev);
+ free(resp);
+ }
+}
+
#endif /* CURL_DISABLE_HTTP */
diff --git a/libs/libcurl/src/http.h b/libs/libcurl/src/http.h index a74844cc01..dea4db7fb6 100644 --- a/libs/libcurl/src/http.h +++ b/libs/libcurl/src/http.h @@ -29,6 +29,8 @@ #include <pthread.h>
#endif
+#include "bufq.h"
+#include "dynhds.h"
#include "ws.h"
typedef enum {
@@ -42,7 +44,7 @@ typedef enum { #ifndef CURL_DISABLE_HTTP
-#if defined(ENABLE_QUIC) || defined(USE_NGHTTP2)
+#if defined(ENABLE_QUIC)
#include <stdint.h>
#endif
@@ -60,6 +62,7 @@ extern const struct Curl_handler Curl_handler_wss; #endif
#endif /* websockets */
+struct dynhds;
/* Header specific functions */
bool Curl_compareheader(const char *headerline, /* line to check */
@@ -97,6 +100,10 @@ CURLcode Curl_add_custom_headers(struct Curl_easy *data, void *headers
#endif
);
+CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
+ bool is_connect,
+ struct dynhds *hds);
+
CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
struct dynbuf *buf,
struct Curl_easy *handle);
@@ -178,10 +185,6 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data); #endif /* CURL_DISABLE_HTTP */
-#ifdef USE_NGHTTP3
-struct h3out; /* see ngtcp2 */
-#endif
-
/****************************************************************************
* HTTP unique setup
***************************************************************************/
@@ -209,91 +212,13 @@ struct HTTP { HTTPSEND_BODY /* sending body */
} sending;
-#ifdef USE_WEBSOCKETS
- struct websocket ws;
-#endif
-
#ifndef CURL_DISABLE_HTTP
+ void *h2_ctx; /* HTTP/2 implementation context */
+ void *h3_ctx; /* HTTP/3 implementation context */
struct dynbuf send_buffer; /* used if the request couldn't be sent in one
chunk, points to an allocated send_buffer
struct */
#endif
-#ifdef USE_NGHTTP2
- /*********** for HTTP/2 we store stream-local data here *************/
- int32_t stream_id; /* stream we are interested in */
-
- /* We store non-final and final response headers here, per-stream */
- struct dynbuf header_recvbuf;
- size_t nread_header_recvbuf; /* number of bytes in header_recvbuf fed into
- upper layer */
- struct dynbuf trailer_recvbuf;
- const uint8_t *pausedata; /* pointer to data received in on_data_chunk */
- size_t pauselen; /* the number of bytes left in data */
- bool close_handled; /* TRUE if stream closure is handled by libcurl */
-
- char **push_headers; /* allocated array */
- size_t push_headers_used; /* number of entries filled in */
- size_t push_headers_alloc; /* number of entries allocated */
- uint32_t error; /* HTTP/2 stream error code */
-#endif
-#if defined(USE_NGHTTP2) || defined(USE_NGHTTP3)
- bool bodystarted;
- int status_code; /* HTTP status code */
- char *mem; /* points to a buffer in memory to store received data */
- size_t len; /* size of the buffer 'mem' points to */
- size_t memlen; /* size of data copied to mem */
-#endif
-#if defined(USE_NGHTTP2) || defined(ENABLE_QUIC)
- /* fields used by both HTTP/2 and HTTP/3 */
- const uint8_t *upload_mem; /* points to a buffer to read from */
- size_t upload_len; /* size of the buffer 'upload_mem' points to */
- curl_off_t upload_left; /* number of bytes left to upload */
- bool closed; /* TRUE on stream close */
- bool reset; /* TRUE on stream reset */
-#endif
-
-#ifdef ENABLE_QUIC
-#ifndef USE_MSH3
- /*********** for HTTP/3 we store stream-local data here *************/
- int64_t stream3_id; /* stream we are interested in */
- uint64_t error3; /* HTTP/3 stream error code */
- bool firstheader; /* FALSE until headers arrive */
- bool firstbody; /* FALSE until body arrives */
- bool h3req; /* FALSE until request is issued */
-#endif /* !USE_MSH3 */
- bool upload_done;
-#endif /* ENABLE_QUIC */
-#ifdef USE_NGHTTP3
- size_t recv_buf_nonflow; /* buffered bytes, not counting for flow control */
- struct h3out *h3out; /* per-stream buffers for upload */
- struct dynbuf overflow; /* excess data received during a single Curl_read */
-#endif /* USE_NGHTTP3 */
-#ifdef USE_MSH3
- struct MSH3_REQUEST *req;
-#ifdef _WIN32
- CRITICAL_SECTION recv_lock;
-#else /* !_WIN32 */
- pthread_mutex_t recv_lock;
-#endif /* _WIN32 */
- /* Receive Buffer (Headers and Data) */
- uint8_t* recv_buf;
- size_t recv_buf_alloc;
- size_t recv_buf_max;
- /* Receive Headers */
- size_t recv_header_len;
- bool recv_header_complete;
- /* Receive Data */
- size_t recv_data_len;
- bool recv_data_complete;
- /* General Receive Error */
- CURLcode recv_error;
-#endif /* USE_MSH3 */
-#ifdef USE_QUICHE
- bool h3_got_header; /* TRUE when h3 stream has recvd some HEADER */
- bool h3_recving_data; /* TRUE when h3 stream is reading DATA */
- bool h3_body_pending; /* TRUE when h3 stream may have more body DATA */
- struct h3_event_node *pending;
-#endif /* USE_QUICHE */
};
CURLcode Curl_http_size(struct Curl_easy *data);
@@ -328,4 +253,79 @@ Curl_http_output_auth(struct Curl_easy *data, bool proxytunnel); /* TRUE if this is the request setting
up the proxy tunnel */
+/* Decode HTTP status code string. */
+CURLcode Curl_http_decode_status(int *pstatus, const char *s, size_t len);
+
+
+/**
+ * All about a core HTTP request, excluding the body
+ */
+struct httpreq {
+ char method[12];
+ char *scheme;
+ char *authority;
+ char *path;
+ struct dynhds headers;
+ struct dynhds trailers;
+};
+
+/**
+ * Create a HTTP request struct.
+ */
+CURLcode Curl_http_req_make(struct httpreq **preq,
+ const char *method, size_t m_len,
+ const char *scheme, size_t s_len,
+ const char *authority, size_t a_len,
+ const char *path, size_t p_len);
+
+CURLcode Curl_http_req_make2(struct httpreq **preq,
+ const char *method, size_t m_len,
+ CURLU *url, const char *scheme_default);
+
+void Curl_http_req_free(struct httpreq *req);
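As an illustration only (not part of the diff), a request struct could be built from a parsed URL roughly like this; the URL string is hypothetical:

    CURLU *u = curl_url();
    struct httpreq *req = NULL;
    CURLcode rc = CURLE_OUT_OF_MEMORY;

    if(u && !curl_url_set(u, CURLUPART_URL, "https://example.com/p?x=1", 0))
      rc = Curl_http_req_make2(&req, "GET", 3, u, "https");
    /* on success: req->scheme is "https", req->authority "example.com",
       req->path "/p?x=1" */
    Curl_http_req_free(req);
    curl_url_cleanup(u);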
+
+#define HTTP_PSEUDO_METHOD ":method"
+#define HTTP_PSEUDO_SCHEME ":scheme"
+#define HTTP_PSEUDO_AUTHORITY ":authority"
+#define HTTP_PSEUDO_PATH ":path"
+#define HTTP_PSEUDO_STATUS ":status"
+
+/**
+ * Create the list of HTTP/2 headers which represent the request,
+ * using HTTP/2 pseudo headers preceding the `req->headers`.
+ *
+ * Applies the following transformations:
+ * - if `authority` is set, any "Host" header is removed.
+ * - if `authority` is unset and a "Host" header is present, use
+ * that as `authority` and remove "Host"
+ * - removes connection-specific header fields as defined in RFC 9113 ch. 8.2.2
+ * - lower-cases the header field names
+ *
+ * @param h2_headers will contain the HTTP/2 headers on success
+ * @param req the request to transform
+ * @param data the handle to look up defaults like ':scheme' from
+ */
+CURLcode Curl_http_req_to_h2(struct dynhds *h2_headers,
+ struct httpreq *req, struct Curl_easy *data);
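A sketch of driving the conversion (illustrative only, not part of the diff; `data` stands in for a valid Curl_easy handle):

    struct dynhds h2_headers;
    struct httpreq *req = NULL;
    CURLcode rc;

    rc = Curl_http_req_make(&req, "GET", 3, "https", 5,
                            "example.com", 11, "/index.html", 11);
    if(!rc) {
      Curl_dynhds_init(&h2_headers, 0, DYN_H2_HEADERS);
      rc = Curl_http_req_to_h2(&h2_headers, req, data);
      /* on success the list starts with :method, :scheme, :authority and
         :path, followed by the request headers minus connection-specific
         fields, with all names lower-cased */
      Curl_dynhds_free(&h2_headers);
    }
    Curl_http_req_free(req);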
+
+/**
+ * All about a core HTTP response, excluding the body
+ */
+struct http_resp {
+ int status;
+ char *description;
+ struct dynhds headers;
+ struct dynhds trailers;
+ struct http_resp *prev;
+};
+
+/**
+ * Create a HTTP response struct.
+ */
+CURLcode Curl_http_resp_make(struct http_resp **presp,
+ int status,
+ const char *description);
+
+void Curl_http_resp_free(struct http_resp *resp);
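Correspondingly for responses, a minimal sketch (not part of the diff):

    struct http_resp *resp = NULL;
    CURLcode rc = Curl_http_resp_make(&resp, 404, "Not Found");
    if(!rc) {
      /* resp->status == 404, resp->description is a copy of "Not Found",
         header and trailer lists start out empty */
      Curl_http_resp_free(resp);
    }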
+
#endif /* HEADER_CURL_HTTP_H */
diff --git a/libs/libcurl/src/http1.c b/libs/libcurl/src/http1.c new file mode 100644 index 0000000000..a5c85f50b4 --- /dev/null +++ b/libs/libcurl/src/http1.c @@ -0,0 +1,349 @@ +/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#ifndef CURL_DISABLE_HTTP
+
+#include "urldata.h"
+#include <curl/curl.h>
+#include "http.h"
+#include "http1.h"
+#include "urlapi-int.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+
+#define MAX_URL_LEN (4*1024)
+
+void Curl_h1_req_parse_init(struct h1_req_parser *parser, size_t max_line_len)
+{
+ memset(parser, 0, sizeof(*parser));
+ parser->max_line_len = max_line_len;
+ Curl_bufq_init(&parser->scratch, max_line_len, 1);
+}
+
+void Curl_h1_req_parse_free(struct h1_req_parser *parser)
+{
+ if(parser) {
+ Curl_http_req_free(parser->req);
+ Curl_bufq_free(&parser->scratch);
+ parser->req = NULL;
+ parser->done = FALSE;
+ }
+}
+
+static ssize_t detect_line(struct h1_req_parser *parser,
+ const char *buf, const size_t buflen, int options,
+ CURLcode *err)
+{
+ const char *line_end;
+ size_t len;
+
+ DEBUGASSERT(!parser->line);
+ line_end = memchr(buf, '\n', buflen);
+ if(!line_end) {
+ *err = (buflen > parser->max_line_len)? CURLE_URL_MALFORMAT : CURLE_AGAIN;
+ return -1;
+ }
+ len = line_end - buf + 1;
+ if(len > parser->max_line_len) {
+ *err = CURLE_URL_MALFORMAT;
+ return -1;
+ }
+
+ if(options & H1_PARSE_OPT_STRICT) {
+ if((len == 1) || (buf[len - 2] != '\r')) {
+ *err = CURLE_URL_MALFORMAT;
+ return -1;
+ }
+ parser->line = buf;
+ parser->line_len = len - 2;
+ }
+ else {
+ parser->line = buf;
+ parser->line_len = len - (((len == 1) || (buf[len - 2] != '\r'))? 1 : 2);
+ }
+ *err = CURLE_OK;
+ return (ssize_t)len;
+}
+
+static ssize_t next_line(struct h1_req_parser *parser,
+ const char *buf, const size_t buflen, int options,
+ CURLcode *err)
+{
+ ssize_t nread = 0, n;
+
+ if(parser->line) {
+ if(parser->scratch_skip) {
+ /* last line was from scratch. Remove it now, since we are done
+ * with it and look for the next one. */
+ Curl_bufq_skip_and_shift(&parser->scratch, parser->scratch_skip);
+ parser->scratch_skip = 0;
+ }
+ parser->line = NULL;
+ parser->line_len = 0;
+ }
+
+ if(Curl_bufq_is_empty(&parser->scratch)) {
+ nread = detect_line(parser, buf, buflen, options, err);
+ if(nread < 0) {
+ if(*err != CURLE_AGAIN)
+ return -1;
+ /* not a complete line, add to scratch for later revisit */
+ nread = Curl_bufq_write(&parser->scratch,
+ (const unsigned char *)buf, buflen, err);
+ return nread;
+ }
+ /* found one */
+ }
+ else {
+ const char *sbuf;
+ size_t sbuflen;
+
+ /* scratch contains bytes from last attempt, add more to it */
+ if(buflen) {
+ const char *line_end;
+ size_t add_len;
+ ssize_t pos;
+
+ line_end = memchr(buf, '\n', buflen);
+ pos = line_end? (line_end - buf + 1) : -1;
+ add_len = (pos >= 0)? (size_t)pos : buflen;
+ nread = Curl_bufq_write(&parser->scratch,
+ (const unsigned char *)buf, add_len, err);
+ if(nread < 0) {
+        /* Being unable to add anything to scratch is an error, since we
+         * should have seen a complete line in there before. */
+ if(*err == CURLE_AGAIN)
+ *err = CURLE_URL_MALFORMAT;
+ return -1;
+ }
+ }
+
+ if(Curl_bufq_peek(&parser->scratch,
+ (const unsigned char **)&sbuf, &sbuflen)) {
+ n = detect_line(parser, sbuf, sbuflen, options, err);
+ if(n < 0 && *err != CURLE_AGAIN)
+ return -1; /* real error */
+ parser->scratch_skip = (size_t)n;
+ }
+ else {
+ /* we SHOULD be able to peek at scratch data */
+ DEBUGASSERT(0);
+ }
+ }
+ return nread;
+}
+
+static CURLcode start_req(struct h1_req_parser *parser,
+ const char *scheme_default, int options)
+{
+ const char *p, *m, *target, *hv, *scheme, *authority, *path;
+ size_t m_len, target_len, hv_len, scheme_len, authority_len, path_len;
+ size_t i;
+ CURLU *url = NULL;
+ CURLcode result = CURLE_URL_MALFORMAT; /* Use this as default fail */
+
+ DEBUGASSERT(!parser->req);
+ /* line must match: "METHOD TARGET HTTP_VERSION" */
+ p = memchr(parser->line, ' ', parser->line_len);
+ if(!p || p == parser->line)
+ goto out;
+
+ m = parser->line;
+ m_len = p - parser->line;
+ target = p + 1;
+ target_len = hv_len = 0;
+ hv = NULL;
+
+ /* URL may contain spaces so scan backwards */
+ for(i = parser->line_len; i > m_len; --i) {
+ if(parser->line[i] == ' ') {
+ hv = &parser->line[i + 1];
+ hv_len = parser->line_len - i;
+ target_len = (hv - target) - 1;
+ break;
+ }
+ }
+  /* no SPACE found, or empty TARGET or empty HTTP_VERSION */
+ if(!target_len || !hv_len)
+ goto out;
+
+ /* TODO: we do not check HTTP_VERSION for conformity, should
+   * do that when STRICT option is supplied. */
+ (void)hv;
+
+ /* The TARGET can be (rfc 9112, ch. 3.2):
+ * origin-form: path + optional query
+ * absolute-form: absolute URI
+ * authority-form: host+port for CONNECT
+ * asterisk-form: '*' for OPTIONS
+ *
+ * from TARGET, we derive `scheme` `authority` `path`
+ * origin-form -- -- TARGET
+ * absolute-form URL* URL* URL*
+ * authority-form -- TARGET --
+ * asterisk-form -- -- TARGET
+ */
+ scheme = authority = path = NULL;
+ scheme_len = authority_len = path_len = 0;
+
+ if(target_len == 1 && target[0] == '*') {
+ /* asterisk-form */
+ path = target;
+ path_len = target_len;
+ }
+ else if(!strncmp("CONNECT", m, m_len)) {
+ /* authority-form */
+ authority = target;
+ authority_len = target_len;
+ }
+ else if(target[0] == '/') {
+ /* origin-form */
+ path = target;
+ path_len = target_len;
+ }
+ else {
+ /* origin-form OR absolute-form */
+ CURLUcode uc;
+ char tmp[MAX_URL_LEN];
+
+ /* default, unless we see an absolute URL */
+ path = target;
+ path_len = target_len;
+
+ /* URL parser wants 0-termination */
+ if(target_len >= sizeof(tmp))
+ goto out;
+ memcpy(tmp, target, target_len);
+ tmp[target_len] = '\0';
+ /* See if treating TARGET as an absolute URL makes sense */
+ if(Curl_is_absolute_url(tmp, NULL, 0, FALSE)) {
+ int url_options;
+
+ url = curl_url();
+ if(!url) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
+ }
+ url_options = (CURLU_NON_SUPPORT_SCHEME|
+ CURLU_PATH_AS_IS|
+ CURLU_NO_DEFAULT_PORT);
+ if(!(options & H1_PARSE_OPT_STRICT))
+ url_options |= CURLU_ALLOW_SPACE;
+ uc = curl_url_set(url, CURLUPART_URL, tmp, url_options);
+ if(uc) {
+ goto out;
+ }
+ }
+
+ if(!url && (options & H1_PARSE_OPT_STRICT)) {
+ /* we should have an absolute URL or have seen `/` earlier */
+ goto out;
+ }
+ }
+
+ if(url) {
+ result = Curl_http_req_make2(&parser->req, m, m_len, url, scheme_default);
+ }
+ else {
+ if(!scheme && scheme_default) {
+ scheme = scheme_default;
+ scheme_len = strlen(scheme_default);
+ }
+ result = Curl_http_req_make(&parser->req, m, m_len, scheme, scheme_len,
+ authority, authority_len, path, path_len);
+ }
+
+out:
+ curl_url_cleanup(url);
+ return result;
+}
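To make the target handling above concrete, a few request lines and the parts start_req() derives from them (illustrative, host names are hypothetical):

    "OPTIONS * HTTP/1.1"                  -> path "*"
    "GET /a/b?x=1 HTTP/1.1"               -> path "/a/b?x=1"
    "CONNECT proxy.example:443 HTTP/1.1"  -> authority "proxy.example:443"
    "GET http://example.com/a HTTP/1.1"   -> scheme "http",
                                             authority "example.com",
                                             path "/a"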
+
+ssize_t Curl_h1_req_parse_read(struct h1_req_parser *parser,
+ const char *buf, size_t buflen,
+ const char *scheme_default, int options,
+ CURLcode *err)
+{
+ ssize_t nread = 0, n;
+
+ *err = CURLE_OK;
+ while(!parser->done) {
+ n = next_line(parser, buf, buflen, options, err);
+ if(n < 0) {
+ if(*err != CURLE_AGAIN) {
+ nread = -1;
+ }
+ *err = CURLE_OK;
+ goto out;
+ }
+
+ /* Consume this line */
+ nread += (size_t)n;
+ buf += (size_t)n;
+ buflen -= (size_t)n;
+
+ if(!parser->line) {
+ /* consumed bytes, but line not complete */
+ if(!buflen)
+ goto out;
+ }
+ else if(!parser->req) {
+ *err = start_req(parser, scheme_default, options);
+ if(*err) {
+ nread = -1;
+ goto out;
+ }
+ }
+ else if(parser->line_len == 0) {
+ /* last, empty line, we are finished */
+ if(!parser->req) {
+ *err = CURLE_URL_MALFORMAT;
+ nread = -1;
+ goto out;
+ }
+ parser->done = TRUE;
+ Curl_bufq_free(&parser->scratch);
+ /* last chance adjustments */
+ }
+ else {
+ *err = Curl_dynhds_h1_add_line(&parser->req->headers,
+ parser->line, parser->line_len);
+ if(*err) {
+ nread = -1;
+ goto out;
+ }
+ }
+ }
+
+out:
+ return nread;
+}
+
+
+#endif /* !CURL_DISABLE_HTTP */
diff --git a/libs/libcurl/src/http1.h b/libs/libcurl/src/http1.h new file mode 100644 index 0000000000..4d8c7f41bd --- /dev/null +++ b/libs/libcurl/src/http1.h @@ -0,0 +1,61 @@ +#ifndef HEADER_CURL_HTTP1_H
+#define HEADER_CURL_HTTP1_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#ifndef CURL_DISABLE_HTTP
+#include "bufq.h"
+#include "http.h"
+
+#define H1_PARSE_OPT_NONE (0)
+#define H1_PARSE_OPT_STRICT (1 << 0)
+
+#define H1_PARSE_DEFAULT_MAX_LINE_LEN (8 * 1024)
+
+struct h1_req_parser {
+ struct httpreq *req;
+ struct bufq scratch;
+ size_t scratch_skip;
+ const char *line;
+ size_t max_line_len;
+ size_t line_len;
+ bool done;
+};
+
+void Curl_h1_req_parse_init(struct h1_req_parser *parser, size_t max_line_len);
+void Curl_h1_req_parse_free(struct h1_req_parser *parser);
+
+ssize_t Curl_h1_req_parse_read(struct h1_req_parser *parser,
+ const char *buf, size_t buflen,
+ const char *scheme_default, int options,
+ CURLcode *err);
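A usage sketch for the parser (illustrative only, not part of the diff; the request bytes are hypothetical):

    struct h1_req_parser parser;
    const char *buf = "GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n";
    CURLcode err;
    ssize_t n;

    Curl_h1_req_parse_init(&parser, H1_PARSE_DEFAULT_MAX_LINE_LEN);
    n = Curl_h1_req_parse_read(&parser, buf, strlen(buf), "http",
                               H1_PARSE_OPT_NONE, &err);
    if(n >= 0 && parser.done) {
      /* parser.req now holds method "GET", path "/index.html" and the Host
         header; the scheme falls back to the given default "http" */
    }
    Curl_h1_req_parse_free(&parser);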
+
+CURLcode Curl_h1_req_dprint(const struct httpreq *req,
+ struct dynbuf *dbuf);
+
+
+#endif /* !CURL_DISABLE_HTTP */
+#endif /* HEADER_CURL_HTTP1_H */
diff --git a/libs/libcurl/src/http2.c b/libs/libcurl/src/http2.c index 4defa7e2f7..9e9a7e3e72 100644 --- a/libs/libcurl/src/http2.c +++ b/libs/libcurl/src/http2.c @@ -25,8 +25,11 @@ #include "curl_setup.h"
#ifdef USE_NGHTTP2
+#include <stdint.h>
#include <nghttp2/nghttp2.h>
#include "urldata.h"
+#include "bufq.h"
+#include "http1.h"
#include "http2.h"
#include "http.h"
#include "sendf.h"
@@ -35,21 +38,19 @@ #include "strcase.h"
#include "multiif.h"
#include "url.h"
+#include "urlapi-int.h"
#include "cfilters.h"
#include "connect.h"
#include "strtoofft.h"
#include "strdup.h"
#include "transfer.h"
#include "dynbuf.h"
-#include "h2h3.h"
#include "headers.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
-#define H2_BUFSIZE 32768
-
#if (NGHTTP2_VERSION_NUM < 0x010c00)
#error too old nghttp2 version, upgrade!
#endif
@@ -62,8 +63,30 @@ #define NGHTTP2_HAS_SET_LOCAL_WINDOW_SIZE 1
#endif
-#define HTTP2_HUGE_WINDOW_SIZE (32 * 1024 * 1024) /* 32 MB */
+/* buffer dimensioning:
+ * use 16K as chunk size, as that fits H2 DATA frames well */
+#define H2_CHUNK_SIZE (16 * 1024)
+/* this is how much we want "in flight" for a stream */
+#define H2_STREAM_WINDOW_SIZE (10 * 1024 * 1024)
+/* on receiving from TLS, we prep for holding a full stream window */
+#define H2_NW_RECV_CHUNKS (H2_STREAM_WINDOW_SIZE / H2_CHUNK_SIZE)
+/* on send into TLS, we just want to accumulate small frames */
+#define H2_NW_SEND_CHUNKS 1
+/* stream recv/send chunks are a result of window / chunk sizes */
+#define H2_STREAM_RECV_CHUNKS (H2_STREAM_WINDOW_SIZE / H2_CHUNK_SIZE)
+/* keep a smaller stream upload buffer (the default h2 window size) to have
+ * our progress bars and "upload done" reporting closer to reality */
+#define H2_STREAM_SEND_CHUNKS ((64 * 1024) / H2_CHUNK_SIZE)
+/* spare chunks we keep for a full window */
+#define H2_STREAM_POOL_SPARES (H2_STREAM_WINDOW_SIZE / H2_CHUNK_SIZE)
+
+/* We need to accommodate the max number of streams with their window
+ * sizes on the overall connection. Streams might become PAUSED which
+ * will block their received QUOTA in the connection window. And if we
+ * run out of space, the server is blocked from sending us any data.
+ * See #10988 for an issue with this. */
+#define HTTP2_HUGE_WINDOW_SIZE (100 * H2_STREAM_WINDOW_SIZE)
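For orientation (not part of the diff), the sizes these macros work out to:

    H2_CHUNK_SIZE          = 16 KiB
    H2_STREAM_WINDOW_SIZE  = 10 MiB
    H2_NW_RECV_CHUNKS      = 10 MiB / 16 KiB = 640 chunks
    H2_STREAM_RECV_CHUNKS  = 10 MiB / 16 KiB = 640 chunks
    H2_STREAM_SEND_CHUNKS  = 64 KiB / 16 KiB = 4 chunks
    H2_STREAM_POOL_SPARES  = 640 chunks
    HTTP2_HUGE_WINDOW_SIZE = 100 * 10 MiB = 1000 MiB connection window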
#define H2_SETTINGS_IV_LEN 3
#define H2_BINSETTINGS_LEN 80
@@ -75,7 +98,7 @@ static int populate_settings(nghttp2_settings_entry *iv, iv[0].value = Curl_multi_max_concurrent_streams(data->multi);
iv[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
- iv[1].value = HTTP2_HUGE_WINDOW_SIZE;
+ iv[1].value = H2_STREAM_WINDOW_SIZE;
iv[2].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH;
iv[2].value = data->multi->push_cb != NULL;
@@ -101,22 +124,14 @@ struct cf_h2_ctx { /* The easy handle used in the current filter call, cleared at return */
struct cf_call_data call_data;
- char *inbuf; /* buffer to receive data from underlying socket */
- size_t inbuflen; /* number of bytes filled in inbuf */
- size_t nread_inbuf; /* number of bytes read from in inbuf */
-
- struct dynbuf outbuf;
+ struct bufq inbufq; /* network input */
+ struct bufq outbufq; /* network output */
+ struct bufc_pool stream_bufcp; /* spares for stream buffers */
- /* We need separate buffer for transmission and reception because we
- may call nghttp2_session_send() after the
- nghttp2_session_mem_recv() but mem buffer is still not full. In
- this case, we wrongly sends the content of mem buffer if we share
- them for both cases. */
- int32_t pause_stream_id; /* stream ID which paused
- nghttp2_session_mem_recv */
- size_t drain_total; /* sum of all stream's UrlState.drain */
+  size_t drain_total; /* sum of all streams' UrlState drain */
int32_t goaway_error;
int32_t last_stream_id;
+ BIT(conn_closed);
BIT(goaway);
BIT(enable_push);
};
@@ -125,7 +140,6 @@ struct cf_h2_ctx { #define CF_CTX_CALL_DATA(cf) \
((struct cf_h2_ctx *)(cf)->ctx)->call_data
-
static void cf_h2_ctx_clear(struct cf_h2_ctx *ctx)
{
struct cf_call_data save = ctx->call_data;
@@ -133,8 +147,9 @@ static void cf_h2_ctx_clear(struct cf_h2_ctx *ctx) if(ctx->h2) {
nghttp2_session_del(ctx->h2);
}
- free(ctx->inbuf);
- Curl_dyn_free(&ctx->outbuf);
+ Curl_bufq_free(&ctx->inbufq);
+ Curl_bufq_free(&ctx->outbufq);
+ Curl_bufcp_free(&ctx->stream_bufcp);
memset(ctx, 0, sizeof(*ctx));
ctx->call_data = save;
}
@@ -147,26 +162,202 @@ static void cf_h2_ctx_free(struct cf_h2_ctx *ctx) }
}
+static CURLcode h2_progress_egress(struct Curl_cfilter *cf,
+ struct Curl_easy *data);
+
+/**
+ * All about the H3 internals of a stream
+ */
+struct stream_ctx {
+ /*********** for HTTP/2 we store stream-local data here *************/
+ int32_t id; /* HTTP/2 protocol identifier for stream */
+ struct bufq recvbuf; /* response buffer */
+ struct bufq sendbuf; /* request buffer */
+ struct dynhds resp_trailers; /* response trailer fields */
+ size_t resp_hds_len; /* amount of response header bytes in recvbuf */
+ curl_off_t upload_left; /* number of request bytes left to upload */
+
+ char **push_headers; /* allocated array */
+ size_t push_headers_used; /* number of entries filled in */
+ size_t push_headers_alloc; /* number of entries allocated */
+
+ int status_code; /* HTTP response status code */
+ uint32_t error; /* stream error code */
+ bool closed; /* TRUE on stream close */
+ bool reset; /* TRUE on stream reset */
+ bool close_handled; /* TRUE if stream closure is handled by libcurl */
+ bool bodystarted;
+  bool send_closed; /* transfer is done sending, but we might still have
+ buffered data in stream->sendbuf to upload. */
+};
+
+#define H2_STREAM_CTX(d) ((struct stream_ctx *)(((d) && (d)->req.p.http)? \
+ ((struct HTTP *)(d)->req.p.http)->h2_ctx \
+ : NULL))
+#define H2_STREAM_LCTX(d) ((struct HTTP *)(d)->req.p.http)->h2_ctx
+#define H2_STREAM_ID(d) (H2_STREAM_CTX(d)? \
+ H2_STREAM_CTX(d)->id : -2)
+
+/*
+ * Mark this transfer to get "drained".
+ */
+static void drain_stream(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct stream_ctx *stream)
+{
+ unsigned char bits;
+
+ (void)cf;
+ bits = CURL_CSELECT_IN;
+ if(!stream->send_closed && stream->upload_left)
+ bits |= CURL_CSELECT_OUT;
+ if(data->state.dselect_bits != bits) {
+ data->state.dselect_bits = bits;
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ }
+}
+
+static CURLcode http2_data_setup(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct stream_ctx **pstream)
+{
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream;
+
+ (void)cf;
+ DEBUGASSERT(data);
+ if(!data->req.p.http) {
+ failf(data, "initialization failure, transfer not http initialized");
+ return CURLE_FAILED_INIT;
+ }
+ stream = H2_STREAM_CTX(data);
+ if(stream) {
+ *pstream = stream;
+ return CURLE_OK;
+ }
+
+ stream = calloc(1, sizeof(*stream));
+ if(!stream)
+ return CURLE_OUT_OF_MEMORY;
+
+ stream->id = -1;
+ Curl_bufq_initp(&stream->sendbuf, &ctx->stream_bufcp,
+ H2_STREAM_SEND_CHUNKS, BUFQ_OPT_NONE);
+ Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
+ H2_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
+ Curl_dynhds_init(&stream->resp_trailers, 0, DYN_H2_TRAILERS);
+ stream->resp_hds_len = 0;
+ stream->bodystarted = FALSE;
+ stream->status_code = -1;
+ stream->closed = FALSE;
+ stream->close_handled = FALSE;
+ stream->error = NGHTTP2_NO_ERROR;
+ stream->upload_left = 0;
+
+ H2_STREAM_LCTX(data) = stream;
+ *pstream = stream;
+ return CURLE_OK;
+}
+
+static void http2_data_done(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool premature)
+{
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
+
+ DEBUGASSERT(ctx);
+ (void)premature;
+ if(!stream)
+ return;
+
+ if(ctx->h2) {
+ if(!stream->closed && stream->id > 0) {
+ /* RST_STREAM */
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] premature DATA_DONE, RST stream",
+ stream->id));
+ if(!nghttp2_submit_rst_stream(ctx->h2, NGHTTP2_FLAG_NONE,
+ stream->id, NGHTTP2_STREAM_CLOSED))
+ (void)nghttp2_session_send(ctx->h2);
+ }
+ if(!Curl_bufq_is_empty(&stream->recvbuf)) {
+ /* Anything in the recvbuf is still being counted
+ * in stream and connection window flow control. Need
+ * to free that space or the connection window might get
+ * exhausted eventually. */
+ nghttp2_session_consume(ctx->h2, stream->id,
+ Curl_bufq_len(&stream->recvbuf));
+      /* give WINDOW_UPDATE a chance to be sent, but ignore any error */
+ (void)h2_progress_egress(cf, data);
+ }
+
+ /* -1 means unassigned and 0 means cleared */
+ if(nghttp2_session_get_stream_user_data(ctx->h2, stream->id)) {
+ int rv = nghttp2_session_set_stream_user_data(ctx->h2,
+ stream->id, 0);
+ if(rv) {
+ infof(data, "http/2: failed to clear user_data for stream %u",
+ stream->id);
+ DEBUGASSERT(0);
+ }
+ }
+ }
+
+ Curl_bufq_free(&stream->sendbuf);
+ Curl_bufq_free(&stream->recvbuf);
+ Curl_dynhds_free(&stream->resp_trailers);
+ if(stream->push_headers) {
+    /* free push headers if they were not already used and freed */
+ for(; stream->push_headers_used > 0; --stream->push_headers_used) {
+ free(stream->push_headers[stream->push_headers_used - 1]);
+ }
+ free(stream->push_headers);
+ stream->push_headers = NULL;
+ }
+
+ free(stream);
+ H2_STREAM_LCTX(data) = NULL;
+}
+
static int h2_client_new(struct Curl_cfilter *cf,
nghttp2_session_callbacks *cbs)
{
struct cf_h2_ctx *ctx = cf->ctx;
-
-#if NGHTTP2_VERSION_NUM < 0x013200
- /* before 1.50.0 */
- return nghttp2_session_client_new(&ctx->h2, cbs, cf);
-#else
nghttp2_option *o;
+
int rc = nghttp2_option_new(&o);
if(rc)
return rc;
+  /* We handle window updates ourselves to enforce buffer limits */
+ nghttp2_option_set_no_auto_window_update(o, 1);
+#if NGHTTP2_VERSION_NUM >= 0x013200
+ /* with 1.50.0 */
/* turn off RFC 9113 leading and trailing white spaces validation against
HTTP field value. */
nghttp2_option_set_no_rfc9113_leading_and_trailing_ws_validation(o, 1);
+#endif
rc = nghttp2_session_client_new2(&ctx->h2, cbs, cf, o);
nghttp2_option_del(o);
return rc;
-#endif
+}
+
+static ssize_t nw_in_reader(void *reader_ctx,
+ unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct Curl_cfilter *cf = reader_ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+
+ return Curl_conn_cf_recv(cf->next, data, (char *)buf, buflen, err);
+}
+
+static ssize_t nw_out_writer(void *writer_ctx,
+ const unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct Curl_cfilter *cf = writer_ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+
+ return Curl_conn_cf_send(cf->next, data, (const char *)buf, buflen, err);
}
static ssize_t send_callback(nghttp2_session *h2,
@@ -201,37 +392,6 @@ static void multi_connchanged(struct Curl_multi *multi) multi->recheckstate = TRUE;
}
-static CURLcode http2_data_setup(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct HTTP *stream = data->req.p.http;
-
- (void)cf;
- DEBUGASSERT(stream);
- DEBUGASSERT(data->state.buffer);
-
- stream->stream_id = -1;
-
- Curl_dyn_init(&stream->header_recvbuf, DYN_H2_HEADERS);
- Curl_dyn_init(&stream->trailer_recvbuf, DYN_H2_TRAILERS);
-
- stream->bodystarted = FALSE;
- stream->status_code = -1;
- stream->pausedata = NULL;
- stream->pauselen = 0;
- stream->closed = FALSE;
- stream->close_handled = FALSE;
- stream->memlen = 0;
- stream->error = NGHTTP2_NO_ERROR;
- stream->upload_left = 0;
- stream->upload_mem = NULL;
- stream->upload_len = 0;
- stream->mem = data->state.buffer;
- stream->len = data->set.buffer_size;
-
- return CURLE_OK;
-}
-
/*
* Initialize the cfilter context
*/
@@ -240,17 +400,16 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf, bool via_h1_upgrade)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream;
CURLcode result = CURLE_OUT_OF_MEMORY;
int rc;
nghttp2_session_callbacks *cbs = NULL;
DEBUGASSERT(!ctx->h2);
- ctx->inbuf = malloc(H2_BUFSIZE);
- if(!ctx->inbuf)
- goto out;
- /* we want to aggregate small frames, SETTINGS, PRIO, UPDATES */
- Curl_dyn_init(&ctx->outbuf, 4*1024);
+ Curl_bufcp_init(&ctx->stream_bufcp, H2_CHUNK_SIZE, H2_STREAM_POOL_SPARES);
+ Curl_bufq_initp(&ctx->inbufq, &ctx->stream_bufcp, H2_NW_RECV_CHUNKS, 0);
+ Curl_bufq_initp(&ctx->outbufq, &ctx->stream_bufcp, H2_NW_SEND_CHUNKS, 0);
+ ctx->last_stream_id = 2147483647;
rc = nghttp2_session_callbacks_new(&cbs);
if(rc) {
@@ -276,10 +435,6 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf, }
ctx->max_concurrent_streams = DEFAULT_MAX_CONCURRENT_STREAMS;
- result = http2_data_setup(cf, data);
- if(result)
- goto out;
-
if(via_h1_upgrade) {
/* HTTP/1.1 Upgrade issued. H2 Settings have already been submitted
* in the H1 request and we upgrade from there. This stream
@@ -289,7 +444,11 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf, binlen = populate_binsettings(binsettings, data);
- stream->stream_id = 1;
+ result = http2_data_setup(cf, data, &stream);
+ if(result)
+ goto out;
+ DEBUGASSERT(stream);
+ stream->id = 1;
/* queue SETTINGS frame (again) */
rc = nghttp2_session_upgrade2(ctx->h2, binsettings, binlen,
data->state.httpreq == HTTPREQ_HEAD,
@@ -301,11 +460,11 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf, goto out;
}
- rc = nghttp2_session_set_stream_user_data(ctx->h2, stream->stream_id,
+ rc = nghttp2_session_set_stream_user_data(ctx->h2, stream->id,
data);
if(rc) {
infof(data, "http/2: failed to set user_data for stream %u",
- stream->stream_id);
+ stream->id);
DEBUGASSERT(0);
}
}
@@ -313,9 +472,6 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf, nghttp2_settings_entry iv[H2_SETTINGS_IV_LEN];
int ivlen;
- /* H2 Settings need to be submitted. Stream is not open yet. */
- DEBUGASSERT(stream->stream_id == -1);
-
ivlen = populate_settings(iv, data);
rc = nghttp2_submit_settings(ctx->h2, NGHTTP2_FLAG_NONE,
iv, ivlen);
@@ -345,34 +501,58 @@ out: return result;
}
-static CURLcode h2_session_send(struct Curl_cfilter *cf,
- struct Curl_easy *data);
-static int h2_process_pending_input(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- CURLcode *err);
-
/*
- * http2_stream_free() free HTTP2 stream related data
+ * Returns nonzero if current HTTP/2 session should be closed.
*/
-static void http2_stream_free(struct HTTP *stream)
+static int should_close_session(struct cf_h2_ctx *ctx)
{
- if(stream) {
- Curl_dyn_free(&stream->header_recvbuf);
- for(; stream->push_headers_used > 0; --stream->push_headers_used) {
- free(stream->push_headers[stream->push_headers_used - 1]);
- }
- free(stream->push_headers);
- stream->push_headers = NULL;
- }
+ return ctx->drain_total == 0 && !nghttp2_session_want_read(ctx->h2) &&
+ !nghttp2_session_want_write(ctx->h2);
}
/*
- * Returns nonzero if current HTTP/2 session should be closed.
+ * Processes pending input left in network input buffer.
+ * This function returns 0 if it succeeds, or -1 and error code will
+ * be assigned to *err.
*/
-static int should_close_session(struct cf_h2_ctx *ctx)
+static int h2_process_pending_input(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ CURLcode *err)
{
- return ctx->drain_total == 0 && !nghttp2_session_want_read(ctx->h2) &&
- !nghttp2_session_want_write(ctx->h2);
+ struct cf_h2_ctx *ctx = cf->ctx;
+ const unsigned char *buf;
+ size_t blen;
+ ssize_t rv;
+
+ while(Curl_bufq_peek(&ctx->inbufq, &buf, &blen)) {
+
+ rv = nghttp2_session_mem_recv(ctx->h2, (const uint8_t *)buf, blen);
+ if(rv < 0) {
+ failf(data,
+ "process_pending_input: nghttp2_session_mem_recv() returned "
+ "%zd:%s", rv, nghttp2_strerror((int)rv));
+ *err = CURLE_RECV_ERROR;
+ return -1;
+ }
+ Curl_bufq_skip(&ctx->inbufq, (size_t)rv);
+ if(Curl_bufq_is_empty(&ctx->inbufq)) {
+ break;
+ }
+ else {
+ DEBUGF(LOG_CF(data, cf, "process_pending_input: %zu bytes left "
+ "in connection buffer", Curl_bufq_len(&ctx->inbufq)));
+ }
+ }
+
+ if(nghttp2_session_check_request_allowed(ctx->h2) == 0) {
+ /* No more requests are allowed in the current session, so
+ the connection may not be reused. This is set when a
+ GOAWAY frame has been received or when the limit of stream
+ identifiers has been reached. */
+ connclose(cf->conn, "http/2: No new requests allowed");
+ }
+
+ return 0;
}
/*
@@ -401,13 +581,10 @@ static bool http2_connisalive(struct Curl_cfilter *cf, struct Curl_easy *data, *input_pending = FALSE;
Curl_attach_connection(data, cf->conn);
- nread = Curl_conn_cf_recv(cf->next, data,
- ctx->inbuf, H2_BUFSIZE, &result);
+ nread = Curl_bufq_slurp(&ctx->inbufq, nw_in_reader, cf, &result);
if(nread != -1) {
- DEBUGF(LOG_CF(data, cf, "%d bytes stray data read before trying "
- "h2 connection", (int)nread));
- ctx->nread_inbuf = 0;
- ctx->inbuflen = nread;
+ DEBUGF(LOG_CF(data, cf, "%zd bytes stray data read before trying "
+ "h2 connection", nread));
if(h2_process_pending_input(cf, data, &result) < 0)
/* immediate error, considered dead */
alive = FALSE;
@@ -456,30 +633,23 @@ void Curl_http2_ver(char *p, size_t len) (void)msnprintf(p, len, "nghttp2/%s", h2->version_str);
}
-static CURLcode flush_output(struct Curl_cfilter *cf,
+static CURLcode nw_out_flush(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
struct cf_h2_ctx *ctx = cf->ctx;
- size_t buflen = Curl_dyn_len(&ctx->outbuf);
- ssize_t written;
+ ssize_t nwritten;
CURLcode result;
- if(!buflen)
+ (void)data;
+ if(Curl_bufq_is_empty(&ctx->outbufq))
return CURLE_OK;
- DEBUGF(LOG_CF(data, cf, "h2 conn flush %zu bytes", buflen));
- written = Curl_conn_cf_send(cf->next, data, Curl_dyn_ptr(&ctx->outbuf),
- buflen, &result);
- if(written < 0) {
+ DEBUGF(LOG_CF(data, cf, "h2 conn flush %zu bytes",
+ Curl_bufq_len(&ctx->outbufq)));
+ nwritten = Curl_bufq_pass(&ctx->outbufq, nw_out_writer, cf, &result);
+ if(nwritten < 0 && result != CURLE_AGAIN) {
return result;
}
- if((size_t)written < buflen) {
- Curl_dyn_tail(&ctx->outbuf, buflen - (size_t)written);
- return CURLE_AGAIN;
- }
- else {
- Curl_dyn_reset(&ctx->outbuf);
- }
return CURLE_OK;
}
@@ -495,49 +665,27 @@ static ssize_t send_callback(nghttp2_session *h2, struct Curl_cfilter *cf = userp;
struct cf_h2_ctx *ctx = cf->ctx;
struct Curl_easy *data = CF_DATA_CURRENT(cf);
- ssize_t written;
+ ssize_t nwritten;
CURLcode result = CURLE_OK;
- size_t buflen = Curl_dyn_len(&ctx->outbuf);
(void)h2;
(void)flags;
DEBUGASSERT(data);
- if(blen < 1024 && (buflen + blen + 1 < ctx->outbuf.toobig)) {
- result = Curl_dyn_addn(&ctx->outbuf, buf, blen);
- if(result) {
- failf(data, "Failed to add data to output buffer");
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ nwritten = Curl_bufq_write_pass(&ctx->outbufq, buf, blen,
+ nw_out_writer, cf, &result);
+ if(nwritten < 0) {
+ if(result == CURLE_AGAIN) {
+ return NGHTTP2_ERR_WOULDBLOCK;
}
- return blen;
- }
- if(buflen) {
- /* not adding, flush buffer */
- result = flush_output(cf, data);
- if(result) {
- if(result == CURLE_AGAIN) {
- return NGHTTP2_ERR_WOULDBLOCK;
- }
- failf(data, "Failed sending HTTP2 data");
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
- }
-
- DEBUGF(LOG_CF(data, cf, "h2 conn send %zu bytes", blen));
- written = Curl_conn_cf_send(cf->next, data, buf, blen, &result);
- if(result == CURLE_AGAIN) {
- return NGHTTP2_ERR_WOULDBLOCK;
- }
-
- if(written == -1) {
failf(data, "Failed sending HTTP2 data");
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
- if(!written)
+ if(!nwritten)
return NGHTTP2_ERR_WOULDBLOCK;
- return written;
+ return nwritten;
}
@@ -558,8 +706,8 @@ char *curl_pushheader_bynum(struct curl_pushheaders *h, size_t num) if(!h || !GOOD_EASY_HANDLE(h->data))
return NULL;
else {
- struct HTTP *stream = h->data->req.p.http;
- if(num < stream->push_headers_used)
+ struct stream_ctx *stream = H2_STREAM_CTX(h->data);
+ if(stream && num < stream->push_headers_used)
return stream->push_headers[num];
}
return NULL;
@@ -570,6 +718,9 @@ char *curl_pushheader_bynum(struct curl_pushheaders *h, size_t num) */
char *curl_pushheader_byname(struct curl_pushheaders *h, const char *header)
{
+ struct stream_ctx *stream;
+ size_t len;
+ size_t i;
/* Verify that we got a good easy handle in the push header struct,
mostly to detect rubbish input fast(er). Also empty header name
is just a rubbish too. We have to allow ":" at the beginning of
@@ -579,48 +730,21 @@ char *curl_pushheader_byname(struct curl_pushheaders *h, const char *header) if(!h || !GOOD_EASY_HANDLE(h->data) || !header || !header[0] ||
!strcmp(header, ":") || strchr(header + 1, ':'))
return NULL;
- else {
- struct HTTP *stream = h->data->req.p.http;
- size_t len = strlen(header);
- size_t i;
- for(i = 0; i<stream->push_headers_used; i++) {
- if(!strncmp(header, stream->push_headers[i], len)) {
- /* sub-match, make sure that it is followed by a colon */
- if(stream->push_headers[i][len] != ':')
- continue;
- return &stream->push_headers[i][len + 1];
- }
- }
- }
- return NULL;
-}
-/*
- * This specific transfer on this connection has been "drained".
- */
-static void drained_transfer(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- if(data->state.drain) {
- struct cf_h2_ctx *ctx = cf->ctx;
- DEBUGASSERT(ctx->drain_total > 0);
- ctx->drain_total--;
- data->state.drain = 0;
- }
-}
+ stream = H2_STREAM_CTX(h->data);
+ if(!stream)
+ return NULL;
-/*
- * Mark this transfer to get "drained".
- */
-static void drain_this(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- if(!data->state.drain) {
- struct cf_h2_ctx *ctx = cf->ctx;
- data->state.drain = 1;
- ctx->drain_total++;
- DEBUGASSERT(ctx->drain_total > 0);
+ len = strlen(header);
+ for(i = 0; i<stream->push_headers_used; i++) {
+ if(!strncmp(header, stream->push_headers[i], len)) {
+ /* sub-match, make sure that it is followed by a colon */
+ if(stream->push_headers[i][len] != ':')
+ continue;
+ return &stream->push_headers[i][len + 1];
+ }
}
+ return NULL;
}
static struct Curl_easy *h2_duphandle(struct Curl_cfilter *cf,
@@ -634,8 +758,10 @@ static struct Curl_easy *h2_duphandle(struct Curl_cfilter *cf, (void)Curl_close(&second);
}
else {
+ struct stream_ctx *second_stream;
+
second->req.p.http = http;
- http2_data_setup(cf, second);
+ http2_data_setup(cf, second, &second_stream);
second->state.priority.weight = data->state.priority.weight;
}
}
@@ -654,7 +780,7 @@ static int set_transfer_url(struct Curl_easy *data, if(!u)
return 5;
- v = curl_pushheader_byname(hp, H2H3_PSEUDO_SCHEME);
+ v = curl_pushheader_byname(hp, HTTP_PSEUDO_SCHEME);
if(v) {
uc = curl_url_set(u, CURLUPART_SCHEME, v, 0);
if(uc) {
@@ -663,16 +789,16 @@ static int set_transfer_url(struct Curl_easy *data, }
}
- v = curl_pushheader_byname(hp, H2H3_PSEUDO_AUTHORITY);
+ v = curl_pushheader_byname(hp, HTTP_PSEUDO_AUTHORITY);
if(v) {
- uc = curl_url_set(u, CURLUPART_HOST, v, 0);
+ uc = Curl_url_set_authority(u, v, CURLU_DISALLOW_USER);
if(uc) {
rc = 2;
goto fail;
}
}
- v = curl_pushheader_byname(hp, H2H3_PSEUDO_PATH);
+ v = curl_pushheader_byname(hp, HTTP_PSEUDO_PATH);
if(v) {
uc = curl_url_set(u, CURLUPART_PATH, v, 0);
if(uc) {
@@ -684,7 +810,7 @@ static int set_transfer_url(struct Curl_easy *data, uc = curl_url_get(u, CURLUPART_URL, &url, 0);
if(uc)
rc = 4;
- fail:
+fail:
curl_url_cleanup(u);
if(rc)
return rc;
@@ -696,6 +822,16 @@ static int set_transfer_url(struct Curl_easy *data, return 0;
}
+static void discard_newhandle(struct Curl_cfilter *cf,
+ struct Curl_easy *newhandle)
+{
+ if(!newhandle->req.p.http) {
+ http2_data_done(cf, newhandle, TRUE);
+ newhandle->req.p.http = NULL;
+ }
+ (void)Curl_close(&newhandle);
+}
+
static int push_promise(struct Curl_cfilter *cf,
struct Curl_easy *data,
const nghttp2_push_promise *frame)
@@ -703,13 +839,14 @@ static int push_promise(struct Curl_cfilter *cf, struct cf_h2_ctx *ctx = cf->ctx;
int rv; /* one of the CURL_PUSH_* defines */
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] PUSH_PROMISE received",
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] PUSH_PROMISE received",
frame->promised_stream_id));
if(data->multi->push_cb) {
- struct HTTP *stream;
- struct HTTP *newstream;
+ struct stream_ctx *stream;
+ struct stream_ctx *newstream;
struct curl_pushheaders heads;
CURLMcode rc;
+ CURLcode result;
size_t i;
/* clone the parent */
struct Curl_easy *newhandle = h2_duphandle(cf, data);
@@ -724,21 +861,30 @@ static int push_promise(struct Curl_cfilter *cf, /* ask the application */
DEBUGF(LOG_CF(data, cf, "Got PUSH_PROMISE, ask application"));
- stream = data->req.p.http;
+ stream = H2_STREAM_CTX(data);
if(!stream) {
failf(data, "Internal NULL stream");
- (void)Curl_close(&newhandle);
+ discard_newhandle(cf, newhandle);
rv = CURL_PUSH_DENY;
goto fail;
}
rv = set_transfer_url(newhandle, &heads);
if(rv) {
- (void)Curl_close(&newhandle);
+ discard_newhandle(cf, newhandle);
rv = CURL_PUSH_DENY;
goto fail;
}
+ result = http2_data_setup(cf, newhandle, &newstream);
+ if(result) {
+ failf(data, "error setting up stream: %d", result);
+ discard_newhandle(cf, newhandle);
+ rv = CURL_PUSH_DENY;
+ goto fail;
+ }
+ DEBUGASSERT(stream);
+
Curl_set_in_callback(data, true);
rv = data->multi->push_cb(data, newhandle,
stream->push_headers_used, &heads,
@@ -755,14 +901,11 @@ static int push_promise(struct Curl_cfilter *cf, if(rv) {
DEBUGASSERT((rv > CURL_PUSH_OK) && (rv <= CURL_PUSH_ERROROUT));
/* denied, kill off the new handle again */
- http2_stream_free(newhandle->req.p.http);
- newhandle->req.p.http = NULL;
- (void)Curl_close(&newhandle);
+ discard_newhandle(cf, newhandle);
goto fail;
}
- newstream = newhandle->req.p.http;
- newstream->stream_id = frame->promised_stream_id;
+ newstream->id = frame->promised_stream_id;
newhandle->req.maxdownload = -1;
newhandle->req.size = -1;
@@ -771,125 +914,88 @@ static int push_promise(struct Curl_cfilter *cf, rc = Curl_multi_add_perform(data->multi, newhandle, cf->conn);
if(rc) {
infof(data, "failed to add handle to multi");
- http2_stream_free(newhandle->req.p.http);
- newhandle->req.p.http = NULL;
- Curl_close(&newhandle);
+ discard_newhandle(cf, newhandle);
rv = CURL_PUSH_DENY;
goto fail;
}
rv = nghttp2_session_set_stream_user_data(ctx->h2,
- frame->promised_stream_id,
+ newstream->id,
newhandle);
if(rv) {
infof(data, "failed to set user_data for stream %u",
- frame->promised_stream_id);
+ newstream->id);
DEBUGASSERT(0);
rv = CURL_PUSH_DENY;
goto fail;
}
- Curl_dyn_init(&newstream->header_recvbuf, DYN_H2_HEADERS);
- Curl_dyn_init(&newstream->trailer_recvbuf, DYN_H2_TRAILERS);
}
else {
DEBUGF(LOG_CF(data, cf, "Got PUSH_PROMISE, ignore it"));
rv = CURL_PUSH_DENY;
}
- fail:
+fail:
return rv;
}
-static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
- void *userp)
+static CURLcode recvbuf_write_hds(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const char *buf, size_t blen)
+{
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
+ ssize_t nwritten;
+ CURLcode result;
+
+ (void)cf;
+ nwritten = Curl_bufq_write(&stream->recvbuf,
+ (const unsigned char *)buf, blen, &result);
+ if(nwritten < 0)
+ return result;
+ stream->resp_hds_len += (size_t)nwritten;
+ DEBUGASSERT((size_t)nwritten == blen);
+ return CURLE_OK;
+}
+
+static CURLcode on_stream_frame(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const nghttp2_frame *frame)
{
- struct Curl_cfilter *cf = userp;
struct cf_h2_ctx *ctx = cf->ctx;
- struct Curl_easy *data_s = NULL;
- struct HTTP *stream = NULL;
- struct Curl_easy *data = CF_DATA_CURRENT(cf);
- int rv;
- size_t left, ncopy;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
int32_t stream_id = frame->hd.stream_id;
CURLcode result;
+ int rv;
- DEBUGASSERT(data);
- if(!stream_id) {
- /* stream ID zero is for connection-oriented stuff */
- DEBUGASSERT(data);
- switch(frame->hd.type) {
- case NGHTTP2_SETTINGS: {
- uint32_t max_conn = ctx->max_concurrent_streams;
- DEBUGF(LOG_CF(data, cf, "recv frame SETTINGS"));
- ctx->max_concurrent_streams = nghttp2_session_get_remote_settings(
- session, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
- ctx->enable_push = nghttp2_session_get_remote_settings(
- session, NGHTTP2_SETTINGS_ENABLE_PUSH) != 0;
- DEBUGF(LOG_CF(data, cf, "MAX_CONCURRENT_STREAMS == %d",
- ctx->max_concurrent_streams));
- DEBUGF(LOG_CF(data, cf, "ENABLE_PUSH == %s",
- ctx->enable_push ? "TRUE" : "false"));
- if(data && max_conn != ctx->max_concurrent_streams) {
- /* only signal change if the value actually changed */
- DEBUGF(LOG_CF(data, cf, "MAX_CONCURRENT_STREAMS now %u",
- ctx->max_concurrent_streams));
- multi_connchanged(data->multi);
- }
- break;
- }
- case NGHTTP2_GOAWAY:
- ctx->goaway = TRUE;
- ctx->goaway_error = frame->goaway.error_code;
- ctx->last_stream_id = frame->goaway.last_stream_id;
- if(data) {
- infof(data, "recveived GOAWAY, error=%d, last_stream=%u",
- ctx->goaway_error, ctx->last_stream_id);
- multi_connchanged(data->multi);
- }
- break;
- case NGHTTP2_WINDOW_UPDATE:
- DEBUGF(LOG_CF(data, cf, "recv frame WINDOW_UPDATE"));
- break;
- default:
- DEBUGF(LOG_CF(data, cf, "recv frame %x on 0", frame->hd.type));
- }
- return 0;
- }
- data_s = nghttp2_session_get_stream_user_data(session, stream_id);
- if(!data_s) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] No Curl_easy associated",
- stream_id));
- return 0;
- }
-
- stream = data_s->req.p.http;
if(!stream) {
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] No proto pointer", stream_id));
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] No proto pointer", stream_id));
+ return CURLE_FAILED_INIT;
}
switch(frame->hd.type) {
case NGHTTP2_DATA:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[DATA len=%zu pad=%zu], "
+ "buffered=%zu, window=%d/%d",
+ stream_id, frame->hd.length, frame->data.padlen,
+ Curl_bufq_len(&stream->recvbuf),
+ nghttp2_session_get_stream_effective_recv_data_length(
+ ctx->h2, stream->id),
+ nghttp2_session_get_stream_effective_local_window_size(
+ ctx->h2, stream->id)));
/* If !body started on this stream, then receiving DATA is illegal. */
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] recv frame DATA", stream_id));
if(!stream->bodystarted) {
- rv = nghttp2_submit_rst_stream(session, NGHTTP2_FLAG_NONE,
+ rv = nghttp2_submit_rst_stream(ctx->h2, NGHTTP2_FLAG_NONE,
stream_id, NGHTTP2_PROTOCOL_ERROR);
if(nghttp2_is_fatal(rv)) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ return CURLE_RECV_ERROR;
}
}
if(frame->hd.flags & NGHTTP2_FLAG_END_STREAM) {
- /* Stream has ended. If there is pending data, ensure that read
- will occur to consume it. */
- if(!data->state.drain && stream->memlen) {
- drain_this(cf, data_s);
- Curl_expire(data, 0, EXPIRE_RUN_NOW);
- }
+ drain_stream(cf, data, stream);
}
break;
case NGHTTP2_HEADERS:
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] recv frame HEADERS", stream_id));
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[HEADERS]", stream_id));
if(stream->bodystarted) {
/* Only valid HEADERS after body started is trailer HEADERS. We
buffer them in on_header callback. */
@@ -900,7 +1006,7 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame, stream->status_code. Fuzzing has proven this can still be reached
without status code having been set. */
if(stream->status_code == -1)
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ return CURLE_RECV_ERROR;
/* Only final status code signals the end of header */
if(stream->status_code / 100 != 1) {
@@ -908,71 +1014,118 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame, stream->status_code = -1;
}
- result = Curl_dyn_addn(&stream->header_recvbuf, STRCONST("\r\n"));
+ result = recvbuf_write_hds(cf, data, STRCONST("\r\n"));
if(result)
- return NGHTTP2_ERR_CALLBACK_FAILURE;
-
- left = Curl_dyn_len(&stream->header_recvbuf) -
- stream->nread_header_recvbuf;
- ncopy = CURLMIN(stream->len, left);
-
- memcpy(&stream->mem[stream->memlen],
- Curl_dyn_ptr(&stream->header_recvbuf) +
- stream->nread_header_recvbuf,
- ncopy);
- stream->nread_header_recvbuf += ncopy;
-
- DEBUGASSERT(stream->mem);
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] %zu header bytes, at %p",
- stream_id, ncopy, (void *)stream->mem));
-
- stream->len -= ncopy;
- stream->memlen += ncopy;
+ return result;
- drain_this(cf, data_s);
- Curl_expire(data_s, 0, EXPIRE_RUN_NOW);
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] %zu header bytes",
+ stream_id, Curl_bufq_len(&stream->recvbuf)));
+ drain_stream(cf, data, stream);
break;
case NGHTTP2_PUSH_PROMISE:
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] recv PUSH_PROMISE", stream_id));
- rv = push_promise(cf, data_s, &frame->push_promise);
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[PUSH_PROMISE]", stream_id));
+ rv = push_promise(cf, data, &frame->push_promise);
if(rv) { /* deny! */
- int h2;
DEBUGASSERT((rv > CURL_PUSH_OK) && (rv <= CURL_PUSH_ERROROUT));
- h2 = nghttp2_submit_rst_stream(session, NGHTTP2_FLAG_NONE,
+ rv = nghttp2_submit_rst_stream(ctx->h2, NGHTTP2_FLAG_NONE,
frame->push_promise.promised_stream_id,
NGHTTP2_CANCEL);
- if(nghttp2_is_fatal(h2))
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ if(nghttp2_is_fatal(rv))
+ return CURLE_SEND_ERROR;
else if(rv == CURL_PUSH_ERROROUT) {
- DEBUGF(LOG_CF(data_s, cf, "Fail the parent stream (too)"));
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] fail in PUSH_PROMISE received",
+ stream_id));
+ return CURLE_RECV_ERROR;
}
}
break;
case NGHTTP2_RST_STREAM:
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] recv RST", stream_id));
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[RST]", stream_id));
stream->closed = TRUE;
stream->reset = TRUE;
- drain_this(cf, data);
- Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ stream->send_closed = TRUE;
+ data->req.keepon &= ~KEEP_SEND_HOLD;
+ drain_stream(cf, data, stream);
break;
case NGHTTP2_WINDOW_UPDATE:
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv WINDOW_UPDATE", stream_id));
- if((data_s->req.keepon & KEEP_SEND_HOLD) &&
- (data_s->req.keepon & KEEP_SEND)) {
- data_s->req.keepon &= ~KEEP_SEND_HOLD;
- drain_this(cf, data_s);
- Curl_expire(data_s, 0, EXPIRE_RUN_NOW);
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] un-holding after win update",
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[WINDOW_UPDATE]", stream_id));
+ if((data->req.keepon & KEEP_SEND_HOLD) &&
+ (data->req.keepon & KEEP_SEND)) {
+ data->req.keepon &= ~KEEP_SEND_HOLD;
+ drain_stream(cf, data, stream);
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] un-holding after win update",
stream_id));
}
break;
default:
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] recv frame %x",
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[%x]",
stream_id, frame->hd.type));
break;
}
- return 0;
+ return CURLE_OK;
+}
+
+static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
+ void *userp)
+{
+ struct Curl_cfilter *cf = userp;
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf), *data_s;
+ int32_t stream_id = frame->hd.stream_id;
+
+ DEBUGASSERT(data);
+ if(!stream_id) {
+ /* stream ID zero is for connection-oriented stuff */
+ DEBUGASSERT(data);
+ switch(frame->hd.type) {
+ case NGHTTP2_SETTINGS: {
+ uint32_t max_conn = ctx->max_concurrent_streams;
+ DEBUGF(LOG_CF(data, cf, "FRAME[SETTINGS]"));
+ ctx->max_concurrent_streams = nghttp2_session_get_remote_settings(
+ session, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
+ ctx->enable_push = nghttp2_session_get_remote_settings(
+ session, NGHTTP2_SETTINGS_ENABLE_PUSH) != 0;
+ DEBUGF(LOG_CF(data, cf, "MAX_CONCURRENT_STREAMS == %d",
+ ctx->max_concurrent_streams));
+ DEBUGF(LOG_CF(data, cf, "ENABLE_PUSH == %s",
+ ctx->enable_push ? "TRUE" : "false"));
+ if(data && max_conn != ctx->max_concurrent_streams) {
+ /* only signal change if the value actually changed */
+ DEBUGF(LOG_CF(data, cf, "MAX_CONCURRENT_STREAMS now %u",
+ ctx->max_concurrent_streams));
+ multi_connchanged(data->multi);
+ }
+ break;
+ }
+ case NGHTTP2_GOAWAY:
+ ctx->goaway = TRUE;
+ ctx->goaway_error = frame->goaway.error_code;
+ ctx->last_stream_id = frame->goaway.last_stream_id;
+ if(data) {
+ DEBUGF(LOG_CF(data, cf, "FRAME[GOAWAY, error=%d, last_stream=%u]",
+ ctx->goaway_error, ctx->last_stream_id));
+ infof(data, "received GOAWAY, error=%d, last_stream=%u",
+ ctx->goaway_error, ctx->last_stream_id);
+ multi_connchanged(data->multi);
+ }
+ break;
+ case NGHTTP2_WINDOW_UPDATE:
+ DEBUGF(LOG_CF(data, cf, "FRAME[WINDOW_UPDATE]"));
+ break;
+ default:
+ DEBUGF(LOG_CF(data, cf, "recv frame %x on 0", frame->hd.type));
+ }
+ return 0;
+ }
+
+ data_s = nghttp2_session_get_stream_user_data(session, stream_id);
+ if(!data_s) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] No Curl_easy associated",
+ stream_id));
+ return 0;
+ }
+
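+  /* Frames on a non-zero stream id are forwarded to the transfer that owns
+   * that stream; any CURLcode error from on_stream_frame() is reported to
+   * nghttp2 as a fatal callback failure. */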
+ return on_stream_frame(cf, data_s, frame)? NGHTTP2_ERR_CALLBACK_FAILURE : 0;
}
static int on_data_chunk_recv(nghttp2_session *session, uint8_t flags,
@@ -980,10 +1133,10 @@ static int on_data_chunk_recv(nghttp2_session *session, uint8_t flags,
const uint8_t *mem, size_t len, void *userp)
{
struct Curl_cfilter *cf = userp;
- struct cf_h2_ctx *ctx = cf->ctx;
- struct HTTP *stream;
+ struct stream_ctx *stream;
struct Curl_easy *data_s;
- size_t nread;
+ ssize_t nwritten;
+ CURLcode result;
(void)flags;
DEBUGASSERT(stream_id); /* should never be a zero stream ID here */
@@ -995,42 +1148,29 @@ static int on_data_chunk_recv(nghttp2_session *session, uint8_t flags,
/* Receiving a Stream ID not in the hash should not happen - unless
we have aborted a transfer artificially and there were more data
in the pipeline. Silently ignore. */
- DEBUGF(LOG_CF(CF_DATA_CURRENT(cf), cf, "[h2sid=%u] Data for unknown",
+ DEBUGF(LOG_CF(CF_DATA_CURRENT(cf), cf, "[h2sid=%d] Data for unknown",
stream_id));
+ /* consumed explicitly as no one will read it */
+ nghttp2_session_consume(session, stream_id, len);
return 0;
}
- stream = data_s->req.p.http;
+ stream = H2_STREAM_CTX(data_s);
if(!stream)
return NGHTTP2_ERR_CALLBACK_FAILURE;
- nread = CURLMIN(stream->len, len);
- memcpy(&stream->mem[stream->memlen], mem, nread);
-
- stream->len -= nread;
- stream->memlen += nread;
+ nwritten = Curl_bufq_write(&stream->recvbuf, mem, len, &result);
+ if(nwritten < 0) {
+ if(result != CURLE_AGAIN)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
- /* if we receive data for another handle, wake that up */
- if(CF_DATA_CURRENT(cf) != data_s) {
- drain_this(cf, data_s);
- Curl_expire(data_s, 0, EXPIRE_RUN_NOW);
+ nwritten = 0;
}
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] %zu DATA recvd, "
- "(buffer now holds %zu, %zu still free in %p)",
- stream_id, nread,
- stream->memlen, stream->len, (void *)stream->mem));
-
- if(nread < len) {
- stream->pausedata = mem + nread;
- stream->pauselen = len - nread;
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] %zu not recvd -> NGHTTP2_ERR_PAUSE",
- stream_id, len - nread));
- ctx->pause_stream_id = stream_id;
- drain_this(cf, data_s);
- return NGHTTP2_ERR_PAUSE;
- }
+ /* if we receive data for another handle, wake that up */
+ drain_stream(cf, data_s, stream);
+ DEBUGASSERT((size_t)nwritten == len);
return 0;
}
@@ -1038,9 +1178,8 @@ static int on_stream_close(nghttp2_session *session, int32_t stream_id,
uint32_t error_code, void *userp)
{
struct Curl_cfilter *cf = userp;
- struct cf_h2_ctx *ctx = cf->ctx;
struct Curl_easy *data_s;
- struct HTTP *stream;
+ struct stream_ctx *stream;
int rv;
(void)session;
@@ -1051,8 +1190,8 @@ static int on_stream_close(nghttp2_session *session, int32_t stream_id,
if(!data_s) {
return 0;
}
- stream = data_s->req.p.http;
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] on_stream_close(), %s (err %d)",
+ stream = H2_STREAM_CTX(data_s);
+ DEBUGF(LOG_CF(data_s, cf, "[h2sid=%d] on_stream_close(), %s (err %d)",
stream_id, nghttp2_http2_strerror(error_code), error_code));
if(!stream)
return NGHTTP2_ERR_CALLBACK_FAILURE;
@@ -1061,11 +1200,9 @@ static int on_stream_close(nghttp2_session *session, int32_t stream_id,
stream->error = error_code;
if(stream->error)
stream->reset = TRUE;
+ data_s->req.keepon &= ~KEEP_SEND_HOLD;
- if(CF_DATA_CURRENT(cf) != data_s) {
- drain_this(cf, data_s);
- Curl_expire(data_s, 0, EXPIRE_RUN_NOW);
- }
+ drain_stream(cf, data_s, stream);
/* remove `data_s` from the nghttp2 stream */
rv = nghttp2_session_set_stream_user_data(session, stream_id, 0);
@@ -1074,12 +1211,7 @@ static int on_stream_close(nghttp2_session *session, int32_t stream_id,
stream_id);
DEBUGASSERT(0);
}
- if(stream_id == ctx->pause_stream_id) {
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] closed the pause stream",
- stream_id));
- ctx->pause_stream_id = 0;
- }
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] closed now", stream_id));
+ DEBUGF(LOG_CF(data_s, cf, "[h2sid=%d] closed now", stream_id));
return 0;
}
@@ -1087,7 +1219,7 @@ static int on_begin_headers(nghttp2_session *session,
const nghttp2_frame *frame, void *userp)
{
struct Curl_cfilter *cf = userp;
- struct HTTP *stream;
+ struct stream_ctx *stream;
struct Curl_easy *data_s = NULL;
(void)cf;
@@ -1102,7 +1234,7 @@ static int on_begin_headers(nghttp2_session *session,
return 0;
}
- stream = data_s->req.p.http;
+ stream = H2_STREAM_CTX(data_s);
if(!stream || !stream->bodystarted) {
return 0;
}
@@ -1110,33 +1242,6 @@ static int on_begin_headers(nghttp2_session *session,
return 0;
}
-/* Decode HTTP status code. Returns -1 if no valid status code was
- decoded. */
-static int decode_status_code(const uint8_t *value, size_t len)
-{
- int i;
- int res;
-
- if(len != 3) {
- return -1;
- }
-
- res = 0;
-
- for(i = 0; i < 3; ++i) {
- char c = value[i];
-
- if(c < '0' || c > '9') {
- return -1;
- }
-
- res *= 10;
- res += c - '0';
- }
-
- return res;
-}
-
/* frame->hd.type is either NGHTTP2_HEADERS or NGHTTP2_PUSH_PROMISE */
static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
const uint8_t *name, size_t namelen,
@@ -1145,7 +1250,7 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
void *userp)
{
struct Curl_cfilter *cf = userp;
- struct HTTP *stream;
+ struct stream_ctx *stream;
struct Curl_easy *data_s;
int32_t stream_id = frame->hd.stream_id;
CURLcode result;
@@ -1160,7 +1265,7 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
internal error more than anything else! */
return NGHTTP2_ERR_CALLBACK_FAILURE;
- stream = data_s->req.p.http;
+ stream = H2_STREAM_CTX(data_s);
if(!stream) {
failf(data_s, "Internal NULL stream");
return NGHTTP2_ERR_CALLBACK_FAILURE;
@@ -1171,7 +1276,7 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
if(frame->hd.type == NGHTTP2_PUSH_PROMISE) {
char *h;
- if(!strcmp(H2H3_PSEUDO_AUTHORITY, (const char *)name)) {
+ if(!strcmp(HTTP_PSEUDO_AUTHORITY, (const char *)name)) {
/* pseudo headers are lower case */
int rc = 0;
char *check = aprintf("%s:%d", cf->conn->host.name,
@@ -1230,89 +1335,90 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
if(stream->bodystarted) {
/* This is a trailer */
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] trailer: %.*s: %.*s",
- stream->stream_id,
+ DEBUGF(LOG_CF(data_s, cf, "[h2sid=%d] trailer: %.*s: %.*s",
+ stream->id,
(int)namelen, name,
(int)valuelen, value));
- result = Curl_dyn_addf(&stream->trailer_recvbuf,
- "%.*s: %.*s\r\n", (int)namelen, name,
- (int)valuelen, value);
+ result = Curl_dynhds_add(&stream->resp_trailers,
+ (const char *)name, namelen,
+ (const char *)value, valuelen);
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
return 0;
}
- if(namelen == sizeof(H2H3_PSEUDO_STATUS) - 1 &&
- memcmp(H2H3_PSEUDO_STATUS, name, namelen) == 0) {
- /* nghttp2 guarantees :status is received first and only once, and
- value is 3 digits status code, and decode_status_code always
- succeeds. */
+ if(namelen == sizeof(HTTP_PSEUDO_STATUS) - 1 &&
+ memcmp(HTTP_PSEUDO_STATUS, name, namelen) == 0) {
+ /* nghttp2 guarantees :status is received first and only once. */
char buffer[32];
- stream->status_code = decode_status_code(value, valuelen);
- DEBUGASSERT(stream->status_code != -1);
- msnprintf(buffer, sizeof(buffer), H2H3_PSEUDO_STATUS ":%u\r",
+ result = Curl_http_decode_status(&stream->status_code,
+ (const char *)value, valuelen);
+ if(result)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ msnprintf(buffer, sizeof(buffer), HTTP_PSEUDO_STATUS ":%u\r",
stream->status_code);
result = Curl_headers_push(data_s, buffer, CURLH_PSEUDO);
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
- result = Curl_dyn_addn(&stream->header_recvbuf, STRCONST("HTTP/2 "));
+ result = recvbuf_write_hds(cf, data_s, STRCONST("HTTP/2 "));
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
- result = Curl_dyn_addn(&stream->header_recvbuf, value, valuelen);
+ result = recvbuf_write_hds(cf, data_s, (const char *)value, valuelen);
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
/* the space character after the status code is mandatory */
- result = Curl_dyn_addn(&stream->header_recvbuf, STRCONST(" \r\n"));
+ result = recvbuf_write_hds(cf, data_s, STRCONST(" \r\n"));
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
/* if we receive data for another handle, wake that up */
if(CF_DATA_CURRENT(cf) != data_s)
Curl_expire(data_s, 0, EXPIRE_RUN_NOW);
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] status: HTTP/2 %03d",
- stream->stream_id, stream->status_code));
+ DEBUGF(LOG_CF(data_s, cf, "[h2sid=%d] status: HTTP/2 %03d",
+ stream->id, stream->status_code));
return 0;
}
/* nghttp2 guarantees that namelen > 0, and :status was already
received, and this is not pseudo-header field . */
/* convert to an HTTP1-style header */
- result = Curl_dyn_addn(&stream->header_recvbuf, name, namelen);
+ result = recvbuf_write_hds(cf, data_s, (const char *)name, namelen);
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
- result = Curl_dyn_addn(&stream->header_recvbuf, STRCONST(": "));
+ result = recvbuf_write_hds(cf, data_s, STRCONST(": "));
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
- result = Curl_dyn_addn(&stream->header_recvbuf, value, valuelen);
+ result = recvbuf_write_hds(cf, data_s, (const char *)value, valuelen);
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
- result = Curl_dyn_addn(&stream->header_recvbuf, STRCONST("\r\n"));
+ result = recvbuf_write_hds(cf, data_s, STRCONST("\r\n"));
if(result)
return NGHTTP2_ERR_CALLBACK_FAILURE;
/* if we receive data for another handle, wake that up */
if(CF_DATA_CURRENT(cf) != data_s)
Curl_expire(data_s, 0, EXPIRE_RUN_NOW);
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] header: %.*s: %.*s",
- stream->stream_id,
+ DEBUGF(LOG_CF(data_s, cf, "[h2sid=%d] header: %.*s: %.*s",
+ stream->id,
(int)namelen, name,
(int)valuelen, value));
return 0; /* 0 is successful */
}
-static ssize_t data_source_read_callback(nghttp2_session *session,
- int32_t stream_id,
- uint8_t *buf, size_t length,
- uint32_t *data_flags,
- nghttp2_data_source *source,
- void *userp)
+static ssize_t req_body_read_callback(nghttp2_session *session,
+ int32_t stream_id,
+ uint8_t *buf, size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *userp)
{
struct Curl_cfilter *cf = userp;
struct Curl_easy *data_s;
- struct HTTP *stream = NULL;
- size_t nread;
+ struct stream_ctx *stream = NULL;
+ CURLcode result;
+ ssize_t nread;
(void)source;
(void)cf;
@@ -1325,30 +1431,32 @@ static ssize_t data_source_read_callback(nghttp2_session *session,
internal error more than anything else! */
return NGHTTP2_ERR_CALLBACK_FAILURE;
- stream = data_s->req.p.http;
+ stream = H2_STREAM_CTX(data_s);
if(!stream)
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
else
return NGHTTP2_ERR_INVALID_ARGUMENT;
- nread = CURLMIN(stream->upload_len, length);
- if(nread > 0) {
- memcpy(buf, stream->upload_mem, nread);
- stream->upload_mem += nread;
- stream->upload_len -= nread;
- if(data_s->state.infilesize != -1)
- stream->upload_left -= nread;
+ nread = Curl_bufq_read(&stream->sendbuf, buf, length, &result);
+ if(nread < 0) {
+ if(result != CURLE_AGAIN)
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ nread = 0;
}
+ if(nread > 0 && stream->upload_left != -1)
+ stream->upload_left -= nread;
+
+ DEBUGF(LOG_CF(data_s, cf, "[h2sid=%d] req_body_read(len=%zu) left=%zd"
+ " -> %zd, %d",
+ stream_id, length, stream->upload_left, nread, result));
+
if(stream->upload_left == 0)
*data_flags = NGHTTP2_DATA_FLAG_EOF;
else if(nread == 0)
return NGHTTP2_ERR_DEFERRED;
- DEBUGF(LOG_CF(data_s, cf, "[h2sid=%u] data_source_read_callback: "
- "returns %zu bytes", stream_id, nread));
-
return nread;
}
@@ -1366,59 +1474,6 @@ static int error_callback(nghttp2_session *session,
}
#endif
-static void http2_data_done(struct Curl_cfilter *cf,
- struct Curl_easy *data, bool premature)
-{
- struct cf_h2_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
-
- /* there might be allocated resources done before this got the 'h2' pointer
- setup */
- Curl_dyn_free(&stream->header_recvbuf);
- Curl_dyn_free(&stream->trailer_recvbuf);
- if(stream->push_headers) {
- /* if they weren't used and then freed before */
- for(; stream->push_headers_used > 0; --stream->push_headers_used) {
- free(stream->push_headers[stream->push_headers_used - 1]);
- }
- free(stream->push_headers);
- stream->push_headers = NULL;
- }
-
- if(!ctx || !ctx->h2)
- return;
-
- /* do this before the reset handling, as that might clear ->stream_id */
- if(stream->stream_id && stream->stream_id == ctx->pause_stream_id) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] DONE, the pause stream",
- stream->stream_id));
- ctx->pause_stream_id = 0;
- }
-
- (void)premature;
- if(!stream->closed && stream->stream_id) {
- /* RST_STREAM */
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] RST", stream->stream_id));
- if(!nghttp2_submit_rst_stream(ctx->h2, NGHTTP2_FLAG_NONE,
- stream->stream_id, NGHTTP2_STREAM_CLOSED))
- (void)nghttp2_session_send(ctx->h2);
- }
-
- if(data->state.drain)
- drained_transfer(cf, data);
-
- /* -1 means unassigned and 0 means cleared */
- if(nghttp2_session_get_stream_user_data(ctx->h2, stream->stream_id)) {
- int rv = nghttp2_session_set_stream_user_data(ctx->h2,
- stream->stream_id, 0);
- if(rv) {
- infof(data, "http/2: failed to clear user_data for stream %u",
- stream->stream_id);
- DEBUGASSERT(0);
- }
- }
-}
-
/*
* Append headers to ask for an HTTP1.1 to HTTP2 upgrade.
*/
@@ -1458,113 +1513,26 @@ CURLcode Curl_http2_request_upgrade(struct dynbuf *req,
return result;
}
-/*
- * h2_process_pending_input() processes pending input left in
- * httpc->inbuf. Then, call h2_session_send() to send pending data.
- * This function returns 0 if it succeeds, or -1 and error code will
- * be assigned to *err.
- */
-static int h2_process_pending_input(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- CURLcode *err)
-{
- struct cf_h2_ctx *ctx = cf->ctx;
- ssize_t nread;
- ssize_t rv;
-
- nread = ctx->inbuflen - ctx->nread_inbuf;
- if(nread) {
- char *inbuf = ctx->inbuf + ctx->nread_inbuf;
-
- rv = nghttp2_session_mem_recv(ctx->h2, (const uint8_t *)inbuf, nread);
- if(rv < 0) {
- failf(data,
- "h2_process_pending_input: nghttp2_session_mem_recv() returned "
- "%zd:%s", rv, nghttp2_strerror((int)rv));
- *err = CURLE_RECV_ERROR;
- return -1;
- }
-
- if(nread == rv) {
- DEBUGF(LOG_CF(data, cf, "all data in connection buffer processed"));
- ctx->inbuflen = 0;
- ctx->nread_inbuf = 0;
- }
- else {
- ctx->nread_inbuf += rv;
- DEBUGF(LOG_CF(data, cf, "h2_process_pending_input: %zu bytes left "
- "in connection buffer",
- ctx->inbuflen - ctx->nread_inbuf));
- }
- }
-
- rv = h2_session_send(cf, data);
- if(rv) {
- *err = CURLE_SEND_ERROR;
- return -1;
- }
-
- if(nghttp2_session_check_request_allowed(ctx->h2) == 0) {
- /* No more requests are allowed in the current session, so
- the connection may not be reused. This is set when a
- GOAWAY frame has been received or when the limit of stream
- identifiers has been reached. */
- connclose(cf->conn, "http/2: No new requests allowed");
- }
-
- if(should_close_session(ctx)) {
- struct HTTP *stream = data->req.p.http;
- DEBUGF(LOG_CF(data, cf,
- "h2_process_pending_input: nothing to do in this session"));
- if(stream->reset)
- *err = CURLE_PARTIAL_FILE;
- else if(stream->error)
- *err = CURLE_HTTP2;
- else {
- /* not an error per se, but should still close the connection */
- connclose(cf->conn, "GOAWAY received");
- *err = CURLE_OK;
- }
- return -1;
- }
- return 0;
-}
-
static CURLcode http2_data_done_send(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
struct cf_h2_ctx *ctx = cf->ctx;
CURLcode result = CURLE_OK;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
- if(!ctx || !ctx->h2)
+ if(!ctx || !ctx->h2 || !stream)
goto out;
- if(stream->upload_left) {
- /* If the stream still thinks there's data left to upload. */
- stream->upload_left = 0; /* DONE! */
-
- /* resume sending here to trigger the callback to get called again so
- that it can signal EOF to nghttp2 */
- (void)nghttp2_session_resume_data(ctx->h2, stream->stream_id);
- (void)h2_process_pending_input(cf, data, &result);
- }
-
- /* If nghttp2 still has pending frames unsent */
- if(nghttp2_session_want_write(ctx->h2)) {
- struct SingleRequest *k = &data->req;
- int rv;
-
- DEBUGF(LOG_CF(data, cf, "HTTP/2 still wants to send data"));
-
- /* and attempt to send the pending frames */
- rv = h2_session_send(cf, data);
- if(rv)
- result = CURLE_SEND_ERROR;
-
- if(nghttp2_session_want_write(ctx->h2)) {
- /* re-set KEEP_SEND to make sure we are called again */
- k->keepon |= KEEP_SEND;
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] data done send", stream->id));
+ if(!stream->send_closed) {
+ stream->send_closed = TRUE;
+ if(stream->upload_left) {
+ /* we now know that everything that is buffered is all there is. */
+ stream->upload_left = Curl_bufq_len(&stream->sendbuf);
+ /* resume sending here to trigger the callback to get called again so
+ that it can signal EOF to nghttp2 */
+ (void)nghttp2_session_resume_data(ctx->h2, stream->id);
+ drain_stream(cf, data, stream);
}
}
@@ -1574,79 +1542,76 @@ out:
static ssize_t http2_handle_stream_close(struct Curl_cfilter *cf,
struct Curl_easy *data,
- struct HTTP *stream, CURLcode *err)
+ struct stream_ctx *stream,
+ CURLcode *err)
{
- struct cf_h2_ctx *ctx = cf->ctx;
-
- if(ctx->pause_stream_id == stream->stream_id) {
- ctx->pause_stream_id = 0;
- }
-
- drained_transfer(cf, data);
-
- if(ctx->pause_stream_id == 0) {
- if(h2_process_pending_input(cf, data, err) != 0) {
- return -1;
- }
- }
+ ssize_t rv = 0;
if(stream->error == NGHTTP2_REFUSED_STREAM) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] REFUSED_STREAM, try again on a new "
- "connection", stream->stream_id));
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] REFUSED_STREAM, try again on a new "
+ "connection", stream->id));
connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */
data->state.refused_stream = TRUE;
- *err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */
+ *err = CURLE_SEND_ERROR; /* trigger Curl_retry_request() later */
+ return -1;
+ }
+ else if(stream->reset) {
+ failf(data, "HTTP/2 stream %u was reset", stream->id);
+ *err = stream->bodystarted? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
return -1;
}
else if(stream->error != NGHTTP2_NO_ERROR) {
failf(data, "HTTP/2 stream %u was not closed cleanly: %s (err %u)",
- stream->stream_id, nghttp2_http2_strerror(stream->error),
+ stream->id, nghttp2_http2_strerror(stream->error),
stream->error);
*err = CURLE_HTTP2_STREAM;
return -1;
}
- else if(stream->reset) {
- failf(data, "HTTP/2 stream %u was reset", stream->stream_id);
- *err = stream->bodystarted? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
- return -1;
- }
if(!stream->bodystarted) {
failf(data, "HTTP/2 stream %u was closed cleanly, but before getting "
" all response header fields, treated as error",
- stream->stream_id);
+ stream->id);
*err = CURLE_HTTP2_STREAM;
return -1;
}
- if(Curl_dyn_len(&stream->trailer_recvbuf)) {
- char *trailp = Curl_dyn_ptr(&stream->trailer_recvbuf);
- char *lf;
+ if(Curl_dynhds_count(&stream->resp_trailers)) {
+ struct dynhds_entry *e;
+ struct dynbuf dbuf;
+ size_t i;
- do {
- size_t len = 0;
- CURLcode result;
- /* each trailer line ends with a newline */
- lf = strchr(trailp, '\n');
- if(!lf)
+ *err = CURLE_OK;
+ Curl_dyn_init(&dbuf, DYN_TRAILERS);
+ for(i = 0; i < Curl_dynhds_count(&stream->resp_trailers); ++i) {
+ e = Curl_dynhds_getn(&stream->resp_trailers, i);
+ if(!e)
break;
- len = lf + 1 - trailp;
-
- Curl_debug(data, CURLINFO_HEADER_IN, trailp, len);
- /* pass the trailers one by one to the callback */
- result = Curl_client_write(data, CLIENTWRITE_HEADER, trailp, len);
- if(result) {
- *err = result;
- return -1;
- }
- trailp = ++lf;
- } while(lf);
+ Curl_dyn_reset(&dbuf);
+ *err = Curl_dyn_addf(&dbuf, "%.*s: %.*s\x0d\x0a",
+ (int)e->namelen, e->name,
+ (int)e->valuelen, e->value);
+ if(*err)
+ break;
+ Curl_debug(data, CURLINFO_HEADER_IN, Curl_dyn_ptr(&dbuf),
+ Curl_dyn_len(&dbuf));
+ *err = Curl_client_write(data, CLIENTWRITE_HEADER|CLIENTWRITE_TRAILER,
+ Curl_dyn_ptr(&dbuf), Curl_dyn_len(&dbuf));
+ if(*err)
+ break;
+ }
+ Curl_dyn_free(&dbuf);
+ if(*err)
+ goto out;
}
stream->close_handled = TRUE;
+ *err = CURLE_OK;
+ rv = 0;
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] closed cleanly", stream->stream_id));
- return 0;
+out:
+ DEBUGF(LOG_CF(data, cf, "handle_stream_close -> %zd, %d", rv, *err));
+ return rv;
}
static int sweight_wanted(const struct Curl_easy *data)
@@ -1673,9 +1638,8 @@ static void h2_pri_spec(struct Curl_easy *data,
nghttp2_priority_spec *pri_spec)
{
struct Curl_data_priority *prio = &data->set.priority;
- struct HTTP *depstream = (prio->parent?
- prio->parent->req.p.http:NULL);
- int32_t depstream_id = depstream? depstream->stream_id:0;
+ struct stream_ctx *depstream = H2_STREAM_CTX(prio->parent);
+ int32_t depstream_id = depstream? depstream->id:0;
nghttp2_priority_spec_init(pri_spec, depstream_id,
sweight_wanted(data),
data->set.priority.exclusive);
@@ -1683,15 +1647,16 @@ static void h2_pri_spec(struct Curl_easy *data,
}
/*
- * h2_session_send() checks if there's been an update in the priority /
+ * Check if there's been an update in the priority /
* dependency settings and if so it submits a PRIORITY frame with the updated
* info.
+ * Flush any out data pending in the network buffer.
*/
-static CURLcode h2_session_send(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+static CURLcode h2_progress_egress(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
int rv = 0;
if((sweight_wanted(data) != sweight_in_effect(data)) ||
@@ -1701,401 +1666,265 @@ static CURLcode h2_session_send(struct Curl_cfilter *cf,
nghttp2_priority_spec pri_spec;
h2_pri_spec(data, &pri_spec);
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] Queuing PRIORITY",
- stream->stream_id));
- DEBUGASSERT(stream->stream_id != -1);
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] Queuing PRIORITY",
+ stream->id));
+ DEBUGASSERT(stream->id != -1);
rv = nghttp2_submit_priority(ctx->h2, NGHTTP2_FLAG_NONE,
- stream->stream_id, &pri_spec);
+ stream->id, &pri_spec);
if(rv)
goto out;
}
- rv = nghttp2_session_send(ctx->h2);
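+  /* let nghttp2 generate frames until it has nothing more to send, then
+   * flush whatever ended up in the network output buffer */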
+ while(!rv && nghttp2_session_want_write(ctx->h2))
+ rv = nghttp2_session_send(ctx->h2);
+
out:
if(nghttp2_is_fatal(rv)) {
DEBUGF(LOG_CF(data, cf, "nghttp2_session_send error (%s)%d",
nghttp2_strerror(rv), rv));
return CURLE_SEND_ERROR;
}
- return flush_output(cf, data);
+ return nw_out_flush(cf, data);
}
-static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
- char *buf, size_t len, CURLcode *err)
+static ssize_t stream_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
+ char *buf, size_t len, CURLcode *err)
{
struct cf_h2_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
ssize_t nread = -1;
- struct cf_call_data save;
- bool conn_is_closed = FALSE;
- CF_DATA_SAVE(save, cf, data);
-
- /* If the h2 session has told us to GOAWAY with an error AND
- * indicated the highest stream id it has processes AND
- * the stream we are trying to read has a higher id, this
- * means we will most likely not receive any more for it.
- * Treat this as if the server explicitly had RST the stream */
- if((ctx->goaway && ctx->goaway_error &&
- ctx->last_stream_id > 0 &&
- ctx->last_stream_id < stream->stream_id)) {
- stream->reset = TRUE;
- }
-
- /* If a stream is RST, it does not matter what state the h2 session
- * is in, our answer to receiving data is always the same. */
- if(stream->reset) {
- *err = stream->bodystarted? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
- nread = -1;
- goto out;
+ *err = CURLE_AGAIN;
+ if(!Curl_bufq_is_empty(&stream->recvbuf)) {
+ nread = Curl_bufq_read(&stream->recvbuf,
+ (unsigned char *)buf, len, err);
+ DEBUGF(LOG_CF(data, cf, "recvbuf read(len=%zu) -> %zd, %d",
+ len, nread, *err));
+ if(nread < 0)
+ goto out;
+ DEBUGASSERT(nread > 0);
}
- if(should_close_session(ctx)) {
- DEBUGF(LOG_CF(data, cf, "http2_recv: nothing to do in this session"));
- if(cf->conn->bits.close) {
- /* already marked for closure, return OK and we're done */
- drained_transfer(cf, data);
- *err = CURLE_OK;
- nread = 0;
- goto out;
+ if(nread < 0) {
+ if(stream->closed) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] returning CLOSE", stream->id));
+ nread = http2_handle_stream_close(cf, data, stream, err);
}
- *err = CURLE_HTTP2;
+ else if(stream->reset ||
+ (ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) ||
+ (ctx->goaway && ctx->last_stream_id < stream->id)) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] returning ERR", stream->id));
+ *err = stream->bodystarted? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
+ nread = -1;
+ }
+ }
+ else if(nread == 0) {
+ *err = CURLE_AGAIN;
nread = -1;
- goto out;
}
- /* Nullify here because we call nghttp2_session_send() and they
- might refer to the old buffer. */
- stream->upload_mem = NULL;
- stream->upload_len = 0;
+out:
+ DEBUGF(LOG_CF(data, cf, "stream_recv(len=%zu) -> %zd, %d",
+ len, nread, *err));
+ return nread;
+}
- /*
- * At this point 'stream' is just in the Curl_easy the connection
- * identifies as its owner at this time.
- */
+static CURLcode h2_progress_ingress(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream;
+ CURLcode result = CURLE_OK;
+ ssize_t nread;
- if(stream->bodystarted &&
- stream->nread_header_recvbuf < Curl_dyn_len(&stream->header_recvbuf)) {
- /* If there is header data pending for this stream to return, do that */
- size_t left =
- Curl_dyn_len(&stream->header_recvbuf) - stream->nread_header_recvbuf;
- size_t ncopy = CURLMIN(len, left);
- memcpy(buf, Curl_dyn_ptr(&stream->header_recvbuf) +
- stream->nread_header_recvbuf, ncopy);
- stream->nread_header_recvbuf += ncopy;
-
- DEBUGF(LOG_CF(data, cf, "recv: Got %d bytes from header_recvbuf",
- (int)ncopy));
- nread = ncopy;
- goto out;
+  /* Process network input buffer first */
+ if(!Curl_bufq_is_empty(&ctx->inbufq)) {
+ DEBUGF(LOG_CF(data, cf, "Process %zd bytes in connection buffer",
+ Curl_bufq_len(&ctx->inbufq)));
+ if(h2_process_pending_input(cf, data, &result) < 0)
+ return result;
}
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] cf_recv: win %u/%u",
- stream->stream_id,
- nghttp2_session_get_local_window_size(ctx->h2),
- nghttp2_session_get_stream_local_window_size(ctx->h2,
- stream->stream_id)
- ));
-
- if(stream->memlen) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv: DRAIN %zu bytes (%p => %p)",
- stream->stream_id, stream->memlen,
- (void *)stream->mem, (void *)buf));
- if(buf != stream->mem) {
- /* if we didn't get the same buffer this time, we must move the data to
- the beginning */
- memmove(buf, stream->mem, stream->memlen);
- stream->len = len - stream->memlen;
- stream->mem = buf;
- }
-
- if(ctx->pause_stream_id == stream->stream_id && !stream->pausedata) {
- /* We have paused nghttp2, but we have no pause data (see
- on_data_chunk_recv). */
- ctx->pause_stream_id = 0;
- if(h2_process_pending_input(cf, data, err) != 0) {
- nread = -1;
- goto out;
+ /* Receive data from the "lower" filters, e.g. network until
+ * it is time to stop due to connection close or us not processing
+ * all network input */
+ while(!ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) {
+ stream = H2_STREAM_CTX(data);
+ if(stream && (stream->closed || Curl_bufq_is_full(&stream->recvbuf))) {
+ /* We would like to abort here and stop processing, so that
+ * the transfer loop can handle the data/close here. However,
+ * this may leave data in underlying buffers that will not
+ * be consumed. */
+ if(!cf->next || !cf->next->cft->has_data_pending(cf->next, data))
+ break;
+ }
+
+ nread = Curl_bufq_slurp(&ctx->inbufq, nw_in_reader, cf, &result);
+ /* DEBUGF(LOG_CF(data, cf, "read %zd bytes nw data -> %zd, %d",
+ Curl_bufq_len(&ctx->inbufq), nread, result)); */
+ if(nread < 0) {
+ if(result != CURLE_AGAIN) {
+ failf(data, "Failed receiving HTTP2 data: %d(%s)", result,
+ curl_easy_strerror(result));
+ return result;
}
+ break;
+ }
+ else if(nread == 0) {
+ ctx->conn_closed = TRUE;
+ break;
}
+
+ if(h2_process_pending_input(cf, data, &result))
+ return result;
}
- else if(stream->pausedata) {
- DEBUGASSERT(ctx->pause_stream_id == stream->stream_id);
- nread = CURLMIN(len, stream->pauselen);
- memcpy(buf, stream->pausedata, nread);
- stream->pausedata += nread;
- stream->pauselen -= nread;
- drain_this(cf, data);
+ if(ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) {
+ connclose(cf->conn, "GOAWAY received");
+ }
+
+ return CURLE_OK;
+}
+
+static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
+ char *buf, size_t len, CURLcode *err)
+{
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
+ ssize_t nread = -1;
+ CURLcode result;
+ struct cf_call_data save;
- if(stream->pauselen == 0) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] Unpaused", stream->stream_id));
- DEBUGASSERT(ctx->pause_stream_id == stream->stream_id);
- ctx->pause_stream_id = 0;
+ CF_DATA_SAVE(save, cf, data);
- stream->pausedata = NULL;
- stream->pauselen = 0;
- }
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv: returns unpaused %zd bytes",
- stream->stream_id, nread));
+ nread = stream_recv(cf, data, buf, len, err);
+ if(nread < 0 && *err != CURLE_AGAIN)
goto out;
- }
- else if(ctx->pause_stream_id) {
- /* If a stream paused nghttp2_session_mem_recv previously, and has
- not processed all data, it still refers to the buffer in
- nghttp2_session. If we call nghttp2_session_mem_recv(), we may
- overwrite that buffer. To avoid that situation, just return
- here with CURLE_AGAIN. This could be busy loop since data in
- socket is not read. But it seems that usually streams are
- notified with its drain property, and socket is read again
- quickly. */
- if(stream->closed) {
- /* closed overrides paused */
- drained_transfer(cf, data);
- nread = 0;
+
+ if(nread < 0) {
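+    /* nothing was buffered for this stream: feed data from the lower
+     * filters into nghttp2 and then try the stream's receive buffer again */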
+ *err = h2_progress_ingress(cf, data);
+ if(*err)
goto out;
- }
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] is paused, pause h2sid: %u",
- stream->stream_id, ctx->pause_stream_id));
- *err = CURLE_AGAIN;
- nread = -1;
- goto out;
- }
- else {
- /* We have nothing buffered for `data` and no other stream paused
- * the processing of incoming data, we can therefore read new data
- * from the network.
- * If DATA is coming for this stream, we want to store it ad the
- * `buf` passed in right away - saving us a copy.
- */
- stream->mem = buf;
- stream->len = len;
- stream->memlen = 0;
-
- if(ctx->inbuflen > 0) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] %zd bytes in inbuf",
- stream->stream_id, ctx->inbuflen - ctx->nread_inbuf));
- if(h2_process_pending_input(cf, data, err))
- return -1;
- }
-
- while(stream->memlen == 0 && /* have no data for this stream */
- !stream->closed && /* and it is not closed/reset */
- !ctx->pause_stream_id && /* we are not paused either */
- ctx->inbuflen == 0 && /* and out input buffer is empty */
- !conn_is_closed) { /* and connection is not closed */
- /* Receive data from the "lower" filters */
- nread = Curl_conn_cf_recv(cf->next, data, ctx->inbuf, H2_BUFSIZE, err);
- if(nread < 0) {
- DEBUGASSERT(*err);
- if(*err == CURLE_AGAIN) {
- break;
- }
- failf(data, "Failed receiving HTTP2 data");
- conn_is_closed = TRUE;
- }
- else if(nread == 0) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] underlying connection is closed",
- stream->stream_id));
- conn_is_closed = TRUE;
- }
- else {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] read %zd from connection",
- stream->stream_id, nread));
- ctx->inbuflen = nread;
- DEBUGASSERT(ctx->nread_inbuf == 0);
- if(h2_process_pending_input(cf, data, err))
- return -1;
- }
- }
+ nread = stream_recv(cf, data, buf, len, err);
}
- if(stream->memlen) {
- ssize_t retlen = stream->memlen;
-
- /* TODO: all this buffer handling is very brittle */
- stream->len += stream->memlen;
- stream->memlen = 0;
-
- if(ctx->pause_stream_id == stream->stream_id) {
- /* data for this stream is returned now, but this stream caused a pause
- already so we need it called again asap */
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] Data returned for PAUSED stream",
- stream->stream_id));
- drain_this(cf, data);
- Curl_expire(data, 0, EXPIRE_RUN_NOW);
- }
- else if(stream->closed) {
- if(stream->reset || stream->error) {
- nread = http2_handle_stream_close(cf, data, stream, err);
- goto out;
- }
- /* this stream is closed, trigger a another read ASAP to detect that */
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] is closed now, run again",
- stream->stream_id));
- drain_this(cf, data);
- Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ if(nread > 0) {
+ size_t data_consumed = (size_t)nread;
+ /* Now that we transferred this to the upper layer, we report
+ * the actual amount of DATA consumed to the H2 session, so
+ * that it adjusts stream flow control */
+ if(stream->resp_hds_len >= data_consumed) {
+ stream->resp_hds_len -= data_consumed; /* no DATA */
}
else {
- drained_transfer(cf, data);
+ if(stream->resp_hds_len) {
+ data_consumed -= stream->resp_hds_len;
+ stream->resp_hds_len = 0;
+ }
+ if(data_consumed) {
+ nghttp2_session_consume(ctx->h2, stream->id, data_consumed);
+ }
}
- *err = CURLE_OK;
- nread = retlen;
- goto out;
- }
-
- if(conn_is_closed && !stream->closed) {
- /* underlying connection is closed and we have nothing for the stream.
- * Treat this as a RST */
- stream->closed = stream->reset = TRUE;
- failf(data, "HTTP/2 stream %u was not closed cleanly before"
- " end of the underlying connection",
- stream->stream_id);
+ if(stream->closed) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] closed stream, set drain",
+ stream->id));
+ drain_stream(cf, data, stream);
+ }
}
- if(stream->closed) {
- nread = http2_handle_stream_close(cf, data, stream, err);
- goto out;
+out:
+ result = h2_progress_egress(cf, data);
+ if(result) {
+ *err = result;
+ nread = -1;
}
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_recv(len=%zu) -> %zd %d, "
+ "buffered=%zu, window=%d/%d, connection %d/%d",
+ stream->id, len, nread, *err,
+ Curl_bufq_len(&stream->recvbuf),
+ nghttp2_session_get_stream_effective_recv_data_length(
+ ctx->h2, stream->id),
+ nghttp2_session_get_stream_effective_local_window_size(
+ ctx->h2, stream->id),
+ nghttp2_session_get_local_window_size(ctx->h2),
+ HTTP2_HUGE_WINDOW_SIZE));
- if(!data->state.drain && Curl_conn_cf_data_pending(cf->next, data)) {
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] pending data, set drain",
- stream->stream_id));
- drain_this(cf, data);
- }
- *err = CURLE_AGAIN;
- nread = -1;
-out:
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] cf_recv -> %zd, %d",
- stream->stream_id, nread, *err));
CF_DATA_RESTORE(cf, save);
return nread;
}
-static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
- const void *buf, size_t len, CURLcode *err)
+static ssize_t h2_submit(struct stream_ctx **pstream,
+ struct Curl_cfilter *cf, struct Curl_easy *data,
+ const void *buf, size_t len, CURLcode *err)
{
- /*
- * Currently, we send request in this function, but this function is also
- * used to send request body. It would be nice to add dedicated function for
- * request.
- */
struct cf_h2_ctx *ctx = cf->ctx;
- int rv;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = NULL;
+ struct h1_req_parser h1;
+ struct dynhds h2_headers;
nghttp2_nv *nva = NULL;
- size_t nheader;
+ size_t nheader, i;
nghttp2_data_provider data_prd;
int32_t stream_id;
nghttp2_priority_spec pri_spec;
- CURLcode result;
- struct h2h3req *hreq;
- struct cf_call_data save;
ssize_t nwritten;
- CF_DATA_SAVE(save, cf, data);
- DEBUGF(LOG_CF(data, cf, "cf_send(len=%zu) start", len));
-
- if(stream->stream_id != -1) {
- if(stream->close_handled) {
- infof(data, "stream %u closed", stream->stream_id);
- *err = CURLE_HTTP2_STREAM;
- nwritten = -1;
- goto out;
- }
- else if(stream->closed) {
- nwritten = http2_handle_stream_close(cf, data, stream, err);
- goto out;
- }
- /* If stream_id != -1, we have dispatched request HEADERS, and now
- are going to send or sending request body in DATA frame */
- stream->upload_mem = buf;
- stream->upload_len = len;
- rv = nghttp2_session_resume_data(ctx->h2, stream->stream_id);
- if(nghttp2_is_fatal(rv)) {
- *err = CURLE_SEND_ERROR;
- nwritten = -1;
- goto out;
- }
- result = h2_session_send(cf, data);
- if(result) {
- *err = result;
- nwritten = -1;
- goto out;
- }
-
- nwritten = (ssize_t)len - (ssize_t)stream->upload_len;
- stream->upload_mem = NULL;
- stream->upload_len = 0;
-
- if(should_close_session(ctx)) {
- DEBUGF(LOG_CF(data, cf, "send: nothing to do in this session"));
- *err = CURLE_HTTP2;
- nwritten = -1;
- goto out;
- }
-
- if(stream->upload_left) {
- /* we are sure that we have more data to send here. Calling the
- following API will make nghttp2_session_want_write() return
- nonzero if remote window allows it, which then libcurl checks
- socket is writable or not. See http2_perform_getsock(). */
- nghttp2_session_resume_data(ctx->h2, stream->stream_id);
- }
-
- if(!nwritten) {
- size_t rwin = nghttp2_session_get_stream_remote_window_size(ctx->h2,
- stream->stream_id);
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] cf_send: win %u/%zu",
- stream->stream_id,
- nghttp2_session_get_remote_window_size(ctx->h2), rwin));
- if(rwin == 0) {
- /* We cannot upload more as the stream's remote window size
- * is 0. We need to receive WIN_UPDATEs before we can continue.
- */
- data->req.keepon |= KEEP_SEND_HOLD;
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] holding send as remote flow "
- "window is exhausted", stream->stream_id));
- }
- }
- DEBUGF(LOG_CF(data, cf, "[h2sid=%u] cf_send returns %zd ",
- stream->stream_id, nwritten));
+ Curl_h1_req_parse_init(&h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
+ Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
- /* handled writing BODY for open stream. */
+ *err = http2_data_setup(cf, data, &stream);
+ if(*err) {
+ nwritten = -1;
goto out;
}
- /* Stream has not been opened yet. `buf` is expected to contain
- * request headers. */
- /* TODO: this assumes that the `buf` and `len` we are called with
- * is *all* HEADERs and no body. We have no way to determine here
- * if that is indeed the case. */
- result = Curl_pseudo_headers(data, buf, len, NULL, &hreq);
- if(result) {
- *err = result;
+
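+  /* the request head we are handed is a serialized HTTP/1.x message;
+   * parse it back and convert the fields into HTTP/2 header pairs */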
+ nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
+ if(nwritten < 0)
+ goto out;
+ DEBUGASSERT(h1.done);
+ DEBUGASSERT(h1.req);
+
+ *err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
+ if(*err) {
nwritten = -1;
goto out;
}
- nheader = hreq->entries;
+ nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(nghttp2_nv) * nheader);
if(!nva) {
- Curl_pseudo_free(hreq);
*err = CURLE_OUT_OF_MEMORY;
nwritten = -1;
goto out;
}
- else {
- unsigned int i;
- for(i = 0; i < nheader; i++) {
- nva[i].name = (unsigned char *)hreq->header[i].name;
- nva[i].namelen = hreq->header[i].namelen;
- nva[i].value = (unsigned char *)hreq->header[i].value;
- nva[i].valuelen = hreq->header[i].valuelen;
- nva[i].flags = NGHTTP2_NV_FLAG_NONE;
+
+ for(i = 0; i < nheader; ++i) {
+ struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
+ nva[i].name = (unsigned char *)e->name;
+ nva[i].namelen = e->namelen;
+ nva[i].value = (unsigned char *)e->value;
+ nva[i].valuelen = e->valuelen;
+ nva[i].flags = NGHTTP2_NV_FLAG_NONE;
+ }
+
+#define MAX_ACC 60000 /* <64KB to account for some overhead */
+ {
+ size_t acc = 0;
+
+ for(i = 0; i < nheader; ++i) {
+ acc += nva[i].namelen + nva[i].valuelen;
+
+ infof(data, "h2 [%.*s: %.*s]",
+ (int)nva[i].namelen, nva[i].name,
+ (int)nva[i].valuelen, nva[i].value);
+ }
+
+ if(acc > MAX_ACC) {
+ infof(data, "http_request: Warning: The cumulative length of all "
+ "headers exceeds %d bytes and that could cause the "
+ "stream to be rejected.", MAX_ACC);
}
- Curl_pseudo_free(hreq);
}
h2_pri_spec(data, &pri_spec);
@@ -2112,14 +1941,15 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
stream->upload_left = data->state.infilesize;
else
/* data sending without specifying the data amount up front */
- stream->upload_left = -1; /* unknown, but not zero */
+ stream->upload_left = -1; /* unknown */
- data_prd.read_callback = data_source_read_callback;
+ data_prd.read_callback = req_body_read_callback;
data_prd.source.ptr = NULL;
stream_id = nghttp2_submit_request(ctx->h2, &pri_spec, nva, nheader,
&data_prd, data);
break;
default:
+ stream->upload_left = 0; /* no request body */
stream_id = nghttp2_submit_request(ctx->h2, &pri_spec, nva, nheader,
NULL, data);
}
@@ -2134,37 +1964,167 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
goto out;
}
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_send(len=%zu) submit %s",
+ stream_id, len, data->state.url));
infof(data, "Using Stream ID: %u (easy handle %p)",
stream_id, (void *)data);
- stream->stream_id = stream_id;
- /* See TODO above. We assume that the whole buf was consumed by
- * generating the request headers. */
- nwritten = len;
+ stream->id = stream_id;
- result = h2_session_send(cf, data);
- if(result) {
- *err = result;
- nwritten = -1;
- goto out;
- }
+out:
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] submit -> %zd, %d",
+ stream? stream->id : -1, nwritten, *err));
+ *pstream = stream;
+ Curl_h1_req_parse_free(&h1);
+ Curl_dynhds_free(&h2_headers);
+ return nwritten;
+}
- if(should_close_session(ctx)) {
- DEBUGF(LOG_CF(data, cf, "send: nothing to do in this session"));
- *err = CURLE_HTTP2;
- nwritten = -1;
+static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
+ const void *buf, size_t len, CURLcode *err)
+{
+ /*
+   * Currently, we send the request in this function, but this function is
+   * also used to send the request body. It would be nice to add a dedicated
+   * function for the request.
+ */
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
+ struct cf_call_data save;
+ int rv;
+ ssize_t nwritten;
+ CURLcode result;
+
+ CF_DATA_SAVE(save, cf, data);
+
+ if(stream && stream->id != -1) {
+ if(stream->close_handled) {
+ infof(data, "stream %u closed", stream->id);
+ *err = CURLE_HTTP2_STREAM;
+ nwritten = -1;
+ goto out;
+ }
+ else if(stream->closed) {
+ nwritten = http2_handle_stream_close(cf, data, stream, err);
+ goto out;
+ }
+ /* If stream_id != -1, we have dispatched request HEADERS, and now
+ are going to send or sending request body in DATA frame */
+ nwritten = Curl_bufq_write(&stream->sendbuf, buf, len, err);
+ if(nwritten < 0) {
+ if(*err != CURLE_AGAIN)
+ goto out;
+ nwritten = 0;
+ }
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%u] bufq_write(len=%zu) -> %zd, %d",
+ stream->id, len, nwritten, *err));
+
+ if(!Curl_bufq_is_empty(&stream->sendbuf)) {
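+      /* body bytes are buffered for the DATA frame source; resume the
+       * stream so nghttp2 invokes the read callback again */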
+ rv = nghttp2_session_resume_data(ctx->h2, stream->id);
+ if(nghttp2_is_fatal(rv)) {
+ *err = CURLE_SEND_ERROR;
+ nwritten = -1;
+ goto out;
+ }
+ }
+
+ result = h2_progress_ingress(cf, data);
+ if(result) {
+ *err = result;
+ nwritten = -1;
+ goto out;
+ }
+
+ result = h2_progress_egress(cf, data);
+ if(result) {
+ *err = result;
+ nwritten = -1;
+ goto out;
+ }
+
+ if(should_close_session(ctx)) {
+ if(stream->closed) {
+ nwritten = http2_handle_stream_close(cf, data, stream, err);
+ }
+ else {
+ DEBUGF(LOG_CF(data, cf, "send: nothing to do in this session"));
+ *err = CURLE_HTTP2;
+ nwritten = -1;
+ }
+ goto out;
+ }
+
+ if(!nwritten) {
+ size_t rwin = nghttp2_session_get_stream_remote_window_size(ctx->h2,
+ stream->id);
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_send: win %u/%zu",
+ stream->id,
+ nghttp2_session_get_remote_window_size(ctx->h2), rwin));
+ if(rwin == 0) {
+ /* We cannot upload more as the stream's remote window size
+ * is 0. We need to receive WIN_UPDATEs before we can continue.
+ */
+ data->req.keepon |= KEEP_SEND_HOLD;
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] holding send as remote flow "
+ "window is exhausted", stream->id));
+ }
+ nwritten = -1;
+ *err = CURLE_AGAIN;
+ }
+ /* handled writing BODY for open stream. */
goto out;
}
+ else {
+ nwritten = h2_submit(&stream, cf, data, buf, len, err);
+ if(nwritten < 0) {
+ goto out;
+ }
+
+ result = h2_progress_ingress(cf, data);
+ if(result) {
+ *err = result;
+ nwritten = -1;
+ goto out;
+ }
- /* If whole HEADERS frame was sent off to the underlying socket, the nghttp2
- library calls data_source_read_callback. But only it found that no data
- available, so it deferred the DATA transmission. Which means that
- nghttp2_session_want_write() returns 0 on http2_perform_getsock(), which
- results that no writable socket check is performed. To workaround this,
- we issue nghttp2_session_resume_data() here to bring back DATA
- transmission from deferred state. */
- nghttp2_session_resume_data(ctx->h2, stream->stream_id);
+ result = h2_progress_egress(cf, data);
+ if(result) {
+ *err = result;
+ nwritten = -1;
+ goto out;
+ }
+
+ if(should_close_session(ctx)) {
+ if(stream->closed) {
+ nwritten = http2_handle_stream_close(cf, data, stream, err);
+ }
+ else {
+ DEBUGF(LOG_CF(data, cf, "send: nothing to do in this session"));
+ *err = CURLE_HTTP2;
+ nwritten = -1;
+ }
+ goto out;
+ }
+ }
out:
+ if(stream) {
+ DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_send(len=%zu) -> %zd, %d, "
+ "buffered=%zu, upload_left=%zu, stream-window=%d, "
+ "connection-window=%d",
+ stream->id, len, nwritten, *err,
+ Curl_bufq_len(&stream->sendbuf),
+ (ssize_t)stream->upload_left,
+ nghttp2_session_get_stream_remote_window_size(
+ ctx->h2, stream->id),
+ nghttp2_session_get_remote_window_size(ctx->h2)));
+ drain_stream(cf, data, stream);
+ }
+ else {
+ DEBUGF(LOG_CF(data, cf, "cf_send(len=%zu) -> %zd, %d, "
+ "connection-window=%d",
+ len, nwritten, *err,
+ nghttp2_session_get_remote_window_size(ctx->h2)));
+ }
CF_DATA_RESTORE(cf, save);
return nwritten;
}
@@ -2175,14 +2135,14 @@ static int cf_h2_get_select_socks(struct Curl_cfilter *cf,
{
struct cf_h2_ctx *ctx = cf->ctx;
struct SingleRequest *k = &data->req;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
int bitmap = GETSOCK_BLANK;
struct cf_call_data save;
CF_DATA_SAVE(save, cf, data);
sock[0] = Curl_conn_cf_get_socket(cf, data);
- if(!(k->keepon & KEEP_RECV_PAUSE))
+ if(!(k->keepon & (KEEP_RECV_PAUSE|KEEP_RECV_HOLD)))
/* Unless paused - in an HTTP/2 connection we can basically always get a
frame so we should always be ready for one */
bitmap |= GETSOCK_READSOCK(0);
@@ -2193,7 +2153,7 @@ static int cf_h2_get_select_socks(struct Curl_cfilter *cf,
nghttp2_session_want_write(ctx->h2)) &&
(nghttp2_session_get_remote_window_size(ctx->h2) &&
nghttp2_session_get_stream_remote_window_size(ctx->h2,
- stream->stream_id)))
+ stream->id)))
bitmap |= GETSOCK_WRITESOCK(0);
CF_DATA_RESTORE(cf, save);
@@ -2230,10 +2190,13 @@ static CURLcode cf_h2_connect(struct Curl_cfilter *cf,
goto out;
}
- if(-1 == h2_process_pending_input(cf, data, &result)) {
- result = CURLE_HTTP2;
+ result = h2_progress_ingress(cf, data);
+ if(result)
+ goto out;
+
+ result = h2_progress_egress(cf, data);
+ if(result)
goto out;
- }
*done = TRUE;
cf->connected = TRUE;
@@ -2272,18 +2235,18 @@ static CURLcode http2_data_pause(struct Curl_cfilter *cf,
struct Curl_easy *data,
bool pause)
{
+#ifdef NGHTTP2_HAS_SET_LOCAL_WINDOW_SIZE
struct cf_h2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
DEBUGASSERT(data);
-#ifdef NGHTTP2_HAS_SET_LOCAL_WINDOW_SIZE
- if(ctx && ctx->h2) {
- struct HTTP *stream = data->req.p.http;
- uint32_t window = !pause * HTTP2_HUGE_WINDOW_SIZE;
+ if(ctx && ctx->h2 && stream) {
+ uint32_t window = !pause * H2_STREAM_WINDOW_SIZE;
CURLcode result;
int rv = nghttp2_session_set_local_window_size(ctx->h2,
NGHTTP2_FLAG_NONE,
- stream->stream_id,
+ stream->id,
window);
if(rv) {
failf(data, "nghttp2_session_set_local_window_size() failed: %s(%d)",
@@ -2291,22 +2254,34 @@ static CURLcode http2_data_pause(struct Curl_cfilter *cf, return CURLE_HTTP2;
}
+ if(!pause)
+ drain_stream(cf, data, stream);
+
/* make sure the window update gets sent */
- result = h2_session_send(cf, data);
+ result = h2_progress_egress(cf, data);
if(result)
return result;
+ if(!pause) {
+      /* Unpausing an h2 transfer requires it to be run again. The server
+       * may send new DATA to us, increasing the flow window, or it may
+ * not. We may have already buffered and exhausted the new window
+ * by operating on things in flight during the handling of other
+ * transfers. */
+ drain_stream(cf, data, stream);
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ }
DEBUGF(infof(data, "Set HTTP/2 window size to %u for stream %u",
- window, stream->stream_id));
+ window, stream->id));
#ifdef DEBUGBUILD
{
/* read out the stream local window again */
uint32_t window2 =
nghttp2_session_get_stream_local_window_size(ctx->h2,
- stream->stream_id);
+ stream->id);
DEBUGF(infof(data, "HTTP/2 window size is now %u for stream %u",
- window2, stream->stream_id));
+ window2, stream->id));
}
#endif
}
@@ -2325,14 +2300,11 @@ static CURLcode cf_h2_cntrl(struct Curl_cfilter *cf,
CF_DATA_SAVE(save, cf, data);
switch(event) {
- case CF_CTRL_DATA_SETUP: {
- result = http2_data_setup(cf, data);
+ case CF_CTRL_DATA_SETUP:
break;
- }
- case CF_CTRL_DATA_PAUSE: {
+ case CF_CTRL_DATA_PAUSE:
result = http2_data_pause(cf, data, (arg1 != 0));
break;
- }
case CF_CTRL_DATA_DONE_SEND: {
result = http2_data_done_send(cf, data);
break;
@@ -2352,7 +2324,11 @@ static bool cf_h2_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
struct cf_h2_ctx *ctx = cf->ctx;
- if(ctx && ctx->inbuflen > 0 && ctx->nread_inbuf > ctx->inbuflen)
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
+
+ if(ctx && (!Curl_bufq_is_empty(&ctx->inbufq)
+ || (stream && !Curl_bufq_is_empty(&stream->sendbuf))
+ || (stream && !Curl_bufq_is_empty(&stream->recvbuf))))
return TRUE;
return cf->next? cf->next->cft->has_data_pending(cf->next, data) : FALSE;
}
@@ -2487,7 +2463,8 @@ out:
return result;
}
-bool Curl_cf_is_http2(struct Curl_cfilter *cf, const struct Curl_easy *data)
+static bool Curl_cf_is_http2(struct Curl_cfilter *cf,
+ const struct Curl_easy *data)
{
(void)data;
for(; cf; cf = cf->next) {
@@ -2606,23 +2583,26 @@ CURLcode Curl_http2_upgrade(struct Curl_easy *data,
if(result)
return result;
- if(nread) {
- /* we are going to copy mem to httpc->inbuf. This is required since
- mem is part of buffer pointed by stream->mem, and callbacks
- called by nghttp2_session_mem_recv() will write stream specific
- data into stream->mem, overwriting data already there. */
- if(H2_BUFSIZE < nread) {
- failf(data, "connection buffer size is too small to store data "
- "following HTTP Upgrade response header: buflen=%d, datalen=%zu",
- H2_BUFSIZE, nread);
+ if(nread > 0) {
+ /* Remaining data from the protocol switch reply is already using
+ * the switched protocol, ie. HTTP/2. We add that to the network
+ * inbufq. */
+ ssize_t copied;
+
+ copied = Curl_bufq_write(&ctx->inbufq,
+ (const unsigned char *)mem, nread, &result);
+ if(copied < 0) {
+ failf(data, "error on copying HTTP Upgrade response: %d", result);
+ return CURLE_RECV_ERROR;
+ }
+ if((size_t)copied < nread) {
+ failf(data, "connection buffer size could not take all data "
+ "from HTTP Upgrade response header: copied=%zd, datalen=%zu",
+ copied, nread);
return CURLE_HTTP2;
}
-
- infof(data, "Copying HTTP/2 data in stream buffer to connection buffer"
+ infof(data, "Copied HTTP/2 data in stream buffer to connection buffer"
" after upgrade: len=%zu", nread);
- DEBUGASSERT(ctx->nread_inbuf == 0);
- memcpy(ctx->inbuf, mem, nread);
- ctx->inbuflen = nread;
}
conn->httpversion = 20; /* we know we're on HTTP/2 now */
@@ -2641,7 +2621,7 @@ CURLcode Curl_http2_upgrade(struct Curl_easy *data, CURLE_HTTP2_STREAM error! */
bool Curl_h2_http_1_1_error(struct Curl_easy *data)
{
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H2_STREAM_CTX(data);
return (stream && stream->error == NGHTTP2_HTTP_1_1_REQUIRED);
}
diff --git a/libs/libcurl/src/http2.h b/libs/libcurl/src/http2.h index ae49f40001..3c6ffae278 100644 --- a/libs/libcurl/src/http2.h +++ b/libs/libcurl/src/http2.h @@ -38,8 +38,6 @@
*/
void Curl_http2_ver(char *p, size_t len);
-const char *Curl_http2_strerror(uint32_t err);
-
CURLcode Curl_http2_request_upgrade(struct dynbuf *req,
struct Curl_easy *data);
@@ -49,8 +47,6 @@ bool Curl_h2_http_1_1_error(struct Curl_easy *data);
bool Curl_conn_is_http2(const struct Curl_easy *data,
const struct connectdata *conn,
int sockindex);
-bool Curl_cf_is_http2(struct Curl_cfilter *cf, const struct Curl_easy *data);
-
bool Curl_http2_may_switch(struct Curl_easy *data,
struct connectdata *conn,
int sockindex);
diff --git a/libs/libcurl/src/http_aws_sigv4.c b/libs/libcurl/src/http_aws_sigv4.c index 24228d2e65..e7baaf82cd 100644 --- a/libs/libcurl/src/http_aws_sigv4.c +++ b/libs/libcurl/src/http_aws_sigv4.c @@ -192,7 +192,7 @@ static CURLcode make_headers(struct Curl_easy *data,
}
- if (*content_sha256_header) {
+ if(*content_sha256_header) {
tmp_head = curl_slist_append(head, content_sha256_header);
if(!tmp_head)
goto fail;
diff --git a/libs/libcurl/src/http_proxy.c b/libs/libcurl/src/http_proxy.c index 6d2435feaf..0d680832ab 100644 --- a/libs/libcurl/src/http_proxy.c +++ b/libs/libcurl/src/http_proxy.c @@ -26,7 +26,7 @@
#include "http_proxy.h"
-#if !defined(CURL_DISABLE_PROXY)
+#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_PROXY)
#include <curl/curl.h>
#ifdef USE_HYPER
@@ -38,6 +38,8 @@
#include "select.h"
#include "progress.h"
#include "cfilters.h"
+#include "cf-h1-proxy.h"
+#include "cf-h2-proxy.h"
#include "connect.h"
#include "curlx.h"
#include "vtls/vtls.h"
@@ -50,1023 +52,17 @@
#include "memdebug.h"
-#if !defined(CURL_DISABLE_HTTP)
-
-typedef enum {
- TUNNEL_INIT, /* init/default/no tunnel state */
- TUNNEL_CONNECT, /* CONNECT request is being send */
- TUNNEL_RECEIVE, /* CONNECT answer is being received */
- TUNNEL_RESPONSE, /* CONNECT response received completely */
- TUNNEL_ESTABLISHED,
- TUNNEL_FAILED
-} tunnel_state;
-
-/* struct for HTTP CONNECT tunneling */
-struct tunnel_state {
- int sockindex;
- const char *hostname;
- int remote_port;
- struct HTTP CONNECT;
- struct dynbuf rcvbuf;
- struct dynbuf req;
- size_t nsend;
- size_t headerlines;
- enum keeponval {
- KEEPON_DONE,
- KEEPON_CONNECT,
- KEEPON_IGNORE
- } keepon;
- curl_off_t cl; /* size of content to read and ignore */
- tunnel_state tunnel_state;
- BIT(chunked_encoding);
- BIT(close_connection);
+struct cf_proxy_ctx {
+ /* the protocol specific sub-filter we install during connect */
+ struct Curl_cfilter *cf_protocol;
};
-
-static bool tunnel_is_established(struct tunnel_state *ts)
-{
- return ts && (ts->tunnel_state == TUNNEL_ESTABLISHED);
-}
-
-static bool tunnel_is_failed(struct tunnel_state *ts)
-{
- return ts && (ts->tunnel_state == TUNNEL_FAILED);
-}
-
-static CURLcode tunnel_reinit(struct tunnel_state *ts,
- struct connectdata *conn,
- struct Curl_easy *data)
-{
- (void)data;
- DEBUGASSERT(ts);
- Curl_dyn_reset(&ts->rcvbuf);
- Curl_dyn_reset(&ts->req);
- ts->tunnel_state = TUNNEL_INIT;
- ts->keepon = KEEPON_CONNECT;
- ts->cl = 0;
- ts->close_connection = FALSE;
-
- if(conn->bits.conn_to_host)
- ts->hostname = conn->conn_to_host.name;
- else if(ts->sockindex == SECONDARYSOCKET)
- ts->hostname = conn->secondaryhostname;
- else
- ts->hostname = conn->host.name;
-
- if(ts->sockindex == SECONDARYSOCKET)
- ts->remote_port = conn->secondary_port;
- else if(conn->bits.conn_to_port)
- ts->remote_port = conn->conn_to_port;
- else
- ts->remote_port = conn->remote_port;
-
- return CURLE_OK;
-}
-
-static CURLcode tunnel_init(struct tunnel_state **pts,
- struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex)
-{
- struct tunnel_state *ts;
- CURLcode result;
-
- if(conn->handler->flags & PROTOPT_NOTCPPROXY) {
- failf(data, "%s cannot be done over CONNECT", conn->handler->scheme);
- return CURLE_UNSUPPORTED_PROTOCOL;
- }
-
- /* we might need the upload buffer for streaming a partial request */
- result = Curl_get_upload_buffer(data);
- if(result)
- return result;
-
- ts = calloc(1, sizeof(*ts));
- if(!ts)
- return CURLE_OUT_OF_MEMORY;
-
- ts->sockindex = sockindex;
- infof(data, "allocate connect buffer");
-
- Curl_dyn_init(&ts->rcvbuf, DYN_PROXY_CONNECT_HEADERS);
- Curl_dyn_init(&ts->req, DYN_HTTP_REQUEST);
-
- *pts = ts;
- connkeep(conn, "HTTP proxy CONNECT");
- return tunnel_reinit(ts, conn, data);
-}
-
-static void tunnel_go_state(struct Curl_cfilter *cf,
- struct tunnel_state *ts,
- tunnel_state new_state,
- struct Curl_easy *data)
-{
- if(ts->tunnel_state == new_state)
- return;
- /* leaving this one */
- switch(ts->tunnel_state) {
- case TUNNEL_CONNECT:
- data->req.ignorebody = FALSE;
- break;
- default:
- break;
- }
- /* entering this one */
- switch(new_state) {
- case TUNNEL_INIT:
- DEBUGF(LOG_CF(data, cf, "new tunnel state 'init'"));
- tunnel_reinit(ts, cf->conn, data);
- break;
-
- case TUNNEL_CONNECT:
- DEBUGF(LOG_CF(data, cf, "new tunnel state 'connect'"));
- ts->tunnel_state = TUNNEL_CONNECT;
- ts->keepon = KEEPON_CONNECT;
- Curl_dyn_reset(&ts->rcvbuf);
- break;
-
- case TUNNEL_RECEIVE:
- DEBUGF(LOG_CF(data, cf, "new tunnel state 'receive'"));
- ts->tunnel_state = TUNNEL_RECEIVE;
- break;
-
- case TUNNEL_RESPONSE:
- DEBUGF(LOG_CF(data, cf, "new tunnel state 'response'"));
- ts->tunnel_state = TUNNEL_RESPONSE;
- break;
-
- case TUNNEL_ESTABLISHED:
- DEBUGF(LOG_CF(data, cf, "new tunnel state 'established'"));
- infof(data, "CONNECT phase completed");
- data->state.authproxy.done = TRUE;
- data->state.authproxy.multipass = FALSE;
- /* FALLTHROUGH */
- case TUNNEL_FAILED:
- DEBUGF(LOG_CF(data, cf, "new tunnel state 'failed'"));
- ts->tunnel_state = new_state;
- Curl_dyn_reset(&ts->rcvbuf);
- Curl_dyn_reset(&ts->req);
- /* restore the protocol pointer */
- data->info.httpcode = 0; /* clear it as it might've been used for the
- proxy */
- /* If a proxy-authorization header was used for the proxy, then we should
- make sure that it isn't accidentally used for the document request
- after we've connected. So let's free and clear it here. */
- Curl_safefree(data->state.aptr.proxyuserpwd);
- data->state.aptr.proxyuserpwd = NULL;
-#ifdef USE_HYPER
- data->state.hconnect = FALSE;
-#endif
- break;
- }
-}
-
-static void tunnel_free(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct tunnel_state *ts = cf->ctx;
- if(ts) {
- tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
- Curl_dyn_free(&ts->rcvbuf);
- Curl_dyn_free(&ts->req);
- free(ts);
- cf->ctx = NULL;
- }
-}
-
-static CURLcode CONNECT_host(struct Curl_easy *data,
- struct connectdata *conn,
- const char *hostname,
- int remote_port,
- char **connecthostp,
- char **hostp)
-{
- char *hostheader; /* for CONNECT */
- char *host = NULL; /* Host: */
- bool ipv6_ip = conn->bits.ipv6_ip;
-
- /* the hostname may be different */
- if(hostname != conn->host.name)
- ipv6_ip = (strchr(hostname, ':') != NULL);
- hostheader = /* host:port with IPv6 support */
- aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"",
- remote_port);
- if(!hostheader)
- return CURLE_OUT_OF_MEMORY;
-
- if(!Curl_checkProxyheaders(data, conn, STRCONST("Host"))) {
- host = aprintf("Host: %s\r\n", hostheader);
- if(!host) {
- free(hostheader);
- return CURLE_OUT_OF_MEMORY;
- }
- }
- *connecthostp = hostheader;
- *hostp = host;
- return CURLE_OK;
-}
-
-#ifndef USE_HYPER
-static CURLcode start_CONNECT(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct tunnel_state *ts)
-{
- struct connectdata *conn = cf->conn;
- char *hostheader = NULL;
- char *host = NULL;
- const char *httpv;
- CURLcode result;
-
- infof(data, "Establish HTTP proxy tunnel to %s:%d",
- ts->hostname, ts->remote_port);
-
- /* This only happens if we've looped here due to authentication
- reasons, and we don't really use the newly cloned URL here
- then. Just free() it. */
- Curl_safefree(data->req.newurl);
-
- result = CONNECT_host(data, conn,
- ts->hostname, ts->remote_port,
- &hostheader, &host);
- if(result)
- goto out;
-
- /* Setup the proxy-authorization header, if any */
- result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET,
- hostheader, TRUE);
- if(result)
- goto out;
-
- httpv = (conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) ? "1.0" : "1.1";
-
- result =
- Curl_dyn_addf(&ts->req,
- "CONNECT %s HTTP/%s\r\n"
- "%s" /* Host: */
- "%s", /* Proxy-Authorization */
- hostheader,
- httpv,
- host?host:"",
- data->state.aptr.proxyuserpwd?
- data->state.aptr.proxyuserpwd:"");
- if(result)
- goto out;
-
- if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent"))
- && data->set.str[STRING_USERAGENT])
- result = Curl_dyn_addf(&ts->req, "User-Agent: %s\r\n",
- data->set.str[STRING_USERAGENT]);
- if(result)
- goto out;
-
- if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection")))
- result = Curl_dyn_addn(&ts->req,
- STRCONST("Proxy-Connection: Keep-Alive\r\n"));
- if(result)
- goto out;
-
- result = Curl_add_custom_headers(data, TRUE, &ts->req);
- if(result)
- goto out;
-
- /* CRLF terminate the request */
- result = Curl_dyn_addn(&ts->req, STRCONST("\r\n"));
- if(result)
- goto out;
-
- /* Send the connect request to the proxy */
- result = Curl_buffer_send(&ts->req, data, &ts->CONNECT,
- &data->info.request_size, 0,
- ts->sockindex);
- ts->headerlines = 0;
-
-out:
- if(result)
- failf(data, "Failed sending CONNECT to proxy");
- free(host);
- free(hostheader);
- return result;
-}
-
-static CURLcode send_CONNECT(struct Curl_easy *data,
- struct connectdata *conn,
- struct tunnel_state *ts,
- bool *done)
-{
- struct SingleRequest *k = &data->req;
- struct HTTP *http = &ts->CONNECT;
- CURLcode result = CURLE_OK;
-
- if(http->sending != HTTPSEND_REQUEST)
- goto out;
-
- if(!ts->nsend) {
- size_t fillcount;
- k->upload_fromhere = data->state.ulbuf;
- result = Curl_fillreadbuffer(data, data->set.upload_buffer_size,
- &fillcount);
- if(result)
- goto out;
- ts->nsend = fillcount;
- }
- if(ts->nsend) {
- ssize_t bytes_written;
- /* write to socket (send away data) */
- result = Curl_write(data,
- conn->writesockfd, /* socket to send to */
- k->upload_fromhere, /* buffer pointer */
- ts->nsend, /* buffer size */
- &bytes_written); /* actually sent */
- if(result)
- goto out;
- /* send to debug callback! */
- Curl_debug(data, CURLINFO_HEADER_OUT,
- k->upload_fromhere, bytes_written);
-
- ts->nsend -= bytes_written;
- k->upload_fromhere += bytes_written;
- }
- if(!ts->nsend)
- http->sending = HTTPSEND_NADA;
-
-out:
- if(result)
- failf(data, "Failed sending CONNECT to proxy");
- *done = (http->sending != HTTPSEND_REQUEST);
- return result;
-}
-
-static CURLcode on_resp_header(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct tunnel_state *ts,
- const char *header)
-{
- CURLcode result = CURLE_OK;
- struct SingleRequest *k = &data->req;
- (void)cf;
-
- if((checkprefix("WWW-Authenticate:", header) &&
- (401 == k->httpcode)) ||
- (checkprefix("Proxy-authenticate:", header) &&
- (407 == k->httpcode))) {
-
- bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
- char *auth = Curl_copy_header_value(header);
- if(!auth)
- return CURLE_OUT_OF_MEMORY;
-
- DEBUGF(LOG_CF(data, cf, "CONNECT: fwd auth header '%s'", header));
- result = Curl_http_input_auth(data, proxy, auth);
-
- free(auth);
-
- if(result)
- return result;
- }
- else if(checkprefix("Content-Length:", header)) {
- if(k->httpcode/100 == 2) {
- /* A client MUST ignore any Content-Length or Transfer-Encoding
- header fields received in a successful response to CONNECT.
- "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */
- infof(data, "Ignoring Content-Length in CONNECT %03d response",
- k->httpcode);
- }
- else {
- (void)curlx_strtoofft(header + strlen("Content-Length:"),
- NULL, 10, &ts->cl);
- }
- }
- else if(Curl_compareheader(header,
- STRCONST("Connection:"), STRCONST("close")))
- ts->close_connection = TRUE;
- else if(checkprefix("Transfer-Encoding:", header)) {
- if(k->httpcode/100 == 2) {
- /* A client MUST ignore any Content-Length or Transfer-Encoding
- header fields received in a successful response to CONNECT.
- "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */
- infof(data, "Ignoring Transfer-Encoding in "
- "CONNECT %03d response", k->httpcode);
- }
- else if(Curl_compareheader(header,
- STRCONST("Transfer-Encoding:"),
- STRCONST("chunked"))) {
- infof(data, "CONNECT responded chunked");
- ts->chunked_encoding = TRUE;
- /* init our chunky engine */
- Curl_httpchunk_init(data);
- }
- }
- else if(Curl_compareheader(header,
- STRCONST("Proxy-Connection:"),
- STRCONST("close")))
- ts->close_connection = TRUE;
- else if(!strncmp(header, "HTTP/1.", 7) &&
- ((header[7] == '0') || (header[7] == '1')) &&
- (header[8] == ' ') &&
- ISDIGIT(header[9]) && ISDIGIT(header[10]) && ISDIGIT(header[11]) &&
- !ISDIGIT(header[12])) {
- /* store the HTTP code from the proxy */
- data->info.httpproxycode = k->httpcode = (header[9] - '0') * 100 +
- (header[10] - '0') * 10 + (header[11] - '0');
- }
- return result;
-}
-
-static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct tunnel_state *ts,
- bool *done)
-{
- CURLcode result = CURLE_OK;
- struct SingleRequest *k = &data->req;
- curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data);
- char *linep;
- size_t perline;
- int error;
-
-#define SELECT_OK 0
-#define SELECT_ERROR 1
-
- error = SELECT_OK;
- *done = FALSE;
-
- if(!Curl_conn_data_pending(data, ts->sockindex))
- return CURLE_OK;
-
- while(ts->keepon) {
- ssize_t gotbytes;
- char byte;
-
- /* Read one byte at a time to avoid a race condition. Wait at most one
- second before looping to ensure continuous pgrsUpdates. */
- result = Curl_read(data, tunnelsocket, &byte, 1, &gotbytes);
- if(result == CURLE_AGAIN)
- /* socket buffer drained, return */
- return CURLE_OK;
-
- if(Curl_pgrsUpdate(data))
- return CURLE_ABORTED_BY_CALLBACK;
-
- if(result) {
- ts->keepon = KEEPON_DONE;
- break;
- }
-
- if(gotbytes <= 0) {
- if(data->set.proxyauth && data->state.authproxy.avail &&
- data->state.aptr.proxyuserpwd) {
- /* proxy auth was requested and there was proxy auth available,
- then deem this as "mere" proxy disconnect */
- ts->close_connection = TRUE;
- infof(data, "Proxy CONNECT connection closed");
- }
- else {
- error = SELECT_ERROR;
- failf(data, "Proxy CONNECT aborted");
- }
- ts->keepon = KEEPON_DONE;
- break;
- }
-
- if(ts->keepon == KEEPON_IGNORE) {
- /* This means we are currently ignoring a response-body */
-
- if(ts->cl) {
- /* A Content-Length based body: simply count down the counter
- and make sure to break out of the loop when we're done! */
- ts->cl--;
- if(ts->cl <= 0) {
- ts->keepon = KEEPON_DONE;
- break;
- }
- }
- else {
- /* chunked-encoded body, so we need to do the chunked dance
- properly to know when the end of the body is reached */
- CHUNKcode r;
- CURLcode extra;
- ssize_t tookcareof = 0;
-
- /* now parse the chunked piece of data so that we can
- properly tell when the stream ends */
- r = Curl_httpchunk_read(data, &byte, 1, &tookcareof, &extra);
- if(r == CHUNKE_STOP) {
- /* we're done reading chunks! */
- infof(data, "chunk reading DONE");
- ts->keepon = KEEPON_DONE;
- }
- }
- continue;
- }
-
- if(Curl_dyn_addn(&ts->rcvbuf, &byte, 1)) {
- failf(data, "CONNECT response too large");
- return CURLE_RECV_ERROR;
- }
-
- /* if this is not the end of a header line then continue */
- if(byte != 0x0a)
- continue;
-
- ts->headerlines++;
- linep = Curl_dyn_ptr(&ts->rcvbuf);
- perline = Curl_dyn_len(&ts->rcvbuf); /* amount of bytes in this line */
-
- /* output debug if that is requested */
- Curl_debug(data, CURLINFO_HEADER_IN, linep, perline);
-
- if(!data->set.suppress_connect_headers) {
- /* send the header to the callback */
- int writetype = CLIENTWRITE_HEADER | CLIENTWRITE_CONNECT |
- (data->set.include_header ? CLIENTWRITE_BODY : 0) |
- (ts->headerlines == 1 ? CLIENTWRITE_STATUS : 0);
-
- result = Curl_client_write(data, writetype, linep, perline);
- if(result)
- return result;
- }
-
- data->info.header_size += (long)perline;
-
- /* Newlines are CRLF, so the CR is ignored as the line isn't
- really terminated until the LF comes. Treat a following CR
- as end-of-headers as well.*/
-
- if(('\r' == linep[0]) ||
- ('\n' == linep[0])) {
- /* end of response-headers from the proxy */
-
- if((407 == k->httpcode) && !data->state.authproblem) {
- /* If we get a 407 response code with content length
- when we have no auth problem, we must ignore the
- whole response-body */
- ts->keepon = KEEPON_IGNORE;
-
- if(ts->cl) {
- infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T
- " bytes of response-body", ts->cl);
- }
- else if(ts->chunked_encoding) {
- CHUNKcode r;
- CURLcode extra;
-
- infof(data, "Ignore chunked response-body");
-
- /* We set ignorebody true here since the chunked decoder
- function will acknowledge that. Pay attention so that this is
- cleared again when this function returns! */
- k->ignorebody = TRUE;
-
- if(linep[1] == '\n')
- /* this can only be a LF if the letter at index 0 was a CR */
- linep++;
-
- /* now parse the chunked piece of data so that we can properly
- tell when the stream ends */
- r = Curl_httpchunk_read(data, linep + 1, 1, &gotbytes,
- &extra);
- if(r == CHUNKE_STOP) {
- /* we're done reading chunks! */
- infof(data, "chunk reading DONE");
- ts->keepon = KEEPON_DONE;
- }
- }
- else {
- /* without content-length or chunked encoding, we
- can't keep the connection alive since the close is
- the end signal so we bail out at once instead */
- DEBUGF(LOG_CF(data, cf, "CONNECT: no content-length or chunked"));
- ts->keepon = KEEPON_DONE;
- }
- }
- else {
- ts->keepon = KEEPON_DONE;
- }
-
- DEBUGASSERT(ts->keepon == KEEPON_IGNORE
- || ts->keepon == KEEPON_DONE);
- continue;
- }
-
- result = on_resp_header(cf, data, ts, linep);
- if(result)
- return result;
-
- Curl_dyn_reset(&ts->rcvbuf);
- } /* while there's buffer left and loop is requested */
-
- if(error)
- result = CURLE_RECV_ERROR;
- *done = (ts->keepon == KEEPON_DONE);
- if(!result && *done && data->info.httpproxycode/100 != 2) {
- /* Deal with the possibly already received authenticate
- headers. 'newurl' is set to a new URL if we must loop. */
- result = Curl_http_auth_act(data);
- }
- return result;
-}
-
-#else /* USE_HYPER */
-/* The Hyper version of CONNECT */
-static CURLcode start_CONNECT(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct tunnel_state *ts)
-{
- struct connectdata *conn = cf->conn;
- struct hyptransfer *h = &data->hyp;
- curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data);
- hyper_io *io = NULL;
- hyper_request *req = NULL;
- hyper_headers *headers = NULL;
- hyper_clientconn_options *options = NULL;
- hyper_task *handshake = NULL;
- hyper_task *task = NULL; /* for the handshake */
- hyper_clientconn *client = NULL;
- hyper_task *sendtask = NULL; /* for the send */
- char *hostheader = NULL; /* for CONNECT */
- char *host = NULL; /* Host: */
- CURLcode result = CURLE_OUT_OF_MEMORY;
-
- io = hyper_io_new();
- if(!io) {
- failf(data, "Couldn't create hyper IO");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- /* tell Hyper how to read/write network data */
- hyper_io_set_userdata(io, data);
- hyper_io_set_read(io, Curl_hyper_recv);
- hyper_io_set_write(io, Curl_hyper_send);
- conn->sockfd = tunnelsocket;
-
- data->state.hconnect = TRUE;
-
- /* create an executor to poll futures */
- if(!h->exec) {
- h->exec = hyper_executor_new();
- if(!h->exec) {
- failf(data, "Couldn't create hyper executor");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- }
-
- options = hyper_clientconn_options_new();
- hyper_clientconn_options_set_preserve_header_case(options, 1);
- hyper_clientconn_options_set_preserve_header_order(options, 1);
-
- if(!options) {
- failf(data, "Couldn't create hyper client options");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
-
- hyper_clientconn_options_exec(options, h->exec);
-
- /* "Both the `io` and the `options` are consumed in this function
- call" */
- handshake = hyper_clientconn_handshake(io, options);
- if(!handshake) {
- failf(data, "Couldn't create hyper client handshake");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- io = NULL;
- options = NULL;
-
- if(HYPERE_OK != hyper_executor_push(h->exec, handshake)) {
- failf(data, "Couldn't hyper_executor_push the handshake");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- handshake = NULL; /* ownership passed on */
-
- task = hyper_executor_poll(h->exec);
- if(!task) {
- failf(data, "Couldn't hyper_executor_poll the handshake");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
-
- client = hyper_task_value(task);
- hyper_task_free(task);
- req = hyper_request_new();
- if(!req) {
- failf(data, "Couldn't hyper_request_new");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- if(hyper_request_set_method(req, (uint8_t *)"CONNECT",
- strlen("CONNECT"))) {
- failf(data, "error setting method");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
-
- infof(data, "Establish HTTP proxy tunnel to %s:%d",
- ts->hostname, ts->remote_port);
-
- /* This only happens if we've looped here due to authentication
- reasons, and we don't really use the newly cloned URL here
- then. Just free() it. */
- Curl_safefree(data->req.newurl);
-
- result = CONNECT_host(data, conn, ts->hostname, ts->remote_port,
- &hostheader, &host);
- if(result)
- goto error;
-
- if(hyper_request_set_uri(req, (uint8_t *)hostheader,
- strlen(hostheader))) {
- failf(data, "error setting path");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- if(data->set.verbose) {
- char *se = aprintf("CONNECT %s HTTP/1.1\r\n", hostheader);
- if(!se) {
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- Curl_debug(data, CURLINFO_HEADER_OUT, se, strlen(se));
- free(se);
- }
- /* Setup the proxy-authorization header, if any */
- result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET,
- hostheader, TRUE);
- if(result)
- goto error;
- Curl_safefree(hostheader);
-
- /* default is 1.1 */
- if((conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) &&
- (HYPERE_OK != hyper_request_set_version(req,
- HYPER_HTTP_VERSION_1_0))) {
- failf(data, "error setting HTTP version");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
-
- headers = hyper_request_headers(req);
- if(!headers) {
- failf(data, "hyper_request_headers");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- if(host) {
- result = Curl_hyper_header(data, headers, host);
- if(result)
- goto error;
- Curl_safefree(host);
- }
-
- if(data->state.aptr.proxyuserpwd) {
- result = Curl_hyper_header(data, headers,
- data->state.aptr.proxyuserpwd);
- if(result)
- goto error;
- }
-
- if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent")) &&
- data->set.str[STRING_USERAGENT]) {
- struct dynbuf ua;
- Curl_dyn_init(&ua, DYN_HTTP_REQUEST);
- result = Curl_dyn_addf(&ua, "User-Agent: %s\r\n",
- data->set.str[STRING_USERAGENT]);
- if(result)
- goto error;
- result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&ua));
- if(result)
- goto error;
- Curl_dyn_free(&ua);
- }
-
- if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection"))) {
- result = Curl_hyper_header(data, headers,
- "Proxy-Connection: Keep-Alive");
- if(result)
- goto error;
- }
-
- result = Curl_add_custom_headers(data, TRUE, headers);
- if(result)
- goto error;
-
- sendtask = hyper_clientconn_send(client, req);
- if(!sendtask) {
- failf(data, "hyper_clientconn_send");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
-
- if(HYPERE_OK != hyper_executor_push(h->exec, sendtask)) {
- failf(data, "Couldn't hyper_executor_push the send");
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
-
-error:
- free(host);
- free(hostheader);
- if(io)
- hyper_io_free(io);
- if(options)
- hyper_clientconn_options_free(options);
- if(handshake)
- hyper_task_free(handshake);
- if(client)
- hyper_clientconn_free(client);
- return result;
-}
-
-static CURLcode send_CONNECT(struct Curl_easy *data,
- struct connectdata *conn,
- struct tunnel_state *ts,
- bool *done)
-{
- struct hyptransfer *h = &data->hyp;
- hyper_task *task = NULL;
- hyper_error *hypererr = NULL;
- CURLcode result = CURLE_OK;
-
- (void)ts;
- (void)conn;
- do {
- task = hyper_executor_poll(h->exec);
- if(task) {
- bool error = hyper_task_type(task) == HYPER_TASK_ERROR;
- if(error)
- hypererr = hyper_task_value(task);
- hyper_task_free(task);
- if(error) {
- /* this could probably use a better error code? */
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
- }
- } while(task);
-error:
- *done = (result == CURLE_OK);
- if(hypererr) {
- uint8_t errbuf[256];
- size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf));
- failf(data, "Hyper: %.*s", (int)errlen, errbuf);
- hyper_error_free(hypererr);
- }
- return result;
-}
-
-static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct tunnel_state *ts,
- bool *done)
-{
- struct hyptransfer *h = &data->hyp;
- CURLcode result;
- int didwhat;
-
- (void)ts;
- *done = FALSE;
- result = Curl_hyper_stream(data, cf->conn, &didwhat, done,
- CURL_CSELECT_IN | CURL_CSELECT_OUT);
- if(result || !*done)
- return result;
- if(h->exec) {
- hyper_executor_free(h->exec);
- h->exec = NULL;
- }
- if(h->read_waker) {
- hyper_waker_free(h->read_waker);
- h->read_waker = NULL;
- }
- if(h->write_waker) {
- hyper_waker_free(h->write_waker);
- h->write_waker = NULL;
- }
- return result;
-}
-
-#endif /* USE_HYPER */
-
-static CURLcode CONNECT(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct tunnel_state *ts)
-{
- struct connectdata *conn = cf->conn;
- CURLcode result;
- bool done;
-
- if(tunnel_is_established(ts))
- return CURLE_OK;
- if(tunnel_is_failed(ts))
- return CURLE_RECV_ERROR; /* Need a cfilter close and new bootstrap */
-
- do {
- timediff_t check;
-
- check = Curl_timeleft(data, NULL, TRUE);
- if(check <= 0) {
- failf(data, "Proxy CONNECT aborted due to timeout");
- result = CURLE_OPERATION_TIMEDOUT;
- goto out;
- }
-
- switch(ts->tunnel_state) {
- case TUNNEL_INIT:
- /* Prepare the CONNECT request and make a first attempt to send. */
- DEBUGF(LOG_CF(data, cf, "CONNECT start"));
- result = start_CONNECT(cf, data, ts);
- if(result)
- goto out;
- tunnel_go_state(cf, ts, TUNNEL_CONNECT, data);
- /* FALLTHROUGH */
-
- case TUNNEL_CONNECT:
- /* see that the request is completely sent */
- DEBUGF(LOG_CF(data, cf, "CONNECT send"));
- result = send_CONNECT(data, cf->conn, ts, &done);
- if(result || !done)
- goto out;
- tunnel_go_state(cf, ts, TUNNEL_RECEIVE, data);
- /* FALLTHROUGH */
-
- case TUNNEL_RECEIVE:
- /* read what is there */
- DEBUGF(LOG_CF(data, cf, "CONNECT receive"));
- result = recv_CONNECT_resp(cf, data, ts, &done);
- if(Curl_pgrsUpdate(data)) {
- result = CURLE_ABORTED_BY_CALLBACK;
- goto out;
- }
- /* error or not complete yet. return for more multi-multi */
- if(result || !done)
- goto out;
- /* got it */
- tunnel_go_state(cf, ts, TUNNEL_RESPONSE, data);
- /* FALLTHROUGH */
-
- case TUNNEL_RESPONSE:
- DEBUGF(LOG_CF(data, cf, "CONNECT response"));
- if(data->req.newurl) {
- /* not the "final" response, we need to do a follow up request.
- * If the other side indicated a connection close, or if someone
- * else told us to close this connection, do so now.
- */
- if(ts->close_connection || conn->bits.close) {
- /* Close this filter and the sub-chain, re-connect the
- * sub-chain and continue. Closing this filter will
- * reset our tunnel state. To avoid recursion, we return
- * and expect to be called again.
- */
- DEBUGF(LOG_CF(data, cf, "CONNECT need to close+open"));
- infof(data, "Connect me again please");
- Curl_conn_cf_close(cf, data);
- connkeep(conn, "HTTP proxy CONNECT");
- result = Curl_conn_cf_connect(cf->next, data, FALSE, &done);
- goto out;
- }
- else {
- /* staying on this connection, reset state */
- tunnel_go_state(cf, ts, TUNNEL_INIT, data);
- }
- }
- break;
-
- default:
- break;
- }
-
- } while(data->req.newurl);
-
- DEBUGASSERT(ts->tunnel_state == TUNNEL_RESPONSE);
- if(data->info.httpproxycode/100 != 2) {
- /* a non-2xx response and we have no next url to try. */
- free(data->req.newurl);
- data->req.newurl = NULL;
- /* failure, close this connection to avoid re-use */
- streamclose(conn, "proxy CONNECT failure");
- tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
- failf(data, "CONNECT tunnel failed, response %d", data->req.httpcode);
- return CURLE_RECV_ERROR;
- }
- /* 2xx response, SUCCESS! */
- tunnel_go_state(cf, ts, TUNNEL_ESTABLISHED, data);
- infof(data, "CONNECT tunnel established, response %d",
- data->info.httpproxycode);
- result = CURLE_OK;
-
-out:
- if(result)
- tunnel_go_state(cf, ts, TUNNEL_FAILED, data);
- return result;
-}
-
static CURLcode http_proxy_cf_connect(struct Curl_cfilter *cf,
struct Curl_easy *data,
bool blocking, bool *done)
{
+ struct cf_proxy_ctx *ctx = cf->ctx;
CURLcode result;
- struct tunnel_state *ts = cf->ctx;
if(cf->connected) {
*done = TRUE;
@@ -1074,44 +70,74 @@ static CURLcode http_proxy_cf_connect(struct Curl_cfilter *cf, }
DEBUGF(LOG_CF(data, cf, "connect"));
+connect_sub:
result = cf->next->cft->connect(cf->next, data, blocking, done);
if(result || !*done)
return result;
- DEBUGF(LOG_CF(data, cf, "subchain is connected"));
- /* TODO: can we do blocking? */
- /* We want "seamless" operations through HTTP proxy tunnel */
-
- /* for the secondary socket (FTP), use the "connect to host"
- * but ignore the "connect to port" (use the secondary port)
- */
*done = FALSE;
- if(!ts) {
- result = tunnel_init(&ts, data, cf->conn, cf->sockindex);
- if(result)
- return result;
- cf->ctx = ts;
- }
+ if(!ctx->cf_protocol) {
+ struct Curl_cfilter *cf_protocol = NULL;
+ int alpn = Curl_conn_cf_is_ssl(cf->next)?
+ cf->conn->proxy_alpn : CURL_HTTP_VERSION_1_1;
+
+ /* First time call after the subchain connected */
+ switch(alpn) {
+ case CURL_HTTP_VERSION_NONE:
+ case CURL_HTTP_VERSION_1_0:
+ case CURL_HTTP_VERSION_1_1:
+ DEBUGF(LOG_CF(data, cf, "installing subfilter for HTTP/1.1"));
+ infof(data, "CONNECT tunnel: HTTP/1.%d negotiated",
+ (alpn == CURL_HTTP_VERSION_1_0)? 0 : 1);
+ result = Curl_cf_h1_proxy_insert_after(cf, data);
+ if(result)
+ goto out;
+ cf_protocol = cf->next;
+ break;
+#ifdef USE_NGHTTP2
+ case CURL_HTTP_VERSION_2:
+ DEBUGF(LOG_CF(data, cf, "installing subfilter for HTTP/2"));
+ infof(data, "CONNECT tunnel: HTTP/2 negotiated");
+ result = Curl_cf_h2_proxy_insert_after(cf, data);
+ if(result)
+ goto out;
+ cf_protocol = cf->next;
+ break;
+#endif
+ default:
+ DEBUGF(LOG_CF(data, cf, "installing subfilter for default HTTP/1.1"));
+ infof(data, "CONNECT tunnel: unsupported ALPN(%d) negotiated", alpn);
+ result = CURLE_COULDNT_CONNECT;
+ goto out;
+ }
- result = CONNECT(cf, data, ts);
- if(result)
- goto out;
- Curl_safefree(data->state.aptr.proxyuserpwd);
+ ctx->cf_protocol = cf_protocol;
+ /* after we installed the filter "below" us, we call connect
+ * on out sub-chain again.
+ */
+ goto connect_sub;
+ }
+ else {
+ /* subchain connected and we had already installed the protocol filter.
+ * This means the protocol tunnel is established, we are done.
+ */
+ DEBUGASSERT(ctx->cf_protocol);
+ result = CURLE_OK;
+ }
out:
- *done = (result == CURLE_OK) && tunnel_is_established(cf->ctx);
- if (*done) {
+ if(!result) {
cf->connected = TRUE;
- tunnel_free(cf, data);
+ *done = TRUE;
}
return result;
}
-static void http_proxy_cf_get_host(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- const char **phost,
- const char **pdisplay_host,
- int *pport)
+void Curl_cf_http_proxy_get_host(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const char **phost,
+ const char **pdisplay_host,
+ int *pport)
{
(void)data;
if(!cf->connected) {
@@ -1124,50 +150,38 @@ static void http_proxy_cf_get_host(struct Curl_cfilter *cf, }
}
-static int http_proxy_cf_get_select_socks(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- curl_socket_t *socks)
-{
- struct tunnel_state *ts = cf->ctx;
- int fds;
-
- fds = cf->next->cft->get_select_socks(cf->next, data, socks);
- if(!fds && cf->next->connected && !cf->connected) {
- /* If we are not connected, but the filter "below" is
- * and not waiting on something, we are tunneling. */
- socks[0] = Curl_conn_cf_get_socket(cf, data);
- if(ts) {
- /* when we've sent a CONNECT to a proxy, we should rather either
- wait for the socket to become readable to be able to get the
- response headers or if we're still sending the request, wait
- for write. */
- if(ts->CONNECT.sending == HTTPSEND_REQUEST) {
- return GETSOCK_WRITESOCK(0);
- }
- return GETSOCK_READSOCK(0);
- }
- return GETSOCK_WRITESOCK(0);
- }
- return fds;
-}
-
static void http_proxy_cf_destroy(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
+ struct cf_proxy_ctx *ctx = cf->ctx;
+
+ (void)data;
DEBUGF(LOG_CF(data, cf, "destroy"));
- tunnel_free(cf, data);
+ free(ctx);
}
static void http_proxy_cf_close(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
- DEBUGASSERT(cf->next);
+ struct cf_proxy_ctx *ctx = cf->ctx;
+
DEBUGF(LOG_CF(data, cf, "close"));
cf->connected = FALSE;
- cf->next->cft->close(cf->next, data);
- if(cf->ctx) {
- tunnel_go_state(cf, cf->ctx, TUNNEL_INIT, data);
+ if(ctx->cf_protocol) {
+ struct Curl_cfilter *f;
+ /* if someone already removed it, we assume he also
+ * took care of destroying it. */
+ for(f = cf->next; f; f = f->next) {
+ if(f == ctx->cf_protocol) {
+ /* still in our sub-chain */
+ Curl_conn_cf_discard_sub(cf, ctx->cf_protocol, data, FALSE);
+ break;
+ }
+ }
+ ctx->cf_protocol = NULL;
}
+ if(cf->next)
+ cf->next->cft->close(cf->next, data);
}
@@ -1178,8 +192,8 @@ struct Curl_cftype Curl_cft_http_proxy = { http_proxy_cf_destroy,
http_proxy_cf_connect,
http_proxy_cf_close,
- http_proxy_cf_get_host,
- http_proxy_cf_get_select_socks,
+ Curl_cf_http_proxy_get_host,
+ Curl_cf_def_get_select_socks,
Curl_cf_def_data_pending,
Curl_cf_def_send,
Curl_cf_def_recv,
@@ -1189,253 +203,28 @@ struct Curl_cftype Curl_cft_http_proxy = { Curl_cf_def_query,
};
-CURLcode Curl_conn_http_proxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex)
-{
- struct Curl_cfilter *cf;
- CURLcode result;
-
- result = Curl_cf_create(&cf, &Curl_cft_http_proxy, NULL);
- if(!result)
- Curl_conn_cf_add(data, conn, sockindex, cf);
- return result;
-}
-
CURLcode Curl_cf_http_proxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data)
{
struct Curl_cfilter *cf;
+ struct cf_proxy_ctx *ctx = NULL;
CURLcode result;
(void)data;
- result = Curl_cf_create(&cf, &Curl_cft_http_proxy, NULL);
- if(!result)
- Curl_conn_cf_insert_after(cf_at, cf);
- return result;
-}
-
-#endif /* ! CURL_DISABLE_HTTP */
-
-
-typedef enum {
- HAPROXY_INIT, /* init/default/no tunnel state */
- HAPROXY_SEND, /* data_out being sent */
- HAPROXY_DONE /* all work done */
-} haproxy_state;
-
-struct cf_haproxy_ctx {
- int state;
- struct dynbuf data_out;
-};
-
-static void cf_haproxy_ctx_reset(struct cf_haproxy_ctx *ctx)
-{
- DEBUGASSERT(ctx);
- ctx->state = HAPROXY_INIT;
- Curl_dyn_reset(&ctx->data_out);
-}
-
-static void cf_haproxy_ctx_free(struct cf_haproxy_ctx *ctx)
-{
- if(ctx) {
- Curl_dyn_free(&ctx->data_out);
- free(ctx);
- }
-}
-
-static CURLcode cf_haproxy_date_out_set(struct Curl_cfilter*cf,
- struct Curl_easy *data)
-{
- struct cf_haproxy_ctx *ctx = cf->ctx;
- CURLcode result;
- const char *tcp_version;
-
- DEBUGASSERT(ctx);
- DEBUGASSERT(ctx->state == HAPROXY_INIT);
-#ifdef USE_UNIX_SOCKETS
- if(cf->conn->unix_domain_socket)
- /* the buffer is large enough to hold this! */
- result = Curl_dyn_addn(&ctx->data_out, STRCONST("PROXY UNKNOWN\r\n"));
- else {
-#endif /* USE_UNIX_SOCKETS */
- /* Emit the correct prefix for IPv6 */
- tcp_version = cf->conn->bits.ipv6 ? "TCP6" : "TCP4";
-
- result = Curl_dyn_addf(&ctx->data_out, "PROXY %s %s %s %i %i\r\n",
- tcp_version,
- data->info.conn_local_ip,
- data->info.conn_primary_ip,
- data->info.conn_local_port,
- data->info.conn_primary_port);
-
-#ifdef USE_UNIX_SOCKETS
- }
-#endif /* USE_UNIX_SOCKETS */
- return result;
-}
-
-static CURLcode cf_haproxy_connect(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- bool blocking, bool *done)
-{
- struct cf_haproxy_ctx *ctx = cf->ctx;
- CURLcode result;
- size_t len;
-
- DEBUGASSERT(ctx);
- if(cf->connected) {
- *done = TRUE;
- return CURLE_OK;
- }
-
- result = cf->next->cft->connect(cf->next, data, blocking, done);
- if(result || !*done)
- return result;
-
- switch(ctx->state) {
- case HAPROXY_INIT:
- result = cf_haproxy_date_out_set(cf, data);
- if(result)
- goto out;
- ctx->state = HAPROXY_SEND;
- /* FALLTHROUGH */
- case HAPROXY_SEND:
- len = Curl_dyn_len(&ctx->data_out);
- if(len > 0) {
- ssize_t written = Curl_conn_send(data, cf->sockindex,
- Curl_dyn_ptr(&ctx->data_out),
- len, &result);
- if(written < 0)
- goto out;
- Curl_dyn_tail(&ctx->data_out, len - (size_t)written);
- if(Curl_dyn_len(&ctx->data_out) > 0) {
- result = CURLE_OK;
- goto out;
- }
- }
- ctx->state = HAPROXY_DONE;
- /* FALLTHROUGH */
- default:
- Curl_dyn_free(&ctx->data_out);
- break;
- }
-
-out:
- *done = (!result) && (ctx->state == HAPROXY_DONE);
- cf->connected = *done;
- return result;
-}
-
-static void cf_haproxy_destroy(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- (void)data;
- DEBUGF(LOG_CF(data, cf, "destroy"));
- cf_haproxy_ctx_free(cf->ctx);
-}
-
-static void cf_haproxy_close(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- DEBUGF(LOG_CF(data, cf, "close"));
- cf->connected = FALSE;
- cf_haproxy_ctx_reset(cf->ctx);
- if(cf->next)
- cf->next->cft->close(cf->next, data);
-}
-
-static int cf_haproxy_get_select_socks(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- curl_socket_t *socks)
-{
- int fds;
-
- fds = cf->next->cft->get_select_socks(cf->next, data, socks);
- if(!fds && cf->next->connected && !cf->connected) {
- /* If we are not connected, but the filter "below" is
- * and not waiting on something, we are sending. */
- socks[0] = Curl_conn_cf_get_socket(cf, data);
- return GETSOCK_WRITESOCK(0);
- }
- return fds;
-}
-
-
-struct Curl_cftype Curl_cft_haproxy = {
- "HAPROXY",
- 0,
- 0,
- cf_haproxy_destroy,
- cf_haproxy_connect,
- cf_haproxy_close,
- Curl_cf_def_get_host,
- cf_haproxy_get_select_socks,
- Curl_cf_def_data_pending,
- Curl_cf_def_send,
- Curl_cf_def_recv,
- Curl_cf_def_cntrl,
- Curl_cf_def_conn_is_alive,
- Curl_cf_def_conn_keep_alive,
- Curl_cf_def_query,
-};
-
-static CURLcode cf_haproxy_create(struct Curl_cfilter **pcf,
- struct Curl_easy *data)
-{
- struct Curl_cfilter *cf = NULL;
- struct cf_haproxy_ctx *ctx;
- CURLcode result;
-
- (void)data;
- ctx = calloc(sizeof(*ctx), 1);
+ ctx = calloc(1, sizeof(*ctx));
if(!ctx) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
- ctx->state = HAPROXY_INIT;
- Curl_dyn_init(&ctx->data_out, DYN_HAXPROXY);
-
- result = Curl_cf_create(&cf, &Curl_cft_haproxy, ctx);
+ result = Curl_cf_create(&cf, &Curl_cft_http_proxy, ctx);
if(result)
goto out;
ctx = NULL;
-
-out:
- cf_haproxy_ctx_free(ctx);
- *pcf = result? NULL : cf;
- return result;
-}
-
-CURLcode Curl_conn_haproxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex)
-{
- struct Curl_cfilter *cf;
- CURLcode result;
-
- result = cf_haproxy_create(&cf, data);
- if(result)
- goto out;
- Curl_conn_cf_add(data, conn, sockindex, cf);
-
-out:
- return result;
-}
-
-CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at,
- struct Curl_easy *data)
-{
- struct Curl_cfilter *cf;
- CURLcode result;
-
- result = cf_haproxy_create(&cf, data);
- if(result)
- goto out;
Curl_conn_cf_insert_after(cf_at, cf);
out:
+ free(ctx);
return result;
}
-#endif /* !CURL_DISABLE_PROXY */
+#endif /* ! CURL_DISABLE_HTTP && !CURL_DISABLE_PROXY */
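With the old in-file CONNECT state machine and the haproxy filter moved out, the proxy filter above is reduced to a dispatcher: once the sub-chain below it is connected it looks at the negotiated ALPN, inserts an HTTP/1.1 or HTTP/2 tunneling sub-filter, and jumps back to connect the chain again. The stand-alone C sketch below illustrates that dispatch-then-reconnect shape; every type and function name in it is invented for illustration and none of it is the libcurl filter API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum alpn { ALPN_HTTP_1_1, ALPN_HTTP_2 };

struct filter {
  const char *name;
  struct filter *next;                          /* the chain "below" us */
  bool connected;
  int (*do_connect)(struct filter *f, bool *done);
  struct filter *protocol;                      /* sub-filter added after ALPN */
};

static int plain_connect(struct filter *f, bool *done)
{
  f->connected = true;                          /* pretend the socket is up */
  *done = true;
  return 0;
}

static struct filter *insert_below(struct filter *at, const char *name)
{
  struct filter *f = calloc(1, sizeof(*f));
  if(!f)
    return NULL;
  f->name = name;
  f->do_connect = plain_connect;
  f->next = at->next;                           /* splice in below 'at' */
  at->next = f;
  return f;
}

static int proxy_connect(struct filter *cf, enum alpn alpn, bool *done)
{
  int rc;
connect_sub:
  rc = cf->next->do_connect(cf->next, done);    /* connect everything below */
  if(rc || !*done)
    return rc;
  if(!cf->protocol) {                           /* first pass: pick tunnel protocol */
    cf->protocol = insert_below(cf, (alpn == ALPN_HTTP_2) ? "h2-proxy" : "h1-proxy");
    if(!cf->protocol)
      return -1;
    *done = false;
    goto connect_sub;                           /* connect the new sub-filter too */
  }
  cf->connected = true;                         /* tunnel established */
  *done = true;
  return 0;
}

int main(void)
{
  struct filter tcp = { "tcp", NULL, false, plain_connect, NULL };
  struct filter proxy = { "http-proxy", &tcp, false, NULL, NULL };
  bool done = false;
  int rc = proxy_connect(&proxy, ALPN_HTTP_2, &done);
  printf("rc=%d done=%d tunnel=%s\n", rc, (int)done, proxy.next->name);
  free(proxy.protocol);
  return rc;
}

The point of the connect_sub loop is that the newly inserted sub-filter becomes cf->next, so one more pass through the same connect call brings the whole chain, tunnel included, to the connected state.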
diff --git a/libs/libcurl/src/http_proxy.h b/libs/libcurl/src/http_proxy.h
index 4e6716fa5f..ce09046e95 100644
--- a/libs/libcurl/src/http_proxy.h
+++ b/libs/libcurl/src/http_proxy.h
@@ -25,34 +25,28 @@ ***************************************************************************/
#include "curl_setup.h"
-#include "urldata.h"
-#if !defined(CURL_DISABLE_PROXY)
+#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP)
+
+#include "urldata.h"
-#if !defined(CURL_DISABLE_HTTP)
/* Default proxy timeout in milliseconds */
#define PROXY_TIMEOUT (3600*1000)
-CURLcode Curl_conn_http_proxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex);
+void Curl_cf_http_proxy_get_host(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const char **phost,
+ const char **pdisplay_host,
+ int *pport);
CURLcode Curl_cf_http_proxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data);
extern struct Curl_cftype Curl_cft_http_proxy;
-#endif /* !CURL_DISABLE_HTTP */
-
-CURLcode Curl_conn_haproxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex);
-
-CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at,
- struct Curl_easy *data);
-
-extern struct Curl_cftype Curl_cft_haproxy;
+#endif /* !CURL_DISABLE_PROXY && !CURL_DISABLE_HTTP */
-#endif /* !CURL_DISABLE_PROXY */
+#define IS_HTTPS_PROXY(t) (((t) == CURLPROXY_HTTPS) || \
+ ((t) == CURLPROXY_HTTPS2))
#endif /* HEADER_CURL_HTTP_PROXY_H */
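The new IS_HTTPS_PROXY() macro groups the two TLS-based proxy types. A hedged usage sketch follows; only the macro mirrors the header above, while the helper function and its call semantics are invented, and CURLPROXY_HTTPS2 requires curl 8.1.0-era public headers.

#include <stdbool.h>
#include <stdio.h>
#include <curl/curl.h>                  /* CURLPROXY_HTTPS, CURLPROXY_HTTPS2 */

#define IS_HTTPS_PROXY(t) (((t) == CURLPROXY_HTTPS) || \
                           ((t) == CURLPROXY_HTTPS2))

/* hypothetical helper: does this proxy type need a TLS filter below it? */
static bool proxy_needs_tls(long proxytype)
{
  return IS_HTTPS_PROXY(proxytype);
}

int main(void)
{
  printf("https proxy needs TLS: %d\n", (int)proxy_needs_tls(CURLPROXY_HTTPS));
  printf("plain http proxy:      %d\n", (int)proxy_needs_tls(CURLPROXY_HTTP));
  return 0;
}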
diff --git a/libs/libcurl/src/imap.c b/libs/libcurl/src/imap.c
index 53d6a2b0aa..b2eb1b2d85 100644
--- a/libs/libcurl/src/imap.c
+++ b/libs/libcurl/src/imap.c
@@ -1511,11 +1511,11 @@ static CURLcode imap_done(struct Curl_easy *data, CURLcode status, result = status; /* use the already set error code */
}
else if(!data->set.connect_only && !imap->custom &&
- (imap->uid || imap->mindex || data->set.upload ||
+ (imap->uid || imap->mindex || data->state.upload ||
data->set.mimepost.kind != MIMEKIND_NONE)) {
/* Handle responses after FETCH or APPEND transfer has finished */
- if(!data->set.upload && data->set.mimepost.kind == MIMEKIND_NONE)
+ if(!data->state.upload && data->set.mimepost.kind == MIMEKIND_NONE)
state(data, IMAP_FETCH_FINAL);
else {
/* End the APPEND command first by sending an empty line */
@@ -1581,7 +1581,7 @@ static CURLcode imap_perform(struct Curl_easy *data, bool *connected, selected = TRUE;
/* Start the first command in the DO phase */
- if(data->set.upload || data->set.mimepost.kind != MIMEKIND_NONE)
+ if(data->state.upload || data->set.mimepost.kind != MIMEKIND_NONE)
/* APPEND can be executed directly */
result = imap_perform_append(data);
else if(imap->custom && (selected || !imap->mailbox))
@@ -1931,7 +1931,7 @@ static CURLcode imap_parse_url_options(struct connectdata *conn) const char *value;
while(*ptr && *ptr != '=')
- ptr++;
+ ptr++;
value = ptr + 1;
diff --git a/libs/libcurl/src/inet_ntop.c b/libs/libcurl/src/inet_ntop.c
index e58a3b7e13..786634114d 100644
--- a/libs/libcurl/src/inet_ntop.c
+++ b/libs/libcurl/src/inet_ntop.c
@@ -164,7 +164,7 @@ static char *inet_ntop6 (const unsigned char *src, char *dst, size_t size) /* Was it a trailing run of 0x00's?
*/
if(best.base != -1 && (best.base + best.len) == (IN6ADDRSZ / INT16SZ))
- *tp++ = ':';
+ *tp++ = ':';
*tp++ = '\0';
/* Check for overflow, copy, and we're done.
diff --git a/libs/libcurl/src/ldap.c b/libs/libcurl/src/ldap.c
index aa36b0ed66..7ed9412062 100644
--- a/libs/libcurl/src/ldap.c
+++ b/libs/libcurl/src/ldap.c
@@ -731,7 +731,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done) }
if(ber)
- ber_free(ber, 0);
+ ber_free(ber, 0);
}
quit:
@@ -1069,7 +1069,7 @@ static int _ldap_url_parse(struct Curl_easy *data, *ludpp = NULL;
if(!ludp)
- return LDAP_NO_MEMORY;
+ return LDAP_NO_MEMORY;
rc = _ldap_url_parse2(data, conn, ludp);
if(rc != LDAP_SUCCESS) {
diff --git a/libs/libcurl/src/libcurl.plist b/libs/libcurl/src/libcurl.plist
index 2cbfb0ccf2..c9bb668381 100644
--- a/libs/libcurl/src/libcurl.plist
+++ b/libs/libcurl/src/libcurl.plist
@@ -15,7 +15,7 @@ <string>se.curl.libcurl</string>
<key>CFBundleVersion</key>
- <string>8.0.1</string>
+ <string>8.1.2</string>
<key>CFBundleName</key>
<string>libcurl</string>
@@ -27,9 +27,9 @@ <string>????</string>
<key>CFBundleShortVersionString</key>
- <string>libcurl 8.0.1</string>
+ <string>libcurl 8.1.2</string>
<key>CFBundleGetInfoString</key>
- <string>libcurl.plist 8.0.1</string>
+ <string>libcurl.plist 8.1.2</string>
</dict>
</plist>
diff --git a/libs/libcurl/src/md4.c b/libs/libcurl/src/md4.c
index 7386675d71..fc4ac18e3e 100644
--- a/libs/libcurl/src/md4.c
+++ b/libs/libcurl/src/md4.c
@@ -24,7 +24,7 @@ #include "curl_setup.h"
-#if !defined(CURL_DISABLE_CRYPTO_AUTH)
+#if defined(USE_CURL_NTLM_CORE)
#include <string.h>
@@ -68,10 +68,12 @@ #include <openssl/md4.h>
#elif (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && \
(__MAC_OS_X_VERSION_MAX_ALLOWED >= 1040) && \
- defined(__MAC_OS_X_VERSION_MIN_ALLOWED) && \
- (__MAC_OS_X_VERSION_MIN_ALLOWED < 101500)) || \
+ defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \
+ (__MAC_OS_X_VERSION_MIN_REQUIRED < 101500)) || \
(defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
- (__IPHONE_OS_VERSION_MAX_ALLOWED >= 20000))
+ (__IPHONE_OS_VERSION_MAX_ALLOWED >= 20000) && \
+ defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \
+ (__IPHONE_OS_VERSION_MIN_REQUIRED < 130000))
#define AN_APPLE_OS
#include <CommonCrypto/CommonDigest.h>
#elif defined(USE_WIN32_CRYPTO)
@@ -504,4 +506,4 @@ void Curl_md4it(unsigned char *output, const unsigned char *input, MD4_Final(output, &ctx);
}
-#endif /* CURL_DISABLE_CRYPTO_AUTH */
+#endif /* USE_CURL_NTLM_CORE */
diff --git a/libs/libcurl/src/md5.c b/libs/libcurl/src/md5.c
index 8457cf6dd4..965cf35711 100644
--- a/libs/libcurl/src/md5.c
+++ b/libs/libcurl/src/md5.c
@@ -66,10 +66,12 @@ #include <mbedtls/md5.h>
#elif (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && \
(__MAC_OS_X_VERSION_MAX_ALLOWED >= 1040) && \
- defined(__MAC_OS_X_VERSION_MIN_ALLOWED) && \
- (__MAC_OS_X_VERSION_MIN_ALLOWED < 101500)) || \
+ defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \
+ (__MAC_OS_X_VERSION_MIN_REQUIRED < 101500)) || \
(defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
- (__IPHONE_OS_VERSION_MAX_ALLOWED >= 20000))
+ (__IPHONE_OS_VERSION_MAX_ALLOWED >= 20000) && \
+ defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \
+ (__IPHONE_OS_VERSION_MIN_REQUIRED < 130000))
#define AN_APPLE_OS
#include <CommonCrypto/CommonDigest.h>
#elif defined(USE_WIN32_CRYPTO)
diff --git a/libs/libcurl/src/mime.c b/libs/libcurl/src/mime.c
index b8c7fab762..5cb20679a4 100644
--- a/libs/libcurl/src/mime.c
+++ b/libs/libcurl/src/mime.c
@@ -750,7 +750,6 @@ static void mime_file_free(void *ptr) part->fp = NULL;
}
Curl_safefree(part->data);
- part->data = NULL;
}
@@ -1108,7 +1107,7 @@ static int mime_subparts_seek(void *instream, curl_off_t offset, int whence) return CURL_SEEKFUNC_CANTSEEK; /* Only support full rewind. */
if(mime->state.state == MIMESTATE_BEGIN)
- return CURL_SEEKFUNC_OK; /* Already rewound. */
+ return CURL_SEEKFUNC_OK; /* Already rewound. */
for(part = mime->firstpart; part; part = part->nextpart) {
int res = mime_part_rewind(part);
@@ -1341,7 +1340,6 @@ CURLcode curl_mime_name(curl_mimepart *part, const char *name) return CURLE_BAD_FUNCTION_ARGUMENT;
Curl_safefree(part->name);
- part->name = NULL;
if(name) {
part->name = strdup(name);
@@ -1359,7 +1357,6 @@ CURLcode curl_mime_filename(curl_mimepart *part, const char *filename) return CURLE_BAD_FUNCTION_ARGUMENT;
Curl_safefree(part->filename);
- part->filename = NULL;
if(filename) {
part->filename = strdup(filename);
@@ -1459,7 +1456,6 @@ CURLcode curl_mime_type(curl_mimepart *part, const char *mimetype) return CURLE_BAD_FUNCTION_ARGUMENT;
Curl_safefree(part->mimetype);
- part->mimetype = NULL;
if(mimetype) {
part->mimetype = strdup(mimetype);
@@ -1738,7 +1734,7 @@ const char *Curl_mime_contenttype(const char *filename) size_t len2 = strlen(ctts[i].extension);
if(len1 >= len2 && strcasecompare(nameend - len2, ctts[i].extension))
- return ctts[i].type;
+ return ctts[i].type;
}
}
return NULL;
diff --git a/libs/libcurl/src/mprintf.c b/libs/libcurl/src/mprintf.c
index 30b09d5e05..c4eeb8c836 100644
--- a/libs/libcurl/src/mprintf.c
+++ b/libs/libcurl/src/mprintf.c
@@ -400,7 +400,7 @@ static int dprintf_Pass1(const char *format, struct va_stack *vto, /* out of allowed range */
return 1;
- switch (*fmt) {
+ switch(*fmt) {
case 'S':
flags |= FLAGS_ALT;
/* FALLTHROUGH */
@@ -743,11 +743,11 @@ static int dprintf_formatf( goto number;
- unsigned_number:
+unsigned_number:
/* Unsigned number of base BASE. */
is_neg = 0;
- number:
+number:
/* Number of base BASE. */
/* Supply a default precision if none was given. */
diff --git a/libs/libcurl/src/mqtt.c b/libs/libcurl/src/mqtt.c
index 2d6f771be9..0ab7c6467e 100644
--- a/libs/libcurl/src/mqtt.c
+++ b/libs/libcurl/src/mqtt.c
@@ -605,7 +605,7 @@ static CURLcode mqtt_read_publish(struct Curl_easy *data, bool *done) unsigned char packet;
switch(mqtt->state) {
- MQTT_SUBACK_COMING:
+MQTT_SUBACK_COMING:
case MQTT_SUBACK_COMING:
result = mqtt_verify_suback(data);
if(result)
@@ -688,7 +688,7 @@ static CURLcode mqtt_read_publish(struct Curl_easy *data, bool *done) result = CURLE_WEIRD_SERVER_REPLY;
goto end;
}
- end:
+end:
return result;
}
diff --git a/libs/libcurl/src/multi.c b/libs/libcurl/src/multi.c
index b1ad4866a8..a24d7dd5bb 100644
--- a/libs/libcurl/src/multi.c
+++ b/libs/libcurl/src/multi.c
@@ -90,8 +90,17 @@ #define CURL_MULTI_HANDLE 0x000bab1e
+#ifdef DEBUGBUILD
+/* On a debug build, we want to fail hard on multi handles that
+ * are not NULL, but no longer have the MAGIC touch. This gives
+ * us early warning on things only discovered by valgrind otherwise. */
+#define GOOD_MULTI_HANDLE(x) \
+ (((x) && (x)->magic == CURL_MULTI_HANDLE)? TRUE: \
+ (DEBUGASSERT(!(x)), FALSE))
+#else
#define GOOD_MULTI_HANDLE(x) \
((x) && (x)->magic == CURL_MULTI_HANDLE)
+#endif
static CURLMcode singlesocket(struct Curl_multi *multi,
struct Curl_easy *data);
@@ -383,12 +392,10 @@ static void sh_init(struct Curl_hash *hash, int hashsize) * Called when a transfer is completed. Adds the given msg pointer to
* the list kept in the multi handle.
*/
-static CURLMcode multi_addmsg(struct Curl_multi *multi,
- struct Curl_message *msg)
+static void multi_addmsg(struct Curl_multi *multi, struct Curl_message *msg)
{
Curl_llist_insert_next(&multi->msglist, multi->msglist.tail, msg,
&msg->list);
- return CURLM_OK;
}
struct Curl_multi *Curl_multi_handle(int hashsize, /* socket hash */
@@ -411,6 +418,7 @@ struct Curl_multi *Curl_multi_handle(int hashsize, /* socket hash */ Curl_llist_init(&multi->msglist, NULL);
Curl_llist_init(&multi->pending, NULL);
+ Curl_llist_init(&multi->msgsent, NULL);
multi->multiplexing = TRUE;
@@ -440,7 +448,7 @@ struct Curl_multi *Curl_multi_handle(int hashsize, /* socket hash */ return multi;
- error:
+error:
sockhash_destroy(&multi->sockhash);
Curl_hash_destroy(&multi->hostcache);
@@ -456,6 +464,14 @@ struct Curl_multi *curl_multi_init(void) CURL_DNS_HASH_SIZE);
}
+/* returns TRUE if the easy handle is supposed to be present in the main link
+ list */
+static bool in_main_list(struct Curl_easy *data)
+{
+ return ((data->mstate != MSTATE_PENDING) &&
+ (data->mstate != MSTATE_MSGSENT));
+}
+
static void link_easy(struct Curl_multi *multi,
struct Curl_easy *data)
{
@@ -489,6 +505,8 @@ static void unlink_easy(struct Curl_multi *multi, data->next->prev = data->prev;
else
multi->easylp = data->prev; /* point to last node */
+
+ data->prev = data->next = NULL;
}
@@ -681,6 +699,15 @@ static CURLcode multi_done(struct Curl_easy *data, process_pending_handles(data->multi); /* connection / multiplex */
+ Curl_safefree(data->state.ulbuf);
+
+ /* if the transfer was completed in a paused state there can be buffered
+ data left to free */
+ for(i = 0; i < data->state.tempcount; i++) {
+ Curl_dyn_free(&data->state.tempwrite[i].b);
+ }
+ data->state.tempcount = 0;
+
CONNCACHE_LOCK(data);
Curl_detach_connection(data);
if(CONN_INUSE(conn)) {
@@ -699,14 +726,6 @@ static CURLcode multi_done(struct Curl_easy *data, conn->dns_entry = NULL;
}
Curl_hostcache_prune(data);
- Curl_safefree(data->state.ulbuf);
-
- /* if the transfer was completed in a paused state there can be buffered
- data left to free */
- for(i = 0; i < data->state.tempcount; i++) {
- Curl_dyn_free(&data->state.tempwrite[i].b);
- }
- data->state.tempcount = 0;
/* if data->set.reuse_forbid is TRUE, it means the libcurl client has
forced us to close this connection. This is ignored for requests taking
@@ -848,10 +867,16 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi, called. Do it after multi_done() in case that sets another time! */
Curl_expire_clear(data);
- if(data->connect_queue.ptr)
- /* the handle was in the pending list waiting for an available connection,
- so go ahead and remove it */
- Curl_llist_remove(&multi->pending, &data->connect_queue, NULL);
+ if(data->connect_queue.ptr) {
+ /* the handle is in the pending or msgsent lists, so go ahead and remove
+ it */
+ if(data->mstate == MSTATE_PENDING)
+ Curl_llist_remove(&multi->pending, &data->connect_queue, NULL);
+ else
+ Curl_llist_remove(&multi->msgsent, &data->connect_queue, NULL);
+ }
+ if(in_main_list(data))
+ unlink_easy(multi, data);
if(data->dns.hostcachetype == HCACHE_MULTI) {
/* stop using the multi handle's DNS cache, *after* the possible
@@ -912,7 +937,6 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi, /* make sure there's no pending message in the queue sent from this easy
handle */
-
for(e = multi->msglist.head; e; e = e->next) {
struct Curl_message *msg = e->ptr;
@@ -923,19 +947,6 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi, }
}
- /* Remove from the pending list if it is there. Otherwise this will
- remain on the pending list forever due to the state change. */
- for(e = multi->pending.head; e; e = e->next) {
- struct Curl_easy *curr_data = e->ptr;
-
- if(curr_data == data) {
- Curl_llist_remove(&multi->pending, e, NULL);
- break;
- }
- }
-
- unlink_easy(multi, data);
-
/* NOTE NOTE NOTE
We do not touch the easy handle here! */
multi->num_easy--; /* one less to care about now */
@@ -1943,11 +1954,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, }
break;
- case MSTATE_PENDING:
- /* We will stay here until there is a connection available. Then
- we try again in the MSTATE_CONNECT state. */
- break;
-
case MSTATE_CONNECT:
/* Connect. We want to get a connection identifier filled in. */
/* init this transfer. */
@@ -1971,6 +1977,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, /* add this handle to the list of connect-pending handles */
Curl_llist_insert_next(&multi->pending, multi->pending.tail, data,
&data->connect_queue);
+ /* unlink from the main list */
+ unlink_easy(multi, data);
result = CURLE_OK;
break;
}
@@ -2013,7 +2021,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, else
#endif
if(conn->bits.conn_to_host)
- hostname = conn->conn_to_host.name;
+ hostname = conn->conn_to_host.name;
else
hostname = conn->host.name;
@@ -2215,7 +2223,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, /* DO was not completed in one function call, we must continue
DOING... */
multistate(data, MSTATE_DOING);
- rc = CURLM_OK;
}
/* after DO, go DO_DONE... or DO_MORE */
@@ -2223,7 +2230,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, /* we're supposed to do more, but we need to sit down, relax
and wait a little while first */
multistate(data, MSTATE_DOING_MORE);
- rc = CURLM_OK;
}
else {
/* we're done with the DO, now DID */
@@ -2324,9 +2330,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, MSTATE_DID : MSTATE_DOING);
rc = CURLM_CALL_MULTI_PERFORM;
}
- else
- /* stay in DO_MORE */
- rc = CURLM_OK;
+ /* else
+ stay in DO_MORE */
}
else {
/* failure detected */
@@ -2555,7 +2560,6 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, won't get stuck on this transfer at the expense of other concurrent
transfers */
Curl_expire(data, 0, EXPIRE_RUN_NOW);
- rc = CURLM_OK;
}
break;
}
@@ -2597,9 +2601,11 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, case MSTATE_COMPLETED:
break;
+ case MSTATE_PENDING:
case MSTATE_MSGSENT:
- data->result = result;
- return CURLM_OK; /* do nothing */
+ /* handles in these states should NOT be in this list */
+ DEBUGASSERT(0);
+ break;
default:
return CURLM_INTERNAL_ERROR;
@@ -2619,7 +2625,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, multi_handle_timeout(data, nowp, &stream_error, &result, TRUE);
}
- statemachine_end:
+statemachine_end:
if(data->mstate < MSTATE_COMPLETED) {
if(result) {
@@ -2687,10 +2693,17 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi, msg->extmsg.easy_handle = data;
msg->extmsg.data.result = result;
- rc = multi_addmsg(multi, msg);
+ multi_addmsg(multi, msg);
DEBUGASSERT(!data->conn);
}
multistate(data, MSTATE_MSGSENT);
+
+ /* add this handle to the list of msgsent handles */
+ Curl_llist_insert_next(&multi->msgsent, multi->msgsent.tail, data,
+ &data->connect_queue);
+ /* unlink from the main list */
+ unlink_easy(multi, data);
+ return CURLM_OK;
}
} while((rc == CURLM_CALL_MULTI_PERFORM) || multi_ischanged(multi, FALSE));
@@ -2721,6 +2734,9 @@ CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles) /* Do the loop and only alter the signal ignore state if the next handle
has a different NO_SIGNAL state than the previous */
do {
+ /* the current node might be unlinked in multi_runsingle(), get the next
+ pointer now */
+ struct Curl_easy *datanext = data->next;
if(data->set.no_signal != nosig) {
sigpipe_restore(&pipe_st);
sigpipe_ignore(data, &pipe_st);
@@ -2729,7 +2745,7 @@ CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles) result = multi_runsingle(multi, &now, data);
if(result)
returncode = result;
- data = data->next; /* operate on next handle */
+ data = datanext; /* operate on next handle */
} while(data);
sigpipe_restore(&pipe_st);
}
@@ -2760,6 +2776,18 @@ CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles) return returncode;
}
+/* unlink_all_msgsent_handles() detaches all those easy handles from this
+ multi handle */
+static void unlink_all_msgsent_handles(struct Curl_multi *multi)
+{
+ struct Curl_llist_element *e = multi->msgsent.head;
+ if(e) {
+ struct Curl_easy *data = e->ptr;
+ DEBUGASSERT(data->mstate == MSTATE_MSGSENT);
+ data->multi = NULL;
+ }
+}
+
CURLMcode curl_multi_cleanup(struct Curl_multi *multi)
{
struct Curl_easy *data;
@@ -2771,6 +2799,8 @@ CURLMcode curl_multi_cleanup(struct Curl_multi *multi) multi->magic = 0; /* not good anymore */
+ unlink_all_msgsent_handles(multi);
+ process_pending_handles(multi);
/* First remove all remaining easy handles */
data = multi->easyp;
while(data) {
@@ -3150,6 +3180,9 @@ static CURLMcode multi_socket(struct Curl_multi *multi, struct Curl_easy *data = NULL;
struct Curl_tree *t;
struct curltime now = Curl_now();
+ bool first = FALSE;
+ bool nosig = FALSE;
+ SIGPIPE_VARIABLE(pipe_st);
if(checkall) {
/* *perform() deals with running_handles on its own */
@@ -3192,7 +3225,7 @@ static CURLMcode multi_socket(struct Curl_multi *multi, if(data->conn && !(data->conn->handler->flags & PROTOPT_DIRLOCK))
/* set socket event bitmask if they're not locked */
- data->conn->cselect_bits = ev_bitmask;
+ data->conn->cselect_bits = (unsigned char)ev_bitmask;
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
@@ -3224,18 +3257,24 @@ static CURLMcode multi_socket(struct Curl_multi *multi, do {
/* the first loop lap 'data' can be NULL */
if(data) {
- SIGPIPE_VARIABLE(pipe_st);
-
- sigpipe_ignore(data, &pipe_st);
+ if(!first) {
+ first = TRUE;
+ nosig = data->set.no_signal; /* initial state */
+ sigpipe_ignore(data, &pipe_st);
+ }
+ else if(data->set.no_signal != nosig) {
+ sigpipe_restore(&pipe_st);
+ sigpipe_ignore(data, &pipe_st);
+ nosig = data->set.no_signal; /* remember new state */
+ }
result = multi_runsingle(multi, &now, data);
- sigpipe_restore(&pipe_st);
if(CURLM_OK >= result) {
/* get the socket(s) and check if the state has been changed since
last */
result = singlesocket(multi, data);
if(result)
- return result;
+ break;
}
}
@@ -3249,6 +3288,8 @@ static CURLMcode multi_socket(struct Curl_multi *multi, }
} while(t);
+ if(first)
+ sigpipe_restore(&pipe_st);
*running_handles = multi->num_alive;
return result;
@@ -3702,6 +3743,8 @@ void Curl_multiuse_state(struct Curl_easy *data, process_pending_handles(data->multi);
}
+/* process_pending_handles() moves all handles from PENDING
+   back into the main list and changes their state to CONNECT */
static void process_pending_handles(struct Curl_multi *multi)
{
struct Curl_llist_element *e = multi->pending.head;
@@ -3710,6 +3753,9 @@ static void process_pending_handles(struct Curl_multi *multi) DEBUGASSERT(data->mstate == MSTATE_PENDING);
+ /* put it back into the main list */
+ link_easy(multi, data);
+
multistate(data, MSTATE_CONNECT);
/* Remove this node from the list */
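
The multi.c hunks above stop trusting data->next after multi_runsingle() has run, because the handle may meanwhile have been unlinked from the main list (into the new msgsent list). Below is a minimal stand-alone sketch of that iterate-while-unlinking pattern; the node/walk/unlink_node names are made up for the illustration and are not libcurl API.

/* ---- illustrative sketch, not part of the patch ---- */
#include <stdio.h>
#include <stdlib.h>

struct node {
  int id;
  struct node *next;
};

/* unlink a node from the singly-linked list and free it */
static void unlink_node(struct node **head, struct node *n)
{
  struct node **pp = head;
  while(*pp && *pp != n)
    pp = &(*pp)->next;
  if(*pp)
    *pp = n->next;
  free(n);
}

/* the step below may remove the current node, so grab 'next' first --
   the same precaution the cached 'datanext' pointer provides in
   curl_multi_perform() */
static void walk(struct node **head)
{
  struct node *cur = *head;
  while(cur) {
    struct node *next = cur->next;  /* save before 'cur' may be freed */
    if(cur->id & 1)
      unlink_node(head, cur);       /* drop odd ids */
    cur = next;
  }
}

int main(void)
{
  struct node *head = NULL, **tail = &head;
  int i;
  for(i = 0; i < 6; i++) {
    struct node *n = calloc(1, sizeof(*n));
    if(!n)
      return 1;
    n->id = i;
    *tail = n;
    tail = &n->next;
  }
  walk(&head);
  while(head) {                     /* prints 0 2 4, then frees the rest */
    struct node *next = head->next;
    printf("%d ", head->id);
    free(head);
    head = next;
  }
  putchar('\n');
  return 0;
}
/* ---- end sketch ---- */

The only essential point is that the successor pointer is captured before the step that can free or move the current element.
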
diff --git a/libs/libcurl/src/multihandle.h b/libs/libcurl/src/multihandle.h index 5dc527ec2c..771c04d226 100644 --- a/libs/libcurl/src/multihandle.h +++ b/libs/libcurl/src/multihandle.h @@ -101,6 +101,8 @@ struct Curl_multi { struct Curl_llist pending; /* Curl_easys that are in the
MSTATE_PENDING state */
+ struct Curl_llist msgsent; /* Curl_easys that are in the
+ MSTATE_MSGSENT state */
/* callback function and user data pointer for the *socket() API */
curl_socket_callback socket_cb;
diff --git a/libs/libcurl/src/netrc.c b/libs/libcurl/src/netrc.c index 3b3eeeef3f..3163a60a1f 100644 --- a/libs/libcurl/src/netrc.c +++ b/libs/libcurl/src/netrc.c @@ -244,7 +244,7 @@ static int parsenetrc(const char *host, }
} /* while Curl_get_line() */
- out:
+out:
if(!retcode) {
/* success */
if(login_alloc) {
diff --git a/libs/libcurl/src/noproxy.c b/libs/libcurl/src/noproxy.c index 5a6aa89da3..8517cbf343 100644 --- a/libs/libcurl/src/noproxy.c +++ b/libs/libcurl/src/noproxy.c @@ -122,6 +122,7 @@ enum nametype { bool Curl_check_noproxy(const char *name, const char *no_proxy,
bool *spacesep)
{
+ char hostip[128];
*spacesep = FALSE;
/*
* If we don't have a hostname at all, like for example with a FILE
@@ -139,7 +140,6 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy, const char *p = no_proxy;
size_t namelen;
enum nametype type = TYPE_HOST;
- char hostip[128];
if(!strcmp("*", no_proxy))
return TRUE;
diff --git a/libs/libcurl/src/openldap.c b/libs/libcurl/src/openldap.c index 30bdf6c976..7d3b9e85e2 100644 --- a/libs/libcurl/src/openldap.c +++ b/libs/libcurl/src/openldap.c @@ -295,7 +295,7 @@ static CURLcode oldap_parse_login_options(struct connectdata *conn) const char *value;
while(*ptr && *ptr != '=')
- ptr++;
+ ptr++;
value = ptr + 1;
diff --git a/libs/libcurl/src/parsedate.c b/libs/libcurl/src/parsedate.c index bd31aadf9e..c6ae8fb8e4 100644 --- a/libs/libcurl/src/parsedate.c +++ b/libs/libcurl/src/parsedate.c @@ -332,7 +332,7 @@ static bool match_time(const char *date, }
}
return FALSE; /* not a time string */
- match:
+match:
*h = hh;
*m = mm;
*s = ss;
diff --git a/libs/libcurl/src/pingpong.c b/libs/libcurl/src/pingpong.c index 46ef3c6c05..4134e2749f 100644 --- a/libs/libcurl/src/pingpong.c +++ b/libs/libcurl/src/pingpong.c @@ -211,7 +211,7 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data, #ifdef HAVE_GSSAPI
data_sec = conn->data_prot;
DEBUGASSERT(data_sec > PROT_NONE && data_sec < PROT_LAST);
- conn->data_prot = data_sec;
+ conn->data_prot = (unsigned char)data_sec;
#endif
Curl_debug(data, CURLINFO_HEADER_OUT, s, (size_t)bytes_written);
@@ -316,7 +316,7 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data, &gotbytes);
#ifdef HAVE_GSSAPI
DEBUGASSERT(prot > PROT_NONE && prot < PROT_LAST);
- conn->data_prot = prot;
+ conn->data_prot = (unsigned char)prot;
#endif
if(result == CURLE_AGAIN)
return CURLE_OK; /* return */
diff --git a/libs/libcurl/src/pop3.c b/libs/libcurl/src/pop3.c index f65fec2c01..48c2a39d14 100644 --- a/libs/libcurl/src/pop3.c +++ b/libs/libcurl/src/pop3.c @@ -1376,7 +1376,7 @@ static CURLcode pop3_parse_url_options(struct connectdata *conn) const char *value;
while(*ptr && *ptr != '=')
- ptr++;
+ ptr++;
value = ptr + 1;
diff --git a/libs/libcurl/src/rand.c b/libs/libcurl/src/rand.c index 126fae9e29..7a40f4ac63 100644 --- a/libs/libcurl/src/rand.c +++ b/libs/libcurl/src/rand.c @@ -183,8 +183,8 @@ static CURLcode randit(struct Curl_easy *data, unsigned int *rnd) }
/*
- * Curl_rand() stores 'num' number of random unsigned integers in the buffer
- * 'rndptr' points to.
+ * Curl_rand() stores 'num' number of random unsigned characters in the buffer
+ * 'rnd' points to.
*
* If libcurl is built without TLS support or with a TLS backend that lacks a
* proper random API (rustls, Gskit or mbedTLS), this function will use "weak"
diff --git a/libs/libcurl/src/rtsp.c b/libs/libcurl/src/rtsp.c index 9643261242..bb9708f11b 100644 --- a/libs/libcurl/src/rtsp.c +++ b/libs/libcurl/src/rtsp.c @@ -45,8 +45,6 @@ #include "curl_memory.h"
#include "memdebug.h"
-#define RTP_PKT_CHANNEL(p) ((int)((unsigned char)((p)[1])))
-
#define RTP_PKT_LENGTH(p) ((((int)((unsigned char)((p)[2]))) << 8) | \
((int)((unsigned char)((p)[3]))))
@@ -91,6 +89,8 @@ static int rtsp_getsock_do(struct Curl_easy *data, struct connectdata *conn, static
CURLcode rtp_client_write(struct Curl_easy *data, char *ptr, size_t len);
+static
+CURLcode rtsp_parse_transport(struct Curl_easy *data, char *transport);
/*
@@ -119,6 +119,7 @@ const struct Curl_handler Curl_handler_rtsp = { PROTOPT_NONE /* flags */
};
+#define MAX_RTP_BUFFERSIZE 1000000 /* arbitrary */
static CURLcode rtsp_setup_connection(struct Curl_easy *data,
struct connectdata *conn)
@@ -130,6 +131,7 @@ static CURLcode rtsp_setup_connection(struct Curl_easy *data, if(!rtsp)
return CURLE_OUT_OF_MEMORY;
+ Curl_dyn_init(&conn->proto.rtspc.buf, MAX_RTP_BUFFERSIZE);
return CURLE_OK;
}
@@ -176,7 +178,7 @@ static CURLcode rtsp_disconnect(struct Curl_easy *data, {
(void) dead;
(void) data;
- Curl_safefree(conn->proto.rtspc.rtp_buf);
+ Curl_dyn_free(&conn->proto.rtspc.buf);
return CURLE_OK;
}
@@ -204,7 +206,7 @@ static CURLcode rtsp_done(struct Curl_easy *data, return CURLE_RTSP_CSEQ_ERROR;
}
if(data->set.rtspreq == RTSPREQ_RECEIVE &&
- (data->conn->proto.rtspc.rtp_channel == -1)) {
+ (data->conn->proto.rtspc.rtp_channel == -1)) {
infof(data, "Got an RTP Receive with a CSeq of %ld", CSeq_recv);
}
}
@@ -374,7 +376,6 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) if(Curl_checkheaders(data, STRCONST("User-Agent")) &&
data->state.aptr.uagent) {
Curl_safefree(data->state.aptr.uagent);
- data->state.aptr.uagent = NULL;
}
else if(!Curl_checkheaders(data, STRCONST("User-Agent")) &&
data->set.str[STRING_USERAGENT]) {
@@ -394,8 +395,6 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) Curl_safefree(data->state.aptr.ref);
if(data->state.referer && !Curl_checkheaders(data, STRCONST("Referer")))
data->state.aptr.ref = aprintf("Referer: %s\r\n", data->state.referer);
- else
- data->state.aptr.ref = NULL;
p_referrer = data->state.aptr.ref;
@@ -476,7 +475,6 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) * with basic and digest, it will be freed anyway by the next request
*/
Curl_safefree(data->state.aptr.userpwd);
- data->state.aptr.userpwd = NULL;
if(result)
return result;
@@ -495,7 +493,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) rtspreq == RTSPREQ_SET_PARAMETER ||
rtspreq == RTSPREQ_GET_PARAMETER) {
- if(data->set.upload) {
+ if(data->state.upload) {
putsize = data->state.infilesize;
data->state.httpreq = HTTPREQ_PUT;
@@ -514,7 +512,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done) result =
Curl_dyn_addf(&req_buffer,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T"\r\n",
- (data->set.upload ? putsize : postsize));
+ (data->state.upload ? putsize : postsize));
if(result)
return result;
}
@@ -594,26 +592,20 @@ static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data, bool *readmore) {
struct SingleRequest *k = &data->req;
struct rtsp_conn *rtspc = &(conn->proto.rtspc);
+ unsigned char *rtp_channel_mask = data->state.rtp_channel_mask;
char *rtp; /* moving pointer to rtp data */
ssize_t rtp_dataleft; /* how much data left to parse in this round */
- char *scratch;
CURLcode result;
+ bool interleaved = false;
+ size_t skip_size = 0;
- if(rtspc->rtp_buf) {
- /* There was some leftover data the last time. Merge buffers */
- char *newptr = Curl_saferealloc(rtspc->rtp_buf,
- rtspc->rtp_bufsize + *nread);
- if(!newptr) {
- rtspc->rtp_buf = NULL;
- rtspc->rtp_bufsize = 0;
+ if(Curl_dyn_len(&rtspc->buf)) {
+ /* There was some leftover data the last time. Append new buffers */
+ if(Curl_dyn_addn(&rtspc->buf, k->str, *nread))
return CURLE_OUT_OF_MEMORY;
- }
- rtspc->rtp_buf = newptr;
- memcpy(rtspc->rtp_buf + rtspc->rtp_bufsize, k->str, *nread);
- rtspc->rtp_bufsize += *nread;
- rtp = rtspc->rtp_buf;
- rtp_dataleft = rtspc->rtp_bufsize;
+ rtp = Curl_dyn_ptr(&rtspc->buf);
+ rtp_dataleft = Curl_dyn_len(&rtspc->buf);
}
else {
/* Just parse the request buffer directly */
@@ -621,71 +613,107 @@ static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data, rtp_dataleft = *nread;
}
- while((rtp_dataleft > 0) &&
- (rtp[0] == '$')) {
- if(rtp_dataleft > 4) {
- int rtp_length;
+ while(rtp_dataleft > 0) {
+ if(rtp[0] == '$') {
+ if(rtp_dataleft > 4) {
+ unsigned char rtp_channel;
+ int rtp_length;
+ int idx;
+ int off;
+
+ /* Parse the header */
+ /* The channel identifier immediately follows and is 1 byte */
+ rtp_channel = (unsigned char)rtp[1];
+ idx = rtp_channel / 8;
+ off = rtp_channel % 8;
+ if(!(rtp_channel_mask[idx] & (1 << off))) {
+ /* invalid channel number, maybe not an RTP packet */
+ rtp++;
+ rtp_dataleft--;
+ skip_size++;
+ continue;
+ }
+ if(skip_size > 0) {
+ DEBUGF(infof(data, "Skip the malformed interleaved data %lu "
+ "bytes", skip_size));
+ }
+ skip_size = 0;
+ rtspc->rtp_channel = rtp_channel;
- /* Parse the header */
- /* The channel identifier immediately follows and is 1 byte */
- rtspc->rtp_channel = RTP_PKT_CHANNEL(rtp);
+ /* The length is two bytes */
+ rtp_length = RTP_PKT_LENGTH(rtp);
- /* The length is two bytes */
- rtp_length = RTP_PKT_LENGTH(rtp);
+ if(rtp_dataleft < rtp_length + 4) {
+ /* Need more - incomplete payload */
+ *readmore = TRUE;
+ break;
+ }
+ interleaved = true;
+ /* We have the full RTP interleaved packet
+ * Write out the header including the leading '$' */
+ DEBUGF(infof(data, "RTP write channel %d rtp_length %d",
+ rtspc->rtp_channel, rtp_length));
+ result = rtp_client_write(data, &rtp[0], rtp_length + 4);
+ if(result) {
+ *readmore = FALSE;
+ return result;
+ }
+
+ /* Move forward in the buffer */
+ rtp_dataleft -= rtp_length + 4;
+ rtp += rtp_length + 4;
- if(rtp_dataleft < rtp_length + 4) {
- /* Need more - incomplete payload */
+ if(data->set.rtspreq == RTSPREQ_RECEIVE) {
+ /* If we are in a passive receive, give control back
+ * to the app as often as we can.
+ */
+ k->keepon &= ~KEEP_RECV;
+ }
+ }
+ else {
+ /* Need more - incomplete header */
*readmore = TRUE;
break;
}
- /* We have the full RTP interleaved packet
- * Write out the header including the leading '$' */
- DEBUGF(infof(data, "RTP write channel %d rtp_length %d",
- rtspc->rtp_channel, rtp_length));
- result = rtp_client_write(data, &rtp[0], rtp_length + 4);
- if(result) {
- failf(data, "Got an error writing an RTP packet");
- *readmore = FALSE;
- Curl_safefree(rtspc->rtp_buf);
- rtspc->rtp_buf = NULL;
- rtspc->rtp_bufsize = 0;
- return result;
- }
-
- /* Move forward in the buffer */
- rtp_dataleft -= rtp_length + 4;
- rtp += rtp_length + 4;
-
- if(data->set.rtspreq == RTSPREQ_RECEIVE) {
- /* If we are in a passive receive, give control back
- * to the app as often as we can.
- */
- k->keepon &= ~KEEP_RECV;
- }
}
else {
- /* Need more - incomplete header */
- *readmore = TRUE;
- break;
+ /* If the following data begins with 'RTSP/', which might be an RTSP
+ message, we should stop skipping the data. */
+      /* If `k->headerline > 0 && !interleaved` is true, we may be in the
+         middle of an RTSP message. Since that is hard to determine
+         reliably, stop skipping here as well. */
+ size_t prefix_len = (rtp_dataleft < 5) ? rtp_dataleft : 5;
+ if((k->headerline > 0 && !interleaved) ||
+ strncmp(rtp, "RTSP/", prefix_len) == 0) {
+ if(skip_size > 0) {
+ DEBUGF(infof(data, "Skip the malformed interleaved data %lu "
+ "bytes", skip_size));
+ }
+        break; /* this may be an RTSP message */
+ }
+      /* Skip incorrect data until the next RTP packet or RTSP message */
+ do {
+ rtp++;
+ rtp_dataleft--;
+ skip_size++;
+ } while(rtp_dataleft > 0 && rtp[0] != '$' && rtp[0] != 'R');
}
}
if(rtp_dataleft && rtp[0] == '$') {
DEBUGF(infof(data, "RTP Rewinding %zd %s", rtp_dataleft,
- *readmore ? "(READMORE)" : ""));
+ *readmore ? "(READMORE)" : ""));
/* Store the incomplete RTP packet for a "rewind" */
- scratch = malloc(rtp_dataleft);
- if(!scratch) {
- Curl_safefree(rtspc->rtp_buf);
- rtspc->rtp_buf = NULL;
- rtspc->rtp_bufsize = 0;
- return CURLE_OUT_OF_MEMORY;
+ if(!Curl_dyn_len(&rtspc->buf)) {
+ /* nothing was stored, add this data */
+ if(Curl_dyn_addn(&rtspc->buf, rtp, rtp_dataleft))
+ return CURLE_OUT_OF_MEMORY;
+ }
+ else {
+ /* keep the remainder */
+ Curl_dyn_tail(&rtspc->buf, rtp_dataleft);
}
- memcpy(scratch, rtp, rtp_dataleft);
- Curl_safefree(rtspc->rtp_buf);
- rtspc->rtp_buf = scratch;
- rtspc->rtp_bufsize = rtp_dataleft;
/* As far as the transfer is concerned, this data is consumed */
*nread = 0;
@@ -694,20 +722,10 @@ static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data, /* Fix up k->str to point just after the last RTP packet */
k->str += *nread - rtp_dataleft;
- /* either all of the data has been read or...
- * rtp now points at the next byte to parse
- */
- if(rtp_dataleft > 0)
- DEBUGASSERT(k->str[0] == rtp[0]);
-
- DEBUGASSERT(rtp_dataleft <= *nread); /* sanity check */
-
*nread = rtp_dataleft;
/* If we get here, we have finished with the leftover/merge buffer */
- Curl_safefree(rtspc->rtp_buf);
- rtspc->rtp_buf = NULL;
- rtspc->rtp_bufsize = 0;
+ Curl_dyn_free(&rtspc->buf);
return CURLE_OK;
}
@@ -822,7 +840,63 @@ CURLcode Curl_rtsp_parseheader(struct Curl_easy *data, char *header) (data->set.str[STRING_RTSP_SESSION_ID])[idlen] = '\0';
}
}
+ else if(checkprefix("Transport:", header)) {
+ CURLcode result;
+ result = rtsp_parse_transport(data, header + 10);
+ if(result)
+ return result;
+ }
return CURLE_OK;
}
+static
+CURLcode rtsp_parse_transport(struct Curl_easy *data, char *transport)
+{
+  /* If we receive multiple Transport response headers, the interleaved
+     channels of each response header are recorded and used together for
+     subsequent data validity checks. */
+ /* e.g.: ' RTP/AVP/TCP;unicast;interleaved=5-6' */
+ char *start;
+ char *end;
+ start = transport;
+ while(start && *start) {
+ while(*start && ISBLANK(*start) )
+ start++;
+ end = strchr(start, ';');
+ if(checkprefix("interleaved=", start)) {
+ long chan1, chan2, chan;
+ char *endp;
+ char *p = start + 12;
+ chan1 = strtol(p, &endp, 10);
+ if(p != endp && chan1 >= 0 && chan1 <= 255) {
+ unsigned char *rtp_channel_mask = data->state.rtp_channel_mask;
+ chan2 = chan1;
+ if(*endp == '-') {
+ p = endp + 1;
+ chan2 = strtol(p, &endp, 10);
+ if(p == endp || chan2 < 0 || chan2 > 255) {
+ infof(data, "Unable to read the interleaved parameter from "
+ "Transport header: [%s]", transport);
+ chan2 = chan1;
+ }
+ }
+ for(chan = chan1; chan <= chan2; chan++) {
+ long idx = chan / 8;
+ long off = chan % 8;
+ rtp_channel_mask[idx] |= (unsigned char)(1 << off);
+ }
+ }
+ else {
+ infof(data, "Unable to read the interleaved parameter from "
+ "Transport header: [%s]", transport);
+ }
+ break;
+ }
+ /* skip to next parameter */
+ start = (!end) ? end : (end + 1);
+ }
+ return CURLE_OK;
+}
+
+
#endif /* CURL_DISABLE_RTSP or using Hyper */
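
The rtp_channel_mask logic added above records which interleaved channels the server announced (for example "Transport: RTP/AVP/TCP;unicast;interleaved=5-6") and later treats '$'-framed data on any other channel as malformed and skips it. The following is a stand-alone sketch of just the bitmask bookkeeping, with illustrative names and none of the real header parsing or skip logic.

/* ---- illustrative sketch, not part of the patch ---- */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* record channel numbers given as "lo-hi" (or a single number) in a
   256-bit mask */
static void mark_channels(unsigned char mask[32], const char *value)
{
  char *end;
  long lo = strtol(value, &end, 10);
  long hi = (*end == '-') ? strtol(end + 1, &end, 10) : lo;
  long c;
  if(lo < 0 || lo > 255 || hi < lo || hi > 255)
    return;                         /* ignore nonsense ranges */
  for(c = lo; c <= hi; c++)
    mask[c / 8] |= (unsigned char)(1 << (c % 8));
}

/* is this channel one the server announced? */
static int channel_known(const unsigned char mask[32], unsigned char ch)
{
  return (mask[ch / 8] >> (ch % 8)) & 1;
}

int main(void)
{
  unsigned char mask[32];
  memset(mask, 0, sizeof(mask));
  mark_channels(mask, "5-6");       /* from e.g. "...;interleaved=5-6" */

  printf("channel 5: %d\n", channel_known(mask, 5));   /* 1 */
  printf("channel 6: %d\n", channel_known(mask, 6));   /* 1 */
  printf("channel 9: %d\n", channel_known(mask, 9));   /* 0: skip such data */
  return 0;
}
/* ---- end sketch ---- */

Each channel number maps to bit (chan % 8) of byte (chan / 8), so the 32-byte mask covers all 256 possible interleaved channel ids.
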
diff --git a/libs/libcurl/src/rtsp.h b/libs/libcurl/src/rtsp.h index 41012fc277..e084cdaa35 100644 --- a/libs/libcurl/src/rtsp.h +++ b/libs/libcurl/src/rtsp.h @@ -45,8 +45,7 @@ CURLcode Curl_rtsp_parseheader(struct Curl_easy *data, char *header); * Currently, only used for tracking incomplete RTP data reads
*/
struct rtsp_conn {
- char *rtp_buf;
- ssize_t rtp_bufsize;
+ struct dynbuf buf;
int rtp_channel;
};
diff --git a/libs/libcurl/src/select.c b/libs/libcurl/src/select.c index 278171a7ea..d0aa2a764b 100644 --- a/libs/libcurl/src/select.c +++ b/libs/libcurl/src/select.c @@ -61,8 +61,8 @@ * for the intended use of this function in the library.
*
* Return values:
- * -1 = system call error, invalid timeout value, or interrupted
- * 0 = specified timeout has elapsed
+ * -1 = system call error, or invalid timeout value
+ * 0 = specified timeout has elapsed, or interrupted
*/
int Curl_wait_ms(timediff_t timeout_ms)
{
@@ -99,8 +99,13 @@ int Curl_wait_ms(timediff_t timeout_ms) }
#endif /* HAVE_POLL_FINE */
#endif /* USE_WINSOCK */
- if(r)
- r = -1;
+ if(r) {
+ if((r == -1) && (SOCKERRNO == EINTR))
+ /* make EINTR from select or poll not a "lethal" error */
+ r = 0;
+ else
+ r = -1;
+ }
return r;
}
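
The Curl_wait_ms() change above reclassifies EINTR: an interrupted wait now counts as "specified timeout has elapsed" (0) instead of an error (-1). Here is a POSIX-only sketch of that behaviour, assuming poll() is available; curl's real function also covers the select() and Windows code paths.

/* ---- illustrative sketch, not part of the patch ---- */
#include <errno.h>
#include <poll.h>
#include <stdio.h>

/* sleep for 'ms' milliseconds; an EINTR from a signal is reported as 0
   ("timeout elapsed") rather than as a fatal error */
static int wait_ms(int ms)
{
  int r = poll(NULL, 0, ms);
  if(r < 0 && errno == EINTR)
    return 0;            /* interrupted: not treated as lethal */
  return (r < 0) ? -1 : 0;
}

int main(void)
{
  printf("wait_ms(100) -> %d\n", wait_ms(100));
  return 0;
}
/* ---- end sketch ---- */
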
diff --git a/libs/libcurl/src/sendf.c b/libs/libcurl/src/sendf.c index bc94a0b139..78645780d2 100644 --- a/libs/libcurl/src/sendf.c +++ b/libs/libcurl/src/sendf.c @@ -271,10 +271,8 @@ static CURLcode chop_write(struct Curl_easy *data, if(type & CLIENTWRITE_BODY) {
#ifdef USE_WEBSOCKETS
if(conn->handler->protocol & (CURLPROTO_WS|CURLPROTO_WSS)) {
- struct HTTP *ws = data->req.p.http;
writebody = Curl_ws_writecb;
- ws->ws.data = data;
- writebody_ptr = ws;
+ writebody_ptr = data;
}
else
#endif
diff --git a/libs/libcurl/src/setopt.c b/libs/libcurl/src/setopt.c index 906222d3b9..c14ea491cd 100644 --- a/libs/libcurl/src/setopt.c +++ b/libs/libcurl/src/setopt.c @@ -115,7 +115,11 @@ static CURLcode setstropt_userpwd(char *option, char **userp, char **passwdp) /* Parse the login details if specified. It not then we treat NULL as a hint
to clear the existing data */
if(option) {
- result = Curl_parse_login_details(option, strlen(option),
+ size_t len = strlen(option);
+ if(len > CURL_MAX_INPUT_LENGTH)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+
+ result = Curl_parse_login_details(option, len,
(userp ? &user : NULL),
(passwdp ? &passwd : NULL),
NULL);
@@ -329,8 +333,8 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) * We want to sent data to the remote host. If this is HTTP, that equals
* using the PUT request.
*/
- data->set.upload = (0 != va_arg(param, long)) ? TRUE : FALSE;
- if(data->set.upload) {
+ arg = va_arg(param, long);
+ if(arg) {
/* If this is HTTP, PUT is what's needed to "upload" */
data->set.method = HTTPREQ_PUT;
data->set.opt_no_body = FALSE; /* this is implied */
@@ -660,7 +664,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) }
else
data->set.method = HTTPREQ_GET;
- data->set.upload = FALSE;
break;
#ifndef CURL_DISABLE_MIME
@@ -884,7 +887,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) */
if(va_arg(param, long)) {
data->set.method = HTTPREQ_GET;
- data->set.upload = FALSE; /* switch off upload */
data->set.opt_no_body = FALSE; /* this is implied */
}
break;
@@ -1155,7 +1157,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param) case CURLOPT_PROXYTYPE:
/*
- * Set proxy type. HTTP/HTTP_1_0/SOCKS4/SOCKS4a/SOCKS5/SOCKS5_HOSTNAME
+ * Set proxy type.
*/
arg = va_arg(param, long);
if((arg < CURLPROXY_HTTP) || (arg > CURLPROXY_SOCKS5_HOSTNAME))
diff --git a/libs/libcurl/src/sha256.c b/libs/libcurl/src/sha256.c index d8a8d6bf67..dc65e2f0fd 100644 --- a/libs/libcurl/src/sha256.c +++ b/libs/libcurl/src/sha256.c @@ -59,9 +59,7 @@ #if defined(USE_OPENSSL_SHA256)
-/* When OpenSSL or wolfSSL is available is available we use their
- * SHA256-functions.
- */
+/* When OpenSSL or wolfSSL is available we use their SHA256-functions. */
#if defined(USE_OPENSSL)
#include <openssl/evp.h>
#elif defined(USE_WOLFSSL)
diff --git a/libs/libcurl/src/smb.c b/libs/libcurl/src/smb.c index 01553de971..0bcd779881 100644 --- a/libs/libcurl/src/smb.c +++ b/libs/libcurl/src/smb.c @@ -530,7 +530,7 @@ static CURLcode smb_send_open(struct Curl_easy *data) byte_count = strlen(req->path);
msg.name_length = smb_swap16((unsigned short)byte_count);
msg.share_access = smb_swap32(SMB_FILE_SHARE_ALL);
- if(data->set.upload) {
+ if(data->state.upload) {
msg.access = smb_swap32(SMB_GENERIC_READ | SMB_GENERIC_WRITE);
msg.create_disposition = smb_swap32(SMB_FILE_OVERWRITE_IF);
}
@@ -762,7 +762,7 @@ static CURLcode smb_request_state(struct Curl_easy *data, bool *done) void *msg = NULL;
const struct smb_nt_create_response *smb_m;
- if(data->set.upload && (data->state.infilesize < 0)) {
+ if(data->state.upload && (data->state.infilesize < 0)) {
failf(data, "SMB upload needs to know the size up front");
return CURLE_SEND_ERROR;
}
@@ -813,13 +813,12 @@ static CURLcode smb_request_state(struct Curl_easy *data, bool *done) smb_m = (const struct smb_nt_create_response*) msg;
req->fid = smb_swap16(smb_m->fid);
data->req.offset = 0;
- if(data->set.upload) {
+ if(data->state.upload) {
data->req.size = data->state.infilesize;
Curl_pgrsSetUploadSize(data, data->req.size);
next_state = SMB_UPLOAD;
}
else {
- smb_m = (const struct smb_nt_create_response*) msg;
data->req.size = smb_swap64(smb_m->end_of_file);
if(data->req.size < 0) {
req->result = CURLE_WEIRD_SERVER_REPLY;
diff --git a/libs/libcurl/src/smtp.c b/libs/libcurl/src/smtp.c index 8e5a7f5f4f..2b40c9249c 100644 --- a/libs/libcurl/src/smtp.c +++ b/libs/libcurl/src/smtp.c @@ -1419,7 +1419,7 @@ static CURLcode smtp_done(struct Curl_easy *data, CURLcode status, result = status; /* use the already set error code */
}
else if(!data->set.connect_only && data->set.mail_rcpt &&
- (data->set.upload || data->set.mimepost.kind)) {
+ (data->state.upload || data->set.mimepost.kind)) {
/* Calculate the EOB taking into account any terminating CRLF from the
previous line of the email or the CRLF of the DATA command when there
is "no mail data". RFC-5321, sect. 4.1.1.4.
@@ -1511,7 +1511,7 @@ static CURLcode smtp_perform(struct Curl_easy *data, bool *connected, smtp->eob = 2;
/* Start the first command in the DO phase */
- if((data->set.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
+ if((data->state.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
/* MAIL transfer */
result = smtp_perform_mail(data);
else
diff --git a/libs/libcurl/src/socketpair.c b/libs/libcurl/src/socketpair.c index 5bba59ed90..7733ab3322 100644 --- a/libs/libcurl/src/socketpair.c +++ b/libs/libcurl/src/socketpair.c @@ -24,6 +24,8 @@ #include "curl_setup.h"
#include "socketpair.h"
+#include "urldata.h"
+#include "rand.h"
#if !defined(HAVE_SOCKETPAIR) && !defined(CURL_DISABLE_SOCKETPAIR)
#ifdef WIN32
@@ -125,13 +127,17 @@ int Curl_socketpair(int domain, int type, int protocol, if(socks[1] == CURL_SOCKET_BAD)
goto error;
else {
- struct curltime check;
struct curltime start = Curl_now();
-      char *p = (char *)&check;
+ char rnd[9];
+ char check[sizeof(rnd)];
+ char *p = &check[0];
size_t s = sizeof(check);
+ if(Curl_rand(NULL, (unsigned char *)rnd, sizeof(rnd)))
+ goto error;
+
/* write data to the socket */
- swrite(socks[0], &start, sizeof(start));
+ swrite(socks[0], rnd, sizeof(rnd));
/* verify that we read the correct data */
do {
ssize_t nread;
@@ -168,7 +174,7 @@ int Curl_socketpair(int domain, int type, int protocol, p += nread;
continue;
}
- if(memcmp(&start, &check, sizeof(check)))
+ if(memcmp(rnd, check, sizeof(check)))
goto error;
break;
} while(1);
@@ -177,7 +183,7 @@ int Curl_socketpair(int domain, int type, int protocol, sclose(listener);
return 0;
- error:
+error:
sclose(listener);
sclose(socks[0]);
sclose(socks[1]);
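
The socketpair.c change above replaces the predictable struct curltime cookie with random bytes before verifying that the two ends of the emulated pair really talk to each other. Below is a sketch of just that verification step, using a native socketpair() and rand() as stand-ins for curl's loopback-listener setup and Curl_rand().

/* ---- illustrative sketch, not part of the patch ---- */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
  int sv[2];
  unsigned char cookie[9], echo[9];
  size_t i;
  ssize_t n;

  if(socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
    return 1;

  /* fill the cookie with (weak, demo-only) random bytes */
  for(i = 0; i < sizeof(cookie); i++)
    cookie[i] = (unsigned char)(rand() & 0xff);

  /* send it through one end ... */
  if(write(sv[0], cookie, sizeof(cookie)) != (ssize_t)sizeof(cookie))
    return 1;

  /* ... and insist the exact same bytes come out of the other end
     (9 bytes on a local stream socket arrive in one read in practice) */
  n = read(sv[1], echo, sizeof(echo));
  if(n != (ssize_t)sizeof(echo) || memcmp(cookie, echo, sizeof(echo))) {
    fprintf(stderr, "pair verification failed\n");
    return 1;
  }
  puts("pair verified");
  close(sv[0]);
  close(sv[1]);
  return 0;
}
/* ---- end sketch ---- */
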
diff --git a/libs/libcurl/src/socks.c b/libs/libcurl/src/socks.c index e09df3df4b..5a928b4fdd 100644 --- a/libs/libcurl/src/socks.c +++ b/libs/libcurl/src/socks.c @@ -354,7 +354,7 @@ static CURLproxycode do_SOCKS4(struct Curl_cfilter *cf, }
}
/* FALLTHROUGH */
- CONNECT_RESOLVED:
+CONNECT_RESOLVED:
case CONNECT_RESOLVED: {
struct Curl_addrinfo *hp = NULL;
/*
@@ -394,7 +394,7 @@ static CURLproxycode do_SOCKS4(struct Curl_cfilter *cf, return CURLPX_RESOLVE_HOST;
}
/* FALLTHROUGH */
- CONNECT_REQ_INIT:
+CONNECT_REQ_INIT:
case CONNECT_REQ_INIT:
/*
* This is currently not supporting "Identification Protocol (RFC1413)".
@@ -638,7 +638,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, return CURLPX_OK;
}
/* FALLTHROUGH */
- CONNECT_SOCKS_READ_INIT:
+CONNECT_SOCKS_READ_INIT:
case CONNECT_SOCKS_READ_INIT:
sx->outstanding = 2; /* expect two bytes */
sx->outp = socksreq; /* store it here */
@@ -700,7 +700,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, default: /* do nothing! */
break;
- CONNECT_AUTH_INIT:
+CONNECT_AUTH_INIT:
case CONNECT_AUTH_INIT: {
/* Needs user name and password */
size_t proxy_user_len, proxy_password_len;
@@ -779,7 +779,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, /* Everything is good so far, user was authenticated! */
sxstate(sx, data, CONNECT_REQ_INIT);
/* FALLTHROUGH */
- CONNECT_REQ_INIT:
+CONNECT_REQ_INIT:
case CONNECT_REQ_INIT:
if(socks5_resolve_local) {
enum resolve_t rc = Curl_resolv(data, sx->hostname, sx->remote_port,
@@ -818,7 +818,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, }
}
/* FALLTHROUGH */
- CONNECT_RESOLVED:
+CONNECT_RESOLVED:
case CONNECT_RESOLVED: {
struct Curl_addrinfo *hp = NULL;
size_t destlen;
@@ -873,7 +873,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, Curl_resolv_unlock(data, dns); /* not used anymore from now on */
goto CONNECT_REQ_SEND;
}
- CONNECT_RESOLVE_REMOTE:
+CONNECT_RESOLVE_REMOTE:
case CONNECT_RESOLVE_REMOTE:
/* Authentication is complete, now specify destination to the proxy */
len = 0;
@@ -913,7 +913,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf, }
/* FALLTHROUGH */
- CONNECT_REQ_SEND:
+CONNECT_REQ_SEND:
case CONNECT_REQ_SEND:
/* PORT MSB */
socksreq[len++] = (unsigned char)((sx->remote_port >> 8) & 0xff);
@@ -1238,19 +1238,6 @@ struct Curl_cftype Curl_cft_socks_proxy = { Curl_cf_def_query,
};
-CURLcode Curl_conn_socks_proxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex)
-{
- struct Curl_cfilter *cf;
- CURLcode result;
-
- result = Curl_cf_create(&cf, &Curl_cft_socks_proxy, NULL);
- if(!result)
- Curl_conn_cf_add(data, conn, sockindex, cf);
- return result;
-}
-
CURLcode Curl_cf_socks_proxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data)
{
diff --git a/libs/libcurl/src/socks.h b/libs/libcurl/src/socks.h index d0bbb12d3c..dea1592270 100644 --- a/libs/libcurl/src/socks.h +++ b/libs/libcurl/src/socks.h @@ -51,10 +51,6 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf, struct Curl_easy *data);
#endif
-CURLcode Curl_conn_socks_proxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex);
-
CURLcode Curl_cf_socks_proxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data);
diff --git a/libs/libcurl/src/strerror.c b/libs/libcurl/src/strerror.c index 61f9998812..f85761bb76 100644 --- a/libs/libcurl/src/strerror.c +++ b/libs/libcurl/src/strerror.c @@ -181,13 +181,13 @@ curl_easy_strerror(CURLcode error) case CURLE_INTERFACE_FAILED:
return "Failed binding local connection end";
- case CURLE_TOO_MANY_REDIRECTS :
+ case CURLE_TOO_MANY_REDIRECTS:
return "Number of redirects hit maximum amount";
case CURLE_UNKNOWN_OPTION:
return "An unknown option was passed in to libcurl";
- case CURLE_SETOPT_OPTION_SYNTAX :
+ case CURLE_SETOPT_OPTION_SYNTAX:
return "Malformed option provided in a setopt";
case CURLE_GOT_NOTHING:
diff --git a/libs/libcurl/src/telnet.c b/libs/libcurl/src/telnet.c index d22dc8c1f9..07ba21d807 100644 --- a/libs/libcurl/src/telnet.c +++ b/libs/libcurl/src/telnet.c @@ -770,16 +770,23 @@ static void printsub(struct Curl_easy *data, }
}
+#ifdef _MSC_VER
+#pragma warning(push)
+/* warning C4706: assignment within conditional expression */
+#pragma warning(disable:4706)
+#endif
static bool str_is_nonascii(const char *str)
{
- size_t len = strlen(str);
- while(len--) {
- if(*str & 0x80)
+ char c;
+ while((c = *str++))
+ if(c & 0x80)
return TRUE;
- str++;
- }
+
return FALSE;
}
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
static CURLcode check_telnet_options(struct Curl_easy *data)
{
@@ -1103,7 +1110,7 @@ CURLcode telrcv(struct Curl_easy *data, break;
case CURL_TS_IAC:
- process_iac:
+process_iac:
DEBUGASSERT(startwrite < 0);
switch(c) {
case CURL_WILL:
diff --git a/libs/libcurl/src/tftp.c b/libs/libcurl/src/tftp.c index c5d3f8d101..0464369182 100644 --- a/libs/libcurl/src/tftp.c +++ b/libs/libcurl/src/tftp.c @@ -370,7 +370,7 @@ static CURLcode tftp_parse_option_ack(struct tftp_state_data *state, /* tsize should be ignored on upload: Who cares about the size of the
remote file? */
- if(!data->set.upload) {
+ if(!data->state.upload) {
if(!tsize) {
failf(data, "invalid tsize -:%s:- value in OACK packet", value);
return CURLE_TFTP_ILLEGAL;
@@ -451,7 +451,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state, return result;
}
- if(data->set.upload) {
+ if(data->state.upload) {
/* If we are uploading, send an WRQ */
setpacketevent(&state->spacket, TFTP_EVENT_WRQ);
state->data->req.upload_fromhere =
@@ -486,7 +486,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state, if(!data->set.tftp_no_options) {
char buf[64];
/* add tsize option */
- if(data->set.upload && (data->state.infilesize != -1))
+ if(data->state.upload && (data->state.infilesize != -1))
msnprintf(buf, sizeof(buf), "%" CURL_FORMAT_CURL_OFF_T,
data->state.infilesize);
else
@@ -540,7 +540,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state, break;
case TFTP_EVENT_OACK:
- if(data->set.upload) {
+ if(data->state.upload) {
result = tftp_connect_for_tx(state, event);
}
else {
diff --git a/libs/libcurl/src/transfer.c b/libs/libcurl/src/transfer.c index d0750c46f7..0b561b6c2c 100644 --- a/libs/libcurl/src/transfer.c +++ b/libs/libcurl/src/transfer.c @@ -753,7 +753,7 @@ static CURLcode readwrite_data(struct Curl_easy *data, if(maxloops <= 0) {
/* we mark it as read-again-please */
- conn->cselect_bits = CURL_CSELECT_IN;
+ data->state.dselect_bits = CURL_CSELECT_IN;
*comeback = TRUE;
}
@@ -1065,40 +1065,36 @@ CURLcode Curl_readwrite(struct connectdata *conn, CURLcode result;
struct curltime now;
int didwhat = 0;
+ int select_bits;
- curl_socket_t fd_read;
- curl_socket_t fd_write;
- int select_res = conn->cselect_bits;
- conn->cselect_bits = 0;
-
- /* only use the proper socket if the *_HOLD bit is not set simultaneously as
- then we are in rate limiting state in that transfer direction */
-
- if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
- fd_read = conn->sockfd;
- else
- fd_read = CURL_SOCKET_BAD;
-
- if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
- fd_write = conn->writesockfd;
- else
- fd_write = CURL_SOCKET_BAD;
+ if(data->state.dselect_bits) {
+ select_bits = data->state.dselect_bits;
+ data->state.dselect_bits = 0;
+ }
+ else if(conn->cselect_bits) {
+ select_bits = conn->cselect_bits;
+ conn->cselect_bits = 0;
+ }
+ else {
+ curl_socket_t fd_read;
+ curl_socket_t fd_write;
+ /* only use the proper socket if the *_HOLD bit is not set simultaneously
+ as then we are in rate limiting state in that transfer direction */
+ if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
+ fd_read = conn->sockfd;
+ else
+ fd_read = CURL_SOCKET_BAD;
-#if defined(USE_HTTP2) || defined(USE_HTTP3)
- if(data->state.drain) {
- select_res |= CURL_CSELECT_IN;
- DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data"));
if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
- select_res |= CURL_CSELECT_OUT;
- }
-#endif
+ fd_write = conn->writesockfd;
+ else
+ fd_write = CURL_SOCKET_BAD;
- if(!select_res) /* Call for select()/poll() only, if read/write/error
- status is not known. */
- select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
+ select_bits = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
+ }
- if(select_res == CURL_CSELECT_ERR) {
+ if(select_bits == CURL_CSELECT_ERR) {
failf(data, "select/poll returned error");
result = CURLE_SEND_ERROR;
goto out;
@@ -1106,7 +1102,7 @@ CURLcode Curl_readwrite(struct connectdata *conn, #ifdef USE_HYPER
if(conn->datastream) {
- result = conn->datastream(data, conn, &didwhat, done, select_res);
+ result = conn->datastream(data, conn, &didwhat, done, select_bits);
if(result || *done)
goto out;
}
@@ -1115,14 +1111,14 @@ CURLcode Curl_readwrite(struct connectdata *conn, /* We go ahead and do a read if we have a readable socket or if
the stream was rewound (in which case we have data in a
buffer) */
- if((k->keepon & KEEP_RECV) && (select_res & CURL_CSELECT_IN)) {
+ if((k->keepon & KEEP_RECV) && (select_bits & CURL_CSELECT_IN)) {
result = readwrite_data(data, conn, k, &didwhat, done, comeback);
if(result || *done)
goto out;
}
/* If we still have writing to do, we check if we have a writable socket. */
- if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
+ if((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) {
/* write */
result = readwrite_upload(data, conn, &didwhat);
@@ -1235,7 +1231,6 @@ CURLcode Curl_readwrite(struct connectdata *conn, /* Now update the "done" boolean we return */
*done = (0 == (k->keepon&(KEEP_RECVBITS|KEEP_SENDBITS))) ? TRUE : FALSE;
- result = CURLE_OK;
out:
if(result)
DEBUGF(infof(data, DMSG(data, "Curl_readwrite() -> %d"), result));
@@ -1294,6 +1289,7 @@ void Curl_init_CONNECT(struct Curl_easy *data) {
data->state.fread_func = data->set.fread_func_set;
data->state.in = data->set.in_set;
+ data->state.upload = (data->state.httpreq == HTTPREQ_PUT);
}
/*
@@ -1329,6 +1325,12 @@ CURLcode Curl_pretransfer(struct Curl_easy *data) }
}
+ if(data->set.postfields && data->set.set_resume_from) {
+ /* we can't */
+ failf(data, "cannot mix POSTFIELDS with RESUME_FROM");
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+
data->state.prefer_ascii = data->set.prefer_ascii;
data->state.list_only = data->set.list_only;
data->state.httpreq = data->set.method;
@@ -1408,7 +1410,12 @@ CURLcode Curl_pretransfer(struct Curl_easy *data) return CURLE_OUT_OF_MEMORY;
}
wc = data->wildcard;
- if(wc->state < CURLWC_INIT) {
+ if((wc->state < CURLWC_INIT) ||
+ (wc->state >= CURLWC_CLEAN)) {
+ if(wc->ftpwc)
+ wc->dtor(wc->ftpwc);
+ Curl_safefree(wc->pattern);
+ Curl_safefree(wc->path);
result = Curl_wildcard_init(wc); /* init wildcard structures */
if(result)
return CURLE_OUT_OF_MEMORY;
@@ -1728,7 +1735,6 @@ CURLcode Curl_follow(struct Curl_easy *data, data->state.httpreq != HTTPREQ_POST_MIME) ||
!(data->set.keep_post & CURL_REDIR_POST_303))) {
data->state.httpreq = HTTPREQ_GET;
- data->set.upload = false;
infof(data, "Switch to %s",
data->req.no_body?"HEAD":"GET");
}
@@ -1766,7 +1772,7 @@ CURLcode Curl_retry_request(struct Curl_easy *data, char **url) /* if we're talking upload, we can't do the checks below, unless the protocol
is HTTP as when uploading over HTTP we will still get a response */
- if(data->set.upload &&
+ if(data->state.upload &&
!(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
return CURLE_OK;
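
The reworked Curl_readwrite() above picks its event bits in a fixed order: the per-transfer dselect_bits first, then the connection's cselect_bits, and only when both are empty does it actually check the sockets. A toy illustration of that priority follows, with hypothetical struct names rather than curl's real types.

/* ---- illustrative sketch, not part of the patch ---- */
#include <stdio.h>

#define SEL_IN  0x01
#define SEL_OUT 0x02

struct xfer { int dselect_bits; };      /* per-transfer pending events   */
struct conn { int cselect_bits; };      /* per-connection pending events */

/* transfer bits win over connection bits; otherwise fall back to what a
   socket poll would report (stubbed out here) */
static int pick_bits(struct xfer *x, struct conn *c)
{
  int bits;
  if(x->dselect_bits) {
    bits = x->dselect_bits;
    x->dselect_bits = 0;          /* consume them */
  }
  else if(c->cselect_bits) {
    bits = c->cselect_bits;
    c->cselect_bits = 0;
  }
  else
    bits = SEL_IN | SEL_OUT;      /* stand-in for the socket check */
  return bits;
}

int main(void)
{
  struct xfer x = { SEL_IN };
  struct conn c = { SEL_OUT };
  printf("first pick:  %d\n", pick_bits(&x, &c));  /* 1: transfer bits win */
  printf("second pick: %d\n", pick_bits(&x, &c));  /* 2: connection bits   */
  printf("third pick:  %d\n", pick_bits(&x, &c));  /* 3: would poll        */
  return 0;
}
/* ---- end sketch ---- */
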
diff --git a/libs/libcurl/src/url.c b/libs/libcurl/src/url.c index 48616be963..c4e217a6be 100644 --- a/libs/libcurl/src/url.c +++ b/libs/libcurl/src/url.c @@ -129,7 +129,11 @@ #define ARRAYSIZE(A) (sizeof(A)/sizeof((A)[0]))
#endif
-static void conn_free(struct Curl_easy *data, struct connectdata *conn);
+#ifdef USE_NGHTTP2
+static void data_priority_cleanup(struct Curl_easy *data);
+#else
+#define data_priority_cleanup(x)
+#endif
/* Some parts of the code (e.g. chunked encoding) assume this buffer has at
* more than just a few bytes to play with. Don't let it become too small or
@@ -346,7 +350,6 @@ static void up_free(struct Curl_easy *data) CURLcode Curl_close(struct Curl_easy **datap)
{
- struct Curl_multi *m;
struct Curl_easy *data;
if(!datap || !*datap)
@@ -360,8 +363,7 @@ CURLcode Curl_close(struct Curl_easy **datap) /* Detach connection if any is left. This should not be normal, but can be
the case for example with CONNECT_ONLY + recv/send (test 556) */
Curl_detach_connection(data);
- m = data->multi;
- if(m)
+ if(data->multi)
/* This handle is still part of a multi handle, take care of this first
and detach this handle from there. */
curl_multi_remove_handle(data->multi, data);
@@ -373,11 +375,6 @@ CURLcode Curl_close(struct Curl_easy **datap) data->multi_easy = NULL;
}
- /* Destroy the timeout list that is held in the easy handle. It is
- /normally/ done by curl_multi_remove_handle() but this is "just in
- case" */
- Curl_llist_destroy(&data->state.timeoutlist, NULL);
-
data->magic = 0; /* force a clear AFTER the possibly enforced removal from
the multi handle, since that function uses the magic
field! */
@@ -427,7 +424,7 @@ CURLcode Curl_close(struct Curl_easy **datap) Curl_resolver_cancel(data);
Curl_resolver_cleanup(data->state.async.resolver);
- Curl_data_priority_cleanup(data);
+ data_priority_cleanup(data);
/* No longer a dirty share, if it exists */
if(data->share) {
@@ -1216,17 +1213,19 @@ ConnectionExists(struct Curl_easy *data, if(needle->bits.tunnel_proxy != check->bits.tunnel_proxy)
continue;
- if(needle->http_proxy.proxytype == CURLPROXY_HTTPS) {
+ if(IS_HTTPS_PROXY(needle->http_proxy.proxytype)) {
/* use https proxy */
- if(needle->handler->flags&PROTOPT_SSL) {
+ if(needle->http_proxy.proxytype !=
+ check->http_proxy.proxytype)
+ continue;
+ else if(needle->handler->flags&PROTOPT_SSL) {
/* use double layer ssl */
if(!Curl_ssl_config_matches(&needle->proxy_ssl_config,
&check->proxy_ssl_config))
continue;
}
-
- if(!Curl_ssl_config_matches(&needle->ssl_config,
- &check->ssl_config))
+ else if(!Curl_ssl_config_matches(&needle->ssl_config,
+ &check->ssl_config))
continue;
}
}
@@ -1515,7 +1514,7 @@ static struct connectdata *allocate_conn(struct Curl_easy *data) conn->created = Curl_now();
/* Store current time to give a baseline to keepalive connection times. */
- conn->keepalive = Curl_now();
+ conn->keepalive = conn->created;
#ifndef CURL_DISABLE_PROXY
conn->http_proxy.proxytype = data->set.proxytype;
@@ -1528,8 +1527,8 @@ static struct connectdata *allocate_conn(struct Curl_easy *data) conn->bits.httpproxy = (conn->bits.proxy &&
(conn->http_proxy.proxytype == CURLPROXY_HTTP ||
conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0 ||
- conn->http_proxy.proxytype == CURLPROXY_HTTPS)) ?
- TRUE : FALSE;
+ IS_HTTPS_PROXY(conn->http_proxy.proxytype))) ?
+ TRUE : FALSE;
conn->bits.socksproxy = (conn->bits.proxy &&
!conn->bits.httpproxy) ? TRUE : FALSE;
@@ -1588,11 +1587,11 @@ static struct connectdata *allocate_conn(struct Curl_easy *data) it may live on without (this specific) Curl_easy */
conn->fclosesocket = data->set.fclosesocket;
conn->closesocket_client = data->set.closesocket_client;
- conn->lastused = Curl_now(); /* used now */
+ conn->lastused = conn->created;
conn->gssapi_delegation = data->set.gssapi_delegation;
return conn;
- error:
+error:
free(conn->localdev);
free(conn);
@@ -1760,14 +1759,13 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data, if(!use_set_uh) {
char *newurl;
uc = curl_url_set(uh, CURLUPART_URL, data->state.url,
- CURLU_GUESS_SCHEME |
- CURLU_NON_SUPPORT_SCHEME |
- (data->set.disallow_username_in_url ?
- CURLU_DISALLOW_USER : 0) |
- (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
+ CURLU_GUESS_SCHEME |
+ CURLU_NON_SUPPORT_SCHEME |
+ (data->set.disallow_username_in_url ?
+ CURLU_DISALLOW_USER : 0) |
+ (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
if(uc) {
- DEBUGF(infof(data, "curl_url_set rejected %s: %s", data->state.url,
- curl_url_strerror(uc)));
+ failf(data, "URL rejected: %s", curl_url_strerror(uc));
return Curl_uc_to_curlcode(uc);
}
@@ -1821,11 +1819,6 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data, result = Curl_idnconvert_hostname(&conn->host);
if(result)
return result;
- if(conn->bits.conn_to_host) {
- result = Curl_idnconvert_hostname(&conn->conn_to_host);
- if(result)
- return result;
- }
#ifndef CURL_DISABLE_HSTS
/* HSTS upgrade */
@@ -2161,8 +2154,12 @@ static CURLcode parse_proxy(struct Curl_easy *data, goto error;
}
- if(strcasecompare("https", scheme))
- proxytype = CURLPROXY_HTTPS;
+ if(strcasecompare("https", scheme)) {
+ if(proxytype != CURLPROXY_HTTPS2)
+ proxytype = CURLPROXY_HTTPS;
+ else
+ proxytype = CURLPROXY_HTTPS2;
+ }
else if(strcasecompare("socks5h", scheme))
proxytype = CURLPROXY_SOCKS5_HOSTNAME;
else if(strcasecompare("socks5", scheme))
@@ -2182,7 +2179,8 @@ static CURLcode parse_proxy(struct Curl_easy *data, }
}
else {
- failf(data, "Unsupported proxy syntax in \'%s\'", proxy);
+ failf(data, "Unsupported proxy syntax in \'%s\': %s", proxy,
+ curl_url_strerror(uc));
result = CURLE_COULDNT_RESOLVE_PROXY;
goto error;
}
@@ -2190,9 +2188,9 @@ static CURLcode parse_proxy(struct Curl_easy *data, #ifdef USE_SSL
if(!Curl_ssl_supports(data, SSLSUPP_HTTPS_PROXY))
#endif
- if(proxytype == CURLPROXY_HTTPS) {
+ if(IS_HTTPS_PROXY(proxytype)) {
failf(data, "Unsupported proxy \'%s\', libcurl is built without the "
- "HTTPS-proxy support.", proxy);
+ "HTTPS-proxy support.", proxy);
result = CURLE_NOT_BUILT_IN;
goto error;
}
@@ -2249,7 +2247,7 @@ static CURLcode parse_proxy(struct Curl_easy *data, given */
port = (int)data->set.proxyport;
else {
- if(proxytype == CURLPROXY_HTTPS)
+ if(IS_HTTPS_PROXY(proxytype))
port = CURL_DEFAULT_HTTPS_PROXY_PORT;
else
port = CURL_DEFAULT_PROXY_PORT;
@@ -2307,7 +2305,7 @@ static CURLcode parse_proxy(struct Curl_easy *data, }
#endif
- error:
+error:
free(proxyuser);
free(proxypasswd);
free(host);
@@ -2329,22 +2327,17 @@ static CURLcode parse_proxy_auth(struct Curl_easy *data, data->state.aptr.proxyuser : "";
const char *proxypasswd = data->state.aptr.proxypasswd ?
data->state.aptr.proxypasswd : "";
- CURLcode result = CURLE_OK;
-
- if(proxyuser) {
- result = Curl_urldecode(proxyuser, 0, &conn->http_proxy.user, NULL,
- REJECT_ZERO);
- if(!result)
- result = Curl_setstropt(&data->state.aptr.proxyuser,
- conn->http_proxy.user);
- }
- if(!result && proxypasswd) {
+ CURLcode result = Curl_urldecode(proxyuser, 0, &conn->http_proxy.user, NULL,
+ REJECT_ZERO);
+ if(!result)
+ result = Curl_setstropt(&data->state.aptr.proxyuser,
+ conn->http_proxy.user);
+ if(!result)
result = Curl_urldecode(proxypasswd, 0, &conn->http_proxy.passwd,
NULL, REJECT_ZERO);
- if(!result)
- result = Curl_setstropt(&data->state.aptr.proxypasswd,
- conn->http_proxy.passwd);
- }
+ if(!result)
+ result = Curl_setstropt(&data->state.aptr.proxypasswd,
+ conn->http_proxy.passwd);
return result;
}
@@ -2569,29 +2562,13 @@ CURLcode Curl_parse_login_details(const char *login, const size_t len, size_t plen;
size_t olen;
- /* the input length check is because this is called directly from setopt
- and isn't going through the regular string length check */
- size_t llen = strlen(login);
- if(llen > CURL_MAX_INPUT_LENGTH)
- return CURLE_BAD_FUNCTION_ARGUMENT;
-
/* Attempt to find the password separator */
- if(passwdp) {
- psep = strchr(login, ':');
-
- /* Within the constraint of the login string */
- if(psep >= login + len)
- psep = NULL;
- }
+ if(passwdp)
+ psep = memchr(login, ':', len);
/* Attempt to find the options separator */
- if(optionsp) {
- osep = strchr(login, ';');
-
- /* Within the constraint of the login string */
- if(osep >= login + len)
- osep = NULL;
- }
+ if(optionsp)
+ osep = memchr(login, ';', len);
/* Calculate the portion lengths */
ulen = (psep ?
@@ -2916,17 +2893,16 @@ static CURLcode parse_connect_to_host_port(struct Curl_easy *data, }
/* now, clone the cleaned host name */
- if(hostptr) {
- *hostname_result = strdup(hostptr);
- if(!*hostname_result) {
- result = CURLE_OUT_OF_MEMORY;
- goto error;
- }
+ DEBUGASSERT(hostptr);
+ *hostname_result = strdup(hostptr);
+ if(!*hostname_result) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto error;
}
*port_result = port;
- error:
+error:
free(host_dup);
return result;
}
@@ -3503,6 +3479,11 @@ static CURLcode create_conn(struct Curl_easy *data, return result;
}
#endif
+ if(conn->bits.conn_to_host) {
+ result = Curl_idnconvert_hostname(&conn->conn_to_host);
+ if(result)
+ return result;
+ }
/*************************************************************
* Check whether the host and the "connect to host" are equal.
@@ -4050,9 +4031,9 @@ CURLcode Curl_data_priority_add_child(struct Curl_easy *parent, #endif /* USE_NGHTTP2 */
-void Curl_data_priority_cleanup(struct Curl_easy *data)
-{
#ifdef USE_NGHTTP2
+static void data_priority_cleanup(struct Curl_easy *data)
+{
while(data->set.priority.children) {
struct Curl_easy *tmp = data->set.priority.children->data;
priority_remove_child(data, tmp);
@@ -4062,9 +4043,8 @@ void Curl_data_priority_cleanup(struct Curl_easy *data) if(data->set.priority.parent)
priority_remove_child(data->set.priority.parent, data);
-#endif
- (void)data;
}
+#endif
void Curl_data_priority_clear_state(struct Curl_easy *data)
{
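
Curl_parse_login_details() above now finds the ':' and ';' separators with memchr() bounded by the caller-supplied length, instead of strchr() followed by an after-the-fact range check (the input-length cap moved into setstropt_userpwd()). Here is a simplified sketch of such bounded splitting; it only prints the pieces and skips the real function's ordering and allocation handling.

/* ---- illustrative sketch, not part of the patch ---- */
#include <stdio.h>
#include <string.h>

/* split "user:password;options" without ever looking past 'len' bytes */
static void split_login(const char *login, size_t len)
{
  const char *colon = memchr(login, ':', len);
  const char *semi = memchr(login, ';', len);
  size_t ulen = colon ? (size_t)(colon - login)
                      : (semi ? (size_t)(semi - login) : len);

  printf("user    : %.*s\n", (int)ulen, login);
  if(colon) {
    const char *pw = colon + 1;
    size_t plen = (semi && semi > colon) ? (size_t)(semi - pw)
                                         : len - ulen - 1;
    printf("password: %.*s\n", (int)plen, pw);
  }
  if(semi)
    printf("options : %.*s\n", (int)(len - (size_t)(semi + 1 - login)),
           semi + 1);
}

int main(void)
{
  const char creds[] = "alice:s3cr3t;auth=ntlm";
  split_login(creds, strlen(creds));
  return 0;
}
/* ---- end sketch ---- */
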
diff --git a/libs/libcurl/src/url.h b/libs/libcurl/src/url.h index 1b7b7f1e88..8603d3ff02 100644 --- a/libs/libcurl/src/url.h +++ b/libs/libcurl/src/url.h @@ -60,10 +60,8 @@ void Curl_verboseconnect(struct Curl_easy *data, struct connectdata *conn); #endif
#if defined(USE_HTTP2) || defined(USE_HTTP3)
-void Curl_data_priority_cleanup(struct Curl_easy *data);
void Curl_data_priority_clear_state(struct Curl_easy *data);
#else
-#define Curl_data_priority_cleanup(x)
#define Curl_data_priority_clear_state(x)
#endif /* !(defined(USE_HTTP2) || defined(USE_HTTP3)) */
diff --git a/libs/libcurl/src/urlapi-int.h b/libs/libcurl/src/urlapi-int.h index caf56b93b6..bf759382aa 100644 --- a/libs/libcurl/src/urlapi-int.h +++ b/libs/libcurl/src/urlapi-int.h @@ -28,6 +28,9 @@ size_t Curl_is_absolute_url(const char *url, char *buf, size_t buflen,
bool guess_scheme);
+CURLUcode Curl_url_set_authority(CURLU *u, const char *authority,
+ unsigned int flags);
+
#ifdef DEBUGBUILD
CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host,
bool has_scheme);
diff --git a/libs/libcurl/src/urlapi.c b/libs/libcurl/src/urlapi.c index 780a26747a..9574ed9727 100644 --- a/libs/libcurl/src/urlapi.c +++ b/libs/libcurl/src/urlapi.c @@ -34,6 +34,7 @@ #include "inet_ntop.h"
#include "strdup.h"
#include "idn.h"
+#include "curl_memrchr.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
@@ -375,27 +376,30 @@ static char *concat_url(char *base, const char *relurl) return Curl_dyn_ptr(&newest);
}
-/* scan for byte values < 31 or 127 */
-static bool junkscan(const char *part, unsigned int flags)
+/* scan for byte values <= 31, 127 and sometimes space */
+static CURLUcode junkscan(const char *url, size_t *urllen, unsigned int flags)
{
- if(part) {
- static const char badbytes[]={
- /* */ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- 0x7f, 0x00 /* null-terminate */
- };
- size_t n = strlen(part);
- size_t nfine = strcspn(part, badbytes);
- if(nfine != n)
- /* since we don't know which part is scanned, return a generic error
- code */
- return TRUE;
- if(!(flags & CURLU_ALLOW_SPACE) && strchr(part, ' '))
- return TRUE;
- }
- return FALSE;
+ static const char badbytes[]={
+ /* */ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x7f, 0x00 /* null-terminate */
+ };
+ size_t n = strlen(url);
+ size_t nfine;
+
+ if(n > CURL_MAX_INPUT_LENGTH)
+ /* excessive input length */
+ return CURLUE_MALFORMED_INPUT;
+
+ nfine = strcspn(url, badbytes);
+ if((nfine != n) ||
+ (!(flags & CURLU_ALLOW_SPACE) && strchr(url, ' ')))
+ return CURLUE_MALFORMED_INPUT;
+
+ *urllen = n;
+ return CURLUE_OK;
}
/*
@@ -406,8 +410,10 @@ static bool junkscan(const char *part, unsigned int flags) *
*/
static CURLUcode parse_hostname_login(struct Curl_URL *u,
- struct dynbuf *host,
- unsigned int flags)
+ const char *login,
+ size_t len,
+ unsigned int flags,
+ size_t *offset) /* to the host name */
{
CURLUcode result = CURLUE_OK;
CURLcode ccode;
@@ -423,13 +429,12 @@ static CURLUcode parse_hostname_login(struct Curl_URL *u, *
* We need somewhere to put the embedded details, so do that first.
*/
-
- char *login = Curl_dyn_ptr(host);
char *ptr;
DEBUGASSERT(login);
- ptr = strchr(login, '@');
+ *offset = 0;
+ ptr = memchr(login, '@', len);
if(!ptr)
goto out;
@@ -459,35 +464,25 @@ static CURLUcode parse_hostname_login(struct Curl_URL *u, result = CURLUE_USER_NOT_ALLOWED;
goto out;
}
- if(junkscan(userp, flags)) {
- result = CURLUE_BAD_USER;
- goto out;
- }
+ free(u->user);
u->user = userp;
}
if(passwdp) {
- if(junkscan(passwdp, flags)) {
- result = CURLUE_BAD_PASSWORD;
- goto out;
- }
+ free(u->password);
u->password = passwdp;
}
if(optionsp) {
- if(junkscan(optionsp, flags)) {
- result = CURLUE_BAD_LOGIN;
- goto out;
- }
+ free(u->options);
u->options = optionsp;
}
- /* move the name to the start of the host buffer */
- if(Curl_dyn_tail(host, strlen(ptr)))
- return CURLUE_OUT_OF_MEMORY;
-
+ /* the host name starts at this offset */
+ *offset = ptr - login;
return CURLUE_OK;
- out:
+
+out:
free(userp);
free(passwdp);
@@ -505,8 +500,7 @@ UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host, char *portptr;
char *hostname = Curl_dyn_ptr(host);
/*
- * Find the end of an IPv6 address, either on the ']' ending bracket or
- * a percent-encoded zone index.
+ * Find the end of an IPv6 address on the ']' ending bracket.
*/
if(hostname[0] == '[') {
portptr = strchr(hostname, ']');
@@ -527,7 +521,6 @@ UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host, if(portptr) {
char *rest;
long port;
- char portbuf[7];
size_t keep = portptr - hostname;
/* Browser behavior adaptation. If there's a colon with no digits after,
@@ -553,11 +546,10 @@ UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host, if(rest[0])
return CURLUE_BAD_PORT_NUMBER;
- *rest = 0;
- /* generate a new port number string to get rid of leading zeroes etc */
- msnprintf(portbuf, sizeof(portbuf), "%ld", port);
u->portnum = port;
- u->port = strdup(portbuf);
+ /* generate a new port number string to get rid of leading zeroes etc */
+ free(u->port);
+ u->port = aprintf("%ld", port);
if(!u->port)
return CURLUE_OUT_OF_MEMORY;
}
@@ -565,68 +557,76 @@ UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host, return CURLUE_OK;
}
-static CURLUcode hostname_check(struct Curl_URL *u, char *hostname,
- size_t hlen) /* length of hostname */
+/* this assumes 'hostname' now starts with [ */
+static CURLUcode ipv6_parse(struct Curl_URL *u, char *hostname,
+ size_t hlen) /* length of hostname */
{
size_t len;
- DEBUGASSERT(hostname);
-
- if(!hostname[0])
- return CURLUE_NO_HOST;
- else if(hostname[0] == '[') {
- const char *l = "0123456789abcdefABCDEF:.";
- if(hlen < 4) /* '[::]' is the shortest possible valid string */
- return CURLUE_BAD_IPV6;
- hostname++;
- hlen -= 2;
-
- /* only valid IPv6 letters are ok */
- len = strspn(hostname, l);
-
- if(hlen != len) {
- hlen = len;
- if(hostname[len] == '%') {
- /* this could now be '%[zone id]' */
- char zoneid[16];
- int i = 0;
- char *h = &hostname[len + 1];
- /* pass '25' if present and is a url encoded percent sign */
- if(!strncmp(h, "25", 2) && h[2] && (h[2] != ']'))
- h += 2;
- while(*h && (*h != ']') && (i < 15))
- zoneid[i++] = *h++;
- if(!i || (']' != *h))
- return CURLUE_BAD_IPV6;
- zoneid[i] = 0;
- u->zoneid = strdup(zoneid);
- if(!u->zoneid)
- return CURLUE_OUT_OF_MEMORY;
- hostname[len] = ']'; /* insert end bracket */
- hostname[len + 1] = 0; /* terminate the hostname */
- }
- else
+ DEBUGASSERT(*hostname == '[');
+ if(hlen < 4) /* '[::]' is the shortest possible valid string */
+ return CURLUE_BAD_IPV6;
+ hostname++;
+ hlen -= 2;
+
+ /* only valid IPv6 letters are ok */
+ len = strspn(hostname, "0123456789abcdefABCDEF:.");
+
+ if(hlen != len) {
+ hlen = len;
+ if(hostname[len] == '%') {
+ /* this could now be '%[zone id]' */
+ char zoneid[16];
+ int i = 0;
+ char *h = &hostname[len + 1];
+ /* pass '25' if present and is a url encoded percent sign */
+ if(!strncmp(h, "25", 2) && h[2] && (h[2] != ']'))
+ h += 2;
+ while(*h && (*h != ']') && (i < 15))
+ zoneid[i++] = *h++;
+ if(!i || (']' != *h))
return CURLUE_BAD_IPV6;
- /* hostname is fine */
+ zoneid[i] = 0;
+ u->zoneid = strdup(zoneid);
+ if(!u->zoneid)
+ return CURLUE_OUT_OF_MEMORY;
+ hostname[len] = ']'; /* insert end bracket */
+ hostname[len + 1] = 0; /* terminate the hostname */
}
+ else
+ return CURLUE_BAD_IPV6;
+ /* hostname is fine */
+ }
- /* Check the IPv6 address. */
- {
- char dest[16]; /* fits a binary IPv6 address */
- char norm[MAX_IPADR_LEN];
- hostname[hlen] = 0; /* end the address there */
- if(1 != Curl_inet_pton(AF_INET6, hostname, dest))
- return CURLUE_BAD_IPV6;
+ /* Check the IPv6 address. */
+ {
+ char dest[16]; /* fits a binary IPv6 address */
+ char norm[MAX_IPADR_LEN];
+ hostname[hlen] = 0; /* end the address there */
+ if(1 != Curl_inet_pton(AF_INET6, hostname, dest))
+ return CURLUE_BAD_IPV6;
- /* check if it can be done shorter */
- if(Curl_inet_ntop(AF_INET6, dest, norm, sizeof(norm)) &&
- (strlen(norm) < hlen)) {
- strcpy(hostname, norm);
- hlen = strlen(norm);
- hostname[hlen + 1] = 0;
- }
- hostname[hlen] = ']'; /* restore ending bracket */
+ /* check if it can be done shorter */
+ if(Curl_inet_ntop(AF_INET6, dest, norm, sizeof(norm)) &&
+ (strlen(norm) < hlen)) {
+ strcpy(hostname, norm);
+ hlen = strlen(norm);
+ hostname[hlen + 1] = 0;
}
+ hostname[hlen] = ']'; /* restore ending bracket */
}
+ return CURLUE_OK;
+}
+
+static CURLUcode hostname_check(struct Curl_URL *u, char *hostname,
+ size_t hlen) /* length of hostname */
+{
+ size_t len;
+ DEBUGASSERT(hostname);
+
+ if(!hlen)
+ return CURLUE_NO_HOST;
+ else if(hostname[0] == '[')
+ return ipv6_parse(u, hostname, hlen);
else {
/* letters from the second string are not ok */
len = strcspn(hostname, " \r\n\t/:#?!@{}[]\\$\'\"^`*<>=;,+&()%");
@@ -637,50 +637,52 @@ static CURLUcode hostname_check(struct Curl_URL *u, char *hostname, return CURLUE_OK;
}
-#define HOSTNAME_END(x) (((x) == '/') || ((x) == '?') || ((x) == '#'))
-
/*
* Handle partial IPv4 numerical addresses and different bases, like
* '16843009', '0x7f', '0x7f.1' '0177.1.1.1' etc.
*
- * If the given input string is syntactically wrong or any part for example is
- * too big, this function returns FALSE and doesn't create any output.
+ * If the given input string is not a syntactically valid IPv4 address, or
+ * any part of it is too big, this function returns HOST_NAME.
*
* Output the "normalized" version of that input string in plain quad decimal
- * integers and return TRUE.
+ * integers.
+ *
+ * Returns the host type.
*/
-static bool ipv4_normalize(const char *hostname, char *outp, size_t olen)
+
+#define HOST_ERROR -1 /* out of memory */
+#define HOST_BAD -2 /* bad IPv4 address */
+
+#define HOST_NAME 1
+#define HOST_IPV4 2
+#define HOST_IPV6 3
+
+static int ipv4_normalize(struct dynbuf *host)
{
bool done = FALSE;
int n = 0;
- const char *c = hostname;
+ const char *c = Curl_dyn_ptr(host);
unsigned long parts[4] = {0, 0, 0, 0};
+ CURLcode result = CURLE_OK;
+
+ if(*c == '[')
+ return HOST_IPV6;
while(!done) {
char *endp;
unsigned long l;
- if((*c < '0') || (*c > '9'))
+ if(!ISDIGIT(*c))
/* most importantly this doesn't allow a leading plus or minus */
- return FALSE;
+ return HOST_NAME;
l = strtoul(c, &endp, 0);
- /* overflow or nothing parsed at all */
- if(((l == ULONG_MAX) && (errno == ERANGE)) || (endp == c))
- return FALSE;
-
-#if SIZEOF_LONG > 4
- /* a value larger than 32 bits */
- if(l > UINT_MAX)
- return FALSE;
-#endif
-
parts[n] = l;
c = endp;
- switch (*c) {
- case '.' :
+ switch(*c) {
+ case '.':
if(n == 3)
- return FALSE;
+ return HOST_NAME;
n++;
c++;
break;
@@ -690,51 +692,63 @@ static bool ipv4_normalize(const char *hostname, char *outp, size_t olen) break;
default:
- return FALSE;
+ return HOST_NAME;
}
- }
- /* this is deemed a valid IPv4 numerical address */
+ /* overflow */
+ if((l == ULONG_MAX) && (errno == ERANGE))
+ return HOST_NAME;
+
+#if SIZEOF_LONG > 4
+ /* a value larger than 32 bits */
+ if(l > UINT_MAX)
+ return HOST_NAME;
+#endif
+ }
switch(n) {
case 0: /* a -- 32 bits */
- msnprintf(outp, olen, "%u.%u.%u.%u",
- parts[0] >> 24, (parts[0] >> 16) & 0xff,
- (parts[0] >> 8) & 0xff, parts[0] & 0xff);
+ Curl_dyn_reset(host);
+
+ result = Curl_dyn_addf(host, "%u.%u.%u.%u",
+ parts[0] >> 24, (parts[0] >> 16) & 0xff,
+ (parts[0] >> 8) & 0xff, parts[0] & 0xff);
break;
case 1: /* a.b -- 8.24 bits */
if((parts[0] > 0xff) || (parts[1] > 0xffffff))
- return FALSE;
- msnprintf(outp, olen, "%u.%u.%u.%u",
- parts[0], (parts[1] >> 16) & 0xff,
- (parts[1] >> 8) & 0xff, parts[1] & 0xff);
+ return HOST_NAME;
+ Curl_dyn_reset(host);
+ result = Curl_dyn_addf(host, "%u.%u.%u.%u",
+ parts[0], (parts[1] >> 16) & 0xff,
+ (parts[1] >> 8) & 0xff, parts[1] & 0xff);
break;
case 2: /* a.b.c -- 8.8.16 bits */
if((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xffff))
- return FALSE;
- msnprintf(outp, olen, "%u.%u.%u.%u",
- parts[0], parts[1], (parts[2] >> 8) & 0xff,
- parts[2] & 0xff);
+ return HOST_NAME;
+ Curl_dyn_reset(host);
+ result = Curl_dyn_addf(host, "%u.%u.%u.%u",
+ parts[0], parts[1], (parts[2] >> 8) & 0xff,
+ parts[2] & 0xff);
break;
case 3: /* a.b.c.d -- 8.8.8.8 bits */
if((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff) ||
(parts[3] > 0xff))
- return FALSE;
- msnprintf(outp, olen, "%u.%u.%u.%u",
- parts[0], parts[1], parts[2], parts[3]);
+ return HOST_NAME;
+ Curl_dyn_reset(host);
+ result = Curl_dyn_addf(host, "%u.%u.%u.%u",
+ parts[0], parts[1], parts[2], parts[3]);
break;
}
- return TRUE;
+ if(result)
+ return HOST_ERROR;
+ return HOST_IPV4;
}
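For illustration, the single-number and mixed-base inputs mentioned in the function comment all collapse to plain dotted-quad form. A minimal standalone sketch of the "a -- 32 bits" case (the input string is illustrative, not taken from the curl test suite):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      const char *input = "0x7f000001";          /* hypothetical input */
      unsigned long a = strtoul(input, NULL, 0); /* base 0 accepts 0x/0 prefixes */
      printf("%lu.%lu.%lu.%lu\n",
             a >> 24, (a >> 16) & 0xff, (a >> 8) & 0xff, a & 0xff);
      return 0; /* prints 127.0.0.1 */
    }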
/* if necessary, replace the host content with a URL decoded version */
-static CURLUcode decode_host(struct dynbuf *host)
+static CURLUcode urldecode_host(struct dynbuf *host)
{
char *per = NULL;
const char *hostname = Curl_dyn_ptr(host);
- if(hostname[0] == '[')
- /* only decode if not an ipv6 numerical */
- return CURLUE_OK;
per = strchr(hostname, '%');
if(!per)
/* nothing to decode */
@@ -757,6 +771,78 @@ static CURLUcode decode_host(struct dynbuf *host) return CURLUE_OK;
}
+static CURLUcode parse_authority(struct Curl_URL *u,
+ const char *auth, size_t authlen,
+ unsigned int flags,
+ struct dynbuf *host,
+ bool has_scheme)
+{
+ size_t offset;
+ CURLUcode result;
+
+ /*
+ * Parse the login details and strip them out of the host name.
+ */
+ result = parse_hostname_login(u, auth, authlen, flags, &offset);
+ if(result)
+ goto out;
+
+ if(Curl_dyn_addn(host, auth + offset, authlen - offset)) {
+ result = CURLUE_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ result = Curl_parse_port(u, host, has_scheme);
+ if(result)
+ goto out;
+
+ if(!Curl_dyn_len(host))
+ return CURLUE_NO_HOST;
+
+ switch(ipv4_normalize(host)) {
+ case HOST_IPV4:
+ break;
+ case HOST_IPV6:
+ result = ipv6_parse(u, Curl_dyn_ptr(host), Curl_dyn_len(host));
+ break;
+ case HOST_NAME:
+ result = urldecode_host(host);
+ if(!result)
+ result = hostname_check(u, Curl_dyn_ptr(host), Curl_dyn_len(host));
+ break;
+ case HOST_ERROR:
+ result = CURLUE_OUT_OF_MEMORY;
+ break;
+ case HOST_BAD:
+ default:
+ result = CURLUE_BAD_HOSTNAME; /* a bad IPv4 address ends up here too */
+ break;
+ }
+
+out:
+ return result;
+}
+
+CURLUcode Curl_url_set_authority(CURLU *u, const char *authority,
+ unsigned int flags)
+{
+ CURLUcode result;
+ struct dynbuf host;
+
+ DEBUGASSERT(authority);
+ Curl_dyn_init(&host, CURL_MAX_INPUT_LENGTH);
+
+ result = parse_authority(u, authority, strlen(authority), flags,
+ &host, !!u->scheme);
+ if(result)
+ Curl_dyn_free(&host);
+ else {
+ free(u->host);
+ u->host = Curl_dyn_ptr(&host);
+ }
+ return result;
+}
+
/*
* "Remove Dot Segments"
* https://datatracker.ietf.org/doc/html/rfc3986#section-5.2.4
@@ -781,8 +867,7 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp); UNITTEST int dedotdotify(const char *input, size_t clen, char **outp)
{
char *outptr;
- const char *orginput = input;
- char *queryp;
+ const char *endp = &input[clen];
char *out;
*outp = NULL;
@@ -797,13 +882,6 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp) *out = 0; /* null-terminates, for inputs like "./" */
outptr = out;
- /*
- * To handle query-parts properly, we must find it and remove it during the
- * dotdot-operation and then append it again at the end to the output
- * string.
- */
- queryp = strchr(input, '?');
-
do {
bool dotdot = TRUE;
if(*input == '.') {
@@ -889,17 +967,8 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp) *outptr = 0;
}
- /* continue until end of input string OR, if there is a terminating
- query part, stop there */
- } while(*input && (!queryp || (input < queryp)));
-
- if(queryp) {
- size_t qlen;
- /* There was a query part, append that to the output. */
- size_t oindex = queryp - orginput;
- qlen = strlen(&orginput[oindex]);
- memcpy(outptr, &orginput[oindex], qlen + 1); /* include zero byte */
- }
+ /* continue until end of path */
+ } while(input < endp);
*outp = out;
return 0; /* success */
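As a reminder of what "Remove Dot Segments" does, a minimal sketch exercising the reworked function with one of the RFC 3986 section 5.2.4 examples (this assumes unit-test visibility of dedotdotify() and the new (input, clen, outp) signature shown above):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    void demo(void)
    {
      const char *in = "/a/b/c/./../../g";  /* RFC 3986, section 5.2.4 example */
      char *out = NULL;
      if(!dedotdotify(in, strlen(in), &out)) {
        printf("%s -> %s\n", in, out);      /* expected: /a/g */
        free(out);                          /* output is malloc()ed */
      }
    }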
@@ -909,11 +978,9 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) {
const char *path;
size_t pathlen;
- bool uncpath = FALSE;
char *query = NULL;
char *fragment = NULL;
char schemebuf[MAX_SCHEME_LEN + 1];
- const char *schemep = NULL;
size_t schemelen = 0;
size_t urllen;
CURLUcode result = CURLUE_OK;
@@ -924,16 +991,9 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) Curl_dyn_init(&host, CURL_MAX_INPUT_LENGTH);
- /*************************************************************
- * Parse the URL.
- ************************************************************/
- /* allocate scratch area */
- urllen = strlen(url);
- if(urllen > CURL_MAX_INPUT_LENGTH) {
- /* excessive input length */
- result = CURLUE_MALFORMED_INPUT;
+ result = junkscan(url, &urllen, flags);
+ if(result)
goto fail;
- }
schemelen = Curl_is_absolute_url(url, schemebuf, sizeof(schemebuf),
flags & (CURLU_GUESS_SCHEME|
@@ -941,6 +1001,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) /* handle the file: scheme */
if(schemelen && !strcmp(schemebuf, "file")) {
+ bool uncpath = FALSE;
if(urllen <= 6) {
/* file:/ is not enough to actually be a complete file: URL */
result = CURLUE_BAD_FILE_URL;
@@ -949,8 +1010,9 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) /* path has been allocated large enough to hold this */
path = (char *)&url[5];
+ pathlen = urllen - 5;
- schemep = u->scheme = strdup("file");
+ u->scheme = strdup("file");
if(!u->scheme) {
result = CURLUE_OUT_OF_MEMORY;
goto fail;
@@ -1025,6 +1087,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) }
path = ptr;
+ pathlen = urllen - (ptr - url);
}
if(!uncpath)
@@ -1051,14 +1114,14 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) }
else {
/* clear path */
- const char *p;
+ const char *schemep = NULL;
const char *hostp;
- size_t len;
+ size_t hostlen;
if(schemelen) {
int i = 0;
- p = &url[schemelen + 1];
- while(p && (*p == '/') && (i < 4)) {
+ const char *p = &url[schemelen + 1];
+ while((*p == '/') && (i < 4)) {
p++;
i++;
}
@@ -1070,15 +1133,12 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) goto fail;
}
- if((i < 1) || (i>3)) {
+ if((i < 1) || (i > 3)) {
/* less than one or more than three slashes */
result = CURLUE_BAD_SLASHES;
goto fail;
}
- if(junkscan(schemep, flags)) {
- result = CURLUE_BAD_SCHEME;
- goto fail;
- }
+ hostp = p; /* host name starts here */
}
else {
/* no scheme! */
@@ -1093,61 +1153,99 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) /*
* The URL was badly formatted, let's try without scheme specified.
*/
- p = url;
+ hostp = url;
}
- hostp = p; /* host name starts here */
-
- /* find the end of the host name + port number */
- while(*p && !HOSTNAME_END(*p))
- p++;
- len = p - hostp;
- if(len) {
- if(Curl_dyn_addn(&host, hostp, len)) {
+ if(schemep) {
+ u->scheme = strdup(schemep);
+ if(!u->scheme) {
result = CURLUE_OUT_OF_MEMORY;
goto fail;
}
}
- else {
- if(!(flags & CURLU_NO_AUTHORITY)) {
- result = CURLUE_NO_HOST;
+
+ /* find the end of the host name + port number */
+ hostlen = strcspn(hostp, "/?#");
+ path = &hostp[hostlen];
+
+ /* this pathlen also contains the query and the fragment */
+ pathlen = urllen - (path - url);
+ if(hostlen) {
+
+ result = parse_authority(u, hostp, hostlen, flags, &host, schemelen);
+ if(result)
goto fail;
- }
- }
- path = (char *)p;
+ if((flags & CURLU_GUESS_SCHEME) && !schemep) {
+ const char *hostname = Curl_dyn_ptr(&host);
+ /* legacy curl-style guess based on host name */
+ if(checkprefix("ftp.", hostname))
+ schemep = "ftp";
+ else if(checkprefix("dict.", hostname))
+ schemep = "dict";
+ else if(checkprefix("ldap.", hostname))
+ schemep = "ldap";
+ else if(checkprefix("imap.", hostname))
+ schemep = "imap";
+ else if(checkprefix("smtp.", hostname))
+ schemep = "smtp";
+ else if(checkprefix("pop3.", hostname))
+ schemep = "pop3";
+ else
+ schemep = "http";
- if(schemep) {
- u->scheme = strdup(schemep);
- if(!u->scheme) {
+ u->scheme = strdup(schemep);
+ if(!u->scheme) {
+ result = CURLUE_OUT_OF_MEMORY;
+ goto fail;
+ }
+ }
+ }
+ else if(flags & CURLU_NO_AUTHORITY) {
+ /* allowed to be empty. */
+ if(Curl_dyn_add(&host, "")) {
result = CURLUE_OUT_OF_MEMORY;
goto fail;
}
}
+ else {
+ result = CURLUE_NO_HOST;
+ goto fail;
+ }
}
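The prefix-based scheme guessing above is reachable through the public URL API. A minimal sketch, assuming a libcurl build with the URL API (the host name and path are illustrative):

    #include <stdio.h>
    #include <curl/curl.h>

    int main(void)
    {
      CURLU *u = curl_url();
      char *scheme = NULL;
      if(u &&
         !curl_url_set(u, CURLUPART_URL, "imap.example.com/INBOX",
                       CURLU_GUESS_SCHEME) &&
         !curl_url_get(u, CURLUPART_SCHEME, &scheme, 0)) {
        printf("guessed scheme: %s\n", scheme); /* expected: imap */
        curl_free(scheme);
      }
      curl_url_cleanup(u);
      return 0;
    }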
fragment = strchr(path, '#');
if(fragment) {
- fraglen = strlen(fragment);
+ fraglen = pathlen - (fragment - path);
if(fraglen > 1) {
/* skip the leading '#' in the copy but include the terminating null */
- u->fragment = Curl_memdup(fragment + 1, fraglen);
- if(!u->fragment) {
- result = CURLUE_OUT_OF_MEMORY;
- goto fail;
+ if(flags & CURLU_URLENCODE) {
+ struct dynbuf enc;
+ Curl_dyn_init(&enc, CURL_MAX_INPUT_LENGTH);
+ if(urlencode_str(&enc, fragment + 1, fraglen, TRUE, FALSE)) {
+ result = CURLUE_OUT_OF_MEMORY;
+ goto fail;
+ }
+ u->fragment = Curl_dyn_ptr(&enc);
}
-
- if(junkscan(u->fragment, flags)) {
- result = CURLUE_BAD_FRAGMENT;
- goto fail;
+ else {
+ u->fragment = Curl_memdup(fragment + 1, fraglen);
+ if(!u->fragment) {
+ result = CURLUE_OUT_OF_MEMORY;
+ goto fail;
+ }
}
}
+ /* after this, pathlen still contains the query */
+ pathlen -= fraglen;
}
- query = strchr(path, '?');
- if(query && (!fragment || (query < fragment))) {
- size_t qlen = strlen(query) - fraglen; /* includes '?' */
- pathlen = strlen(path) - qlen - fraglen;
+ DEBUGASSERT(pathlen < urllen);
+ query = memchr(path, '?', pathlen);
+ if(query) {
+ size_t qlen = fragment ? (size_t)(fragment - query) :
+ pathlen - (query - path);
+ pathlen -= qlen;
if(qlen > 1) {
if(flags & CURLU_URLENCODE) {
struct dynbuf enc;
@@ -1167,11 +1265,6 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) }
u->query[qlen - 1] = 0;
}
-
- if(junkscan(u->query, flags)) {
- result = CURLUE_BAD_QUERY;
- goto fail;
- }
}
else {
/* single byte query */
@@ -1182,8 +1275,6 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) }
}
}
- else
- pathlen = strlen(path) - fraglen;
if(pathlen && (flags & CURLU_URLENCODE)) {
struct dynbuf enc;
@@ -1214,11 +1305,6 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) /* it might have encoded more than just the path so cut it */
u->path[pathlen] = 0;
- if(junkscan(u->path, flags)) {
- result = CURLUE_BAD_PATH;
- goto fail;
- }
-
if(!(flags & CURLU_PATH_AS_IS)) {
/* remove ../ and ./ sequences according to RFC3986 */
char *dedot;
@@ -1234,76 +1320,10 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags) }
}
- if(Curl_dyn_len(&host)) {
- char normalized_ipv4[sizeof("255.255.255.255") + 1];
-
- /*
- * Parse the login details and strip them out of the host name.
- */
- result = parse_hostname_login(u, &host, flags);
- if(!result)
- result = Curl_parse_port(u, &host, schemelen);
- if(result)
- goto fail;
-
- if(junkscan(Curl_dyn_ptr(&host), flags)) {
- result = CURLUE_BAD_HOSTNAME;
- goto fail;
- }
-
- if(ipv4_normalize(Curl_dyn_ptr(&host),
- normalized_ipv4, sizeof(normalized_ipv4))) {
- Curl_dyn_reset(&host);
- if(Curl_dyn_add(&host, normalized_ipv4)) {
- result = CURLUE_OUT_OF_MEMORY;
- goto fail;
- }
- }
- else {
- result = decode_host(&host);
- if(!result)
- result = hostname_check(u, Curl_dyn_ptr(&host), Curl_dyn_len(&host));
- if(result)
- goto fail;
- }
-
- if((flags & CURLU_GUESS_SCHEME) && !schemep) {
- const char *hostname = Curl_dyn_ptr(&host);
- /* legacy curl-style guess based on host name */
- if(checkprefix("ftp.", hostname))
- schemep = "ftp";
- else if(checkprefix("dict.", hostname))
- schemep = "dict";
- else if(checkprefix("ldap.", hostname))
- schemep = "ldap";
- else if(checkprefix("imap.", hostname))
- schemep = "imap";
- else if(checkprefix("smtp.", hostname))
- schemep = "smtp";
- else if(checkprefix("pop3.", hostname))
- schemep = "pop3";
- else
- schemep = "http";
-
- u->scheme = strdup(schemep);
- if(!u->scheme) {
- result = CURLUE_OUT_OF_MEMORY;
- goto fail;
- }
- }
- }
- else if(flags & CURLU_NO_AUTHORITY) {
- /* allowed to be empty. */
- if(Curl_dyn_add(&host, "")) {
- result = CURLUE_OUT_OF_MEMORY;
- goto fail;
- }
- }
-
u->host = Curl_dyn_ptr(&host);
return result;
- fail:
+fail:
Curl_dyn_free(&host);
free_urlhandle(u);
return result;
@@ -1366,7 +1386,7 @@ CURLU *curl_url_dup(const CURLU *in) u->portnum = in->portnum;
}
return u;
- fail:
+fail:
curl_url_cleanup(u);
return NULL;
}
@@ -1525,36 +1545,6 @@ CURLUcode curl_url_get(const CURLU *u, CURLUPart what, #endif
}
}
- else {
- /* only encode '%' in output host name */
- char *host = u->host;
- bool percent = FALSE;
- /* first, count number of percents present in the name */
- while(*host) {
- if(*host == '%') {
- percent = TRUE;
- break;
- }
- host++;
- }
- /* if there were percent(s), encode the host name */
- if(percent) {
- struct dynbuf enc;
- CURLcode result;
- Curl_dyn_init(&enc, CURL_MAX_INPUT_LENGTH);
- host = u->host;
- while(*host) {
- if(*host == '%')
- result = Curl_dyn_addn(&enc, "%25", 3);
- else
- result = Curl_dyn_addn(&enc, host, 1);
- if(result)
- return CURLUE_OUT_OF_MEMORY;
- host++;
- }
- allochost = Curl_dyn_ptr(&enc);
- }
- }
url = aprintf("%s://%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
scheme,
@@ -1704,9 +1694,11 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what, }
switch(what) {
- case CURLUPART_SCHEME:
- if(strlen(part) > MAX_SCHEME_LEN)
- /* too long */
+ case CURLUPART_SCHEME: {
+ size_t plen = strlen(part);
+ const char *s = part;
+ if((plen > MAX_SCHEME_LEN) || (plen < 1))
+ /* too long or too short */
return CURLUE_BAD_SCHEME;
if(!(flags & CURLU_NON_SUPPORT_SCHEME) &&
/* verify that it is a fine scheme */
@@ -1714,7 +1706,15 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what, return CURLUE_UNSUPPORTED_SCHEME;
storep = &u->scheme;
urlencode = FALSE; /* never */
+ /* ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) */
+ while(plen--) {
+ if(ISALNUM(*s) || (*s == '+') || (*s == '-') || (*s == '.'))
+ s++; /* fine */
+ else
+ return CURLUE_BAD_SCHEME;
+ }
break;
+ }
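With this added character check, setting a scheme through the public API should reject anything outside the RFC 3986 scheme grammar. A minimal sketch (the scheme strings are illustrative):

    #include <curl/curl.h>

    static void scheme_check_demo(void)
    {
      CURLU *u = curl_url();
      /* allowed characters: letters, digits, '+', '-', '.' */
      CURLUcode ok  = curl_url_set(u, CURLUPART_SCHEME, "coap+tcp",
                                   CURLU_NON_SUPPORT_SCHEME);
      /* a space is outside the grammar: expected CURLUE_BAD_SCHEME */
      CURLUcode bad = curl_url_set(u, CURLUPART_SCHEME, "ht tp",
                                   CURLU_NON_SUPPORT_SCHEME);
      (void)ok; (void)bad;
      curl_url_cleanup(u);
    }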
case CURLUPART_USER:
storep = &u->user;
break;
@@ -1724,15 +1724,10 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what, case CURLUPART_OPTIONS:
storep = &u->options;
break;
- case CURLUPART_HOST: {
- size_t len = strcspn(part, " \r\n");
- if(strlen(part) != len)
- /* hostname with bad content */
- return CURLUE_BAD_HOSTNAME;
+ case CURLUPART_HOST:
storep = &u->host;
Curl_safefree(u->zoneid);
break;
- }
case CURLUPART_ZONEID:
storep = &u->zoneid;
break;
@@ -1882,7 +1877,7 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what, free(*storep);
*storep = Curl_dyn_ptr(&enc);
return CURLUE_OK;
- nomem:
+nomem:
free((char *)newp);
return CURLUE_OUT_OF_MEMORY;
}
@@ -1894,7 +1889,7 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what, /* Skip hostname check, it's allowed to be empty. */
}
else {
- if(hostname_check(u, (char *)newp, n)) {
+ if(!n || hostname_check(u, (char *)newp, n)) {
free((char *)newp);
return CURLUE_BAD_HOSTNAME;
}
diff --git a/libs/libcurl/src/urldata.h b/libs/libcurl/src/urldata.h index 0905e1bc51..0295f37c02 100644 --- a/libs/libcurl/src/urldata.h +++ b/libs/libcurl/src/urldata.h @@ -134,6 +134,7 @@ typedef unsigned int curl_prot_t; #include "hash.h"
#include "splay.h"
#include "dynbuf.h"
+#include "dynhds.h"
/* return the count of bytes sent, or -1 on error */
typedef ssize_t (Curl_send)(struct Curl_easy *data, /* transfer */
@@ -208,8 +209,17 @@ typedef CURLcode (*Curl_datastream)(struct Curl_easy *data, #define UPLOADBUFFER_MIN CURL_MAX_WRITE_SIZE
#define CURLEASY_MAGIC_NUMBER 0xc0dedbadU
+#ifdef DEBUGBUILD
+/* On a debug build, we want to fail hard on easy handles that
+ * are not NULL, but no longer have the MAGIC touch. This gives
+ * us early warning on things only discovered by valgrind otherwise. */
+#define GOOD_EASY_HANDLE(x) \
+ (((x) && ((x)->magic == CURLEASY_MAGIC_NUMBER))? TRUE: \
+ (DEBUGASSERT(!(x)), FALSE))
+#else
#define GOOD_EASY_HANDLE(x) \
((x) && ((x)->magic == CURLEASY_MAGIC_NUMBER))
+#endif
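The debug variant turns a stale but non-NULL handle into an immediate assert instead of a silent FALSE. A sketch of how the macro is typically used as an entry guard inside the library (the function name here is hypothetical):

    /* inside libcurl, after including urldata.h */
    CURLcode example_entry(struct Curl_easy *data)
    {
      if(!GOOD_EASY_HANDLE(data))
        return CURLE_BAD_FUNCTION_ARGUMENT;
      /* ... normal processing ... */
      return CURLE_OK;
    }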
#ifdef HAVE_GSSAPI
/* Types needed for krb5-ftp connections */
@@ -1020,7 +1030,7 @@ struct connectdata { struct mqtt_conn mqtt;
#endif
#ifdef USE_WEBSOCKETS
- struct ws_conn ws;
+ struct websocket *ws;
#endif
} proto;
@@ -1039,7 +1049,6 @@ struct connectdata { wrong connections. */
char *localdev;
unsigned short localportrange;
- int cselect_bits; /* bitmask of socket events */
int waitfor; /* current READ/WRITE bits to wait for */
#if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI)
int socks5_gssapi_enctype;
@@ -1055,8 +1064,12 @@ struct connectdata { unsigned short localport;
unsigned short secondary_port; /* secondary socket remote port to connect to
(ftp) */
+ unsigned char cselect_bits; /* bitmask of socket events */
unsigned char alpn; /* ALPN TLS negotiated protocol, a CURL_HTTP_VERSION*
value */
+#ifndef CURL_DISABLE_PROXY
+ unsigned char proxy_alpn; /* ALPN of proxy tunnel, CURL_HTTP_VERSION* */
+#endif
unsigned char transport; /* one of the TRNSPRT_* defines */
unsigned char ip_version; /* copied from the Curl_easy at creation time */
unsigned char httpversion; /* the HTTP version*10 reported by the server */
@@ -1331,11 +1344,6 @@ struct UrlState { /* a place to store the most recently set (S)FTP entrypath */
char *most_recent_ftp_entrypath;
- unsigned char httpwant; /* when non-zero, a specific HTTP version requested
- to be used in the library's request(s) */
- unsigned char httpversion; /* the lowest HTTP version*10 reported by any
- server involved in this request */
-
#if !defined(WIN32) && !defined(MSDOS) && !defined(__EMX__)
/* do FTP line-end conversions on most platforms */
#define CURL_DO_LINEEND_CONV
@@ -1353,14 +1361,14 @@ struct UrlState { long rtsp_next_client_CSeq; /* the session's next client CSeq */
long rtsp_next_server_CSeq; /* the session's next server CSeq */
long rtsp_CSeq_recv; /* most recent CSeq received */
+
+ unsigned char rtp_channel_mask[32]; /* for the correctness checking of the
+ interleaved data */
#endif
curl_off_t infilesize; /* size of file to upload, -1 means unknown.
Copied from set.filesize at start of operation */
#if defined(USE_HTTP2) || defined(USE_HTTP3)
- size_t drain; /* Increased when this stream has data to read, even if its
- socket is not necessarily is readable. Decreased when
- checked. */
struct Curl_data_priority priority; /* shallow copy of data->set */
#endif
@@ -1368,8 +1376,6 @@ struct UrlState { void *in; /* CURLOPT_READDATA */
CURLU *uh; /* URL handle for the current parsed URL */
struct urlpieces up;
- unsigned char httpreq; /* Curl_HttpReq; what kind of HTTP request (if any)
- is this */
char *url; /* work URL, copied from UserDefined */
char *referer; /* referer string */
struct curl_slist *resolve; /* set to point to the set.resolve list when
@@ -1410,6 +1416,15 @@ struct UrlState { char *proxypasswd;
} aptr;
+ unsigned char httpwant; /* when non-zero, a specific HTTP version requested
+ to be used in the library's request(s) */
+ unsigned char httpversion; /* the lowest HTTP version*10 reported by any
+ server involved in this request */
+ unsigned char httpreq; /* Curl_HttpReq; what kind of HTTP request (if any)
+ is this */
+ unsigned char dselect_bits; /* != 0 -> bitmask of socket events for this
+ transfer overriding anything the socket may
+ report */
#ifdef CURLDEBUG
BIT(conncache_lock);
#endif
@@ -1446,6 +1461,7 @@ struct UrlState { BIT(rewindbeforesend);/* TRUE when the sending couldn't be stopped even
though it will be discarded. We must call the data
rewind callback before trying to send again. */
+ BIT(upload); /* upload request */
};
/*
@@ -1546,6 +1562,7 @@ enum dupstring { STRING_DNS_LOCAL_IP4,
STRING_DNS_LOCAL_IP6,
STRING_SSL_EC_CURVES,
+ STRING_AWS_SIGV4, /* Parameters for V4 signature */
/* -- end of null-terminated strings -- */
@@ -1555,8 +1572,6 @@ enum dupstring { STRING_COPYPOSTFIELDS, /* if POST, set the fields' values here */
- STRING_AWS_SIGV4, /* Parameters for V4 signature */
-
STRING_LAST /* not used, just an end-of-list marker */
};
@@ -1822,7 +1837,6 @@ struct UserDefined { BIT(http_auto_referer); /* set "correct" referer when following
location: */
BIT(opt_no_body); /* as set with CURLOPT_NOBODY */
- BIT(upload); /* upload request */
BIT(verbose); /* output verbosity */
BIT(krb); /* Kerberos connection requested */
BIT(reuse_forbid); /* forbidden to be reused, close after use */
@@ -1894,7 +1908,8 @@ struct Curl_easy { struct Curl_easy *prev;
struct connectdata *conn;
- struct Curl_llist_element connect_queue;
+ struct Curl_llist_element connect_queue; /* for the pending and msgsent
+ lists */
struct Curl_llist_element conn_queue; /* list per connectdata */
CURLMstate mstate; /* the handle's state */
diff --git a/libs/libcurl/src/vauth/digest.c b/libs/libcurl/src/vauth/digest.c index ec9fb2d770..770bac7682 100644 --- a/libs/libcurl/src/vauth/digest.c +++ b/libs/libcurl/src/vauth/digest.c @@ -694,6 +694,7 @@ static CURLcode auth_create_digest_http_message( char *hashthis = NULL;
char *tmp = NULL;
+ memset(hashbuf, 0, sizeof(hashbuf));
if(!digest->nc)
digest->nc = 1;
diff --git a/libs/libcurl/src/vauth/ntlm.c b/libs/libcurl/src/vauth/ntlm.c index 3a3b5799e3..d59d2df0ff 100644 --- a/libs/libcurl/src/vauth/ntlm.c +++ b/libs/libcurl/src/vauth/ntlm.c @@ -380,8 +380,8 @@ CURLcode Curl_auth_create_ntlm_type1_message(struct Curl_easy *data, (void)data;
(void)userp;
(void)passwdp;
- (void)service,
- (void)hostname,
+ (void)service;
+ (void)hostname;
/* Clean up any former leftovers and initialise to defaults */
Curl_auth_cleanup_ntlm(ntlm);
@@ -511,6 +511,8 @@ CURLcode Curl_auth_create_ntlm_type3_message(struct Curl_easy *data, size_t userlen = 0;
size_t domlen = 0;
+ memset(lmresp, 0, sizeof(lmresp));
+ memset(ntresp, 0, sizeof(ntresp));
user = strchr(userp, '\\');
if(!user)
user = strchr(userp, '/');
diff --git a/libs/libcurl/src/vauth/vauth.h b/libs/libcurl/src/vauth/vauth.h index b7d06bc361..56a62574e5 100644 --- a/libs/libcurl/src/vauth/vauth.h +++ b/libs/libcurl/src/vauth/vauth.h @@ -219,7 +219,7 @@ bool Curl_auth_is_spnego_supported(void); message */
CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
const char *user,
- const char *passwood,
+ const char *password,
const char *service,
const char *host,
const char *chlg64,
diff --git a/libs/libcurl/src/vquic/curl_msh3.c b/libs/libcurl/src/vquic/curl_msh3.c index 94c0f829cb..7bf7065774 100644 --- a/libs/libcurl/src/vquic/curl_msh3.c +++ b/libs/libcurl/src/vquic/curl_msh3.c @@ -35,7 +35,7 @@ #include "cf-socket.h"
#include "connect.h"
#include "progress.h"
-#include "h2h3.h"
+#include "http1.h"
#include "curl_msh3.h"
#include "socketpair.h"
#include "vquic/vquic.h"
@@ -45,16 +45,10 @@ #include "curl_memory.h"
#include "memdebug.h"
-#define DEBUG_CF 1
-
-#if DEBUG_CF && defined(DEBUGBUILD)
-#define CF_DEBUGF(x) x
-#else
-#define CF_DEBUGF(x) do { } while(0)
-#endif
-
-#define MSH3_REQ_INIT_BUF_LEN 16384
-#define MSH3_REQ_MAX_BUF_LEN 0x100000
+#define H3_STREAM_WINDOW_SIZE (128 * 1024)
+#define H3_STREAM_CHUNK_SIZE (16 * 1024)
+#define H3_STREAM_RECV_CHUNKS \
+ (H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE)
#ifdef _WIN32
#define msh3_lock CRITICAL_SECTION
@@ -116,6 +110,7 @@ struct cf_msh3_ctx { curl_socket_t sock[2]; /* fake socket pair until we get support in msh3 */
char l_ip[MAX_IPADR_LEN]; /* local IP as string */
int l_port; /* local port number */
+ struct cf_call_data call_data;
struct curltime connect_started; /* time the current attempt started */
struct curltime handshake_at; /* time connect handshake finished */
/* Flags written by msh3/msquic thread */
@@ -127,6 +122,104 @@ struct cf_msh3_ctx { BIT(active);
};
+/* How to access `call_data` from a cf_msh3 filter */
+#define CF_CTX_CALL_DATA(cf) \
+ ((struct cf_msh3_ctx *)(cf)->ctx)->call_data
+
+/**
+ * All about the H3 internals of a stream
+ */
+struct stream_ctx {
+ struct MSH3_REQUEST *req;
+ struct bufq recvbuf; /* h3 response */
+#ifdef _WIN32
+ CRITICAL_SECTION recv_lock;
+#else /* !_WIN32 */
+ pthread_mutex_t recv_lock;
+#endif /* _WIN32 */
+ uint64_t error3; /* HTTP/3 stream error code */
+ int status_code; /* HTTP status code */
+ CURLcode recv_error;
+ bool closed;
+ bool reset;
+ bool upload_done;
+ bool firstheader; /* FALSE until headers arrive */
+ bool recv_header_complete;
+};
+
+#define H3_STREAM_CTX(d) ((struct stream_ctx *)(((d) && (d)->req.p.http)? \
+ ((struct HTTP *)(d)->req.p.http)->h3_ctx \
+ : NULL))
+#define H3_STREAM_LCTX(d) ((struct HTTP *)(d)->req.p.http)->h3_ctx
+#define H3_STREAM_ID(d) (H3_STREAM_CTX(d)? \
+ H3_STREAM_CTX(d)->id : -2)
+
+
+static CURLcode h3_data_setup(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ if(stream)
+ return CURLE_OK;
+
+ stream = calloc(1, sizeof(*stream));
+ if(!stream)
+ return CURLE_OUT_OF_MEMORY;
+
+ H3_STREAM_LCTX(data) = stream;
+ stream->req = ZERO_NULL;
+ msh3_lock_initialize(&stream->recv_lock);
+ Curl_bufq_init2(&stream->recvbuf, H3_STREAM_CHUNK_SIZE,
+ H3_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
+ DEBUGF(LOG_CF(data, cf, "data setup (easy %p)", (void *)data));
+ return CURLE_OK;
+}
+
+static void h3_data_done(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ (void)cf;
+ if(stream) {
+ DEBUGF(LOG_CF(data, cf, "easy handle is done"));
+ Curl_bufq_free(&stream->recvbuf);
+ free(stream);
+ H3_STREAM_LCTX(data) = NULL;
+ }
+}
+
+static void drain_stream_from_other_thread(struct Curl_easy *data,
+ struct stream_ctx *stream)
+{
+ unsigned char bits;
+
+ /* risky */
+ bits = CURL_CSELECT_IN;
+ if(stream && !stream->upload_done)
+ bits |= CURL_CSELECT_OUT;
+ if(data->state.dselect_bits != bits) {
+ data->state.dselect_bits = bits;
+ /* cannot expire from other thread */
+ }
+}
+
+static void drain_stream(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ unsigned char bits;
+
+ (void)cf;
+ bits = CURL_CSELECT_IN;
+ if(stream && !stream->upload_done)
+ bits |= CURL_CSELECT_OUT;
+ if(data->state.dselect_bits != bits) {
+ data->state.dselect_bits = bits;
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ }
+}
+
static const MSH3_CONNECTION_IF msh3_conn_if = {
msh3_conn_connected,
msh3_conn_shutdown_complete,
@@ -136,10 +229,12 @@ static const MSH3_CONNECTION_IF msh3_conn_if = { static void MSH3_CALL msh3_conn_connected(MSH3_CONNECTION *Connection,
void *IfContext)
{
- struct cf_msh3_ctx *ctx = IfContext;
+ struct Curl_cfilter *cf = IfContext;
+ struct cf_msh3_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
(void)Connection;
- if(ctx->verbose)
- CF_DEBUGF(fprintf(stderr, "* [MSH3] evt: connected\n"));
+
+ DEBUGF(LOG_CF(data, cf, "[MSH3] connected"));
ctx->handshake_succeeded = true;
ctx->connected = true;
ctx->handshake_complete = true;
@@ -148,10 +243,12 @@ static void MSH3_CALL msh3_conn_connected(MSH3_CONNECTION *Connection, static void MSH3_CALL msh3_conn_shutdown_complete(MSH3_CONNECTION *Connection,
void *IfContext)
{
- struct cf_msh3_ctx *ctx = IfContext;
+ struct Curl_cfilter *cf = IfContext;
+ struct cf_msh3_ctx *ctx = cf->ctx;
+ struct Curl_easy *data = CF_DATA_CURRENT(cf);
+
(void)Connection;
- if(ctx->verbose)
- CF_DEBUGF(fprintf(stderr, "* [MSH3] evt: shutdown complete\n"));
+ DEBUGF(LOG_CF(data, cf, "[MSH3] shutdown complete"));
ctx->connected = false;
ctx->handshake_complete = true;
}
@@ -173,173 +270,167 @@ static const MSH3_REQUEST_IF msh3_request_if = { msh3_data_sent
};
-static CURLcode msh3_data_setup(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+/* Decode HTTP status code. Returns -1 if no valid status code was
+ decoded. (duplicate from http2.c) */
+static int decode_status_code(const char *value, size_t len)
{
- struct HTTP *stream = data->req.p.http;
- (void)cf;
+ int i;
+ int res;
- DEBUGASSERT(stream);
- if(!stream->recv_buf) {
- DEBUGF(LOG_CF(data, cf, "req: setup"));
- stream->recv_buf = malloc(MSH3_REQ_INIT_BUF_LEN);
- if(!stream->recv_buf) {
- return CURLE_OUT_OF_MEMORY;
+ if(len != 3) {
+ return -1;
+ }
+
+ res = 0;
+
+ for(i = 0; i < 3; ++i) {
+ char c = value[i];
+
+ if(c < '0' || c > '9') {
+ return -1;
}
- stream->req = ZERO_NULL;
- msh3_lock_initialize(&stream->recv_lock);
- stream->recv_buf_alloc = MSH3_REQ_INIT_BUF_LEN;
- stream->recv_buf_max = MSH3_REQ_MAX_BUF_LEN;
- stream->recv_header_len = 0;
- stream->recv_header_complete = false;
- stream->recv_data_len = 0;
- stream->recv_data_complete = false;
- stream->recv_error = CURLE_OK;
+
+ res *= 10;
+ res += c - '0';
}
- return CURLE_OK;
+
+ return res;
}
-/* Requires stream->recv_lock to be held */
-static bool msh3request_ensure_room(struct HTTP *stream, size_t len)
-{
- uint8_t *new_recv_buf;
- const size_t cur_recv_len = stream->recv_header_len + stream->recv_data_len;
-
- if(cur_recv_len + len > stream->recv_buf_alloc) {
- size_t new_recv_buf_alloc_len = stream->recv_buf_alloc;
- do {
- new_recv_buf_alloc_len <<= 1; /* TODO - handle overflow */
- } while(cur_recv_len + len > new_recv_buf_alloc_len);
- CF_DEBUGF(fprintf(stderr, "* enlarging buffer to %zu\n",
- new_recv_buf_alloc_len));
- new_recv_buf = malloc(new_recv_buf_alloc_len);
- if(!new_recv_buf) {
- CF_DEBUGF(fprintf(stderr, "* FAILED: enlarging buffer to %zu\n",
- new_recv_buf_alloc_len));
- return false;
- }
- if(cur_recv_len) {
- memcpy(new_recv_buf, stream->recv_buf, cur_recv_len);
- }
- stream->recv_buf_alloc = new_recv_buf_alloc_len;
- free(stream->recv_buf);
- stream->recv_buf = new_recv_buf;
+/*
+ * write_resp_raw() copies response data in raw format to the `data`'s
+ * receive buffer. If not enough space is available, it appends to the
+ * `data`'s overflow buffer.
+ */
+static CURLcode write_resp_raw(struct Curl_easy *data,
+ const void *mem, size_t memlen)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ CURLcode result = CURLE_OK;
+ ssize_t nwritten;
+
+ if(!stream)
+ return CURLE_RECV_ERROR;
+
+ nwritten = Curl_bufq_write(&stream->recvbuf, mem, memlen, &result);
+ if(nwritten < 0) {
+ return result;
+ }
+
+ if((size_t)nwritten < memlen) {
+ /* This MUST not happen. Our recvbuf is dimensioned to hold the
+ * full max_stream_window and then some for this very reason. */
+ DEBUGASSERT(0);
+ return CURLE_RECV_ERROR;
}
- return true;
+ return result;
}
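The receive path is now a plain bufq producer/consumer pair: the msh3 callbacks write into stream->recvbuf and cf_msh3_recv() below reads it back out. A minimal sketch of that pairing, assuming the bufq API exactly as used in this file (buffer sizes and payload are illustrative):

    struct bufq q;
    unsigned char out[256];
    CURLcode result = CURLE_OK;
    ssize_t n;

    Curl_bufq_init2(&q, 16 * 1024, 8, BUFQ_OPT_SOFT_LIMIT);
    n = Curl_bufq_write(&q, (const unsigned char *)"HTTP/3 200 \r\n", 13,
                        &result);
    if(n >= 0)
      n = Curl_bufq_read(&q, out, sizeof(out), &result);
    Curl_bufq_free(&q);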
static void MSH3_CALL msh3_header_received(MSH3_REQUEST *Request,
- void *IfContext,
- const MSH3_HEADER *Header)
+ void *userp,
+ const MSH3_HEADER *hd)
{
- struct Curl_easy *data = IfContext;
- struct HTTP *stream = data->req.p.http;
- size_t total_len;
+ struct Curl_easy *data = userp;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ CURLcode result;
(void)Request;
- if(stream->recv_header_complete) {
- CF_DEBUGF(fprintf(stderr, "* ignoring header after data\n"));
+ if(!stream || stream->recv_header_complete) {
return;
}
msh3_lock_acquire(&stream->recv_lock);
- if((Header->NameLength == 7) &&
- !strncmp(H2H3_PSEUDO_STATUS, (char *)Header->Name, 7)) {
- total_len = 10 + Header->ValueLength;
- if(!msh3request_ensure_room(stream, total_len)) {
- CF_DEBUGF(fprintf(stderr, "* ERROR: unable to buffer: %.*s\n",
- (int)Header->NameLength, Header->Name));
- stream->recv_error = CURLE_OUT_OF_MEMORY;
- goto release_lock;
- }
- msnprintf((char *)stream->recv_buf + stream->recv_header_len,
- stream->recv_buf_alloc - stream->recv_header_len,
- "HTTP/3 %.*s \r\n", (int)Header->ValueLength, Header->Value);
+ if((hd->NameLength == 7) &&
+ !strncmp(HTTP_PSEUDO_STATUS, (char *)hd->Name, 7)) {
+ char line[14]; /* status line is always 13 characters long */
+ size_t ncopy;
+
+ DEBUGASSERT(!stream->firstheader);
+ stream->status_code = decode_status_code(hd->Value, hd->ValueLength);
+ DEBUGASSERT(stream->status_code != -1);
+ ncopy = msnprintf(line, sizeof(line), "HTTP/3 %03d \r\n",
+ stream->status_code);
+ result = write_resp_raw(data, line, ncopy);
+ if(result)
+ stream->recv_error = result;
+ stream->firstheader = TRUE;
}
else {
- total_len = 4 + Header->NameLength + Header->ValueLength;
- if(!msh3request_ensure_room(stream, total_len)) {
- CF_DEBUGF(fprintf(stderr, "* ERROR: unable to buffer: %.*s\n",
- (int)Header->NameLength, Header->Name));
- stream->recv_error = CURLE_OUT_OF_MEMORY;
- goto release_lock;
+ /* store as an HTTP1-style header */
+ DEBUGASSERT(stream->firstheader);
+ result = write_resp_raw(data, hd->Name, hd->NameLength);
+ if(!result)
+ result = write_resp_raw(data, ": ", 2);
+ if(!result)
+ result = write_resp_raw(data, hd->Value, hd->ValueLength);
+ if(!result)
+ result = write_resp_raw(data, "\r\n", 2);
+ if(result) {
+ stream->recv_error = result;
}
- msnprintf((char *)stream->recv_buf + stream->recv_header_len,
- stream->recv_buf_alloc - stream->recv_header_len,
- "%.*s: %.*s\r\n",
- (int)Header->NameLength, Header->Name,
- (int)Header->ValueLength, Header->Value);
}
- stream->recv_header_len += total_len;
- data->state.drain = 1;
-
-release_lock:
+ drain_stream_from_other_thread(data, stream);
msh3_lock_release(&stream->recv_lock);
}
static bool MSH3_CALL msh3_data_received(MSH3_REQUEST *Request,
- void *IfContext, uint32_t *Length,
- const uint8_t *Data)
+ void *IfContext, uint32_t *buflen,
+ const uint8_t *buf)
{
struct Curl_easy *data = IfContext;
- struct HTTP *stream = data->req.p.http;
- size_t cur_recv_len = stream->recv_header_len + stream->recv_data_len;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ CURLcode result;
+ bool rv = FALSE;
+ /* TODO: we would like to limit the amount of data we buffer here.
+ * There seems to be no mechanism in msh3 to adjust flow control and
+ * it is undocumented what happens if we return FALSE here or a
+ * smaller length (buflen is an inout parameter).
+ */
(void)Request;
- if(data && data->set.verbose)
- CF_DEBUGF(fprintf(stderr, "* [MSH3] req: evt: received %u. %zu buffered, "
- "%zu allocated\n",
- *Length, cur_recv_len, stream->recv_buf_alloc));
- /* TODO - Update this code to limit data bufferring by `stream->recv_buf_max`
- and return `false` when we reach that limit. Then, when curl drains some
- of the buffer, making room, call MsH3RequestSetReceiveEnabled to enable
- receive callbacks again. */
+ if(!stream)
+ return FALSE;
+
msh3_lock_acquire(&stream->recv_lock);
if(!stream->recv_header_complete) {
- if(data && data->set.verbose)
- CF_DEBUGF(fprintf(stderr, "* [MSH3] req: Headers complete!\n"));
- if(!msh3request_ensure_room(stream, 2)) {
- stream->recv_error = CURLE_OUT_OF_MEMORY;
- goto release_lock;
+ result = write_resp_raw(data, "\r\n", 2);
+ if(result) {
+ stream->recv_error = result;
+ goto out;
}
- stream->recv_buf[stream->recv_header_len++] = '\r';
- stream->recv_buf[stream->recv_header_len++] = '\n';
stream->recv_header_complete = true;
- cur_recv_len += 2;
}
- if(!msh3request_ensure_room(stream, *Length)) {
- stream->recv_error = CURLE_OUT_OF_MEMORY;
- goto release_lock;
+
+ result = write_resp_raw(data, buf, *buflen);
+ if(result) {
+ stream->recv_error = result;
}
- memcpy(stream->recv_buf + cur_recv_len, Data, *Length);
- stream->recv_data_len += (size_t)*Length;
- data->state.drain = 1;
+ rv = TRUE;
-release_lock:
+out:
msh3_lock_release(&stream->recv_lock);
- return true;
+ return rv;
}
static void MSH3_CALL msh3_complete(MSH3_REQUEST *Request, void *IfContext,
- bool Aborted, uint64_t AbortError)
+ bool aborted, uint64_t error)
{
struct Curl_easy *data = IfContext;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
(void)Request;
- (void)AbortError;
- if(data && data->set.verbose)
- CF_DEBUGF(fprintf(stderr, "* [MSH3] req: evt: complete, aborted=%s\n",
- Aborted ? "true" : "false"));
+ if(!stream)
+ return;
msh3_lock_acquire(&stream->recv_lock);
- if(Aborted) {
- stream->recv_error = CURLE_HTTP3; /* TODO - how do we pass AbortError? */
- }
+ stream->closed = TRUE;
stream->recv_header_complete = true;
- stream->recv_data_complete = true;
+ if(error)
+ stream->error3 = error;
+ if(aborted)
+ stream->reset = TRUE;
msh3_lock_release(&stream->recv_lock);
}
@@ -347,7 +438,10 @@ static void MSH3_CALL msh3_shutdown_complete(MSH3_REQUEST *Request, void *IfContext)
{
struct Curl_easy *data = IfContext;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ if(!stream)
+ return;
(void)Request;
(void)stream;
}
@@ -356,138 +450,225 @@ static void MSH3_CALL msh3_data_sent(MSH3_REQUEST *Request, void *IfContext, void *SendContext)
{
struct Curl_easy *data = IfContext;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ if(!stream)
+ return;
(void)Request;
(void)stream;
(void)SendContext;
}
+static ssize_t recv_closed_stream(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ CURLcode *err)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ ssize_t nread = -1;
+
+ if(!stream) {
+ *err = CURLE_RECV_ERROR;
+ return -1;
+ }
+ (void)cf;
+ if(stream->reset) {
+ failf(data, "HTTP/3 stream reset by server");
+ *err = CURLE_PARTIAL_FILE;
+ DEBUGF(LOG_CF(data, cf, "cf_recv, was reset -> %d", *err));
+ goto out;
+ }
+ else if(stream->error3) {
+ failf(data, "HTTP/3 stream was not closed cleanly: (error %zd)",
+ (ssize_t)stream->error3);
+ *err = CURLE_HTTP3;
+ DEBUGF(LOG_CF(data, cf, "cf_recv, closed uncleanly -> %d", *err));
+ goto out;
+ }
+ else {
+ DEBUGF(LOG_CF(data, cf, "cf_recv, closed ok -> %d", *err));
+ }
+ *err = CURLE_OK;
+ nread = 0;
+
+out:
+ return nread;
+}
+
+static void set_quic_expire(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ /* we have no indication from msh3 when it would be a good time
+ * to juggle the connection again. As a compromise, we arrange to
+ * be called again every few milliseconds. */
+ (void)cf;
+ if(stream && stream->req && !stream->closed) {
+ Curl_expire(data, 10, EXPIRE_QUIC);
+ }
+ else {
+ Curl_expire(data, 50, EXPIRE_QUIC);
+ }
+}
+
static ssize_t cf_msh3_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
char *buf, size_t len, CURLcode *err)
{
- struct HTTP *stream = data->req.p.http;
- size_t outsize = 0;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ ssize_t nread = -1;
+ struct cf_call_data save;
(void)cf;
+ if(!stream) {
+ *err = CURLE_RECV_ERROR;
+ return -1;
+ }
+ CF_DATA_SAVE(save, cf, data);
DEBUGF(LOG_CF(data, cf, "req: recv with %zu byte buffer", len));
+ msh3_lock_acquire(&stream->recv_lock);
+
if(stream->recv_error) {
failf(data, "request aborted");
- data->state.drain = 0;
*err = stream->recv_error;
- return -1;
+ goto out;
}
*err = CURLE_OK;
- msh3_lock_acquire(&stream->recv_lock);
- if(stream->recv_header_len) {
- outsize = len;
- if(stream->recv_header_len < outsize) {
- outsize = stream->recv_header_len;
- }
- memcpy(buf, stream->recv_buf, outsize);
- if(outsize < stream->recv_header_len + stream->recv_data_len) {
- memmove(stream->recv_buf, stream->recv_buf + outsize,
- stream->recv_header_len + stream->recv_data_len - outsize);
- }
- stream->recv_header_len -= outsize;
- DEBUGF(LOG_CF(data, cf, "req: returned %zu bytes of header", outsize));
- }
- else if(stream->recv_data_len) {
- outsize = len;
- if(stream->recv_data_len < outsize) {
- outsize = stream->recv_data_len;
- }
- memcpy(buf, stream->recv_buf, outsize);
- if(outsize < stream->recv_data_len) {
- memmove(stream->recv_buf, stream->recv_buf + outsize,
- stream->recv_data_len - outsize);
- }
- stream->recv_data_len -= outsize;
- DEBUGF(LOG_CF(data, cf, "req: returned %zu bytes of data", outsize));
- if(stream->recv_data_len == 0 && stream->recv_data_complete)
- data->state.drain = 1;
+ if(!Curl_bufq_is_empty(&stream->recvbuf)) {
+ nread = Curl_bufq_read(&stream->recvbuf,
+ (unsigned char *)buf, len, err);
+ DEBUGF(LOG_CF(data, cf, "read recvbuf(len=%zu) -> %zd, %d",
+ len, nread, *err));
+ if(nread < 0)
+ goto out;
+ if(stream->closed)
+ drain_stream(cf, data);
}
- else if(stream->recv_data_complete) {
- DEBUGF(LOG_CF(data, cf, "req: receive complete"));
- data->state.drain = 0;
+ else if(stream->closed) {
+ nread = recv_closed_stream(cf, data, err);
+ goto out;
}
else {
DEBUGF(LOG_CF(data, cf, "req: nothing here, call again"));
*err = CURLE_AGAIN;
- outsize = -1;
}
+out:
msh3_lock_release(&stream->recv_lock);
-
- return (ssize_t)outsize;
+ set_quic_expire(cf, data);
+ CF_DATA_RESTORE(cf, save);
+ return nread;
}
static ssize_t cf_msh3_send(struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err)
{
struct cf_msh3_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
- struct h2h3req *hreq;
- size_t hdrlen = 0;
- size_t sentlen = 0;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ struct h1_req_parser h1;
+ struct dynhds h2_headers;
+ MSH3_HEADER *nva = NULL;
+ size_t nheader, i;
+ ssize_t nwritten = -1;
+ struct cf_call_data save;
+ bool eos;
+
+ CF_DATA_SAVE(save, cf, data);
+
+ Curl_h1_req_parse_init(&h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
+ Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
/* Sizes must match for cast below to work */
- DEBUGASSERT(sizeof(MSH3_HEADER) == sizeof(struct h2h3pseudo));
+ DEBUGASSERT(stream);
DEBUGF(LOG_CF(data, cf, "req: send %zu bytes", len));
if(!stream->req) {
/* The first send on the request contains the headers and possibly some
data. Parse out the headers and create the request, then if there is
any data left over go ahead and send it too. */
+ nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
+ if(nwritten < 0)
+ goto out;
+ DEBUGASSERT(h1.done);
+ DEBUGASSERT(h1.req);
- *err = msh3_data_setup(cf, data);
+ *err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
if(*err) {
- failf(data, "could not setup data");
- return -1;
+ nwritten = -1;
+ goto out;
}
- *err = Curl_pseudo_headers(data, buf, len, &hdrlen, &hreq);
- if(*err) {
- failf(data, "Curl_pseudo_headers failed");
- return -1;
+ nheader = Curl_dynhds_count(&h2_headers);
+ nva = malloc(sizeof(MSH3_HEADER) * nheader);
+ if(!nva) {
+ *err = CURLE_OUT_OF_MEMORY;
+ nwritten = -1;
+ goto out;
}
- DEBUGF(LOG_CF(data, cf, "req: send %zu headers", hreq->entries));
+ for(i = 0; i < nheader; ++i) {
+ struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
+ nva[i].Name = e->name;
+ nva[i].NameLength = e->namelen;
+ nva[i].Value = e->value;
+ nva[i].ValueLength = e->valuelen;
+ }
+
+ switch(data->state.httpreq) {
+ case HTTPREQ_POST:
+ case HTTPREQ_POST_FORM:
+ case HTTPREQ_POST_MIME:
+ case HTTPREQ_PUT:
+ /* known request body size or -1 */
+ eos = FALSE;
+ break;
+ default:
+ /* there is no request body */
+ eos = TRUE;
+ stream->upload_done = TRUE;
+ break;
+ }
+
+ DEBUGF(LOG_CF(data, cf, "req: send %zu headers", nheader));
stream->req = MsH3RequestOpen(ctx->qconn, &msh3_request_if, data,
- (MSH3_HEADER*)hreq->header, hreq->entries,
- hdrlen == len ? MSH3_REQUEST_FLAG_FIN :
+ nva, nheader,
+ eos ? MSH3_REQUEST_FLAG_FIN :
MSH3_REQUEST_FLAG_NONE);
- Curl_pseudo_free(hreq);
if(!stream->req) {
failf(data, "request open failed");
*err = CURLE_SEND_ERROR;
- return -1;
+ goto out;
}
*err = CURLE_OK;
- return len;
+ nwritten = len;
+ goto out;
}
+ else {
+ /* request is open */
+ DEBUGF(LOG_CF(data, cf, "req: send %zd body bytes", len));
+ if(len > 0xFFFFFFFF) {
+ len = 0xFFFFFFFF;
+ }
- DEBUGF(LOG_CF(data, cf, "req: send %zd body bytes", len));
- if(len > 0xFFFFFFFF) {
- /* msh3 doesn't support size_t sends currently. */
- *err = CURLE_SEND_ERROR;
- return -1;
- }
+ if(!MsH3RequestSend(stream->req, MSH3_REQUEST_FLAG_NONE, buf,
+ (uint32_t)len, stream)) {
+ *err = CURLE_SEND_ERROR;
+ goto out;
+ }
- /* TODO - Need an explicit signal to know when to FIN. */
- if(!MsH3RequestSend(stream->req, MSH3_REQUEST_FLAG_FIN, buf, (uint32_t)len,
- stream)) {
- *err = CURLE_SEND_ERROR;
- return -1;
+ /* TODO - msh3/msquic will hold onto this memory until the send complete
+ event. How do we make sure curl doesn't free it until then? */
+ *err = CURLE_OK;
+ nwritten = len;
}
- /* TODO - msh3/msquic will hold onto this memory until the send complete
- event. How do we make sure curl doesn't free it until then? */
- sentlen += len;
- *err = CURLE_OK;
- return sentlen;
+out:
+ set_quic_expire(cf, data);
+ free(nva);
+ Curl_h1_req_parse_free(&h1);
+ Curl_dynhds_free(&h2_headers);
+ CF_DATA_RESTORE(cf, save);
+ return nwritten;
}
static int cf_msh3_get_select_socks(struct Curl_cfilter *cf,
@@ -495,36 +676,50 @@ static int cf_msh3_get_select_socks(struct Curl_cfilter *cf, curl_socket_t *socks)
{
struct cf_msh3_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
int bitmap = GETSOCK_BLANK;
+ struct cf_call_data save;
+ CF_DATA_SAVE(save, cf, data);
if(stream && ctx->sock[SP_LOCAL] != CURL_SOCKET_BAD) {
socks[0] = ctx->sock[SP_LOCAL];
if(stream->recv_error) {
bitmap |= GETSOCK_READSOCK(0);
- data->state.drain = 1;
+ drain_stream(cf, data);
}
- else if(stream->recv_header_len || stream->recv_data_len) {
+ else if(stream->req) {
bitmap |= GETSOCK_READSOCK(0);
- data->state.drain = 1;
+ drain_stream(cf, data);
}
}
- DEBUGF(LOG_CF(data, cf, "select_sock %u -> %d",
- (uint32_t)data->state.drain, bitmap));
-
+ DEBUGF(LOG_CF(data, cf, "select_sock -> %d", bitmap));
+ CF_DATA_RESTORE(cf, save);
return bitmap;
}
static bool cf_msh3_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ struct cf_call_data save;
+ bool pending = FALSE;
+
+ CF_DATA_SAVE(save, cf, data);
(void)cf;
- DEBUGF(LOG_CF((struct Curl_easy *)data, cf, "data pending = %hhu",
- (bool)(stream->recv_header_len || stream->recv_data_len)));
- return stream->recv_header_len || stream->recv_data_len;
+ if(stream && stream->req) {
+ msh3_lock_acquire(&stream->recv_lock);
+ DEBUGF(LOG_CF((struct Curl_easy *)data, cf, "data pending = %zu",
+ Curl_bufq_len(&stream->recvbuf)));
+ pending = !Curl_bufq_is_empty(&stream->recvbuf);
+ msh3_lock_release(&stream->recv_lock);
+ if(pending)
+ drain_stream(cf, (struct Curl_easy *)data);
+ }
+
+ CF_DATA_RESTORE(cf, save);
+ return pending;
}
static void cf_msh3_active(struct Curl_cfilter *cf, struct Curl_easy *data)
@@ -544,36 +739,52 @@ static void cf_msh3_active(struct Curl_cfilter *cf, struct Curl_easy *data) ctx->active = TRUE;
}
+static CURLcode h3_data_pause(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool pause)
+{
+ if(!pause) {
+ drain_stream(cf, data);
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ }
+ return CURLE_OK;
+}
+
static CURLcode cf_msh3_data_event(struct Curl_cfilter *cf,
struct Curl_easy *data,
int event, int arg1, void *arg2)
{
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ struct cf_call_data save;
CURLcode result = CURLE_OK;
+ CF_DATA_SAVE(save, cf, data);
+
(void)arg1;
(void)arg2;
switch(event) {
case CF_CTRL_DATA_SETUP:
- result = msh3_data_setup(cf, data);
+ result = h3_data_setup(cf, data);
+ break;
+ case CF_CTRL_DATA_PAUSE:
+ result = h3_data_pause(cf, data, (arg1 != 0));
break;
case CF_CTRL_DATA_DONE:
- DEBUGF(LOG_CF(data, cf, "req: done"));
+ h3_data_done(cf, data);
+ break;
+ case CF_CTRL_DATA_DONE_SEND:
+ DEBUGF(LOG_CF(data, cf, "req: send done"));
if(stream) {
- if(stream->recv_buf) {
- Curl_safefree(stream->recv_buf);
- msh3_lock_uninitialize(&stream->recv_lock);
- }
+ stream->upload_done = TRUE;
if(stream->req) {
- MsH3RequestClose(stream->req);
- stream->req = ZERO_NULL;
+ char buf[1];
+ if(!MsH3RequestSend(stream->req, MSH3_REQUEST_FLAG_FIN,
+ buf, 0, data)) {
+ result = CURLE_SEND_ERROR;
+ }
}
}
break;
- case CF_CTRL_DATA_DONE_SEND:
- DEBUGF(LOG_CF(data, cf, "req: send done"));
- stream->upload_done = TRUE;
- break;
case CF_CTRL_CONN_INFO_UPDATE:
DEBUGF(LOG_CF(data, cf, "req: update info"));
cf_msh3_active(cf, data);
@@ -581,6 +792,8 @@ static CURLcode cf_msh3_data_event(struct Curl_cfilter *cf, default:
break;
}
+
+ CF_DATA_RESTORE(cf, save);
return result;
}
@@ -590,9 +803,10 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf, struct cf_msh3_ctx *ctx = cf->ctx;
bool verify = !!cf->conn->ssl_config.verifypeer;
MSH3_ADDR addr = {0};
+ CURLcode result;
+
memcpy(&addr, &ctx->addr.sa_addr, ctx->addr.addrlen);
MSH3_SET_PORT(&addr, (uint16_t)cf->conn->remote_port);
- ctx->verbose = (data && data->set.verbose);
if(verify && (cf->conn->ssl_config.CAfile || cf->conn->ssl_config.CApath)) {
/* TODO: need a way to provide trust anchors to MSH3 */
@@ -618,7 +832,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf, ctx->qconn = MsH3ConnectionOpen(ctx->api,
&msh3_conn_if,
- ctx,
+ cf,
cf->conn->host.name,
&addr,
!verify);
@@ -631,6 +845,10 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf, return CURLE_FAILED_INIT;
}
+ result = h3_data_setup(cf, data);
+ if(result)
+ return result;
+
return CURLE_OK;
}
@@ -639,6 +857,7 @@ static CURLcode cf_msh3_connect(struct Curl_cfilter *cf, bool blocking, bool *done)
{
struct cf_msh3_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
CURLcode result = CURLE_OK;
(void)blocking;
@@ -647,6 +866,8 @@ static CURLcode cf_msh3_connect(struct Curl_cfilter *cf, return CURLE_OK;
}
+ CF_DATA_SAVE(save, cf, data);
+
if(ctx->sock[SP_LOCAL] == CURL_SOCKET_BAD) {
if(Curl_socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx->sock[0]) < 0) {
ctx->sock[SP_LOCAL] = CURL_SOCKET_BAD;
@@ -666,6 +887,7 @@ static CURLcode cf_msh3_connect(struct Curl_cfilter *cf, if(ctx->handshake_complete) {
ctx->handshake_at = Curl_now();
if(ctx->handshake_succeeded) {
+ DEBUGF(LOG_CF(data, cf, "handshake succeeded"));
cf->conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
cf->conn->httpversion = 30;
cf->conn->bundle->multiuse = BUNDLE_MULTIPLEX;
@@ -682,26 +904,35 @@ static CURLcode cf_msh3_connect(struct Curl_cfilter *cf, }
out:
+ CF_DATA_RESTORE(cf, save);
return result;
}
static void cf_msh3_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_msh3_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
(void)data;
+ CF_DATA_SAVE(save, cf, data);
+
if(ctx) {
DEBUGF(LOG_CF(data, cf, "destroying"));
- if(ctx->qconn)
+ if(ctx->qconn) {
MsH3ConnectionClose(ctx->qconn);
- if(ctx->api)
+ ctx->qconn = NULL;
+ }
+ if(ctx->api) {
MsH3ApiClose(ctx->api);
+ ctx->api = NULL;
+ }
if(ctx->active) {
/* We share our socket at cf->conn->sock[cf->sockindex] when active.
* If it is no longer there, someone has stolen (and hopefully
* closed it) and we just forget about it.
*/
+ ctx->active = FALSE;
if(ctx->sock[SP_LOCAL] == cf->conn->sock[cf->sockindex]) {
DEBUGF(LOG_CF(data, cf, "cf_msh3_close(%d) active",
(int)ctx->sock[SP_LOCAL]));
@@ -721,17 +952,22 @@ static void cf_msh3_close(struct Curl_cfilter *cf, struct Curl_easy *data) if(ctx->sock[SP_REMOTE] != CURL_SOCKET_BAD) {
sclose(ctx->sock[SP_REMOTE]);
}
- memset(ctx, 0, sizeof(*ctx));
ctx->sock[SP_LOCAL] = CURL_SOCKET_BAD;
ctx->sock[SP_REMOTE] = CURL_SOCKET_BAD;
}
+ CF_DATA_RESTORE(cf, save);
}
static void cf_msh3_destroy(struct Curl_cfilter *cf, struct Curl_easy *data)
{
+ struct cf_call_data save;
+
+ CF_DATA_SAVE(save, cf, data);
cf_msh3_close(cf, data);
free(cf->ctx);
cf->ctx = NULL;
+ /* no CF_DATA_RESTORE(cf, save); it's gone */
+
}
static CURLcode cf_msh3_query(struct Curl_cfilter *cf,
diff --git a/libs/libcurl/src/vquic/curl_ngtcp2.c b/libs/libcurl/src/vquic/curl_ngtcp2.c index 73d2ca5e5e..f47ec4e7c2 100644 --- a/libs/libcurl/src/vquic/curl_ngtcp2.c +++ b/libs/libcurl/src/vquic/curl_ngtcp2.c @@ -24,7 +24,7 @@ #include "curl_setup.h"
-#ifdef USE_NGTCP2
+#if defined(USE_NGTCP2) && defined(USE_NGHTTP3)
#include <ngtcp2/ngtcp2.h>
#include <nghttp3/nghttp3.h>
@@ -56,10 +56,10 @@ #include "progress.h"
#include "strerror.h"
#include "dynbuf.h"
+#include "http1.h"
#include "select.h"
#include "vquic.h"
#include "vquic_int.h"
-#include "h2h3.h"
#include "vtls/keylog.h"
#include "vtls/vtls.h"
#include "curl_ngtcp2.h"
@@ -75,25 +75,32 @@ #define H3_ALPN_H3_29 "\x5h3-29"
#define H3_ALPN_H3 "\x2h3"
-/*
- * This holds outgoing HTTP/3 stream data that is used by nghttp3 until acked.
- * It is used as a circular buffer. Add new bytes at the end until it reaches
- * the far end, then start over at index 0 again.
- */
-
-#define H3_SEND_SIZE (256*1024)
-struct h3out {
- uint8_t buf[H3_SEND_SIZE];
- size_t used; /* number of bytes used in the buffer */
- size_t windex; /* index in the buffer where to start writing the next
- data block */
-};
-
#define QUIC_MAX_STREAMS (256*1024)
#define QUIC_MAX_DATA (1*1024*1024)
#define QUIC_IDLE_TIMEOUT (60*NGTCP2_SECONDS)
#define QUIC_HANDSHAKE_TIMEOUT (10*NGTCP2_SECONDS)
+/* A stream window is the maximum amount we need to buffer for
+ * each active transfer. We use HTTP/3 flow control and only ACK
+ * when we take things out of the buffer.
+ * Chunk size is large enough to take a full DATA frame */
+#define H3_STREAM_WINDOW_SIZE (128 * 1024)
+#define H3_STREAM_CHUNK_SIZE (16 * 1024)
+/* The pool keeps spares around and half of a full stream window
+ * seems good. More does not seem to improve performance.
+ * The benefit of the pool is that stream buffers do not keep
+ * spares themselves, so memory consumption goes down when streams
+ * run empty, have a large upload done, etc. */
+#define H3_STREAM_POOL_SPARES \
+ (H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE) / 2
+/* Receive and Send max number of chunks just follows from the
+ * chunk size and window size */
+#define H3_STREAM_RECV_CHUNKS \
+ (H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE)
+#define H3_STREAM_SEND_CHUNKS \
+ (H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE)
+
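With the values above, the per-stream buffering works out as follows (plain arithmetic from the macros, shown for clarity):

    H3_STREAM_RECV_CHUNKS = 128 KiB / 16 KiB = 8 chunks
    H3_STREAM_SEND_CHUNKS = 128 KiB / 16 KiB = 8 chunks
    H3_STREAM_POOL_SPARES = 8 / 2            = 4 spare chunks in the pool

so a fully busy stream holds at most 128 KiB per direction, and the shared pool keeps half a window's worth of chunks ready for reuse.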
+
#ifdef USE_OPENSSL
#define QUIC_CIPHERS \
"TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_" \
@@ -133,7 +140,7 @@ struct cf_ngtcp2_ctx { uint32_t version;
ngtcp2_settings settings;
ngtcp2_transport_params transport_params;
- ngtcp2_connection_close_error last_error;
+ ngtcp2_ccerr last_error;
ngtcp2_crypto_conn_ref conn_ref;
#ifdef USE_OPENSSL
SSL_CTX *sslctx;
@@ -147,11 +154,13 @@ struct cf_ngtcp2_ctx { struct cf_call_data call_data;
nghttp3_conn *h3conn;
nghttp3_settings h3settings;
- int qlogfd;
struct curltime started_at; /* time the current attempt started */
struct curltime handshake_at; /* time connect handshake finished */
struct curltime first_byte_at; /* when first byte was recvd */
- struct curltime reconnect_at; /* time the next attempt should start */
+ struct curltime reconnect_at; /* time the next attempt should start */
+ struct bufc_pool stream_bufcp; /* chunk pool for streams */
+ size_t max_stream_window; /* max flow window for one stream */
+ int qlogfd;
BIT(got_first_byte); /* if first byte was received */
};
@@ -159,6 +168,79 @@ struct cf_ngtcp2_ctx { #define CF_CTX_CALL_DATA(cf) \
((struct cf_ngtcp2_ctx *)(cf)->ctx)->call_data
+/**
+ * All about the H3 internals of a stream
+ */
+struct stream_ctx {
+ int64_t id; /* HTTP/3 protocol identifier */
+ struct bufq sendbuf; /* h3 request body */
+ struct bufq recvbuf; /* h3 response body */
+ size_t sendbuf_len_in_flight; /* sendbuf amount "in flight" */
+ size_t recv_buf_nonflow; /* buffered bytes, not counting for flow control */
+ uint64_t error3; /* HTTP/3 stream error code */
+ curl_off_t upload_left; /* number of request bytes left to upload */
+ int status_code; /* HTTP status code */
+ bool resp_hds_complete; /* we have a complete, final response */
+ bool closed; /* TRUE on stream close */
+ bool reset; /* TRUE on stream reset */
+ bool send_closed; /* stream is local closed */
+};
+
+#define H3_STREAM_CTX(d) ((struct stream_ctx *)(((d) && (d)->req.p.http)? \
+ ((struct HTTP *)(d)->req.p.http)->h3_ctx \
+ : NULL))
+#define H3_STREAM_LCTX(d) ((struct HTTP *)(d)->req.p.http)->h3_ctx
+#define H3_STREAM_ID(d) (H3_STREAM_CTX(d)? \
+ H3_STREAM_CTX(d)->id : -2)
+
+static CURLcode h3_data_setup(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_ngtcp2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ if(!data || !data->req.p.http) {
+ failf(data, "initialization failure, transfer not http initialized");
+ return CURLE_FAILED_INIT;
+ }
+
+ if(stream)
+ return CURLE_OK;
+
+ stream = calloc(1, sizeof(*stream));
+ if(!stream)
+ return CURLE_OUT_OF_MEMORY;
+
+ stream->id = -1;
+ /* on send, we control how much we put into the buffer */
+ Curl_bufq_initp(&stream->sendbuf, &ctx->stream_bufcp,
+ H3_STREAM_SEND_CHUNKS, BUFQ_OPT_NONE);
+ stream->sendbuf_len_in_flight = 0;
+ /* on recv, we need a flexible buffer limit since we also write
+ * headers to it that are not counted against the nghttp3 flow limits. */
+ Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
+ H3_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
+ stream->recv_buf_nonflow = 0;
+
+ H3_STREAM_LCTX(data) = stream;
+ DEBUGF(LOG_CF(data, cf, "data setup (easy %p)", (void *)data));
+ return CURLE_OK;
+}
+
+static void h3_data_done(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ (void)cf;
+ if(stream) {
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] easy handle is done",
+ stream->id));
+ Curl_bufq_free(&stream->sendbuf);
+ Curl_bufq_free(&stream->recvbuf);
+ free(stream);
+ H3_STREAM_LCTX(data) = NULL;
+ }
+}
/* ngtcp2 default congestion controller does not perform pacing. Limit
the maximum packet burst to MAX_PKT_BURST packets. */
@@ -168,7 +250,7 @@ static CURLcode cf_process_ingress(struct Curl_cfilter *cf, struct Curl_easy *data);
static CURLcode cf_flush_egress(struct Curl_cfilter *cf,
struct Curl_easy *data);
-static int cb_h3_acked_stream_data(nghttp3_conn *conn, int64_t stream_id,
+static int cb_h3_acked_req_body(nghttp3_conn *conn, int64_t stream_id,
uint64_t datalen, void *user_data,
void *stream_user_data);
@@ -222,7 +304,6 @@ static void quic_settings(struct cf_ngtcp2_ctx *ctx,
{
ngtcp2_settings *s = &ctx->settings;
ngtcp2_transport_params *t = &ctx->transport_params;
- size_t stream_win_size = CURL_MAX_READ_SIZE;
ngtcp2_settings_default(s);
ngtcp2_transport_params_default(t);
@@ -235,13 +316,13 @@ static void quic_settings(struct cf_ngtcp2_ctx *ctx,
(void)data;
s->initial_ts = timestamp();
s->handshake_timeout = QUIC_HANDSHAKE_TIMEOUT;
- s->max_window = 100 * stream_win_size;
- s->max_stream_window = stream_win_size;
+ s->max_window = 100 * ctx->max_stream_window;
+ s->max_stream_window = ctx->max_stream_window;
- t->initial_max_data = 10 * stream_win_size;
- t->initial_max_stream_data_bidi_local = stream_win_size;
- t->initial_max_stream_data_bidi_remote = stream_win_size;
- t->initial_max_stream_data_uni = stream_win_size;
+ t->initial_max_data = 10 * ctx->max_stream_window;
+ t->initial_max_stream_data_bidi_local = ctx->max_stream_window;
+ t->initial_max_stream_data_bidi_remote = ctx->max_stream_window;
+ t->initial_max_stream_data_uni = ctx->max_stream_window;
t->initial_max_streams_bidi = QUIC_MAX_STREAMS;
t->initial_max_streams_uni = QUIC_MAX_STREAMS;
t->max_idle_timeout = QUIC_IDLE_TIMEOUT;
@@ -605,9 +686,11 @@ static void report_consumed_data(struct Curl_cfilter *cf,
struct Curl_easy *data,
size_t consumed)
{
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
struct cf_ngtcp2_ctx *ctx = cf->ctx;
+ if(!stream)
+ return;
/* the HTTP/1.1 response headers are written to the buffer, but
* consuming those does not count against flow control. */
if(stream->recv_buf_nonflow) {
@@ -622,17 +705,11 @@ static void report_consumed_data(struct Curl_cfilter *cf,
}
if(consumed > 0) {
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] consumed %zu DATA bytes",
- stream->stream3_id, consumed));
- ngtcp2_conn_extend_max_stream_offset(ctx->qconn, stream->stream3_id,
+ stream->id, consumed));
+ ngtcp2_conn_extend_max_stream_offset(ctx->qconn, stream->id,
consumed);
ngtcp2_conn_extend_max_offset(ctx->qconn, consumed);
}
- if(!stream->closed && data->state.drain
- && !stream->memlen
- && !Curl_dyn_len(&stream->overflow)) {
- /* nothing buffered any more */
- data->state.drain = 0;
- }
}
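/* [Editor's note] Worked example for the accounting above, assuming the
 * unchanged middle of this function first subtracts the non-flow bytes:
 * if the transfer consumed 1000 bytes of which 120 were synthesized
 * header text (recv_buf_nonflow), only the remaining 880 bytes are
 * credited back to the QUIC flow-control windows via
 * ngtcp2_conn_extend_max_stream_offset() and _extend_max_offset(). */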
static int cb_recv_stream_data(ngtcp2_conn *tconn, uint32_t flags,
@@ -653,9 +730,9 @@ static int cb_recv_stream_data(ngtcp2_conn *tconn, uint32_t flags,
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] read_stream(len=%zu) -> %zd",
stream_id, buflen, nconsumed));
if(nconsumed < 0) {
- ngtcp2_connection_close_error_set_application_error(
- &ctx->last_error,
- nghttp3_err_infer_quic_app_error_code((int)nconsumed), NULL, 0);
+ ngtcp2_ccerr_set_application_error(
+ &ctx->last_error,
+ nghttp3_err_infer_quic_app_error_code((int)nconsumed), NULL, 0);
return NGTCP2_ERR_CALLBACK_FAILURE;
}
@@ -712,8 +789,8 @@ static int cb_stream_close(ngtcp2_conn *tconn, uint32_t flags,
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] quic close(err=%"
PRIu64 ") -> %d", stream3_id, app_error_code, rv));
if(rv) {
- ngtcp2_connection_close_error_set_application_error(
- &ctx->last_error, nghttp3_err_infer_quic_app_error_code(rv), NULL, 0);
+ ngtcp2_ccerr_set_application_error(
+ &ctx->last_error, nghttp3_err_infer_quic_app_error_code(rv), NULL, 0);
return NGTCP2_ERR_CALLBACK_FAILURE;
}
@@ -892,63 +969,69 @@ static int cf_ngtcp2_get_select_socks(struct Curl_cfilter *cf,
struct cf_ngtcp2_ctx *ctx = cf->ctx;
struct SingleRequest *k = &data->req;
int rv = GETSOCK_BLANK;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
struct cf_call_data save;
CF_DATA_SAVE(save, cf, data);
socks[0] = ctx->q.sockfd;
- /* in an HTTP/3 connection we can basically always get a frame so we should
- always be ready for one */
+ /* in HTTP/3 we can always get a frame, so check read */
rv |= GETSOCK_READSOCK(0);
/* we're still uploading or the HTTP/2 layer wants to send data */
if((k->keepon & KEEP_SENDBITS) == KEEP_SEND &&
- (!stream->h3out || stream->h3out->used < H3_SEND_SIZE) &&
ngtcp2_conn_get_cwnd_left(ctx->qconn) &&
ngtcp2_conn_get_max_data_left(ctx->qconn) &&
- nghttp3_conn_is_stream_writable(ctx->h3conn, stream->stream3_id))
+ stream && nghttp3_conn_is_stream_writable(ctx->h3conn, stream->id))
rv |= GETSOCK_WRITESOCK(0);
- DEBUGF(LOG_CF(data, cf, "get_select_socks -> %x (sock=%d)",
- rv, (int)socks[0]));
+ /* DEBUGF(LOG_CF(data, cf, "get_select_socks -> %x (sock=%d)",
+ rv, (int)socks[0])); */
CF_DATA_RESTORE(cf, save);
return rv;
}
-static void notify_drain(struct Curl_cfilter *cf,
+static void drain_stream(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ unsigned char bits;
+
(void)cf;
- if(!data->state.drain) {
- data->state.drain = 1;
+ bits = CURL_CSELECT_IN;
+ if(stream && !stream->send_closed && stream->upload_left)
+ bits |= CURL_CSELECT_OUT;
+ if(data->state.dselect_bits != bits) {
+ data->state.dselect_bits = bits;
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
}
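/* [Editor's note] drain_stream() is the replacement for the removed
 * data->state.drain flag: it records which select bits should be reported
 * for this transfer (CURL_CSELECT_IN, plus _OUT while request body data is
 * still due) and, when that set changes, expires the handle so the
 * transfer loop runs again immediately. */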
-
static int cb_h3_stream_close(nghttp3_conn *conn, int64_t stream_id,
uint64_t app_error_code, void *user_data,
void *stream_user_data)
{
struct Curl_cfilter *cf = user_data;
struct Curl_easy *data = stream_user_data;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
(void)conn;
(void)stream_id;
(void)app_error_code;
(void)cf;
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] h3 close(err=%" PRIx64 ")",
+ /* we might be called by nghttp3 after we already cleaned up */
+ if(!stream)
+ return 0;
+
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] h3 close(err=%" PRId64 ")",
stream_id, app_error_code));
stream->closed = TRUE;
stream->error3 = app_error_code;
if(app_error_code == NGHTTP3_H3_INTERNAL_ERROR) {
- /* TODO: we do not get a specific error when the remote end closed
- * the response before it was complete. */
stream->reset = TRUE;
+ stream->send_closed = TRUE;
}
- notify_drain(cf, data);
+ drain_stream(cf, data);
return 0;
}
@@ -962,34 +1045,30 @@ static CURLcode write_resp_raw(struct Curl_cfilter *cf,
const void *mem, size_t memlen,
bool flow)
{
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
CURLcode result = CURLE_OK;
- const char *buf = mem;
- size_t ncopy = memlen;
- /* copy as much as possible to the receive buffer */
- if(stream->len) {
- size_t len = CURLMIN(ncopy, stream->len);
- memcpy(stream->mem + stream->memlen, buf, len);
- stream->len -= len;
- stream->memlen += len;
- buf += len;
- ncopy -= len;
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] resp_raw: added %zu bytes"
- " to data buffer", stream->stream3_id, len));
- }
- /* copy the rest to the overflow buffer */
- if(ncopy) {
- result = Curl_dyn_addn(&stream->overflow, buf, ncopy);
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] resp_raw: added %zu bytes"
- " to overflow buffer -> %d",
- stream->stream3_id, ncopy, result));
- notify_drain(cf, data);
+ ssize_t nwritten;
+
+ (void)cf;
+ if(!stream) {
+ return CURLE_RECV_ERROR;
+ }
+ nwritten = Curl_bufq_write(&stream->recvbuf, mem, memlen, &result);
+ /* DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] add recvbuf(len=%zu) "
+ "-> %zd, %d", stream->id, memlen, nwritten, result));
+ */
+ if(nwritten < 0) {
+ return result;
}
if(!flow)
- stream->recv_buf_nonflow += memlen;
- if(CF_DATA_CURRENT(cf) != data) {
- notify_drain(cf, data);
+ stream->recv_buf_nonflow += (size_t)nwritten;
+
+ if((size_t)nwritten < memlen) {
+    /* This MUST not happen. Our recvbuf is dimensioned to hold the
+     * full max_stream_window and then some for this very reason. */
+ DEBUGASSERT(0);
+ return CURLE_RECV_ERROR;
}
return result;
}
@@ -1006,6 +1085,7 @@ static int cb_h3_recv_data(nghttp3_conn *conn, int64_t stream3_id,
(void)stream3_id;
result = write_resp_raw(cf, data, buf, buflen, TRUE);
+ drain_stream(cf, data);
return result? -1 : 0;
}
@@ -1025,58 +1105,32 @@ static int cb_h3_deferred_consume(nghttp3_conn *conn, int64_t stream3_id,
return 0;
}
-/* Decode HTTP status code. Returns -1 if no valid status code was
- decoded. (duplicate from http2.c) */
-static int decode_status_code(const uint8_t *value, size_t len)
-{
- int i;
- int res;
-
- if(len != 3) {
- return -1;
- }
-
- res = 0;
-
- for(i = 0; i < 3; ++i) {
- char c = value[i];
-
- if(c < '0' || c > '9') {
- return -1;
- }
-
- res *= 10;
- res += c - '0';
- }
-
- return res;
-}
-
static int cb_h3_end_headers(nghttp3_conn *conn, int64_t stream_id,
int fin, void *user_data, void *stream_user_data)
{
struct Curl_cfilter *cf = user_data;
struct Curl_easy *data = stream_user_data;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
CURLcode result = CURLE_OK;
(void)conn;
(void)stream_id;
(void)fin;
(void)cf;
+ if(!stream)
+ return 0;
/* add a CRLF only if we've received some headers */
- if(stream->firstheader) {
- result = write_resp_raw(cf, data, "\r\n", 2, FALSE);
- if(result) {
- return -1;
- }
+ result = write_resp_raw(cf, data, "\r\n", 2, FALSE);
+ if(result) {
+ return -1;
}
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] end_headers(status_code=%d",
stream_id, stream->status_code));
if(stream->status_code / 100 != 1) {
- stream->bodystarted = TRUE;
+ stream->resp_hds_complete = TRUE;
}
+ drain_stream(cf, data);
return 0;
}
@@ -1089,7 +1143,7 @@ static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id,
nghttp3_vec h3name = nghttp3_rcbuf_get_buf(name);
nghttp3_vec h3val = nghttp3_rcbuf_get_buf(value);
struct Curl_easy *data = stream_user_data;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
CURLcode result = CURLE_OK;
(void)conn;
(void)stream_id;
@@ -1097,13 +1151,18 @@ static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id,
(void)flags;
(void)cf;
+ /* we might have cleaned up this transfer already */
+ if(!stream)
+ return 0;
+
if(token == NGHTTP3_QPACK_TOKEN__STATUS) {
char line[14]; /* status line is always 13 characters long */
size_t ncopy;
- DEBUGASSERT(!stream->firstheader);
- stream->status_code = decode_status_code(h3val.base, h3val.len);
- DEBUGASSERT(stream->status_code != -1);
+ result = Curl_http_decode_status(&stream->status_code,
+ (const char *)h3val.base, h3val.len);
+ if(result)
+ return -1;
ncopy = msnprintf(line, sizeof(line), "HTTP/3 %03d \r\n",
stream->status_code);
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] status: %s",
@@ -1112,11 +1171,9 @@ static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id,
if(result) {
return -1;
}
- stream->firstheader = TRUE;
}
else {
/* store as an HTTP1-style header */
- DEBUGASSERT(stream->firstheader);
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] header: %.*s: %.*s",
stream_id, (int)h3name.len, h3name.base,
(int)h3val.len, h3val.base));
@@ -1179,7 +1236,7 @@ static int cb_h3_reset_stream(nghttp3_conn *conn, int64_t stream_id,
}
static nghttp3_callbacks ngh3_callbacks = {
- cb_h3_acked_stream_data, /* acked_stream_data */
+ cb_h3_acked_req_body, /* acked_stream_data */
cb_h3_stream_close,
cb_h3_recv_data,
cb_h3_deferred_consume,
@@ -1202,7 +1259,7 @@ static int init_ngh3_conn(struct Curl_cfilter *cf)
int rc;
int64_t ctrl_stream_id, qpack_enc_stream_id, qpack_dec_stream_id;
- if(ngtcp2_conn_get_max_local_streams_uni(ctx->qconn) < 3) {
+ if(ngtcp2_conn_get_streams_uni_left(ctx->qconn) < 3) {
return CURLE_QUIC_CONNECT_ERROR;
}
@@ -1250,80 +1307,55 @@ static int init_ngh3_conn(struct Curl_cfilter *cf)
}
return CURLE_OK;
- fail:
+fail:
return result;
}
-static void drain_overflow_buffer(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct HTTP *stream = data->req.p.http;
- size_t overlen = Curl_dyn_len(&stream->overflow);
- size_t ncopy = CURLMIN(overlen, stream->len);
-
- (void)cf;
- if(ncopy > 0) {
- memcpy(stream->mem + stream->memlen,
- Curl_dyn_ptr(&stream->overflow), ncopy);
- stream->len -= ncopy;
- stream->memlen += ncopy;
- if(ncopy != overlen)
- /* make the buffer only keep the tail */
- (void)Curl_dyn_tail(&stream->overflow, overlen - ncopy);
- else {
- Curl_dyn_reset(&stream->overflow);
- }
- }
-}
-
static ssize_t recv_closed_stream(struct Curl_cfilter *cf,
struct Curl_easy *data,
+ struct stream_ctx *stream,
CURLcode *err)
{
- struct HTTP *stream = data->req.p.http;
ssize_t nread = -1;
(void)cf;
-
if(stream->reset) {
failf(data,
- "HTTP/3 stream %" PRId64 " reset by server", stream->stream3_id);
+ "HTTP/3 stream %" PRId64 " reset by server", stream->id);
*err = CURLE_PARTIAL_FILE;
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv, was reset -> %d",
- stream->stream3_id, *err));
+ stream->id, *err));
goto out;
}
else if(stream->error3 != NGHTTP3_H3_NO_ERROR) {
failf(data,
- "HTTP/3 stream %" PRId64 " was not closed cleanly: (err 0x%" PRIx64
- ")",
- stream->stream3_id, stream->error3);
+ "HTTP/3 stream %" PRId64 " was not closed cleanly: "
+ "(err %"PRId64")", stream->id, stream->error3);
*err = CURLE_HTTP3;
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv, closed uncleanly"
- " -> %d", stream->stream3_id, *err));
+ " -> %d", stream->id, *err));
goto out;
}
- if(!stream->bodystarted) {
+ if(!stream->resp_hds_complete) {
failf(data,
"HTTP/3 stream %" PRId64 " was closed cleanly, but before getting"
" all response header fields, treated as error",
- stream->stream3_id);
+ stream->id);
*err = CURLE_HTTP3;
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv, closed incomplete"
- " -> %d", stream->stream3_id, *err));
+ " -> %d", stream->id, *err));
goto out;
}
else {
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv, closed ok"
- " -> %d", stream->stream3_id, *err));
+ " -> %d", stream->id, *err));
}
*err = CURLE_OK;
nread = 0;
out:
- data->state.drain = 0;
return nread;
}
@@ -1332,7 +1364,7 @@ static ssize_t cf_ngtcp2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
char *buf, size_t len, CURLcode *err)
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
ssize_t nread = -1;
struct cf_call_data save;
@@ -1345,25 +1377,20 @@ static ssize_t cf_ngtcp2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
DEBUGASSERT(ctx->h3conn);
*err = CURLE_OK;
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv(len=%zu) start",
- stream->stream3_id, len));
- /* TODO: this implementation of response DATA buffering is fragile.
- * It makes the following assumptions:
- * - the `buf` passed here has the same lifetime as the easy handle
- * - data returned in `buf` from this call is immediately used and `buf`
- * can be overwritten during any handling of other transfers at
- * this connection.
- */
- if(!stream->memlen) {
- /* `buf` was not known before or is currently not used by stream,
- * assign it (again). */
- stream->mem = buf;
- stream->len = len;
+ if(!stream) {
+ *err = CURLE_RECV_ERROR;
+ goto out;
}
- /* if there's data in the overflow buffer, move as much
- as possible to the receive buffer now */
- drain_overflow_buffer(cf, data);
+ if(!Curl_bufq_is_empty(&stream->recvbuf)) {
+ nread = Curl_bufq_read(&stream->recvbuf,
+ (unsigned char *)buf, len, err);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] read recvbuf(len=%zu) "
+ "-> %zd, %d", stream->id, len, nread, *err));
+ if(nread < 0)
+ goto out;
+ report_consumed_data(cf, data, nread);
+ }
if(cf_process_ingress(cf, data)) {
*err = CURLE_RECV_ERROR;
@@ -1371,270 +1398,268 @@ static ssize_t cf_ngtcp2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
goto out;
}
- if(stream->memlen) {
- nread = stream->memlen;
- /* reset to allow more data to come */
- /* TODO: very brittle buffer use design:
- * - stream->mem has now `nread` bytes of response data
- * - we assume that the caller will use those immediately and
- * we can overwrite that with new data on our next invocation from
- * anywhere.
- */
- stream->mem = buf;
- stream->memlen = 0;
- stream->len = len;
- /* extend the stream window with the data we're consuming and send out
- any additional packets to tell the server that we can receive more */
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv -> %zd bytes",
- stream->stream3_id, nread));
+ /* recvbuf had nothing before, maybe after progressing ingress? */
+ if(nread < 0 && !Curl_bufq_is_empty(&stream->recvbuf)) {
+ nread = Curl_bufq_read(&stream->recvbuf,
+ (unsigned char *)buf, len, err);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] read recvbuf(len=%zu) "
+ "-> %zd, %d", stream->id, len, nread, *err));
+ if(nread < 0)
+ goto out;
report_consumed_data(cf, data, nread);
- if(cf_flush_egress(cf, data)) {
- *err = CURLE_SEND_ERROR;
- nread = -1;
- }
- goto out;
}
- if(stream->closed) {
- nread = recv_closed_stream(cf, data, err);
- goto out;
+ if(nread > 0) {
+ drain_stream(cf, data);
+ }
+ else {
+ if(stream->closed) {
+ nread = recv_closed_stream(cf, data, stream, err);
+ goto out;
+ }
+ *err = CURLE_AGAIN;
+ nread = -1;
}
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv -> EAGAIN",
- stream->stream3_id));
- *err = CURLE_AGAIN;
- nread = -1;
out:
if(cf_flush_egress(cf, data)) {
*err = CURLE_SEND_ERROR;
nread = -1;
- goto out;
}
-
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv(len=%zu) -> %zd, %d",
+ stream? stream->id : -1, len, nread, *err));
CF_DATA_RESTORE(cf, save);
return nread;
}
-/* this amount of data has now been acked on this stream */
-static int cb_h3_acked_stream_data(nghttp3_conn *conn, int64_t stream_id,
- uint64_t datalen, void *user_data,
- void *stream_user_data)
+static int cb_h3_acked_req_body(nghttp3_conn *conn, int64_t stream_id,
+ uint64_t datalen, void *user_data,
+ void *stream_user_data)
{
struct Curl_cfilter *cf = user_data;
struct Curl_easy *data = stream_user_data;
- struct HTTP *stream = data->req.p.http;
- (void)user_data;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ size_t skiplen;
(void)cf;
- if(!data->set.postfields) {
- stream->h3out->used -= datalen;
- DEBUGF(LOG_CF(data, cf, "cb_h3_acked_stream_data, %"PRIu64" bytes, "
- "%zd left unacked", datalen, stream->h3out->used));
- DEBUGASSERT(stream->h3out->used < H3_SEND_SIZE);
-
- if(stream->h3out->used == 0) {
- int rv = nghttp3_conn_resume_stream(conn, stream_id);
- if(rv) {
- return NGTCP2_ERR_CALLBACK_FAILURE;
- }
+ if(!stream)
+ return 0;
+  /* The server acknowledged `datalen` bytes of our request body.
+ * This is a delta. We have kept this data in `sendbuf` for
+ * re-transmissions and can free it now. */
+ if(datalen >= (uint64_t)stream->sendbuf_len_in_flight)
+ skiplen = stream->sendbuf_len_in_flight;
+ else
+ skiplen = (size_t)datalen;
+ Curl_bufq_skip(&stream->sendbuf, skiplen);
+ stream->sendbuf_len_in_flight -= skiplen;
+
+ /* `sendbuf` *might* now have more room. If so, resume this
+ * possibly paused stream. And also tell our transfer engine that
+ * it may continue KEEP_SEND if told to PAUSE. */
+ if(!Curl_bufq_is_full(&stream->sendbuf)) {
+ int rv = nghttp3_conn_resume_stream(conn, stream_id);
+ if(rv) {
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+ }
+ if((data->req.keepon & KEEP_SEND_HOLD) &&
+ (data->req.keepon & KEEP_SEND)) {
+ data->req.keepon &= ~KEEP_SEND_HOLD;
+ drain_stream(cf, data);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] unpausing acks",
+ stream_id));
}
}
return 0;
}
-static nghttp3_ssize cb_h3_readfunction(nghttp3_conn *conn, int64_t stream_id,
- nghttp3_vec *vec, size_t veccnt,
- uint32_t *pflags, void *user_data,
- void *stream_user_data)
+static nghttp3_ssize
+cb_h3_read_req_body(nghttp3_conn *conn, int64_t stream_id,
+ nghttp3_vec *vec, size_t veccnt,
+ uint32_t *pflags, void *user_data,
+ void *stream_user_data)
{
struct Curl_cfilter *cf = user_data;
struct Curl_easy *data = stream_user_data;
- size_t nread;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ ssize_t nwritten = 0;
+ size_t nvecs = 0;
(void)cf;
(void)conn;
(void)stream_id;
(void)user_data;
(void)veccnt;
- if(data->set.postfields) {
- vec[0].base = data->set.postfields;
- vec[0].len = data->state.infilesize;
- *pflags = NGHTTP3_DATA_FLAG_EOF;
- return 1;
+ if(!stream)
+ return NGHTTP3_ERR_CALLBACK_FAILURE;
+ /* nghttp3 keeps references to the sendbuf data until it is ACKed
+ * by the server (see `cb_h3_acked_req_body()` for updates).
+ * `sendbuf_len_in_flight` is the amount of bytes in `sendbuf`
+ * that we have already passed to nghttp3, but which have not been
+ * ACKed yet.
+ * Any amount beyond `sendbuf_len_in_flight` we need still to pass
+ * to nghttp3. Do that now, if we can. */
+ if(stream->sendbuf_len_in_flight < Curl_bufq_len(&stream->sendbuf)) {
+ nvecs = 0;
+ while(nvecs < veccnt &&
+ Curl_bufq_peek_at(&stream->sendbuf,
+ stream->sendbuf_len_in_flight,
+ (const unsigned char **)&vec[nvecs].base,
+ &vec[nvecs].len)) {
+ stream->sendbuf_len_in_flight += vec[nvecs].len;
+ nwritten += vec[nvecs].len;
+ ++nvecs;
+ }
+    DEBUGASSERT(nvecs > 0); /* we SHOULD have been able to peek */
}
- if(stream->upload_len && H3_SEND_SIZE <= stream->h3out->used) {
- return NGHTTP3_ERR_WOULDBLOCK;
- }
+ if(nwritten > 0 && stream->upload_left != -1)
+ stream->upload_left -= nwritten;
- nread = CURLMIN(stream->upload_len, H3_SEND_SIZE - stream->h3out->used);
- if(nread > 0) {
- /* nghttp3 wants us to hold on to the data until it tells us it is okay to
- delete it. Append the data at the end of the h3out buffer. Since we can
- only return consecutive data, copy the amount that fits and the next
- part comes in next invoke. */
- struct h3out *out = stream->h3out;
- if(nread + out->windex > H3_SEND_SIZE)
- nread = H3_SEND_SIZE - out->windex;
-
- memcpy(&out->buf[out->windex], stream->upload_mem, nread);
-
- /* that's the chunk we return to nghttp3 */
- vec[0].base = &out->buf[out->windex];
- vec[0].len = nread;
-
- out->windex += nread;
- out->used += nread;
-
- if(out->windex == H3_SEND_SIZE)
- out->windex = 0; /* wrap */
- stream->upload_mem += nread;
- stream->upload_len -= nread;
- if(data->state.infilesize != -1) {
- stream->upload_left -= nread;
- if(!stream->upload_left)
- *pflags = NGHTTP3_DATA_FLAG_EOF;
- }
- DEBUGF(LOG_CF(data, cf, "cb_h3_readfunction %zd bytes%s (at %zd unacked)",
- nread, *pflags == NGHTTP3_DATA_FLAG_EOF?" EOF":"",
- out->used));
- }
- if(stream->upload_done && !stream->upload_len &&
- (stream->upload_left <= 0)) {
- DEBUGF(LOG_CF(data, cf, "cb_h3_readfunction sets EOF"));
+ /* When we stopped sending and everything in `sendbuf` is "in flight",
+ * we are at the end of the request body. */
+ if(stream->upload_left == 0) {
*pflags = NGHTTP3_DATA_FLAG_EOF;
- return nread ? 1 : 0;
+ stream->send_closed = TRUE;
}
- else if(!nread) {
+ else if(!nwritten) {
+ /* Not EOF, and nothing to give, we signal WOULDBLOCK. */
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] read req body -> AGAIN",
+ stream->id));
return NGHTTP3_ERR_WOULDBLOCK;
}
- return 1;
+
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] read req body -> "
+ "%d vecs%s with %zu (buffered=%zu, left=%zd)", stream->id,
+ (int)nvecs, *pflags == NGHTTP3_DATA_FLAG_EOF?" EOF":"",
+ nwritten, Curl_bufq_len(&stream->sendbuf),
+ stream->upload_left));
+ return (nghttp3_ssize)nvecs;
}
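/* [Editor's note] A tiny self-contained model (hypothetical, not curl
 * code) of the "in flight" bookkeeping used by cb_h3_read_req_body() and
 * cb_h3_acked_req_body(): the first `in_flight` bytes of the send buffer
 * were already handed to nghttp3 and await acknowledgement, the rest still
 * has to be passed out; acknowledged bytes drop off the buffer front. */
#include <stddef.h>

struct send_acct {
  size_t buffered;   /* bytes currently held in the send buffer */
  size_t in_flight;  /* prefix already passed to the HTTP/3 library */
};

/* read side: hand out everything not yet passed to the library */
static size_t acct_read(struct send_acct *a)
{
  size_t n = a->buffered - a->in_flight;
  a->in_flight += n;
  return n;
}

/* ack side: the peer acknowledged `acked` bytes, free them */
static void acct_ack(struct send_acct *a, size_t acked)
{
  size_t skip = (acked < a->in_flight) ? acked : a->in_flight;
  a->buffered -= skip;
  a->in_flight -= skip;
}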
/* Index where :authority header field will appear in request header
field list. */
#define AUTHORITY_DST_IDX 3
-static CURLcode h3_stream_open(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- const void *mem,
- size_t len)
+static ssize_t h3_stream_open(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const void *buf, size_t len,
+ CURLcode *err)
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = NULL;
+ struct h1_req_parser h1;
+ struct dynhds h2_headers;
size_t nheader;
- CURLcode result = CURLE_OK;
nghttp3_nv *nva = NULL;
- int64_t stream3_id;
int rc = 0;
- struct h3out *h3out = NULL;
- struct h2h3req *hreq = NULL;
+ unsigned int i;
+ ssize_t nwritten = -1;
+ nghttp3_data_reader reader;
+ nghttp3_data_reader *preader = NULL;
+
+ Curl_h1_req_parse_init(&h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
+ Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
+
+ *err = h3_data_setup(cf, data);
+ if(*err)
+ goto out;
+ stream = H3_STREAM_CTX(data);
+ DEBUGASSERT(stream);
- rc = ngtcp2_conn_open_bidi_stream(ctx->qconn, &stream3_id, NULL);
+ rc = ngtcp2_conn_open_bidi_stream(ctx->qconn, &stream->id, NULL);
if(rc) {
failf(data, "can get bidi streams");
- goto fail;
+ *err = CURLE_SEND_ERROR;
+ goto out;
}
- stream->stream3_id = stream3_id;
- stream->h3req = TRUE;
- Curl_dyn_init(&stream->overflow, CURL_MAX_READ_SIZE);
- stream->recv_buf_nonflow = 0;
+ nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
+ if(nwritten < 0)
+ goto out;
+ DEBUGASSERT(h1.done);
+ DEBUGASSERT(h1.req);
- result = Curl_pseudo_headers(data, mem, len, NULL, &hreq);
- if(result)
- goto fail;
- nheader = hreq->entries;
+ *err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
+ if(*err) {
+ nwritten = -1;
+ goto out;
+ }
+ nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(nghttp3_nv) * nheader);
if(!nva) {
- result = CURLE_OUT_OF_MEMORY;
- goto fail;
+ *err = CURLE_OUT_OF_MEMORY;
+ nwritten = -1;
+ goto out;
}
- else {
- unsigned int i;
- for(i = 0; i < nheader; i++) {
- nva[i].name = (unsigned char *)hreq->header[i].name;
- nva[i].namelen = hreq->header[i].namelen;
- nva[i].value = (unsigned char *)hreq->header[i].value;
- nva[i].valuelen = hreq->header[i].valuelen;
- nva[i].flags = NGHTTP3_NV_FLAG_NONE;
- }
+
+ for(i = 0; i < nheader; ++i) {
+ struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
+ nva[i].name = (unsigned char *)e->name;
+ nva[i].namelen = e->namelen;
+ nva[i].value = (unsigned char *)e->value;
+ nva[i].valuelen = e->valuelen;
+ nva[i].flags = NGHTTP3_NV_FLAG_NONE;
}
switch(data->state.httpreq) {
case HTTPREQ_POST:
case HTTPREQ_POST_FORM:
case HTTPREQ_POST_MIME:
- case HTTPREQ_PUT: {
- nghttp3_data_reader data_reader;
+ case HTTPREQ_PUT:
+ /* known request body size or -1 */
if(data->state.infilesize != -1)
stream->upload_left = data->state.infilesize;
else
/* data sending without specifying the data amount up front */
- stream->upload_left = -1; /* unknown, but not zero */
-
- data_reader.read_data = cb_h3_readfunction;
-
- h3out = calloc(sizeof(struct h3out), 1);
- if(!h3out) {
- result = CURLE_OUT_OF_MEMORY;
- goto fail;
- }
- stream->h3out = h3out;
-
- rc = nghttp3_conn_submit_request(ctx->h3conn, stream->stream3_id,
- nva, nheader, &data_reader, data);
- if(rc)
- goto fail;
+ stream->upload_left = -1; /* unknown */
+ reader.read_data = cb_h3_read_req_body;
+ preader = &reader;
break;
- }
default:
- stream->upload_left = 0; /* nothing left to send */
- rc = nghttp3_conn_submit_request(ctx->h3conn, stream->stream3_id,
- nva, nheader, NULL, data);
- if(rc)
- goto fail;
+    /* there is no request body */
+ stream->upload_left = 0; /* no request body */
+ preader = NULL;
break;
}
- Curl_safefree(nva);
-
- infof(data, "Using HTTP/3 Stream ID: %" PRId64 " (easy handle %p)",
- stream3_id, (void *)data);
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] opened for %s",
- stream3_id, data->state.url));
-
- Curl_pseudo_free(hreq);
- return CURLE_OK;
-
-fail:
+ rc = nghttp3_conn_submit_request(ctx->h3conn, stream->id,
+ nva, nheader, preader, data);
if(rc) {
switch(rc) {
case NGHTTP3_ERR_CONN_CLOSING:
DEBUGF(LOG_CF(data, cf, "h3sid[%"PRId64"] failed to send, "
- "connection is closing", stream->stream3_id));
- result = CURLE_RECV_ERROR;
+ "connection is closing", stream->id));
break;
default:
DEBUGF(LOG_CF(data, cf, "h3sid[%"PRId64"] failed to send -> %d (%s)",
- stream->stream3_id, rc, ngtcp2_strerror(rc)));
- result = CURLE_SEND_ERROR;
+ stream->id, rc, ngtcp2_strerror(rc)));
break;
}
+ *err = CURLE_SEND_ERROR;
+ nwritten = -1;
+ goto out;
}
+
+ infof(data, "Using HTTP/3 Stream ID: %" PRId64 " (easy handle %p)",
+ stream->id, (void *)data);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] opened for %s",
+ stream->id, data->state.url));
+
+out:
free(nva);
- Curl_pseudo_free(hreq);
- return result;
+ Curl_h1_req_parse_free(&h1);
+ Curl_dynhds_free(&h2_headers);
+ return nwritten;
}
static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err)
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
ssize_t sent = 0;
- struct HTTP *stream = data->req.p.http;
struct cf_call_data save;
CF_DATA_SAVE(save, cf, data);
@@ -1643,37 +1668,36 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
DEBUGASSERT(ctx->h3conn);
*err = CURLE_OK;
- if(stream->closed) {
+ if(stream && stream->closed) {
*err = CURLE_HTTP3;
sent = -1;
goto out;
}
- if(!stream->h3req) {
- CURLcode result = h3_stream_open(cf, data, buf, len);
- if(result) {
- DEBUGF(LOG_CF(data, cf, "failed to open stream -> %d", result));
- sent = -1;
+ if(!stream || stream->id < 0) {
+ sent = h3_stream_open(cf, data, buf, len, err);
+ if(sent < 0) {
+ DEBUGF(LOG_CF(data, cf, "failed to open stream -> %d", *err));
goto out;
}
- /* Assume that mem of length len only includes HTTP/1.1 style
- header fields. In other words, it does not contain request
- body. */
- sent = len;
}
else {
- DEBUGF(LOG_CF(data, cf, "ngh3_stream_send() wants to send %zd bytes",
- len));
- if(!stream->upload_len) {
- stream->upload_mem = buf;
- stream->upload_len = len;
- (void)nghttp3_conn_resume_stream(ctx->h3conn, stream->stream3_id);
- }
- else {
- *err = CURLE_AGAIN;
- sent = -1;
+ sent = Curl_bufq_write(&stream->sendbuf, buf, len, err);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_send, add to "
+ "sendbuf(len=%zu) -> %zd, %d",
+ stream->id, len, sent, *err));
+ if(sent < 0) {
+ if(*err == CURLE_AGAIN) {
+ /* Can't add more to the send buf, needs to drain first.
+ * Pause the sending to avoid a busy loop. */
+ data->req.keepon |= KEEP_SEND_HOLD;
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] pause send",
+ stream->id));
+ }
goto out;
}
+
+ (void)nghttp3_conn_resume_stream(ctx->h3conn, stream->id);
}
if(cf_flush_egress(cf, data)) {
@@ -1682,24 +1706,6 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
goto out;
}
- /* Reset post upload buffer after resumed. */
- if(stream->upload_mem) {
- if(data->set.postfields) {
- sent = len;
- }
- else {
- sent = len - stream->upload_len;
- }
-
- stream->upload_mem = NULL;
- stream->upload_len = 0;
-
- if(sent == 0) {
- *err = CURLE_AGAIN;
- sent = -1;
- goto out;
- }
- }
out:
CF_DATA_RESTORE(cf, save);
return sent;
@@ -1717,7 +1723,7 @@ static CURLcode qng_verify_peer(struct Curl_cfilter *cf,
Curl_conn_get_host(data, cf->sockindex, &hostname, &disp_hostname, &port);
snihost = Curl_ssl_snihost(data, hostname, NULL);
if(!snihost)
- return CURLE_PEER_FAILED_VERIFICATION;
+ return CURLE_PEER_FAILED_VERIFICATION;
cf->conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
cf->conn->httpversion = 30;
@@ -1757,281 +1763,308 @@ static CURLcode qng_verify_peer(struct Curl_cfilter *cf,
return result;
}
-static CURLcode cf_process_ingress(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+struct recv_ctx {
+ struct Curl_cfilter *cf;
+ struct Curl_easy *data;
+ ngtcp2_tstamp ts;
+ size_t pkt_count;
+};
+
+static CURLcode recv_pkt(const unsigned char *pkt, size_t pktlen,
+ struct sockaddr_storage *remote_addr,
+ socklen_t remote_addrlen, int ecn,
+ void *userp)
{
- struct cf_ngtcp2_ctx *ctx = cf->ctx;
- ssize_t recvd;
- int rv;
- uint8_t buf[65536];
- int bufsize = (int)sizeof(buf);
- size_t pktcount = 0, total_recvd = 0;
- struct sockaddr_storage remote_addr;
- socklen_t remote_addrlen;
+ struct recv_ctx *r = userp;
+ struct cf_ngtcp2_ctx *ctx = r->cf->ctx;
+ ngtcp2_pkt_info pi;
ngtcp2_path path;
- ngtcp2_tstamp ts = timestamp();
- ngtcp2_pkt_info pi = { 0 };
+ int rv;
- for(;;) {
- remote_addrlen = sizeof(remote_addr);
- while((recvd = recvfrom(ctx->q.sockfd, (char *)buf, bufsize, 0,
- (struct sockaddr *)&remote_addr,
- &remote_addrlen)) == -1 &&
- SOCKERRNO == EINTR)
- ;
- if(recvd == -1) {
- if(SOCKERRNO == EAGAIN || SOCKERRNO == EWOULDBLOCK) {
- DEBUGF(LOG_CF(data, cf, "ingress, recvfrom -> EAGAIN"));
- goto out;
+ ++r->pkt_count;
+ ngtcp2_addr_init(&path.local, (struct sockaddr *)&ctx->q.local_addr,
+ ctx->q.local_addrlen);
+ ngtcp2_addr_init(&path.remote, (struct sockaddr *)remote_addr,
+ remote_addrlen);
+ pi.ecn = (uint32_t)ecn;
+
+ rv = ngtcp2_conn_read_pkt(ctx->qconn, &path, &pi, pkt, pktlen, r->ts);
+ if(rv) {
+ DEBUGF(LOG_CF(r->data, r->cf, "ingress, read_pkt -> %s",
+ ngtcp2_strerror(rv)));
+ if(!ctx->last_error.error_code) {
+ if(rv == NGTCP2_ERR_CRYPTO) {
+ ngtcp2_ccerr_set_tls_alert(&ctx->last_error,
+ ngtcp2_conn_get_tls_alert(ctx->qconn),
+ NULL, 0);
}
- if(!cf->connected && SOCKERRNO == ECONNREFUSED) {
- const char *r_ip;
- int r_port;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
- failf(data, "ngtcp2: connection to %s port %u refused",
- r_ip, r_port);
- return CURLE_COULDNT_CONNECT;
+ else {
+ ngtcp2_ccerr_set_liberr(&ctx->last_error, rv, NULL, 0);
}
- failf(data, "ngtcp2: recvfrom() unexpectedly returned %zd (errno=%d)",
- recvd, SOCKERRNO);
- return CURLE_RECV_ERROR;
}
- if(recvd > 0 && !ctx->got_first_byte) {
- ctx->first_byte_at = Curl_now();
- ctx->got_first_byte = TRUE;
- }
-
- ++pktcount;
- total_recvd += recvd;
-
- ngtcp2_addr_init(&path.local, (struct sockaddr *)&ctx->q.local_addr,
- ctx->q.local_addrlen);
- ngtcp2_addr_init(&path.remote, (struct sockaddr *)&remote_addr,
- remote_addrlen);
-
- rv = ngtcp2_conn_read_pkt(ctx->qconn, &path, &pi, buf, recvd, ts);
- if(rv) {
- DEBUGF(LOG_CF(data, cf, "ingress, read_pkt -> %s",
- ngtcp2_strerror(rv)));
- if(!ctx->last_error.error_code) {
- if(rv == NGTCP2_ERR_CRYPTO) {
- ngtcp2_connection_close_error_set_transport_error_tls_alert(
- &ctx->last_error,
- ngtcp2_conn_get_tls_alert(ctx->qconn), NULL, 0);
- }
- else {
- ngtcp2_connection_close_error_set_transport_error_liberr(
- &ctx->last_error, rv, NULL, 0);
- }
- }
-
- if(rv == NGTCP2_ERR_CRYPTO)
- /* this is a "TLS problem", but a failed certificate verification
- is a common reason for this */
- return CURLE_PEER_FAILED_VERIFICATION;
- return CURLE_RECV_ERROR;
- }
+ if(rv == NGTCP2_ERR_CRYPTO)
+ /* this is a "TLS problem", but a failed certificate verification
+ is a common reason for this */
+ return CURLE_PEER_FAILED_VERIFICATION;
+ return CURLE_RECV_ERROR;
}
-out:
- (void)pktcount;
- (void)total_recvd;
- DEBUGF(LOG_CF(data, cf, "ingress, recvd %zu packets with %zd bytes",
- pktcount, total_recvd));
return CURLE_OK;
}
-static CURLcode cf_flush_egress(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+static CURLcode cf_process_ingress(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
- int rv;
- size_t sent;
- ngtcp2_ssize outlen;
- uint8_t *outpos = ctx->q.pktbuf;
- size_t max_udp_payload_size =
- ngtcp2_conn_get_max_tx_udp_payload_size(ctx->qconn);
- size_t path_max_udp_payload_size =
- ngtcp2_conn_get_path_max_tx_udp_payload_size(ctx->qconn);
- size_t max_pktcnt =
- CURLMIN(MAX_PKT_BURST, ctx->q.pktbuflen / max_udp_payload_size);
- size_t pktcnt = 0;
- size_t gsolen = 0; /* this disables gso until we have a clue */
- ngtcp2_path_storage ps;
- ngtcp2_tstamp ts = timestamp();
- ngtcp2_tstamp expiry;
- ngtcp2_duration timeout;
- int64_t stream_id;
- nghttp3_ssize veccnt;
- int fin;
- nghttp3_vec vec[16];
- ngtcp2_ssize ndatalen;
- uint32_t flags;
- CURLcode curlcode;
+ struct recv_ctx rctx;
+ size_t pkts_chunk = 128, i;
+ size_t pkts_max = 10 * pkts_chunk;
+ CURLcode result;
- rv = ngtcp2_conn_handle_expiry(ctx->qconn, ts);
- if(rv) {
- failf(data, "ngtcp2_conn_handle_expiry returned error: %s",
- ngtcp2_strerror(rv));
- ngtcp2_connection_close_error_set_transport_error_liberr(&ctx->last_error,
- rv, NULL, 0);
- return CURLE_SEND_ERROR;
- }
+ rctx.cf = cf;
+ rctx.data = data;
+ rctx.ts = timestamp();
+ rctx.pkt_count = 0;
- if(ctx->q.num_blocked_pkt) {
- curlcode = vquic_send_blocked_pkt(cf, data, &ctx->q);
- if(curlcode) {
- if(curlcode == CURLE_AGAIN) {
- Curl_expire(data, 1, EXPIRE_QUIC);
- return CURLE_OK;
- }
- return curlcode;
- }
+ for(i = 0; i < pkts_max; i += pkts_chunk) {
+ rctx.pkt_count = 0;
+ result = vquic_recv_packets(cf, data, &ctx->q, pkts_chunk,
+ recv_pkt, &rctx);
+ if(result) /* error */
+ break;
+ if(rctx.pkt_count < pkts_chunk) /* got less than we could */
+ break;
+ /* give egress a chance before we receive more */
+ result = cf_flush_egress(cf, data);
}
+ return result;
+}
- ngtcp2_path_storage_zero(&ps);
+struct read_ctx {
+ struct Curl_cfilter *cf;
+ struct Curl_easy *data;
+ ngtcp2_tstamp ts;
+ ngtcp2_path_storage *ps;
+};
+/**
+ * Read a network packet to send from ngtcp2 into `buf`.
+ * Return number of bytes written or -1 with *err set.
+ */
+static ssize_t read_pkt_to_send(void *userp,
+ unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct read_ctx *x = userp;
+ struct cf_ngtcp2_ctx *ctx = x->cf->ctx;
+ nghttp3_vec vec[16];
+ nghttp3_ssize veccnt;
+ ngtcp2_ssize ndatalen;
+ uint32_t flags;
+ int64_t stream_id;
+ int fin;
+ ssize_t nwritten, n;
+ veccnt = 0;
+ stream_id = -1;
+ fin = 0;
+
+ /* ngtcp2 may want to put several frames from different streams into
+ * this packet. `NGTCP2_WRITE_STREAM_FLAG_MORE` tells it to do so.
+ * When `NGTCP2_ERR_WRITE_MORE` is returned, we *need* to make
+ * another iteration.
+ * When ngtcp2 is happy (because it has no other frame that would fit
+ * or it has nothing more to send), it returns the total length
+ * of the assembled packet. This may be 0 if there was nothing to send. */
+ nwritten = 0;
+ *err = CURLE_OK;
for(;;) {
- veccnt = 0;
- stream_id = -1;
- fin = 0;
if(ctx->h3conn && ngtcp2_conn_get_max_data_left(ctx->qconn)) {
veccnt = nghttp3_conn_writev_stream(ctx->h3conn, &stream_id, &fin, vec,
sizeof(vec) / sizeof(vec[0]));
if(veccnt < 0) {
- failf(data, "nghttp3_conn_writev_stream returned error: %s",
+ failf(x->data, "nghttp3_conn_writev_stream returned error: %s",
nghttp3_strerror((int)veccnt));
- ngtcp2_connection_close_error_set_application_error(
- &ctx->last_error,
- nghttp3_err_infer_quic_app_error_code((int)veccnt), NULL, 0);
- return CURLE_SEND_ERROR;
+ ngtcp2_ccerr_set_application_error(
+ &ctx->last_error,
+ nghttp3_err_infer_quic_app_error_code((int)veccnt), NULL, 0);
+ *err = CURLE_SEND_ERROR;
+ return -1;
}
}
flags = NGTCP2_WRITE_STREAM_FLAG_MORE |
(fin ? NGTCP2_WRITE_STREAM_FLAG_FIN : 0);
- outlen = ngtcp2_conn_writev_stream(ctx->qconn, &ps.path, NULL, outpos,
- max_udp_payload_size,
- &ndatalen, flags, stream_id,
- (const ngtcp2_vec *)vec, veccnt, ts);
- if(outlen == 0) {
- /* ngtcp2 does not want to send more packets, if the buffer is
- * not empty, send that now */
- if(outpos != ctx->q.pktbuf) {
- curlcode = vquic_send_packet(cf, data, &ctx->q, ctx->q.pktbuf,
- outpos - ctx->q.pktbuf, gsolen, &sent);
- if(curlcode) {
- if(curlcode == CURLE_AGAIN) {
- vquic_push_blocked_pkt(cf, &ctx->q, ctx->q.pktbuf + sent,
- outpos - ctx->q.pktbuf - sent,
- gsolen);
- Curl_expire(data, 1, EXPIRE_QUIC);
- return CURLE_OK;
- }
- return curlcode;
- }
- }
- /* done for now */
+ n = ngtcp2_conn_writev_stream(ctx->qconn, x->ps? &x->ps->path : NULL,
+ NULL, buf, buflen,
+ &ndatalen, flags, stream_id,
+ (const ngtcp2_vec *)vec, veccnt, x->ts);
+ if(n == 0) {
+ /* nothing to send */
+ *err = CURLE_AGAIN;
+ nwritten = -1;
goto out;
}
- if(outlen < 0) {
- switch(outlen) {
+ else if(n < 0) {
+ switch(n) {
case NGTCP2_ERR_STREAM_DATA_BLOCKED:
- assert(ndatalen == -1);
+ DEBUGASSERT(ndatalen == -1);
nghttp3_conn_block_stream(ctx->h3conn, stream_id);
- continue;
+ n = 0;
+ break;
case NGTCP2_ERR_STREAM_SHUT_WR:
- assert(ndatalen == -1);
+ DEBUGASSERT(ndatalen == -1);
nghttp3_conn_shutdown_stream_write(ctx->h3conn, stream_id);
- continue;
+ n = 0;
+ break;
case NGTCP2_ERR_WRITE_MORE:
/* ngtcp2 wants to send more. update the flow of the stream whose data
* is in the buffer and continue */
- assert(ndatalen >= 0);
- rv = nghttp3_conn_add_write_offset(ctx->h3conn, stream_id, ndatalen);
- if(rv) {
- failf(data, "nghttp3_conn_add_write_offset returned error: %s\n",
- nghttp3_strerror(rv));
- return CURLE_SEND_ERROR;
- }
- continue;
+ DEBUGASSERT(ndatalen >= 0);
+ n = 0;
+ break;
default:
- assert(ndatalen == -1);
- failf(data, "ngtcp2_conn_writev_stream returned error: %s",
- ngtcp2_strerror((int)outlen));
- ngtcp2_connection_close_error_set_transport_error_liberr(
- &ctx->last_error, (int)outlen, NULL, 0);
- return CURLE_SEND_ERROR;
+ DEBUGASSERT(ndatalen == -1);
+ failf(x->data, "ngtcp2_conn_writev_stream returned error: %s",
+ ngtcp2_strerror((int)n));
+ ngtcp2_ccerr_set_liberr(&ctx->last_error, (int)n, NULL, 0);
+ *err = CURLE_SEND_ERROR;
+ nwritten = -1;
+ goto out;
}
}
- else if(ndatalen >= 0) {
- /* ngtcp2 thinks it has added all it wants. Update the stream */
- rv = nghttp3_conn_add_write_offset(ctx->h3conn, stream_id, ndatalen);
+
+ if(ndatalen >= 0) {
+ /* we add the amount of data bytes to the flow windows */
+ int rv = nghttp3_conn_add_write_offset(ctx->h3conn, stream_id, ndatalen);
if(rv) {
- failf(data, "nghttp3_conn_add_write_offset returned error: %s\n",
+ failf(x->data, "nghttp3_conn_add_write_offset returned error: %s\n",
nghttp3_strerror(rv));
return CURLE_SEND_ERROR;
}
}
- /* advance to the end of the buffered packet data */
- outpos += outlen;
+ if(n > 0) {
+ /* packet assembled, leave */
+ nwritten = n;
+ goto out;
+ }
+ }
+out:
+ return nwritten;
+}
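/* [Editor's note] Hedged illustration of the NGTCP2_ERR_WRITE_MORE loop
 * above: with two active streams, ngtcp2 may return WRITE_MORE after the
 * frame for the first stream was added; the loop then asks nghttp3 for the
 * next stream's data and packs it into the same UDP packet, and only the
 * final call returns the total packet length (or 0 when nothing is left
 * to send). */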
- if(pktcnt == 0) {
- /* first packet buffer chunk. use this as gsolen. It's how ngtcp2
- * indicates the intended segment size. */
- gsolen = outlen;
+static CURLcode cf_flush_egress(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_ngtcp2_ctx *ctx = cf->ctx;
+ int rv;
+ ssize_t nread;
+ size_t max_payload_size, path_max_payload_size, max_pktcnt;
+ size_t pktcnt = 0;
+ size_t gsolen = 0; /* this disables gso until we have a clue */
+ ngtcp2_path_storage ps;
+ ngtcp2_tstamp ts = timestamp();
+ ngtcp2_tstamp expiry;
+ ngtcp2_duration timeout;
+ CURLcode curlcode;
+ struct read_ctx readx;
+
+ rv = ngtcp2_conn_handle_expiry(ctx->qconn, ts);
+ if(rv) {
+ failf(data, "ngtcp2_conn_handle_expiry returned error: %s",
+ ngtcp2_strerror(rv));
+ ngtcp2_ccerr_set_liberr(&ctx->last_error, rv, NULL, 0);
+ return CURLE_SEND_ERROR;
+ }
+
+ curlcode = vquic_flush(cf, data, &ctx->q);
+ if(curlcode) {
+ if(curlcode == CURLE_AGAIN) {
+ Curl_expire(data, 1, EXPIRE_QUIC);
+ return CURLE_OK;
}
- else if((size_t)outlen > gsolen ||
- (gsolen > path_max_udp_payload_size && (size_t)outlen != gsolen)) {
- /* Packet larger than path_max_udp_payload_size is PMTUD probe
- packet and it might not be sent because of EMSGSIZE. Send
- them separately to minimize the loss. */
- /* send the pktbuf *before* the last addition */
- curlcode = vquic_send_packet(cf, data, &ctx->q, ctx->q.pktbuf,
- outpos - outlen - ctx->q.pktbuf, gsolen, &sent);
+ return curlcode;
+ }
+
+ ngtcp2_path_storage_zero(&ps);
+
+  /* In UDP, there is a maximum theoretical packet payload length and
+   * a minimum payload length that is "guaranteed" to work.
+   * To detect if this minimum payload can be increased, ngtcp2 sends
+   * now and then a packet payload larger than the minimum. If that
+   * is ACKed by the peer, both parties know that it works and
+   * the subsequent packets can use a larger one.
+   * This is called PMTUD (Path Maximum Transmission Unit Discovery).
+   * Since a PMTUD probe might be rejected right on send, we do not want
+   * it to be followed by other packets of lesser size, because those
+   * would also fail then. So, if we detect a PMTUD probe while buffering,
+   * we flush.
+ */
+ max_payload_size = ngtcp2_conn_get_max_tx_udp_payload_size(ctx->qconn);
+ path_max_payload_size =
+ ngtcp2_conn_get_path_max_tx_udp_payload_size(ctx->qconn);
+ /* maximum number of packets buffered before we flush to the socket */
+ max_pktcnt = CURLMIN(MAX_PKT_BURST,
+ ctx->q.sendbuf.chunk_size / max_payload_size);
+
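/* [Editor's note] Illustrative flush behavior of the loop below, assuming
 * gsolen ends up as 1200 and max_pktcnt as 10 (actual values depend on the
 * path):
 *  - 1200, 1200, 800  -> flushed after the 800-byte packet, since it is
 *                        shorter than gsolen (end of a burst)
 *  - 10 x 1200        -> flushed when max_pktcnt is reached
 *  - 1200, then 1500  -> the 1500-byte PMTUD probe triggers a tail split:
 *                        the buffered 1200-byte packets go out first and
 *                        the probe is sent separately */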
+ readx.cf = cf;
+ readx.data = data;
+ readx.ts = ts;
+ readx.ps = &ps;
+
+ for(;;) {
+ /* add the next packet to send, if any, to our buffer */
+ nread = Curl_bufq_sipn(&ctx->q.sendbuf, max_payload_size,
+ read_pkt_to_send, &readx, &curlcode);
+ /* DEBUGF(LOG_CF(data, cf, "sip packet(maxlen=%zu) -> %zd, %d",
+ max_payload_size, nread, curlcode)); */
+ if(nread < 0) {
+ if(curlcode != CURLE_AGAIN)
+ return curlcode;
+ /* Nothing more to add, flush and leave */
+ curlcode = vquic_send(cf, data, &ctx->q, gsolen);
if(curlcode) {
if(curlcode == CURLE_AGAIN) {
- /* blocked, add the pktbuf *before* and *at* the last addition
- * separately to the blocked packages */
- vquic_push_blocked_pkt(cf, &ctx->q, ctx->q.pktbuf + sent,
- outpos - outlen - ctx->q.pktbuf - sent, gsolen);
- vquic_push_blocked_pkt(cf, &ctx->q, outpos - outlen, outlen, outlen);
Curl_expire(data, 1, EXPIRE_QUIC);
return CURLE_OK;
}
return curlcode;
}
- /* send the pktbuf *at* the last addition */
- curlcode = vquic_send_packet(cf, data, &ctx->q, outpos - outlen, outlen,
- outlen, &sent);
+ goto out;
+ }
+
+ DEBUGASSERT(nread > 0);
+ if(pktcnt == 0) {
+ /* first packet in buffer. This is either of a known, "good"
+ * payload size or it is a PMTUD. We'll see. */
+ gsolen = (size_t)nread;
+ }
+ else if((size_t)nread > gsolen ||
+ (gsolen > path_max_payload_size && (size_t)nread != gsolen)) {
+ /* The just added packet is a PMTUD *or* the one(s) before the
+ * just added were PMTUD and the last one is smaller.
+ * Flush the buffer before the last add. */
+ curlcode = vquic_send_tail_split(cf, data, &ctx->q,
+ gsolen, nread, nread);
if(curlcode) {
if(curlcode == CURLE_AGAIN) {
- assert(0 == sent);
- vquic_push_blocked_pkt(cf, &ctx->q, outpos - outlen, outlen, outlen);
Curl_expire(data, 1, EXPIRE_QUIC);
return CURLE_OK;
}
return curlcode;
}
- /* pktbuf has been completely sent */
pktcnt = 0;
- outpos = ctx->q.pktbuf;
continue;
}
- if(++pktcnt >= max_pktcnt || (size_t)outlen < gsolen) {
- /* enough packets or last one is shorter than the intended
- * segment size, indicating that it is time to send. */
- curlcode = vquic_send_packet(cf, data, &ctx->q, ctx->q.pktbuf,
- outpos - ctx->q.pktbuf, gsolen, &sent);
+ if(++pktcnt >= max_pktcnt || (size_t)nread < gsolen) {
+ /* Reached MAX_PKT_BURST *or*
+ * the capacity of our buffer *or*
+ * last add was shorter than the previous ones, flush */
+ curlcode = vquic_send(cf, data, &ctx->q, gsolen);
if(curlcode) {
if(curlcode == CURLE_AGAIN) {
- vquic_push_blocked_pkt(cf, &ctx->q, ctx->q.pktbuf + sent,
- outpos - ctx->q.pktbuf - sent, gsolen);
Curl_expire(data, 1, EXPIRE_QUIC);
return CURLE_OK;
}
@@ -2039,7 +2072,6 @@ static CURLcode cf_flush_egress(struct Curl_cfilter *cf,
}
/* pktbuf has been completely sent */
pktcnt = 0;
- outpos = ctx->q.pktbuf;
}
}
@@ -2069,13 +2101,22 @@ out:
static bool cf_ngtcp2_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
- /* We may have received more data than we're able to hold in the receive
- buffer and allocated an overflow buffer. Since it's possible that
- there's no more data coming on the socket, we need to keep reading
- until the overflow buffer is empty. */
- const struct HTTP *stream = data->req.p.http;
+ const struct stream_ctx *stream = H3_STREAM_CTX(data);
(void)cf;
- return Curl_dyn_len(&stream->overflow) > 0;
+ return stream && !Curl_bufq_is_empty(&stream->recvbuf);
+}
+
+static CURLcode h3_data_pause(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool pause)
+{
+ /* TODO: there seems right now no API in ngtcp2 to shrink/enlarge
+ * the streams windows. As we do in HTTP/2. */
+ if(!pause) {
+ drain_stream(cf, data);
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ }
+ return CURLE_OK;
}
static CURLcode cf_ngtcp2_data_event(struct Curl_cfilter *cf,
@@ -2090,16 +2131,22 @@ static CURLcode cf_ngtcp2_data_event(struct Curl_cfilter *cf,
(void)arg1;
(void)arg2;
switch(event) {
+ case CF_CTRL_DATA_SETUP:
+ break;
+ case CF_CTRL_DATA_PAUSE:
+ result = h3_data_pause(cf, data, (arg1 != 0));
+ break;
case CF_CTRL_DATA_DONE: {
- struct HTTP *stream = data->req.p.http;
- Curl_dyn_free(&stream->overflow);
- free(stream->h3out);
+ h3_data_done(cf, data);
break;
}
case CF_CTRL_DATA_DONE_SEND: {
- struct HTTP *stream = data->req.p.http;
- stream->upload_done = TRUE;
- (void)nghttp3_conn_resume_stream(ctx->h3conn, stream->stream3_id);
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ if(stream && !stream->send_closed) {
+ stream->send_closed = TRUE;
+ stream->upload_left = Curl_bufq_len(&stream->sendbuf);
+ (void)nghttp3_conn_resume_stream(ctx->h3conn, stream->id);
+ }
break;
}
case CF_CTRL_DATA_IDLE:
@@ -2147,6 +2194,7 @@ static void cf_ngtcp2_ctx_clear(struct cf_ngtcp2_ctx *ctx)
nghttp3_conn_del(ctx->h3conn);
if(ctx->qconn)
ngtcp2_conn_del(ctx->qconn);
+ Curl_bufcp_free(&ctx->stream_bufcp);
memset(ctx, 0, sizeof(*ctx));
ctx->qlogfd = -1;
@@ -2212,6 +2260,10 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
int qfd;
ctx->version = NGTCP2_PROTO_VER_MAX;
+ ctx->max_stream_window = H3_STREAM_WINDOW_SIZE;
+ Curl_bufcp_init(&ctx->stream_bufcp, H3_STREAM_CHUNK_SIZE,
+ H3_STREAM_POOL_SPARES);
+
#ifdef USE_OPENSSL
result = quic_ssl_ctx(&ctx->sslctx, cf, data);
if(result)
@@ -2244,8 +2296,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
ctx->qlogfd = qfd; /* -1 if failure above */
quic_settings(ctx, data);
- result = vquic_ctx_init(&ctx->q,
- NGTCP2_MAX_PMTUD_UDP_PAYLOAD_SIZE * MAX_PKT_BURST);
+ result = vquic_ctx_init(&ctx->q);
if(result)
return result;
@@ -2277,7 +2328,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
ngtcp2_conn_set_tls_native_handle(ctx->qconn, ctx->ssl);
#endif
- ngtcp2_connection_close_error_default(&ctx->last_error);
+ ngtcp2_ccerr_default(&ctx->last_error);
ctx->conn_ref.get_conn = get_conn;
ctx->conn_ref.user_data = cf;
@@ -2524,7 +2575,7 @@ out:
*pcf = (!result)? cf : NULL;
if(result) {
if(udp_cf)
- Curl_conn_cf_discard(udp_cf, data);
+ Curl_conn_cf_discard_sub(cf, udp_cf, data, TRUE);
Curl_safefree(cf);
Curl_safefree(ctx);
}
diff --git a/libs/libcurl/src/vquic/curl_ngtcp2.h b/libs/libcurl/src/vquic/curl_ngtcp2.h index 93a9d2dbcf..7b8a158b76 100644 --- a/libs/libcurl/src/vquic/curl_ngtcp2.h +++ b/libs/libcurl/src/vquic/curl_ngtcp2.h @@ -26,7 +26,7 @@
#include "curl_setup.h"
-#ifdef USE_NGTCP2
+#if defined(USE_NGTCP2) && defined(USE_NGHTTP3)
#ifdef HAVE_NETINET_UDP_H
#include <netinet/udp.h>
diff --git a/libs/libcurl/src/vquic/curl_quiche.c b/libs/libcurl/src/vquic/curl_quiche.c index 90f98a69d1..1cf37f7a16 100644 --- a/libs/libcurl/src/vquic/curl_quiche.c +++ b/libs/libcurl/src/vquic/curl_quiche.c @@ -28,6 +28,7 @@
#include <quiche.h>
#include <openssl/err.h>
#include <openssl/ssl.h>
+#include "bufq.h"
#include "urldata.h"
#include "cfilters.h"
#include "cf-socket.h"
@@ -39,11 +40,11 @@
#include "connect.h"
#include "progress.h"
#include "strerror.h"
+#include "http1.h"
#include "vquic.h"
#include "vquic_int.h"
#include "curl_quiche.h"
#include "transfer.h"
-#include "h2h3.h"
#include "vtls/openssl.h"
#include "vtls/keylog.h"
@@ -52,14 +53,26 @@
#include "curl_memory.h"
#include "memdebug.h"
-
-#define QUIC_MAX_STREAMS (256*1024)
-#define QUIC_MAX_DATA (1*1024*1024)
-#define QUIC_IDLE_TIMEOUT (60 * 1000) /* milliseconds */
-
-/* how many UDP packets to send max in one call */
-#define MAX_PKT_BURST 10
-#define MAX_UDP_PAYLOAD_SIZE 1452
+/* #define DEBUG_QUICHE */
+
+#define QUIC_MAX_STREAMS (100)
+#define QUIC_IDLE_TIMEOUT (5 * 1000) /* milliseconds */
+
+#define H3_STREAM_WINDOW_SIZE (128 * 1024)
+#define H3_STREAM_CHUNK_SIZE (16 * 1024)
+/* The pool keeps spares around and half of a full stream window
+ * seems good. More does not seem to improve performance.
+ * The benefit of the pool is that stream buffers do not keep spares
+ * themselves. So memory consumption goes down when streams run empty,
+ * have finished a large upload, etc. */
+#define H3_STREAM_POOL_SPARES \
+ (H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE ) / 2
+/* Receive and Send max number of chunks just follows from the
+ * chunk size and window size */
+#define H3_STREAM_RECV_CHUNKS \
+ (H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE)
+#define H3_STREAM_SEND_CHUNKS \
+ (H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE)
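/* [Editor's note] With the values above this works out to: a 128 KiB
 * stream window split into 16 KiB chunks gives 8 chunks per stream buffer
 * (H3_STREAM_RECV_CHUNKS and H3_STREAM_SEND_CHUNKS), and the shared pool
 * keeps 8 / 2 = 4 spare chunks around for reuse. */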
/*
* Store quiche version info in this buffer.
@@ -123,18 +136,6 @@ static SSL_CTX *quic_ssl_ctx(struct Curl_easy *data)
return ssl_ctx;
}
-struct quic_handshake {
- char *buf; /* pointer to the buffer */
- size_t alloclen; /* size of allocation */
- size_t len; /* size of content in buffer */
- size_t nread; /* how many bytes have been read */
-};
-
-struct h3_event_node {
- struct h3_event_node *next;
- quiche_h3_event *ev;
-};
-
struct cf_quiche_ctx {
struct cf_quic_ctx q;
quiche_conn *qconn;
@@ -148,11 +149,13 @@ struct cf_quiche_ctx {
struct curltime handshake_at; /* time connect handshake finished */
struct curltime first_byte_at; /* when first byte was recvd */
struct curltime reconnect_at; /* time the next attempt should start */
+ struct bufc_pool stream_bufcp; /* chunk pool for streams */
+ curl_off_t data_recvd;
+ size_t sends_on_hold; /* # of streams with SEND_HOLD set */
BIT(goaway); /* got GOAWAY from server */
BIT(got_first_byte); /* if first byte was received */
};
-
#ifdef DEBUG_QUICHE
static void quiche_debug_log(const char *line, void *argp)
{
@@ -161,21 +164,6 @@ static void quiche_debug_log(const char *line, void *argp)
}
#endif
-static void h3_clear_pending(struct Curl_easy *data)
-{
- struct HTTP *stream = data->req.p.http;
-
- if(stream->pending) {
- struct h3_event_node *node, *next;
- for(node = stream->pending; node; node = next) {
- next = node->next;
- quiche_h3_event_free(node->ev);
- free(node);
- }
- stream->pending = NULL;
- }
-}
-
static void cf_quiche_ctx_clear(struct cf_quiche_ctx *ctx)
{
if(ctx) {
@@ -188,129 +176,300 @@ static void cf_quiche_ctx_clear(struct cf_quiche_ctx *ctx)
quiche_h3_conn_free(ctx->h3c);
if(ctx->cfg)
quiche_config_free(ctx->cfg);
+ Curl_bufcp_free(&ctx->stream_bufcp);
memset(ctx, 0, sizeof(*ctx));
}
}
-static void notify_drain(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+/**
+ * All about the H3 internals of a stream
+ */
+struct stream_ctx {
+ int64_t id; /* HTTP/3 protocol stream identifier */
+ struct bufq recvbuf; /* h3 response */
+ uint64_t error3; /* HTTP/3 stream error code */
+ curl_off_t upload_left; /* number of request bytes left to upload */
+ bool closed; /* TRUE on stream close */
+ bool reset; /* TRUE on stream reset */
+ bool send_closed; /* stream is locally closed */
+ bool resp_hds_complete; /* complete, final response has been received */
+ bool resp_got_header; /* TRUE when h3 stream has recvd some HEADER */
+};
+
+#define H3_STREAM_CTX(d) ((struct stream_ctx *)(((d) && (d)->req.p.http)? \
+ ((struct HTTP *)(d)->req.p.http)->h3_ctx \
+ : NULL))
+#define H3_STREAM_LCTX(d) ((struct HTTP *)(d)->req.p.http)->h3_ctx
+#define H3_STREAM_ID(d) (H3_STREAM_CTX(d)? \
+ H3_STREAM_CTX(d)->id : -2)
+
+static bool stream_send_is_suspended(struct Curl_easy *data)
{
- (void)cf;
- data->state.drain = 1;
- Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ return (data->req.keepon & KEEP_SEND_HOLD);
}
-static CURLcode h3_add_event(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- int64_t stream3_id, quiche_h3_event *ev)
+static void stream_send_suspend(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
{
- struct Curl_easy *mdata;
- struct h3_event_node *node, **pnext;
+ struct cf_quiche_ctx *ctx = cf->ctx;
- DEBUGASSERT(data->multi);
- for(mdata = data->multi->easyp; mdata; mdata = mdata->next) {
- if(mdata->req.p.http && mdata->req.p.http->stream3_id == stream3_id) {
- break;
+ if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
+ data->req.keepon |= KEEP_SEND_HOLD;
+ ++ctx->sends_on_hold;
+ if(H3_STREAM_ID(data) >= 0)
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] suspend sending",
+ H3_STREAM_ID(data)));
+ else
+ DEBUGF(LOG_CF(data, cf, "[%s] suspend sending",
+ data->state.url));
+ }
+}
+
+static void stream_send_resume(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_quiche_ctx *ctx = cf->ctx;
+
+ if(stream_send_is_suspended(data)) {
+ data->req.keepon &= ~KEEP_SEND_HOLD;
+ --ctx->sends_on_hold;
+ if(H3_STREAM_ID(data) >= 0)
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] resume sending",
+ H3_STREAM_ID(data)));
+ else
+ DEBUGF(LOG_CF(data, cf, "[%s] resume sending",
+ data->state.url));
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
+ }
+}
+
+static void check_resumes(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_quiche_ctx *ctx = cf->ctx;
+ struct Curl_easy *sdata;
+
+ if(ctx->sends_on_hold) {
+ DEBUGASSERT(data->multi);
+ for(sdata = data->multi->easyp;
+ sdata && ctx->sends_on_hold; sdata = sdata->next) {
+ if(stream_send_is_suspended(sdata)) {
+ stream_send_resume(cf, sdata);
+ }
}
}
+}
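/* [Editor's note] On the suspend/resume bookkeeping above: suspending sets
 * KEEP_SEND_HOLD on the transfer and bumps ctx->sends_on_hold, so
 * check_resumes() can skip the walk over the easy handles entirely while
 * nothing is suspended, and stop early once every held transfer resumed. */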
- if(!mdata) {
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] event discarded, easy handle "
- "not found", stream3_id));
- quiche_h3_event_free(ev);
+static CURLcode h3_data_setup(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_quiche_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ if(stream)
return CURLE_OK;
- }
- node = calloc(sizeof(*node), 1);
- if(!node) {
- quiche_h3_event_free(ev);
+ stream = calloc(1, sizeof(*stream));
+ if(!stream)
return CURLE_OUT_OF_MEMORY;
+
+ H3_STREAM_LCTX(data) = stream;
+ stream->id = -1;
+ Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
+ H3_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
+ DEBUGF(LOG_CF(data, cf, "data setup (easy %p)", (void *)data));
+ return CURLE_OK;
+}
+
+static void h3_data_done(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct cf_quiche_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ (void)cf;
+ if(stream) {
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] easy handle is done",
+ stream->id));
+ if(stream_send_is_suspended(data)) {
+ data->req.keepon &= ~KEEP_SEND_HOLD;
+ --ctx->sends_on_hold;
+ }
+ Curl_bufq_free(&stream->recvbuf);
+ free(stream);
+ H3_STREAM_LCTX(data) = NULL;
}
- node->ev = ev;
- /* append to process them in order of arrival */
- pnext = &mdata->req.p.http->pending;
- while(*pnext) {
- pnext = &((*pnext)->next);
+}
+
+static void drain_stream(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ unsigned char bits;
+
+ (void)cf;
+ bits = CURL_CSELECT_IN;
+ if(stream && !stream->send_closed && stream->upload_left)
+ bits |= CURL_CSELECT_OUT;
+ if(data->state.dselect_bits != bits) {
+ data->state.dselect_bits = bits;
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
- *pnext = node;
- notify_drain(cf, mdata);
- return CURLE_OK;
}
-struct h3h1header {
- char *dest;
- size_t destlen; /* left to use */
- size_t nlen; /* used */
+static struct Curl_easy *get_stream_easy(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ int64_t stream3_id)
+{
+ struct Curl_easy *sdata;
+
+ (void)cf;
+ if(H3_STREAM_ID(data) == stream3_id) {
+ return data;
+ }
+ else {
+ DEBUGASSERT(data->multi);
+ for(sdata = data->multi->easyp; sdata; sdata = sdata->next) {
+ if(H3_STREAM_ID(sdata) == stream3_id) {
+ return sdata;
+ }
+ }
+ }
+ return NULL;
+}
+
+/*
+ * write_resp_raw() copies response data in raw format to the `data`'s
+ * receive buffer. If not enough space is available, it appends to the
+ * `data`'s overflow buffer.
+ */
+static CURLcode write_resp_raw(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const void *mem, size_t memlen)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ CURLcode result = CURLE_OK;
+ ssize_t nwritten;
+
+ (void)cf;
+ if(!stream)
+ return CURLE_RECV_ERROR;
+ nwritten = Curl_bufq_write(&stream->recvbuf, mem, memlen, &result);
+ if(nwritten < 0)
+ return result;
+
+ if((size_t)nwritten < memlen) {
+ /* This MUST not happen. Our recbuf is dimensioned to hold the
+ * full max_stream_window and then some for this very reason. */
+ DEBUGASSERT(0);
+ return CURLE_RECV_ERROR;
+ }
+ return result;
+}
+
+struct cb_ctx {
+ struct Curl_cfilter *cf;
+ struct Curl_easy *data;
};
static int cb_each_header(uint8_t *name, size_t name_len,
uint8_t *value, size_t value_len,
void *argp)
{
- struct h3h1header *headers = (struct h3h1header *)argp;
- size_t olen = 0;
+ struct cb_ctx *x = argp;
+ struct stream_ctx *stream = H3_STREAM_CTX(x->data);
+ CURLcode result;
- if((name_len == 7) && !strncmp(H2H3_PSEUDO_STATUS, (char *)name, 7)) {
- msnprintf(headers->dest,
- headers->destlen, "HTTP/3 %.*s \r\n",
- (int) value_len, value);
- }
- else if(!headers->nlen) {
- return CURLE_HTTP3;
+ (void)stream;
+ if((name_len == 7) && !strncmp(HTTP_PSEUDO_STATUS, (char *)name, 7)) {
+ result = write_resp_raw(x->cf, x->data, "HTTP/3 ", sizeof("HTTP/3 ") - 1);
+ if(!result)
+ result = write_resp_raw(x->cf, x->data, value, value_len);
+ if(!result)
+ result = write_resp_raw(x->cf, x->data, " \r\n", 3);
}
else {
- msnprintf(headers->dest,
- headers->destlen, "%.*s: %.*s\r\n",
- (int)name_len, name, (int) value_len, value);
- }
- olen = strlen(headers->dest);
- headers->destlen -= olen;
- headers->nlen += olen;
- headers->dest += olen;
- return 0;
+ result = write_resp_raw(x->cf, x->data, name, name_len);
+ if(!result)
+ result = write_resp_raw(x->cf, x->data, ": ", 2);
+ if(!result)
+ result = write_resp_raw(x->cf, x->data, value, value_len);
+ if(!result)
+ result = write_resp_raw(x->cf, x->data, "\r\n", 2);
+ }
+ if(result) {
+ DEBUGF(LOG_CF(x->data, x->cf,
+ "[h3sid=%"PRId64"][HEADERS][%.*s: %.*s] error %d",
+ stream? stream->id : -1, (int)name_len, name,
+ (int)value_len, value, result));
+ }
+ return result;
}
-static ssize_t cf_recv_body(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- char *buf, size_t len,
+static ssize_t stream_resp_read(void *reader_ctx,
+ unsigned char *buf, size_t len,
CURLcode *err)
{
- struct cf_quiche_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct cb_ctx *x = reader_ctx;
+ struct cf_quiche_ctx *ctx = x->cf->ctx;
+ struct stream_ctx *stream = H3_STREAM_CTX(x->data);
ssize_t nread;
- size_t offset = 0;
- if(!stream->firstbody) {
- /* add a header-body separator CRLF */
- offset = 2;
+ if(!stream) {
+ *err = CURLE_RECV_ERROR;
+ return -1;
}
- nread = quiche_h3_recv_body(ctx->h3c, ctx->qconn, stream->stream3_id,
- (unsigned char *)buf + offset, len - offset);
+
+ nread = quiche_h3_recv_body(ctx->h3c, ctx->qconn, stream->id,
+ buf, len);
if(nread >= 0) {
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"][DATA] len=%zd",
- stream->stream3_id, nread));
- if(!stream->firstbody) {
- stream->firstbody = TRUE;
- buf[0] = '\r';
- buf[1] = '\n';
- nread += offset;
- }
+ *err = CURLE_OK;
+ return nread;
}
- else if(nread == -1) {
+ else if(nread < 0) {
*err = CURLE_AGAIN;
- stream->h3_recving_data = FALSE;
+ return -1;
}
else {
+ *err = stream->resp_got_header? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
+ return -1;
+ }
+}
+
+static CURLcode cf_recv_body(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ ssize_t nwritten;
+ struct cb_ctx cb_ctx;
+ CURLcode result = CURLE_OK;
+
+ if(!stream)
+ return CURLE_RECV_ERROR;
+
+ if(!stream->resp_hds_complete) {
+ result = write_resp_raw(cf, data, "\r\n", 2);
+ if(result)
+ return result;
+ stream->resp_hds_complete = TRUE;
+ }
+
+ cb_ctx.cf = cf;
+ cb_ctx.data = data;
+ nwritten = Curl_bufq_slurp(&stream->recvbuf,
+ stream_resp_read, &cb_ctx, &result);
+
+ if(nwritten < 0 && result != CURLE_AGAIN) {
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] recv_body error %zd",
+ stream->id, nwritten));
failf(data, "Error %zd in HTTP/3 response body for stream[%"PRId64"]",
- nread, stream->stream3_id);
+ nwritten, stream->id);
stream->closed = TRUE;
stream->reset = TRUE;
+ stream->send_closed = TRUE;
streamclose(cf->conn, "Reset of stream");
- stream->h3_recving_data = FALSE;
- nread = -1;
- *err = stream->h3_got_header? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
+ return result;
}
- return nread;
+ return CURLE_OK;
}
#ifdef DEBUGBUILD
@@ -335,64 +494,57 @@ static const char *cf_ev_name(quiche_h3_event *ev)
#define cf_ev_name(x) ""
#endif
-static ssize_t h3_process_event(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- char *buf, size_t len,
- int64_t stream3_id,
- quiche_h3_event *ev,
- CURLcode *err)
+static CURLcode h3_process_event(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ int64_t stream3_id,
+ quiche_h3_event *ev)
{
- struct HTTP *stream = data->req.p.http;
- ssize_t recvd = 0;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ struct cb_ctx cb_ctx;
+ CURLcode result = CURLE_OK;
int rc;
- struct h3h1header headers;
-
- DEBUGASSERT(stream3_id == stream->stream3_id);
- *err = CURLE_OK;
+ if(!stream)
+ return CURLE_OK;
+ DEBUGASSERT(stream3_id == stream->id);
switch(quiche_h3_event_type(ev)) {
case QUICHE_H3_EVENT_HEADERS:
- stream->h3_got_header = TRUE;
- headers.dest = buf;
- headers.destlen = len;
- headers.nlen = 0;
- rc = quiche_h3_event_for_each_header(ev, cb_each_header, &headers);
+ stream->resp_got_header = TRUE;
+ cb_ctx.cf = cf;
+ cb_ctx.data = data;
+ rc = quiche_h3_event_for_each_header(ev, cb_each_header, &cb_ctx);
if(rc) {
failf(data, "Error %d in HTTP/3 response header for stream[%"PRId64"]",
rc, stream3_id);
- *err = CURLE_RECV_ERROR;
- recvd = -1;
- break;
+ return CURLE_RECV_ERROR;
}
- recvd = headers.nlen;
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"][HEADERS] len=%zd",
- stream3_id, recvd));
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"][HEADERS]", stream3_id));
break;
case QUICHE_H3_EVENT_DATA:
- DEBUGASSERT(!stream->closed);
- stream->h3_recving_data = TRUE;
- recvd = cf_recv_body(cf, data, buf, len, err);
- if(recvd < 0) {
- if(*err != CURLE_AGAIN)
- return -1;
- recvd = 0;
+ if(!stream->closed) {
+ result = cf_recv_body(cf, data);
}
break;
case QUICHE_H3_EVENT_RESET:
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"][RESET]", stream3_id));
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"][RESET]", stream3_id));
stream->closed = TRUE;
stream->reset = TRUE;
- /* streamclose(cf->conn, "Reset of stream");*/
- stream->h3_recving_data = FALSE;
+ stream->send_closed = TRUE;
+ streamclose(cf->conn, "Reset of stream");
break;
case QUICHE_H3_EVENT_FINISHED:
DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"][FINISHED]", stream3_id));
+ if(!stream->resp_hds_complete) {
+ result = write_resp_raw(cf, data, "\r\n", 2);
+ if(result)
+ return result;
+ stream->resp_hds_complete = TRUE;
+ }
stream->closed = TRUE;
- /* streamclose(cf->conn, "End of stream");*/
- stream->h3_recving_data = FALSE;
+ streamclose(cf->conn, "End of stream");
break;
case QUICHE_H3_EVENT_GOAWAY:
@@ -404,126 +556,159 @@ static ssize_t h3_process_event(struct Curl_cfilter *cf,
stream3_id, quiche_h3_event_type(ev)));
break;
}
- return recvd;
+ return result;
}
-static ssize_t h3_process_pending(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- char *buf, size_t len,
- CURLcode *err)
+static CURLcode cf_poll_events(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
{
- struct HTTP *stream = data->req.p.http;
- struct h3_event_node *node = stream->pending, **pnext = &stream->pending;
- ssize_t recvd = 0, erecvd;
+ struct cf_quiche_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ struct Curl_easy *sdata;
+ quiche_h3_event *ev;
+ CURLcode result;
- *err = CURLE_OK;
- DEBUGASSERT(stream);
- while(node && len) {
- erecvd = h3_process_event(cf, data, buf, len,
- stream->stream3_id, node->ev, err);
- quiche_h3_event_free(node->ev);
- *pnext = node->next;
- free(node);
- node = *pnext;
- if(erecvd < 0) {
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] process event -> %d",
- stream->stream3_id, *err));
- return erecvd;
+ /* Take in the events and distribute them to the transfers. */
+ while(ctx->h3c) {
+ int64_t stream3_id = quiche_h3_conn_poll(ctx->h3c, ctx->qconn, &ev);
+ if(stream3_id == QUICHE_H3_ERR_DONE) {
+ break;
+ }
+ else if(stream3_id < 0) {
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] error poll: %"PRId64,
+ stream? stream->id : -1, stream3_id));
+ return CURLE_HTTP3;
+ }
+
+ sdata = get_stream_easy(cf, data, stream3_id);
+ if(!sdata) {
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] discard event %s for "
+ "unknown [h3sid=%"PRId64"]",
+ stream? stream->id : -1, cf_ev_name(ev),
+ stream3_id));
+ }
+ else {
+ result = h3_process_event(cf, sdata, stream3_id, ev);
+ drain_stream(cf, sdata);
+ if(result) {
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] error processing event %s "
+ "for [h3sid=%"PRId64"] -> %d",
+ stream? stream->id : -1, cf_ev_name(ev),
+ stream3_id, result));
+ quiche_h3_event_free(ev);
+ return result;
+ }
+ quiche_h3_event_free(ev);
}
- recvd += erecvd;
- *err = CURLE_OK;
- buf += erecvd;
- len -= erecvd;
}
- return recvd;
+ return CURLE_OK;
}
-static CURLcode cf_process_ingress(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+struct recv_ctx {
+ struct Curl_cfilter *cf;
+ struct Curl_easy *data;
+ int pkts;
+};
+
+static CURLcode recv_pkt(const unsigned char *pkt, size_t pktlen,
+ struct sockaddr_storage *remote_addr,
+ socklen_t remote_addrlen, int ecn,
+ void *userp)
{
- struct cf_quiche_ctx *ctx = cf->ctx;
- int64_t stream3_id = data->req.p.http? data->req.p.http->stream3_id : -1;
- uint8_t buf[65536];
- int bufsize = (int)sizeof(buf);
- struct sockaddr_storage remote_addr;
- socklen_t remote_addrlen;
+ struct recv_ctx *r = userp;
+ struct cf_quiche_ctx *ctx = r->cf->ctx;
quiche_recv_info recv_info;
- ssize_t recvd, nread;
- ssize_t total = 0, pkts = 0;
+ ssize_t nread;
- DEBUGASSERT(ctx->qconn);
+ (void)ecn;
+ ++r->pkts;
- /* in case the timeout expired */
- quiche_conn_on_timeout(ctx->qconn);
-
- do {
- remote_addrlen = sizeof(remote_addr);
- while((recvd = recvfrom(ctx->q.sockfd, (char *)buf, bufsize, 0,
- (struct sockaddr *)&remote_addr,
- &remote_addrlen)) == -1 &&
- SOCKERRNO == EINTR)
- ;
- if(recvd < 0) {
- if((SOCKERRNO == EAGAIN) || (SOCKERRNO == EWOULDBLOCK)) {
- break;
- }
- if(SOCKERRNO == ECONNREFUSED) {
- const char *r_ip;
- int r_port;
- Curl_cf_socket_peek(cf->next, data, NULL, NULL,
- &r_ip, &r_port, NULL, NULL);
- failf(data, "quiche: connection to %s:%u refused",
- r_ip, r_port);
- return CURLE_COULDNT_CONNECT;
- }
- failf(data, "quiche: recvfrom() unexpectedly returned %zd "
- "(errno: %d, socket %d)", recvd, SOCKERRNO, ctx->q.sockfd);
- return CURLE_RECV_ERROR;
- }
+ recv_info.to = (struct sockaddr *)&ctx->q.local_addr;
+ recv_info.to_len = ctx->q.local_addrlen;
+ recv_info.from = (struct sockaddr *)remote_addr;
+ recv_info.from_len = remote_addrlen;
- total += recvd;
- ++pkts;
- if(recvd > 0 && !ctx->got_first_byte) {
- ctx->first_byte_at = Curl_now();
- ctx->got_first_byte = TRUE;
+ nread = quiche_conn_recv(ctx->qconn, (unsigned char *)pkt, pktlen,
+ &recv_info);
+ if(nread < 0) {
+ if(QUICHE_ERR_DONE == nread) {
+ DEBUGF(LOG_CF(r->data, r->cf, "ingress, quiche is DONE"));
+ return CURLE_OK;
}
- recv_info.from = (struct sockaddr *) &remote_addr;
- recv_info.from_len = remote_addrlen;
- recv_info.to = (struct sockaddr *) &ctx->q.local_addr;
- recv_info.to_len = ctx->q.local_addrlen;
-
- nread = quiche_conn_recv(ctx->qconn, buf, recvd, &recv_info);
- if(nread < 0) {
- if(QUICHE_ERR_DONE == nread) {
- DEBUGF(LOG_CF(data, cf, "ingress, quiche is DONE"));
- return CURLE_OK;
- }
- else if(QUICHE_ERR_TLS_FAIL == nread) {
- long verify_ok = SSL_get_verify_result(ctx->ssl);
- if(verify_ok != X509_V_OK) {
- failf(data, "SSL certificate problem: %s",
- X509_verify_cert_error_string(verify_ok));
- return CURLE_PEER_FAILED_VERIFICATION;
- }
- }
- else {
- failf(data, "quiche_conn_recv() == %zd", nread);
- return CURLE_RECV_ERROR;
+ else if(QUICHE_ERR_TLS_FAIL == nread) {
+ long verify_ok = SSL_get_verify_result(ctx->ssl);
+ if(verify_ok != X509_V_OK) {
+ failf(r->data, "SSL certificate problem: %s",
+ X509_verify_cert_error_string(verify_ok));
+ return CURLE_PEER_FAILED_VERIFICATION;
}
}
- else if(nread < recvd) {
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] ingress, quiche only "
- "accepted %zd/%zd bytes",
- stream3_id, nread, recvd));
+ else {
+ failf(r->data, "quiche_conn_recv() == %zd", nread);
+ return CURLE_RECV_ERROR;
}
+ }
+ else if((size_t)nread < pktlen) {
+ DEBUGF(LOG_CF(r->data, r->cf, "ingress, quiche only read %zd/%zd bytes",
+ nread, pktlen));
+ }
- } while(pkts < 1000); /* arbitrary */
-
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] ingress, recvd %zd bytes "
- "in %zd packets", stream3_id, total, pkts));
return CURLE_OK;
}
+static CURLcode cf_process_ingress(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ struct cf_quiche_ctx *ctx = cf->ctx;
+ struct recv_ctx rctx;
+ CURLcode result;
+
+ DEBUGASSERT(ctx->qconn);
+ rctx.cf = cf;
+ rctx.data = data;
+ rctx.pkts = 0;
+
+ result = vquic_recv_packets(cf, data, &ctx->q, 1000, recv_pkt, &rctx);
+ if(result)
+ return result;
+
+ if(rctx.pkts > 0) {
+ /* quiche digested ingress packets. It might have opened flow control
+ * windows again. */
+ check_resumes(cf, data);
+ }
+ return cf_poll_events(cf, data);
+}
+
+struct read_ctx {
+ struct Curl_cfilter *cf;
+ struct Curl_easy *data;
+ quiche_send_info send_info;
+};
+
+static ssize_t read_pkt_to_send(void *userp,
+ unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct read_ctx *x = userp;
+ struct cf_quiche_ctx *ctx = x->cf->ctx;
+ ssize_t nwritten;
+
+ nwritten = quiche_conn_send(ctx->qconn, buf, buflen, &x->send_info);
+ if(nwritten == QUICHE_ERR_DONE) {
+ *err = CURLE_AGAIN;
+ return -1;
+ }
+
+ if(nwritten < 0) {
+ failf(x->data, "quiche_conn_send returned %zd", nwritten);
+ *err = CURLE_SEND_ERROR;
+ return -1;
+ }
+ *err = CURLE_OK;
+ return nwritten;
+}
+
/*
* flush_egress drains the buffers and sends off data.
* Calls failf() on errors.
@@ -532,60 +717,59 @@ static CURLcode cf_flush_egress(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
struct cf_quiche_ctx *ctx = cf->ctx;
- int64_t stream3_id = data->req.p.http? data->req.p.http->stream3_id : -1;
- quiche_send_info send_info;
- ssize_t outlen, total_len = 0;
- size_t max_udp_payload_size =
- quiche_conn_max_send_udp_payload_size(ctx->qconn);
- size_t gsolen = max_udp_payload_size;
- size_t sent, pktcnt = 0;
+ ssize_t nread;
CURLcode result;
int64_t timeout_ns;
+ struct read_ctx readx;
+ size_t pkt_count, gsolen;
- ctx->q.no_gso = TRUE;
- if(ctx->q.num_blocked_pkt) {
- result = vquic_send_blocked_pkt(cf, data, &ctx->q);
- if(result) {
- if(result == CURLE_AGAIN) {
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] egress, still not "
- "able to send blocked packet", stream3_id));
- Curl_expire(data, 1, EXPIRE_QUIC);
- return CURLE_OK;
- }
- goto out;
+ result = vquic_flush(cf, data, &ctx->q);
+ if(result) {
+ if(result == CURLE_AGAIN) {
+ Curl_expire(data, 1, EXPIRE_QUIC);
+ return CURLE_OK;
}
+ return result;
}
+ readx.cf = cf;
+ readx.data = data;
+ memset(&readx.send_info, 0, sizeof(readx.send_info));
+ pkt_count = 0;
+ gsolen = quiche_conn_max_send_udp_payload_size(ctx->qconn);
for(;;) {
- outlen = quiche_conn_send(ctx->qconn, ctx->q.pktbuf, max_udp_payload_size,
- &send_info);
- if(outlen == QUICHE_ERR_DONE) {
- result = CURLE_OK;
- goto out;
- }
+ /* add the next packet to send, if any, to our buffer */
+ nread = Curl_bufq_sipn(&ctx->q.sendbuf, 0,
+ read_pkt_to_send, &readx, &result);
+ /* DEBUGF(LOG_CF(data, cf, "sip packet(maxlen=%zu) -> %zd, %d",
+ (size_t)0, nread, result)); */
- if(outlen < 0) {
- failf(data, "quiche_conn_send returned %zd", outlen);
- result = CURLE_SEND_ERROR;
+ if(nread < 0) {
+ if(result != CURLE_AGAIN)
+ return result;
+ /* Nothing more to add, flush and leave */
+ result = vquic_send(cf, data, &ctx->q, gsolen);
+ if(result) {
+ if(result == CURLE_AGAIN) {
+ Curl_expire(data, 1, EXPIRE_QUIC);
+ return CURLE_OK;
+ }
+ return result;
+ }
goto out;
}
- /* send the pktbuf *before* the last addition */
- result = vquic_send_packet(cf, data, &ctx->q, ctx->q.pktbuf,
- outlen, gsolen, &sent);
- ++pktcnt;
- total_len += outlen;
- if(result) {
- if(result == CURLE_AGAIN) {
- /* blocked, add the pktbuf *before* and *at* the last addition
- * separately to the blocked packages */
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] egress, pushing blocked "
- "packet with %zd bytes", stream3_id, outlen));
- vquic_push_blocked_pkt(cf, &ctx->q, ctx->q.pktbuf, outlen, gsolen);
- Curl_expire(data, 1, EXPIRE_QUIC);
- return CURLE_OK;
+ ++pkt_count;
+ if((size_t)nread < gsolen || pkt_count >= MAX_PKT_BURST) {
+ result = vquic_send(cf, data, &ctx->q, gsolen);
+ if(result) {
+ if(result == CURLE_AGAIN) {
+ Curl_expire(data, 1, EXPIRE_QUIC);
+ return CURLE_OK;
+ }
+ goto out;
}
- goto out;
+ pkt_count = 0;
}
}
@@ -595,9 +779,6 @@ out:
timeout_ns += 1000000;
/* expire resolution is milliseconds */
Curl_expire(data, (timeout_ns / 1000000), EXPIRE_QUIC);
- if(pktcnt)
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] egress, sent %zd packets "
- "with %zd bytes", stream3_id, pktcnt, total_len));
return result;
}
@@ -605,205 +786,166 @@ static ssize_t recv_closed_stream(struct Curl_cfilter *cf,
struct Curl_easy *data,
CURLcode *err)
{
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
ssize_t nread = -1;
+ DEBUGASSERT(stream);
if(stream->reset) {
failf(data,
- "HTTP/3 stream %" PRId64 " reset by server", stream->stream3_id);
- *err = stream->h3_got_header? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
+ "HTTP/3 stream %" PRId64 " reset by server", stream->id);
+ *err = stream->resp_got_header? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv, was reset -> %d",
- stream->stream3_id, *err));
- goto out;
+ stream->id, *err));
}
-
- if(!stream->h3_got_header) {
+ else if(!stream->resp_got_header) {
failf(data,
"HTTP/3 stream %" PRId64 " was closed cleanly, but before getting"
" all response header fields, treated as error",
- stream->stream3_id);
+ stream->id);
/* *err = CURLE_PARTIAL_FILE; */
*err = CURLE_RECV_ERROR;
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv, closed incomplete"
- " -> %d", stream->stream3_id, *err));
- goto out;
+ " -> %d", stream->id, *err));
}
else {
+ *err = CURLE_OK;
+ nread = 0;
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_recv, closed ok"
- " -> %d", stream->stream3_id, *err));
+ " -> %d", stream->id, *err));
}
- *err = CURLE_OK;
- nread = 0;
-
-out:
return nread;
}
-static CURLcode cf_poll_events(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct cf_quiche_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
- quiche_h3_event *ev;
-
- /* Take in the events and distribute them to the transfers. */
- while(1) {
- int64_t stream3_id = quiche_h3_conn_poll(ctx->h3c, ctx->qconn, &ev);
- if(stream3_id < 0) {
- /* nothing more to do */
- break;
- }
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] recv, queue event %s "
- "for [h3sid=%"PRId64"]",
- stream? stream->stream3_id : -1, cf_ev_name(ev),
- stream3_id));
- if(h3_add_event(cf, data, stream3_id, ev) != CURLE_OK) {
- return CURLE_OUT_OF_MEMORY;
- }
- }
- return CURLE_OK;
-}
-
-static ssize_t cf_recv_transfer_data(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- char *buf, size_t len,
- CURLcode *err)
-{
- struct HTTP *stream = data->req.p.http;
- ssize_t recvd = -1;
- size_t offset = 0;
-
- if(stream->h3_recving_data) {
- /* try receiving body first */
- recvd = cf_recv_body(cf, data, buf, len, err);
- if(recvd < 0) {
- if(*err != CURLE_AGAIN)
- return -1;
- recvd = 0;
- }
- if(recvd > 0) {
- offset = recvd;
- }
- }
-
- if(offset < len && stream->pending) {
- /* process any pending events for `data` first. if there are,
- * return so the transfer can handle those. We do not want to
- * progress ingress while events are pending here. */
- recvd = h3_process_pending(cf, data, buf + offset, len - offset, err);
- if(recvd < 0) {
- if(*err != CURLE_AGAIN)
- return -1;
- recvd = 0;
- }
- if(recvd > 0) {
- offset += recvd;
- }
- }
-
- if(offset) {
- *err = CURLE_OK;
- return offset;
- }
- *err = CURLE_AGAIN;
- return 0;
-}
-
static ssize_t cf_quiche_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
char *buf, size_t len, CURLcode *err)
{
- struct HTTP *stream = data->req.p.http;
- ssize_t recvd = -1;
-
- *err = CURLE_AGAIN;
+ struct cf_quiche_ctx *ctx = cf->ctx;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ ssize_t nread = -1;
+ CURLcode result;
- recvd = cf_recv_transfer_data(cf, data, buf, len, err);
- if(recvd)
- goto out;
- if(stream->closed) {
- recvd = recv_closed_stream(cf, data, err);
+ if(!stream) {
+ *err = CURLE_RECV_ERROR;
goto out;
}
- /* we did get nothing from the quiche buffers or pending events.
- * Take in more data from the connection, any error is fatal */
+ if(!Curl_bufq_is_empty(&stream->recvbuf)) {
+ nread = Curl_bufq_read(&stream->recvbuf,
+ (unsigned char *)buf, len, err);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] read recvbuf(len=%zu) "
+ "-> %zd, %d", stream->id, len, nread, *err));
+ if(nread < 0)
+ goto out;
+ }
+
if(cf_process_ingress(cf, data)) {
- DEBUGF(LOG_CF(data, cf, "h3_stream_recv returns on ingress"));
+ DEBUGF(LOG_CF(data, cf, "cf_recv, error on ingress"));
*err = CURLE_RECV_ERROR;
- recvd = -1;
+ nread = -1;
goto out;
}
- /* poll quiche and distribute the events to the transfers */
- *err = cf_poll_events(cf, data);
- if(*err) {
- recvd = -1;
- goto out;
+
+ /* recvbuf had nothing before, maybe after progressing ingress? */
+ if(nread < 0 && !Curl_bufq_is_empty(&stream->recvbuf)) {
+ nread = Curl_bufq_read(&stream->recvbuf,
+ (unsigned char *)buf, len, err);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] read recvbuf(len=%zu) "
+ "-> %zd, %d", stream->id, len, nread, *err));
+ if(nread < 0)
+ goto out;
}
- /* try to receive again for this transfer */
- recvd = cf_recv_transfer_data(cf, data, buf, len, err);
- if(recvd)
- goto out;
- if(stream->closed) {
- recvd = recv_closed_stream(cf, data, err);
- goto out;
+ if(nread > 0) {
+ if(stream->closed)
+ drain_stream(cf, data);
+ }
+ else {
+ if(stream->closed) {
+ nread = recv_closed_stream(cf, data, err);
+ goto out;
+ }
+ else if(quiche_conn_is_draining(ctx->qconn)) {
+ failf(data, "QUIC connection is draining");
+ *err = CURLE_HTTP3;
+ nread = -1;
+ goto out;
+ }
+ *err = CURLE_AGAIN;
+ nread = -1;
}
- recvd = -1;
- *err = CURLE_AGAIN;
- data->state.drain = 0;
out:
- if(cf_flush_egress(cf, data)) {
+ result = cf_flush_egress(cf, data);
+ if(result) {
DEBUGF(LOG_CF(data, cf, "cf_recv, flush egress failed"));
- *err = CURLE_SEND_ERROR;
- return -1;
+ *err = result;
+ nread = -1;
}
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] cf_recv -> %zd, err=%d",
- stream->stream3_id, recvd, *err));
- if(recvd > 0)
- notify_drain(cf, data);
- return recvd;
+ if(nread > 0)
+ ctx->data_recvd += nread;
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] cf_recv(total=%zd) -> %zd, %d",
+ stream->id, ctx->data_recvd, nread, *err));
+ return nread;
}
/* Index where :authority header field will appear in request header
field list. */
#define AUTHORITY_DST_IDX 3
-static CURLcode cf_http_request(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- const void *mem,
- size_t len)
+static ssize_t h3_open_stream(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const void *buf, size_t len,
+ CURLcode *err)
{
struct cf_quiche_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
- size_t nheader;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ size_t nheader, i;
int64_t stream3_id;
+ struct h1_req_parser h1;
+ struct dynhds h2_headers;
quiche_h3_header *nva = NULL;
- CURLcode result = CURLE_OK;
- struct h2h3req *hreq = NULL;
+ ssize_t nwritten;
- stream->h3req = TRUE; /* send off! */
- stream->closed = FALSE;
- stream->reset = FALSE;
+ if(!stream) {
+ *err = h3_data_setup(cf, data);
+ if(*err) {
+ nwritten = -1;
+ goto out;
+ }
+ stream = H3_STREAM_CTX(data);
+ DEBUGASSERT(stream);
+ }
- result = Curl_pseudo_headers(data, mem, len, NULL, &hreq);
- if(result)
- goto fail;
- nheader = hreq->entries;
+ Curl_h1_req_parse_init(&h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
+ Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
+ DEBUGASSERT(stream);
+ nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
+ if(nwritten < 0)
+ goto out;
+ DEBUGASSERT(h1.done);
+ DEBUGASSERT(h1.req);
+
+ *err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
+ if(*err) {
+ nwritten = -1;
+ goto out;
+ }
+
+ nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(quiche_h3_header) * nheader);
if(!nva) {
- result = CURLE_OUT_OF_MEMORY;
- goto fail;
+ *err = CURLE_OUT_OF_MEMORY;
+ nwritten = -1;
+ goto out;
}
- else {
- unsigned int i;
- for(i = 0; i < nheader; i++) {
- nva[i].name = (unsigned char *)hreq->header[i].name;
- nva[i].name_len = hreq->header[i].namelen;
- nva[i].value = (unsigned char *)hreq->header[i].value;
- nva[i].value_len = hreq->header[i].valuelen;
- }
+
+ for(i = 0; i < nheader; ++i) {
+ struct dynhds_entry *e = Curl_dynhds_getn(&h2_headers, i);
+ nva[i].name = (unsigned char *)e->name;
+ nva[i].name_len = e->namelen;
+ nva[i].value = (unsigned char *)e->value;
+ nva[i].value_len = e->valuelen;
}
switch(data->state.httpreq) {
@@ -815,104 +957,131 @@ static CURLcode cf_http_request(struct Curl_cfilter *cf,
stream->upload_left = data->state.infilesize;
else
/* data sending without specifying the data amount up front */
- stream->upload_left = -1; /* unknown, but not zero */
-
- stream->upload_done = !stream->upload_left;
- stream3_id = quiche_h3_send_request(ctx->h3c, ctx->qconn, nva, nheader,
- stream->upload_done);
+ stream->upload_left = -1; /* unknown */
break;
default:
- stream->upload_left = 0;
- stream->upload_done = TRUE;
- stream3_id = quiche_h3_send_request(ctx->h3c, ctx->qconn, nva, nheader,
- TRUE);
+ stream->upload_left = 0; /* no request body */
break;
}
- Curl_safefree(nva);
+ if(stream->upload_left == 0)
+ stream->send_closed = TRUE;
+ stream3_id = quiche_h3_send_request(ctx->h3c, ctx->qconn, nva, nheader,
+ stream->send_closed);
if(stream3_id < 0) {
if(QUICHE_H3_ERR_STREAM_BLOCKED == stream3_id) {
- DEBUGF(LOG_CF(data, cf, "send_request(%s, body_len=%ld) rejected "
- "with H3_ERR_STREAM_BLOCKED",
- data->state.url, (long)stream->upload_left));
- result = CURLE_AGAIN;
- goto fail;
+ /* quiche seems to report this error if the connection window is
+ * exhausted. Which happens frequently and intermittent. */
+ DEBUGF(LOG_CF(data, cf, "send_request(%s) rejected with BLOCKED",
+ data->state.url));
+ stream_send_suspend(cf, data);
+ *err = CURLE_AGAIN;
+ nwritten = -1;
+ goto out;
}
else {
- DEBUGF(LOG_CF(data, cf, "send_request(%s, body_len=%ld) -> %" PRId64,
- data->state.url, (long)stream->upload_left, stream3_id));
+ DEBUGF(LOG_CF(data, cf, "send_request(%s) -> %" PRId64,
+ data->state.url, stream3_id));
}
- result = CURLE_SEND_ERROR;
- goto fail;
+ *err = CURLE_SEND_ERROR;
+ nwritten = -1;
+ goto out;
}
- stream->stream3_id = stream3_id;
+ DEBUGASSERT(stream->id == -1);
+ *err = CURLE_OK;
+ stream->id = stream3_id;
+ stream->closed = FALSE;
+ stream->reset = FALSE;
+
infof(data, "Using HTTP/3 Stream ID: %" PRId64 " (easy handle %p)",
stream3_id, (void *)data);
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] opened for %s",
stream3_id, data->state.url));
- Curl_pseudo_free(hreq);
- return CURLE_OK;
-
-fail:
+out:
free(nva);
- Curl_pseudo_free(hreq);
- return result;
+ Curl_h1_req_parse_free(&h1);
+ Curl_dynhds_free(&h2_headers);
+ return nwritten;
}
static ssize_t cf_quiche_send(struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err)
{
struct cf_quiche_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ CURLcode result;
ssize_t nwritten;
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_send(len=%zu) start",
- stream->h3req? stream->stream3_id : -1, len));
*err = cf_process_ingress(cf, data);
- if(*err)
- return -1;
+ if(*err) {
+ nwritten = -1;
+ goto out;
+ }
- if(!stream->h3req) {
- CURLcode result = cf_http_request(cf, data, buf, len);
- if(result) {
- *err = result;
- return -1;
- }
- nwritten = len;
+ if(!stream || stream->id < 0) {
+ nwritten = h3_open_stream(cf, data, buf, len, err);
+ if(nwritten < 0)
+ goto out;
+ stream = H3_STREAM_CTX(data);
}
else {
- nwritten = quiche_h3_send_body(ctx->h3c, ctx->qconn, stream->stream3_id,
- (uint8_t *)buf, len, FALSE);
- DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] send body(len=%zu) -> %zd",
- stream->stream3_id, len, nwritten));
- if(nwritten == QUICHE_H3_ERR_DONE) {
- /* no error, nothing to do (flow control?) */
+ bool eof = (stream->upload_left >= 0 &&
+ (curl_off_t)len >= stream->upload_left);
+ nwritten = quiche_h3_send_body(ctx->h3c, ctx->qconn, stream->id,
+ (uint8_t *)buf, len, eof);
+ if(nwritten == QUICHE_H3_ERR_DONE || (nwritten == 0 && len > 0)) {
+ /* TODO: we seem to be blocked on flow control and should HOLD
+ * sending. But when do we open again? */
+ if(!quiche_conn_stream_writable(ctx->qconn, stream->id, len)) {
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] send_body(len=%zu) "
+ "-> window exhausted", stream->id, len));
+ stream_send_suspend(cf, data);
+ }
*err = CURLE_AGAIN;
nwritten = -1;
+ goto out;
}
else if(nwritten == QUICHE_H3_TRANSPORT_ERR_FINAL_SIZE) {
- DEBUGF(LOG_CF(data, cf, "send_body(len=%zu) -> exceeds size", len));
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] send_body(len=%zu) "
+ "-> exceeds size", stream->id, len));
*err = CURLE_SEND_ERROR;
nwritten = -1;
+ goto out;
}
else if(nwritten < 0) {
- DEBUGF(LOG_CF(data, cf, "send_body(len=%zu) -> SEND_ERROR", len));
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] send_body(len=%zu) "
+ "-> quiche err %zd", stream->id, len, nwritten));
*err = CURLE_SEND_ERROR;
nwritten = -1;
+ goto out;
}
else {
+ /* quiche accepted all or at least a part of the buf */
+ if(stream->upload_left > 0) {
+ stream->upload_left = (nwritten < stream->upload_left)?
+ (stream->upload_left - nwritten) : 0;
+ }
+ if(stream->upload_left == 0)
+ stream->send_closed = TRUE;
+
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] send body(len=%zu, "
+ "left=%zd) -> %zd",
+ stream->id, len, stream->upload_left, nwritten));
*err = CURLE_OK;
}
}
- if(cf_flush_egress(cf, data)) {
- *err = CURLE_SEND_ERROR;
- return -1;
+out:
+ result = cf_flush_egress(cf, data);
+ if(result) {
+ *err = result;
+ nwritten = -1;
}
-
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] cf_send(len=%zu) -> %zd, %d",
+ stream? stream->id : -1, len, nwritten, *err));
return nwritten;
}
@@ -920,19 +1089,10 @@ static bool stream_is_writeable(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
struct cf_quiche_ctx *ctx = cf->ctx;
- struct HTTP *stream = data->req.p.http;
-
- /* surely, there must be a better way */
- quiche_stream_iter *qiter = quiche_conn_writable(ctx->qconn);
- if(qiter) {
- uint64_t stream_id;
- while(quiche_stream_iter_next(qiter, &stream_id)) {
- if(stream_id == (uint64_t)stream->stream3_id)
- return TRUE;
- }
- quiche_stream_iter_free(qiter);
- }
- return FALSE;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+
+ return stream &&
+ quiche_conn_stream_writable(ctx->qconn, (uint64_t)stream->id, 1);
}
static int cf_quiche_get_select_socks(struct Curl_cfilter *cf,
@@ -964,57 +1124,63 @@ static int cf_quiche_get_select_socks(struct Curl_cfilter *cf,
static bool cf_quiche_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
- struct HTTP *stream = data->req.p.http;
+ const struct stream_ctx *stream = H3_STREAM_CTX(data);
+ (void)cf;
+ return stream && !Curl_bufq_is_empty(&stream->recvbuf);
+}
- if(stream->pending) {
- DEBUGF(LOG_CF((struct Curl_easy *)data, cf,
- "[h3sid=%"PRId64"] has event pending", stream->stream3_id));
- return TRUE;
- }
- if(stream->h3_recving_data) {
- DEBUGF(LOG_CF((struct Curl_easy *)data, cf,
- "[h3sid=%"PRId64"] is receiving DATA", stream->stream3_id));
- return TRUE;
- }
- if(data->state.drain) {
- DEBUGF(LOG_CF((struct Curl_easy *)data, cf,
- "[h3sid=%"PRId64"] is draining", stream->stream3_id));
- return TRUE;
+static CURLcode h3_data_pause(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool pause)
+{
+ /* TODO: there seems right now no API in quiche to shrink/enlarge
+ * the streams windows. As we do in HTTP/2. */
+ if(!pause) {
+ drain_stream(cf, data);
+ Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
- return FALSE;
+ return CURLE_OK;
}
static CURLcode cf_quiche_data_event(struct Curl_cfilter *cf,
struct Curl_easy *data,
int event, int arg1, void *arg2)
{
- struct cf_quiche_ctx *ctx = cf->ctx;
CURLcode result = CURLE_OK;
(void)arg1;
(void)arg2;
switch(event) {
+ case CF_CTRL_DATA_SETUP: {
+ result = h3_data_setup(cf, data);
+ break;
+ }
+ case CF_CTRL_DATA_PAUSE:
+ result = h3_data_pause(cf, data, (arg1 != 0));
+ break;
case CF_CTRL_DATA_DONE: {
- struct HTTP *stream = data->req.p.http;
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] easy handle is %s",
- stream->stream3_id, arg1? "cancelled" : "done"));
- h3_clear_pending(data);
+ h3_data_done(cf, data);
break;
}
case CF_CTRL_DATA_DONE_SEND: {
- struct HTTP *stream = data->req.p.http;
- ssize_t sent;
- stream->upload_done = TRUE;
- sent = quiche_h3_send_body(ctx->h3c, ctx->qconn, stream->stream3_id,
- NULL, 0, TRUE);
- DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] send_body FINISHED",
- stream->stream3_id));
- if(sent < 0)
- return CURLE_SEND_ERROR;
+ struct stream_ctx *stream = H3_STREAM_CTX(data);
+ if(stream && !stream->send_closed) {
+ unsigned char body[1];
+ ssize_t sent;
+
+ stream->send_closed = TRUE;
+ stream->upload_left = 0;
+ body[0] = 'X';
+ sent = cf_quiche_send(cf, data, body, 0, &result);
+ DEBUGF(LOG_CF(data, cf, "[h3sid=%"PRId64"] DONE_SEND -> %zd, %d",
+ stream->id, sent, result));
+ }
break;
}
case CF_CTRL_DATA_IDLE:
- /* anything to do? */
+ result = cf_flush_egress(cf, data);
+ if(result)
+ DEBUGF(LOG_CF(data, cf, "data idle, flush egress -> %d", result));
break;
default:
break;
@@ -1095,8 +1261,11 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
debug_log_init = 1;
}
#endif
+ Curl_bufcp_init(&ctx->stream_bufcp, H3_STREAM_CHUNK_SIZE,
+ H3_STREAM_POOL_SPARES);
+ ctx->data_recvd = 0;
- result = vquic_ctx_init(&ctx->q, MAX_UDP_PAYLOAD_SIZE * MAX_PKT_BURST);
+ result = vquic_ctx_init(&ctx->q);
if(result)
return result;
@@ -1105,15 +1274,23 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
failf(data, "can't create quiche config");
return CURLE_FAILED_INIT;
}
+ quiche_config_enable_pacing(ctx->cfg, false);
quiche_config_set_max_idle_timeout(ctx->cfg, QUIC_IDLE_TIMEOUT);
- quiche_config_set_initial_max_data(ctx->cfg, QUIC_MAX_DATA);
- quiche_config_set_initial_max_stream_data_bidi_local(
- ctx->cfg, QUIC_MAX_DATA);
- quiche_config_set_initial_max_stream_data_bidi_remote(
- ctx->cfg, QUIC_MAX_DATA);
- quiche_config_set_initial_max_stream_data_uni(ctx->cfg, QUIC_MAX_DATA);
+ quiche_config_set_initial_max_data(ctx->cfg, (1 * 1024 * 1024)
+ /* (QUIC_MAX_STREAMS/2) * H3_STREAM_WINDOW_SIZE */);
quiche_config_set_initial_max_streams_bidi(ctx->cfg, QUIC_MAX_STREAMS);
quiche_config_set_initial_max_streams_uni(ctx->cfg, QUIC_MAX_STREAMS);
+ quiche_config_set_initial_max_stream_data_bidi_local(ctx->cfg,
+ H3_STREAM_WINDOW_SIZE);
+ quiche_config_set_initial_max_stream_data_bidi_remote(ctx->cfg,
+ H3_STREAM_WINDOW_SIZE);
+ quiche_config_set_initial_max_stream_data_uni(ctx->cfg,
+ H3_STREAM_WINDOW_SIZE);
+ quiche_config_set_disable_active_migration(ctx->cfg, TRUE);
+
+ quiche_config_set_max_connection_window(ctx->cfg,
+ 10 * QUIC_MAX_STREAMS * H3_STREAM_WINDOW_SIZE);
+ quiche_config_set_max_stream_window(ctx->cfg, 10 * H3_STREAM_WINDOW_SIZE);
quiche_config_set_application_protos(ctx->cfg,
(uint8_t *)
QUICHE_H3_APPLICATION_PROTOCOL,
@@ -1166,6 +1343,11 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
}
#endif
+ /* we do not get a setup event for the initial transfer */
+ result = h3_data_setup(cf, data);
+ if(result)
+ return result;
+
result = cf_flush_egress(cf, data);
if(result)
return result;
@@ -1293,7 +1475,6 @@ static void cf_quiche_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_quiche_ctx *ctx = cf->ctx;
- (void)data;
if(ctx) {
if(ctx->qconn) {
(void)quiche_conn_close(ctx->qconn, TRUE, 0, NULL, 0);
@@ -1437,7 +1618,7 @@ out:
*pcf = (!result)? cf : NULL;
if(result) {
if(udp_cf)
- Curl_conn_cf_discard(udp_cf, data);
+ Curl_conn_cf_discard_sub(cf, udp_cf, data, TRUE);
Curl_safefree(cf);
Curl_safefree(ctx);
}
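
Note on the curl_quiche.c changes above: the shared struct HTTP fields are replaced by a per-transfer stream_ctx that h3_data_setup() allocates lazily, the H3_STREAM_CTX macro looks up, and h3_data_done() frees. As a reading aid only, here is a minimal self-contained sketch of that lookup pattern; every type and name below is invented for the example and is not curl code.

/* Toy illustration of the per-transfer stream-context pattern:
 * allocate on setup, find via a macro, free when the transfer is done. */
#include <stdio.h>
#include <stdlib.h>

struct stream_ctx {          /* protocol-private state for one transfer */
  long id;                   /* stream identifier, -1 until opened */
  int closed;                /* non-zero once the stream has closed */
};

struct transfer {            /* stands in for an easy handle */
  struct stream_ctx *h3_ctx; /* lazily allocated stream context */
};

#define STREAM_CTX(t)  ((t)? (t)->h3_ctx : NULL)
#define STREAM_ID(t)   (STREAM_CTX(t)? STREAM_CTX(t)->id : -2)

static int data_setup(struct transfer *t)
{
  if(STREAM_CTX(t))          /* already set up, nothing to do */
    return 0;
  t->h3_ctx = calloc(1, sizeof(*t->h3_ctx));
  if(!t->h3_ctx)
    return 1;
  t->h3_ctx->id = -1;        /* not opened yet */
  return 0;
}

static void data_done(struct transfer *t)
{
  free(t->h3_ctx);
  t->h3_ctx = NULL;
}

int main(void)
{
  struct transfer t = { NULL };
  printf("before setup, id=%ld\n", STREAM_ID(&t));  /* -2: no context */
  if(data_setup(&t))
    return 1;
  printf("after setup, id=%ld\n", STREAM_ID(&t));   /* -1: not opened */
  data_done(&t);
  return 0;
}
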
diff --git a/libs/libcurl/src/vquic/vquic.c b/libs/libcurl/src/vquic/vquic.c
index be9e151669..0cb53f6b31 100644
--- a/libs/libcurl/src/vquic/vquic.c
+++ b/libs/libcurl/src/vquic/vquic.c
@@ -22,12 +22,25 @@
*
***************************************************************************/
+/* WIP, experimental: use recvmmsg() on linux
+ * we have no configure check, yet
+ * and also it is only available for _GNU_SOURCE, which
+ * we do not use otherwise.
+#define HAVE_SENDMMSG
+ */
+#if defined(HAVE_SENDMMSG)
+#define _GNU_SOURCE
+#include <sys/socket.h>
+#undef _GNU_SOURCE
+#endif
+
#include "curl_setup.h"
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include "urldata.h"
+#include "bufq.h"
#include "dynbuf.h"
#include "cfilters.h"
#include "curl_log.h"
@@ -51,9 +64,13 @@
#define QLOGMODE O_WRONLY|O_CREAT
#endif
+#define NW_CHUNK_SIZE (64 * 1024)
+#define NW_SEND_CHUNKS 2
+
+
void Curl_quic_ver(char *p, size_t len)
{
-#ifdef USE_NGTCP2
+#if defined(USE_NGTCP2) && defined(USE_NGHTTP3)
Curl_ngtcp2_ver(p, len);
#elif defined(USE_QUICHE)
Curl_quiche_ver(p, len);
@@ -62,17 +79,10 @@ void Curl_quic_ver(char *p, size_t len)
#endif
}
-CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx, size_t pktbuflen)
+CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx)
{
- qctx->num_blocked_pkt = 0;
- qctx->num_blocked_pkt_sent = 0;
- memset(&qctx->blocked_pkt, 0, sizeof(qctx->blocked_pkt));
-
- qctx->pktbuflen = pktbuflen;
- qctx->pktbuf = malloc(qctx->pktbuflen);
- if(!qctx->pktbuf)
- return CURLE_OUT_OF_MEMORY;
-
+ Curl_bufq_init2(&qctx->sendbuf, NW_CHUNK_SIZE, NW_SEND_CHUNKS,
+ BUFQ_OPT_SOFT_LIMIT);
#if defined(__linux__) && defined(UDP_SEGMENT) && defined(HAVE_SENDMSG)
qctx->no_gso = FALSE;
#else
@@ -84,8 +94,7 @@ CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx, size_t pktbuflen)
void vquic_ctx_free(struct cf_quic_ctx *qctx)
{
- free(qctx->pktbuf);
- qctx->pktbuf = NULL;
+ Curl_bufq_free(&qctx->sendbuf);
}
static CURLcode send_packet_no_gso(struct Curl_cfilter *cf,
@@ -215,11 +224,11 @@ static CURLcode send_packet_no_gso(struct Curl_cfilter *cf, return CURLE_OK;
}
-CURLcode vquic_send_packet(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct cf_quic_ctx *qctx,
- const uint8_t *pkt, size_t pktlen, size_t gsolen,
- size_t *psent)
+static CURLcode vquic_send_packets(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct cf_quic_ctx *qctx,
+ const uint8_t *pkt, size_t pktlen,
+ size_t gsolen, size_t *psent)
{
if(qctx->no_gso && pktlen > gsolen) {
return send_packet_no_gso(cf, data, qctx, pkt, pktlen, gsolen, psent);
@@ -228,53 +237,270 @@ CURLcode vquic_send_packet(struct Curl_cfilter *cf,
return do_sendmsg(cf, data, qctx, pkt, pktlen, gsolen, psent);
}
+CURLcode vquic_flush(struct Curl_cfilter *cf, struct Curl_easy *data,
+ struct cf_quic_ctx *qctx)
+{
+ const unsigned char *buf;
+ size_t blen, sent;
+ CURLcode result;
+ size_t gsolen;
+
+ while(Curl_bufq_peek(&qctx->sendbuf, &buf, &blen)) {
+ gsolen = qctx->gsolen;
+ if(qctx->split_len) {
+ gsolen = qctx->split_gsolen;
+ if(blen > qctx->split_len)
+ blen = qctx->split_len;
+ }
+
+ DEBUGF(LOG_CF(data, cf, "vquic_send(len=%zu, gso=%zu)",
+ blen, gsolen));
+ result = vquic_send_packets(cf, data, qctx, buf, blen, gsolen, &sent);
+ DEBUGF(LOG_CF(data, cf, "vquic_send(len=%zu, gso=%zu) -> %d, sent=%zu",
+ blen, gsolen, result, sent));
+ if(result) {
+ if(result == CURLE_AGAIN) {
+ Curl_bufq_skip(&qctx->sendbuf, sent);
+ if(qctx->split_len)
+ qctx->split_len -= sent;
+ }
+ return result;
+ }
+ Curl_bufq_skip(&qctx->sendbuf, sent);
+ if(qctx->split_len)
+ qctx->split_len -= sent;
+ }
+ return CURLE_OK;
+}
+
+CURLcode vquic_send(struct Curl_cfilter *cf, struct Curl_easy *data,
+ struct cf_quic_ctx *qctx, size_t gsolen)
+{
+ qctx->gsolen = gsolen;
+ return vquic_flush(cf, data, qctx);
+}
+CURLcode vquic_send_tail_split(struct Curl_cfilter *cf, struct Curl_easy *data,
+ struct cf_quic_ctx *qctx, size_t gsolen,
+ size_t tail_len, size_t tail_gsolen)
+{
+ DEBUGASSERT(Curl_bufq_len(&qctx->sendbuf) > tail_len);
+ qctx->split_len = Curl_bufq_len(&qctx->sendbuf) - tail_len;
+ qctx->split_gsolen = gsolen;
+ qctx->gsolen = tail_gsolen;
+ DEBUGF(LOG_CF(data, cf, "vquic_send_tail_split: [%zu gso=%zu][%zu gso=%zu]",
+ qctx->split_len, qctx->split_gsolen,
+ tail_len, qctx->gsolen));
+ return vquic_flush(cf, data, qctx);
+}
-void vquic_push_blocked_pkt(struct Curl_cfilter *cf,
- struct cf_quic_ctx *qctx,
- const uint8_t *pkt, size_t pktlen, size_t gsolen)
+#ifdef HAVE_SENDMMSG
+static CURLcode recvmmsg_packets(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct cf_quic_ctx *qctx,
+ size_t max_pkts,
+ vquic_recv_pkt_cb *recv_cb, void *userp)
{
- struct vquic_blocked_pkt *blkpkt;
+#define MMSG_NUM 64
+ struct iovec msg_iov[MMSG_NUM];
+ struct mmsghdr mmsg[MMSG_NUM];
+ uint8_t bufs[MMSG_NUM][2*1024];
+ struct sockaddr_storage remote_addr[MMSG_NUM];
+ size_t total_nread, pkts;
+ int mcount, i, n;
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(max_pkts > 0);
+ pkts = 0;
+ total_nread = 0;
+ while(pkts < max_pkts) {
+ n = (int)CURLMIN(MMSG_NUM, max_pkts);
+ memset(&mmsg, 0, sizeof(mmsg));
+ for(i = 0; i < n; ++i) {
+ msg_iov[i].iov_base = bufs[i];
+ msg_iov[i].iov_len = (int)sizeof(bufs[i]);
+ mmsg[i].msg_hdr.msg_iov = &msg_iov[i];
+ mmsg[i].msg_hdr.msg_iovlen = 1;
+ mmsg[i].msg_hdr.msg_name = &remote_addr[i];
+ mmsg[i].msg_hdr.msg_namelen = sizeof(remote_addr[i]);
+ }
- (void)cf;
- assert(qctx->num_blocked_pkt <
- sizeof(qctx->blocked_pkt) / sizeof(qctx->blocked_pkt[0]));
+ while((mcount = recvmmsg(qctx->sockfd, mmsg, n, 0, NULL)) == -1 &&
+ SOCKERRNO == EINTR)
+ ;
+ if(mcount == -1) {
+ if(SOCKERRNO == EAGAIN || SOCKERRNO == EWOULDBLOCK) {
+ DEBUGF(LOG_CF(data, cf, "ingress, recvmmsg -> EAGAIN"));
+ goto out;
+ }
+ if(!cf->connected && SOCKERRNO == ECONNREFUSED) {
+ const char *r_ip;
+ int r_port;
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL,
+ &r_ip, &r_port, NULL, NULL);
+ failf(data, "QUIC: connection to %s port %u refused",
+ r_ip, r_port);
+ result = CURLE_COULDNT_CONNECT;
+ goto out;
+ }
+ failf(data, "QUIC: recvmsg() unexpectedly returned %d (errno=%d)",
+ mcount, SOCKERRNO);
+ result = CURLE_RECV_ERROR;
+ goto out;
+ }
- blkpkt = &qctx->blocked_pkt[qctx->num_blocked_pkt++];
+ DEBUGF(LOG_CF(data, cf, "recvmmsg() -> %d packets", mcount));
+ pkts += mcount;
+ for(i = 0; i < mcount; ++i) {
+ total_nread += mmsg[i].msg_len;
+ result = recv_cb(bufs[i], mmsg[i].msg_len,
+ mmsg[i].msg_hdr.msg_name, mmsg[i].msg_hdr.msg_namelen,
+ 0, userp);
+ if(result)
+ goto out;
+ }
+ }
- blkpkt->pkt = pkt;
- blkpkt->pktlen = pktlen;
- blkpkt->gsolen = gsolen;
+out:
+ DEBUGF(LOG_CF(data, cf, "recvd %zu packets with %zd bytes -> %d",
+ pkts, total_nread, result));
+ return result;
}
-CURLcode vquic_send_blocked_pkt(struct Curl_cfilter *cf,
+#elif defined(HAVE_SENDMSG)
+static CURLcode recvmsg_packets(struct Curl_cfilter *cf,
struct Curl_easy *data,
- struct cf_quic_ctx *qctx)
+ struct cf_quic_ctx *qctx,
+ size_t max_pkts,
+ vquic_recv_pkt_cb *recv_cb, void *userp)
{
- size_t sent;
- CURLcode curlcode;
- struct vquic_blocked_pkt *blkpkt;
+ struct iovec msg_iov;
+ struct msghdr msg;
+ uint8_t buf[64*1024];
+ struct sockaddr_storage remote_addr;
+ size_t total_nread, pkts;
+ ssize_t nread;
+ CURLcode result = CURLE_OK;
- (void)cf;
- for(; qctx->num_blocked_pkt_sent < qctx->num_blocked_pkt;
- ++qctx->num_blocked_pkt_sent) {
- blkpkt = &qctx->blocked_pkt[qctx->num_blocked_pkt_sent];
- curlcode = vquic_send_packet(cf, data, qctx, blkpkt->pkt,
- blkpkt->pktlen, blkpkt->gsolen, &sent);
-
- if(curlcode) {
- if(curlcode == CURLE_AGAIN) {
- blkpkt->pkt += sent;
- blkpkt->pktlen -= sent;
+ msg_iov.iov_base = buf;
+ msg_iov.iov_len = (int)sizeof(buf);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = &msg_iov;
+ msg.msg_iovlen = 1;
+
+ DEBUGASSERT(max_pkts > 0);
+ for(pkts = 0, total_nread = 0; pkts < max_pkts;) {
+ msg.msg_name = &remote_addr;
+ msg.msg_namelen = sizeof(remote_addr);
+ while((nread = recvmsg(qctx->sockfd, &msg, 0)) == -1 &&
+ SOCKERRNO == EINTR)
+ ;
+ if(nread == -1) {
+ if(SOCKERRNO == EAGAIN || SOCKERRNO == EWOULDBLOCK) {
+ goto out;
}
- return curlcode;
+ if(!cf->connected && SOCKERRNO == ECONNREFUSED) {
+ const char *r_ip;
+ int r_port;
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL,
+ &r_ip, &r_port, NULL, NULL);
+ failf(data, "QUIC: connection to %s port %u refused",
+ r_ip, r_port);
+ result = CURLE_COULDNT_CONNECT;
+ goto out;
+ }
+ failf(data, "QUIC: recvmsg() unexpectedly returned %zd (errno=%d)",
+ nread, SOCKERRNO);
+ result = CURLE_RECV_ERROR;
+ goto out;
+ }
+
+ ++pkts;
+ total_nread += (size_t)nread;
+ result = recv_cb(buf, (size_t)nread, msg.msg_name, msg.msg_namelen,
+ 0, userp);
+ if(result)
+ goto out;
+ }
+
+out:
+ DEBUGF(LOG_CF(data, cf, "recvd %zu packets with %zd bytes -> %d",
+ pkts, total_nread, result));
+ return result;
+}
+
+#else /* HAVE_SENDMMSG || HAVE_SENDMSG */
+static CURLcode recvfrom_packets(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct cf_quic_ctx *qctx,
+ size_t max_pkts,
+ vquic_recv_pkt_cb *recv_cb, void *userp)
+{
+ uint8_t buf[64*1024];
+ int bufsize = (int)sizeof(buf);
+ struct sockaddr_storage remote_addr;
+ socklen_t remote_addrlen = sizeof(remote_addr);
+ size_t total_nread, pkts;
+ ssize_t nread;
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(max_pkts > 0);
+ for(pkts = 0, total_nread = 0; pkts < max_pkts;) {
+ while((nread = recvfrom(qctx->sockfd, (char *)buf, bufsize, 0,
+ (struct sockaddr *)&remote_addr,
+ &remote_addrlen)) == -1 &&
+ SOCKERRNO == EINTR)
+ ;
+ if(nread == -1) {
+ if(SOCKERRNO == EAGAIN || SOCKERRNO == EWOULDBLOCK) {
+ DEBUGF(LOG_CF(data, cf, "ingress, recvfrom -> EAGAIN"));
+ goto out;
+ }
+ if(!cf->connected && SOCKERRNO == ECONNREFUSED) {
+ const char *r_ip;
+ int r_port;
+ Curl_cf_socket_peek(cf->next, data, NULL, NULL,
+ &r_ip, &r_port, NULL, NULL);
+ failf(data, "QUIC: connection to %s port %u refused",
+ r_ip, r_port);
+ result = CURLE_COULDNT_CONNECT;
+ goto out;
+ }
+ failf(data, "QUIC: recvfrom() unexpectedly returned %zd (errno=%d)",
+ nread, SOCKERRNO);
+ result = CURLE_RECV_ERROR;
+ goto out;
}
+
+ ++pkts;
+ total_nread += (size_t)nread;
+ result = recv_cb(buf, (size_t)nread, &remote_addr, remote_addrlen,
+ 0, userp);
+ if(result)
+ goto out;
}
- qctx->num_blocked_pkt = 0;
- qctx->num_blocked_pkt_sent = 0;
+out:
+ DEBUGF(LOG_CF(data, cf, "recvd %zu packets with %zd bytes -> %d",
+ pkts, total_nread, result));
+ return result;
+}
+#endif /* !HAVE_SENDMMSG && !HAVE_SENDMSG */
- return CURLE_OK;
+CURLcode vquic_recv_packets(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct cf_quic_ctx *qctx,
+ size_t max_pkts,
+ vquic_recv_pkt_cb *recv_cb, void *userp)
+{
+#if defined(HAVE_SENDMMSG)
+ return recvmmsg_packets(cf, data, qctx, max_pkts, recv_cb, userp);
+#elif defined(HAVE_SENDMSG)
+ return recvmsg_packets(cf, data, qctx, max_pkts, recv_cb, userp);
+#else
+ return recvfrom_packets(cf, data, qctx, max_pkts, recv_cb, userp);
+#endif
}
/*
@@ -330,7 +556,7 @@ CURLcode Curl_cf_quic_create(struct Curl_cfilter **pcf,
{
(void)transport;
DEBUGASSERT(transport == TRNSPRT_QUIC);
-#ifdef USE_NGTCP2
+#if defined(USE_NGTCP2) && defined(USE_NGHTTP3)
return Curl_cf_ngtcp2_create(pcf, data, conn, ai);
#elif defined(USE_QUICHE)
return Curl_cf_quiche_create(pcf, data, conn, ai);
@@ -349,7 +575,7 @@ bool Curl_conn_is_http3(const struct Curl_easy *data,
const struct connectdata *conn,
int sockindex)
{
-#ifdef USE_NGTCP2
+#if defined(USE_NGTCP2) && defined(USE_NGHTTP3)
return Curl_conn_is_ngtcp2(data, conn, sockindex);
#elif defined(USE_QUICHE)
return Curl_conn_is_quiche(data, conn, sockindex);
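
Note on the vquic.c changes above: outgoing QUIC packets are now appended back-to-back to one send buffer in which every full packet has the same length (gsolen) and only the final packet may be shorter; vquic_flush() then walks that buffer and emits the packets, either as a GSO burst or one at a time in the no-GSO fallback. The stand-alone sketch below only illustrates how such a buffer is cut back into individual packets; the constants and names are made up and this is not the library's code.

/* Toy illustration: split a buffer of gsolen-sized packets plus a
 * shorter tail back into individual sends (the no-GSO fallback shape). */
#include <stdio.h>
#include <string.h>

#define GSOLEN 4            /* size of every full packet in the buffer */

static void flush(const unsigned char *buf, size_t blen, size_t gsolen)
{
  size_t off = 0;
  while(off < blen) {
    size_t plen = blen - off;
    if(plen > gsolen)
      plen = gsolen;        /* only the final packet may be shorter */
    printf("send packet of %zu bytes: %.*s\n",
           plen, (int)plen, (const char *)buf + off);
    off += plen;
  }
}

int main(void)
{
  /* three full packets plus a 2-byte tail, all in one buffer */
  unsigned char buf[GSOLEN * 3 + 2];
  memcpy(buf, "aaaabbbbccccdd", sizeof(buf));
  flush(buf, sizeof(buf), GSOLEN);
  return 0;
}
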
diff --git a/libs/libcurl/src/vquic/vquic_int.h b/libs/libcurl/src/vquic/vquic_int.h
index 775658306a..6c137fc494 100644
--- a/libs/libcurl/src/vquic/vquic_int.h
+++ b/libs/libcurl/src/vquic/vquic_int.h
@@ -25,47 +25,57 @@
***************************************************************************/
#include "curl_setup.h"
+#include "bufq.h"
#ifdef ENABLE_QUIC
-struct vquic_blocked_pkt {
- const uint8_t *pkt;
- size_t pktlen;
- size_t gsolen;
-};
+#define MAX_PKT_BURST 10
+#define MAX_UDP_PAYLOAD_SIZE 1452
struct cf_quic_ctx {
- curl_socket_t sockfd;
- struct sockaddr_storage local_addr;
- socklen_t local_addrlen;
- struct vquic_blocked_pkt blocked_pkt[2];
- uint8_t *pktbuf;
- /* the number of entries in blocked_pkt */
- size_t num_blocked_pkt;
- size_t num_blocked_pkt_sent;
- /* the packets blocked by sendmsg (EAGAIN or EWOULDBLOCK) */
- size_t pktbuflen;
- /* the number of processed entries in blocked_pkt */
- bool no_gso;
+ curl_socket_t sockfd; /* connected UDP socket */
+ struct sockaddr_storage local_addr; /* address socket is bound to */
+ socklen_t local_addrlen; /* length of local address */
+
+ struct bufq sendbuf; /* buffer for sending one or more packets */
+ size_t gsolen; /* length of individual packets in send buf */
+ size_t split_len; /* if != 0, buffer length after which GSO differs */
+ size_t split_gsolen; /* length of individual packets after split_len */
+ bool no_gso; /* do not use gso on sending */
};
-CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx, size_t pktbuflen);
+CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx);
void vquic_ctx_free(struct cf_quic_ctx *qctx);
-CURLcode vquic_send_packet(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct cf_quic_ctx *qctx,
- const uint8_t *pkt, size_t pktlen, size_t gsolen,
- size_t *psent);
-
void vquic_push_blocked_pkt(struct Curl_cfilter *cf,
struct cf_quic_ctx *qctx,
const uint8_t *pkt, size_t pktlen, size_t gsolen);
-CURLcode vquic_send_blocked_pkt(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct cf_quic_ctx *qctx);
+CURLcode vquic_send_blocked_pkts(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct cf_quic_ctx *qctx);
+
+CURLcode vquic_send(struct Curl_cfilter *cf, struct Curl_easy *data,
+ struct cf_quic_ctx *qctx, size_t gsolen);
+
+CURLcode vquic_send_tail_split(struct Curl_cfilter *cf, struct Curl_easy *data,
+ struct cf_quic_ctx *qctx, size_t gsolen,
+ size_t tail_len, size_t tail_gsolen);
+CURLcode vquic_flush(struct Curl_cfilter *cf, struct Curl_easy *data,
+ struct cf_quic_ctx *qctx);
+
+
+typedef CURLcode vquic_recv_pkt_cb(const unsigned char *pkt, size_t pktlen,
+ struct sockaddr_storage *remote_addr,
+ socklen_t remote_addrlen, int ecn,
+ void *userp);
+
+CURLcode vquic_recv_packets(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct cf_quic_ctx *qctx,
+ size_t max_pkts,
+ vquic_recv_pkt_cb *recv_cb, void *userp);
#endif /* !ENABLE_QUIC */
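
Note on the vquic_int.h declarations above: vquic_recv_packets()/vquic_recv_pkt_cb turn packet reception into a driver-plus-callback scheme, where the driver reads up to max_pkts datagrams and hands each one, together with a user pointer, to the protocol callback. Below is a small self-contained sketch of that shape using a fake packet source; the names and the simplified callback signature are assumptions for illustration only, not curl's API.

/* Toy illustration of the callback-per-datagram receive driver. */
#include <stdio.h>
#include <string.h>

typedef int recv_pkt_cb(const unsigned char *pkt, size_t pktlen, void *userp);

struct counter {
  size_t pkts;
  size_t bytes;
};

/* pretend packet source: three fixed datagrams */
static const char *fake_pkts[] = { "alpha", "bravo", "charlie" };

static int recv_packets(size_t max_pkts, recv_pkt_cb *cb, void *userp)
{
  size_t i;
  for(i = 0; i < max_pkts && i < 3; ++i) {
    int rc = cb((const unsigned char *)fake_pkts[i], strlen(fake_pkts[i]),
                userp);
    if(rc)                   /* a callback error aborts the loop */
      return rc;
  }
  return 0;
}

static int count_pkt(const unsigned char *pkt, size_t pktlen, void *userp)
{
  struct counter *c = userp;
  (void)pkt;
  c->pkts++;
  c->bytes += pktlen;
  return 0;
}

int main(void)
{
  struct counter c = { 0, 0 };
  recv_packets(1000, count_pkt, &c);
  printf("received %zu packets, %zu bytes\n", c.pkts, c.bytes);
  return 0;
}
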
diff --git a/libs/libcurl/src/vssh/libssh.c b/libs/libcurl/src/vssh/libssh.c
index 3fdec459d9..aae6f5aa54 100644
--- a/libs/libcurl/src/vssh/libssh.c
+++ b/libs/libcurl/src/vssh/libssh.c
@@ -576,7 +576,7 @@ cleanup:
rc = SSH_ERROR; \
} while(0)
-#define MOVE_TO_LAST_AUTH do { \
+#define MOVE_TO_PASSWD_AUTH do { \
if(sshc->auth_methods & SSH_AUTH_METHOD_PASSWORD) { \
rc = SSH_OK; \
state(data, SSH_AUTH_PASS_INIT); \
@@ -586,23 +586,23 @@ cleanup:
} \
} while(0)
-#define MOVE_TO_TERTIARY_AUTH do { \
+#define MOVE_TO_KEY_AUTH do { \
if(sshc->auth_methods & SSH_AUTH_METHOD_INTERACTIVE) { \
rc = SSH_OK; \
state(data, SSH_AUTH_KEY_INIT); \
} \
else { \
- MOVE_TO_LAST_AUTH; \
+ MOVE_TO_PASSWD_AUTH; \
} \
} while(0)
-#define MOVE_TO_SECONDARY_AUTH do { \
+#define MOVE_TO_GSSAPI_AUTH do { \
if(sshc->auth_methods & SSH_AUTH_METHOD_GSSAPI_MIC) { \
rc = SSH_OK; \
state(data, SSH_AUTH_GSSAPI); \
} \
else { \
- MOVE_TO_TERTIARY_AUTH; \
+ MOVE_TO_KEY_AUTH; \
} \
} while(0)
@@ -753,6 +753,16 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
}
sshc->auth_methods = ssh_userauth_list(sshc->ssh_session, NULL);
+ if(sshc->auth_methods)
+ infof(data, "SSH authentication methods available: %s%s%s%s",
+ sshc->auth_methods & SSH_AUTH_METHOD_PUBLICKEY ?
+ "public key, ": "",
+ sshc->auth_methods & SSH_AUTH_METHOD_GSSAPI_MIC ?
+ "GSSAPI, " : "",
+ sshc->auth_methods & SSH_AUTH_METHOD_INTERACTIVE ?
+ "keyboard-interactive, " : "",
+ sshc->auth_methods & SSH_AUTH_METHOD_PASSWORD ?
+ "password": "");
if(sshc->auth_methods & SSH_AUTH_METHOD_PUBLICKEY) {
state(data, SSH_AUTH_PKEY_INIT);
infof(data, "Authentication using SSH public key file");
@@ -775,7 +785,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
}
case SSH_AUTH_PKEY_INIT:
if(!(data->set.ssh_auth_types & CURLSSH_AUTH_PUBLICKEY)) {
- MOVE_TO_SECONDARY_AUTH;
+ MOVE_TO_GSSAPI_AUTH;
break;
}
@@ -791,7 +801,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
}
if(rc != SSH_OK) {
- MOVE_TO_SECONDARY_AUTH;
+ MOVE_TO_GSSAPI_AUTH;
break;
}
}
@@ -826,7 +836,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
break;
}
- MOVE_TO_SECONDARY_AUTH;
+ MOVE_TO_GSSAPI_AUTH;
}
break;
case SSH_AUTH_PKEY:
@@ -844,13 +854,13 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
}
else {
infof(data, "Failed public key authentication (rc: %d)", rc);
- MOVE_TO_SECONDARY_AUTH;
+ MOVE_TO_GSSAPI_AUTH;
}
break;
case SSH_AUTH_GSSAPI:
if(!(data->set.ssh_auth_types & CURLSSH_AUTH_GSSAPI)) {
- MOVE_TO_TERTIARY_AUTH;
+ MOVE_TO_KEY_AUTH;
break;
}
@@ -868,7 +878,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
break;
}
- MOVE_TO_TERTIARY_AUTH;
+ MOVE_TO_KEY_AUTH;
break;
case SSH_AUTH_KEY_INIT:
@@ -876,13 +886,12 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
state(data, SSH_AUTH_KEY);
}
else {
- MOVE_TO_LAST_AUTH;
+ MOVE_TO_PASSWD_AUTH;
}
break;
case SSH_AUTH_KEY:
-
- /* Authentication failed. Continue with keyboard-interactive now. */
+ /* keyboard-interactive authentication */
rc = myssh_auth_interactive(conn);
if(rc == SSH_AGAIN) {
break;
@@ -890,13 +899,15 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) if(rc == SSH_OK) {
sshc->authed = TRUE;
infof(data, "completed keyboard interactive authentication");
+ state(data, SSH_AUTH_DONE);
+ }
+ else {
+ MOVE_TO_PASSWD_AUTH;
}
- state(data, SSH_AUTH_DONE);
break;
case SSH_AUTH_PASS_INIT:
if(!(data->set.ssh_auth_types & CURLSSH_AUTH_PASSWORD)) {
- /* Host key authentication is intentionally not implemented */
MOVE_TO_ERROR_STATE(CURLE_LOGIN_DENIED);
break;
}
@@ -1209,7 +1220,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) }
case SSH_SFTP_TRANS_INIT:
- if(data->set.upload)
+ if(data->state.upload)
state(data, SSH_SFTP_UPLOAD_INIT);
else {
if(protop->path[strlen(protop->path)-1] == '/')
@@ -1597,7 +1608,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) MOVE_TO_SFTP_CLOSE_STATE();
break;
}
-
+ sftp_file_set_nonblocking(sshc->sftp_file);
state(data, SSH_SFTP_DOWNLOAD_STAT);
break;
@@ -1802,7 +1813,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) /* Functions from the SCP subsystem cannot handle/return SSH_AGAIN */
ssh_set_blocking(sshc->ssh_session, 1);
- if(data->set.upload) {
+ if(data->state.upload) {
if(data->state.infilesize < 0) {
failf(data, "SCP requires a known file size for upload");
sshc->actualcode = CURLE_UPLOAD_FAILED;
@@ -1907,7 +1918,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block) break;
}
case SSH_SCP_DONE:
- if(data->set.upload)
+ if(data->state.upload)
state(data, SSH_SCP_SEND_EOF);
else
state(data, SSH_SCP_CHANNEL_FREE);
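
The renamed MOVE_TO_* macros above make the libssh fallback order explicit: public key, then GSSAPI, then keyboard-interactive, then password, and the fix ensures a failed keyboard-interactive attempt now falls through to password instead of stopping. A minimal stand-alone sketch of that fallback logic; the flag and function names below are hypothetical and only mirror libssh's SSH_AUTH_METHOD_* bits, this is not the libcurl state machine.

#include <stdio.h>

#define METHOD_PUBLICKEY   (1 << 0)
#define METHOD_GSSAPI_MIC  (1 << 1)
#define METHOD_INTERACTIVE (1 << 2)
#define METHOD_PASSWORD    (1 << 3)

/* Return the next method to try after 'failed', or 0 when none is left. */
static unsigned int next_auth_method(unsigned int available,
                                     unsigned int failed)
{
  static const unsigned int order[] = {
    METHOD_PUBLICKEY, METHOD_GSSAPI_MIC, METHOD_INTERACTIVE, METHOD_PASSWORD
  };
  const size_t n = sizeof(order)/sizeof(order[0]);
  size_t i = 0;
  while(i < n && order[i] != failed)
    i++;                       /* find the method that just failed */
  for(i++; i < n; i++)
    if(available & order[i])
      return order[i];         /* first later method the server offers */
  return 0;                    /* nothing left: login denied */
}

int main(void)
{
  unsigned int avail = METHOD_INTERACTIVE | METHOD_PASSWORD;
  /* keyboard-interactive failed: with the fix we fall back to password */
  printf("next method: %#x\n", next_auth_method(avail, METHOD_INTERACTIVE));
  return 0;
}
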
diff --git a/libs/libcurl/src/vssh/libssh2.c b/libs/libcurl/src/vssh/libssh2.c
index edc1c8f158..115d90de89 100644
--- a/libs/libcurl/src/vssh/libssh2.c
+++ b/libs/libcurl/src/vssh/libssh2.c
@@ -728,11 +728,10 @@ static CURLcode ssh_check_fingerprint(struct Curl_easy *data) */
if((pub_pos != b64_pos) ||
strncmp(fingerprint_b64, pubkey_sha256, pub_pos)) {
- free(fingerprint_b64);
-
failf(data,
"Denied establishing ssh session: mismatch sha256 fingerprint. "
"Remote %s is not equal to %s", fingerprint_b64, pubkey_sha256);
+ free(fingerprint_b64);
state(data, SSH_SESSION_FREE);
sshc->actualcode = CURLE_PEER_FAILED_VERIFICATION;
return sshc->actualcode;
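
The reordering in this hunk is a use-after-free fix: the failf() message still reads fingerprint_b64, so the buffer may only be freed after the message has been produced. A minimal sketch of the use-then-free ordering, with a hypothetical log_error() standing in for failf().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void log_error(const char *msg)
{
  fprintf(stderr, "fingerprint mismatch: %s\n", msg);
}

int main(void)
{
  char *fingerprint_b64 = strdup("OWZhZDc0MmIuLi4=");
  if(!fingerprint_b64)
    return 1;
  log_error(fingerprint_b64);  /* use the buffer first... */
  free(fingerprint_b64);       /* ...then release it */
  /* freeing before logging, as the old code did, reads freed memory */
  return 0;
}
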
@@ -2019,7 +2018,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) }
case SSH_SFTP_TRANS_INIT:
- if(data->set.upload)
+ if(data->state.upload)
state(data, SSH_SFTP_UPLOAD_INIT);
else {
if(sshp->path[strlen(sshp->path)-1] == '/')
@@ -2691,7 +2690,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) break;
}
- if(data->set.upload) {
+ if(data->state.upload) {
if(data->state.infilesize < 0) {
failf(data, "SCP requires a known file size for upload");
sshc->actualcode = CURLE_UPLOAD_FAILED;
@@ -2831,7 +2830,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block) break;
case SSH_SCP_DONE:
- if(data->set.upload)
+ if(data->state.upload)
state(data, SSH_SCP_SEND_EOF);
else
state(data, SSH_SCP_CHANNEL_FREE);
@@ -3274,13 +3273,23 @@ static CURLcode ssh_connect(struct Curl_easy *data, bool *done) my_libssh2_free,
my_libssh2_realloc, data);
#else
- sshc->ssh_session = libssh2_session_init();
+ sshc->ssh_session = libssh2_session_init_ex(NULL, NULL, NULL, data);
#endif
if(!sshc->ssh_session) {
failf(data, "Failure initialising ssh session");
return CURLE_FAILED_INIT;
}
+#ifdef HAVE_LIBSSH2_VERSION
+ /* Set the packet read timeout if the libssh2 version supports it */
+#if LIBSSH2_VERSION_NUM >= 0x010B00
+ if(data->set.server_response_timeout > 0) {
+ libssh2_session_set_read_timeout(sshc->ssh_session,
+ data->set.server_response_timeout / 1000);
+ }
+#endif
+#endif
+
#ifndef CURL_DISABLE_PROXY
if(conn->http_proxy.proxytype == CURLPROXY_HTTPS) {
/*
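
The added block above sets libssh2's packet read timeout from curl's server_response_timeout; curl keeps that value in milliseconds while libssh2_session_set_read_timeout() (available from libssh2 1.11.0, version code 0x010B00) takes whole seconds, hence the division by 1000. A small stand-alone sketch of just that conversion, assuming that a zero result simply leaves the timeout unset.

#include <stdio.h>

static long ms_to_libssh2_seconds(long timeout_ms)
{
  return timeout_ms / 1000;    /* integer division: values below 1000ms -> 0 */
}

int main(void)
{
  long secs;
  secs = ms_to_libssh2_seconds(30000);
  printf("%ld\n", secs);       /* 30 */
  secs = ms_to_libssh2_seconds(500);
  printf("%ld\n", secs);       /* 0: timeout effectively not set */
  return 0;
}
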
diff --git a/libs/libcurl/src/vssh/wolfssh.c b/libs/libcurl/src/vssh/wolfssh.c
index bc642c306f..269199c221 100644
--- a/libs/libcurl/src/vssh/wolfssh.c
+++ b/libs/libcurl/src/vssh/wolfssh.c
@@ -425,7 +425,7 @@ static CURLcode wssh_connect(struct Curl_easy *data, bool *done) state(data, SSH_SFTP_INIT);
return wssh_multi_statemach(data, done);
- error:
+error:
wolfSSH_free(sshc->ssh_session);
wolfSSH_CTX_free(sshc->ctx);
return CURLE_FAILED_INIT;
@@ -557,7 +557,7 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block) }
break;
case SSH_SFTP_TRANS_INIT:
- if(data->set.upload)
+ if(data->state.upload)
state(data, SSH_SFTP_UPLOAD_INIT);
else {
if(sftp_scp->path[strlen(sftp_scp->path)-1] == '/')
diff --git a/libs/libcurl/src/vtls/bearssl.c b/libs/libcurl/src/vtls/bearssl.c
index 4977f162d9..b273e493ea 100644
--- a/libs/libcurl/src/vtls/bearssl.c
+++ b/libs/libcurl/src/vtls/bearssl.c
@@ -849,7 +849,7 @@ static CURLcode bearssl_connect_step3(struct Curl_cfilter *cf, DEBUGASSERT(ssl_connect_3 == connssl->connecting_state);
DEBUGASSERT(backend);
- if(cf->conn->bits.tls_enable_alpn) {
+ if(connssl->alpn) {
const char *proto;
proto = br_ssl_engine_get_selected_protocol(&backend->ctx.eng);
@@ -897,7 +897,7 @@ static ssize_t bearssl_send(struct Curl_cfilter *cf, struct Curl_easy *data, for(;;) {
*err = bearssl_run_until(cf, data, BR_SSL_SENDAPP);
- if (*err != CURLE_OK)
+ if(*err)
return -1;
app = br_ssl_engine_sendapp_buf(&backend->ctx.eng, &applen);
if(!app) {
diff --git a/libs/libcurl/src/vtls/gskit.c b/libs/libcurl/src/vtls/gskit.c
index 84d692da55..79d1851720 100644
--- a/libs/libcurl/src/vtls/gskit.c
+++ b/libs/libcurl/src/vtls/gskit.c
@@ -511,7 +511,8 @@ static void close_async_handshake(struct ssl_connect_data *connssl) BACKEND->iocport = -1;
}
-static int pipe_ssloverssl(struct Curl_cfilter *cf, int directions)
+static int pipe_ssloverssl(struct Curl_cfilter *cf, struct Curl_easy *data,
+ int directions)
{
struct ssl_connect_data *connssl = cf->ctx;
struct Curl_cfilter *cf_ssl_next = Curl_ssl_cf_get_ssl(cf->next);
@@ -594,7 +595,7 @@ static void close_one(struct Curl_cfilter *cf, struct Curl_easy *data) gskit_status(data, gsk_secure_soc_close(&BACKEND->handle),
"gsk_secure_soc_close()", 0);
/* Last chance to drain output. */
- while(pipe_ssloverssl(cf, SOS_WRITE) > 0)
+ while(pipe_ssloverssl(cf, data, SOS_WRITE) > 0)
;
BACKEND->handle = (gsk_handle) NULL;
if(BACKEND->localfd >= 0) {
@@ -621,13 +622,13 @@ static ssize_t gskit_send(struct Curl_cfilter *cf, struct Curl_easy *data, DEBUGASSERT(BACKEND);
- if(pipe_ssloverssl(cf, SOS_WRITE) >= 0) {
+ if(pipe_ssloverssl(cf, data, SOS_WRITE) >= 0) {
cc = gskit_status(data,
gsk_secure_soc_write(BACKEND->handle,
(char *) mem, (int) len, &written),
"gsk_secure_soc_write()", CURLE_SEND_ERROR);
if(cc == CURLE_OK)
- if(pipe_ssloverssl(cf, SOS_WRITE) < 0)
+ if(pipe_ssloverssl(cf, data, SOS_WRITE) < 0)
cc = CURLE_SEND_ERROR;
}
if(cc != CURLE_OK) {
@@ -649,7 +650,7 @@ static ssize_t gskit_recv(struct Curl_cfilter *cf, struct Curl_easy *data, (void)data;
DEBUGASSERT(BACKEND);
- if(pipe_ssloverssl(cf, SOS_READ) >= 0) {
+ if(pipe_ssloverssl(cf, data, SOS_READ) >= 0) {
int buffsize = buffersize > (size_t) INT_MAX? INT_MAX: (int) buffersize;
cc = gskit_status(data, gsk_secure_soc_read(BACKEND->handle,
buf, buffsize, &nread),
@@ -716,7 +717,7 @@ static CURLcode gskit_connect_step1(struct Curl_cfilter *cf, gsk_handle envir;
CURLcode result;
const char * const keyringfile = conn_config->CAfile;
- const char * const keyringpwd = conn_config->key_passwd;
+ const char * const keyringpwd = ssl_config->key_passwd;
const char * const keyringlabel = ssl_config->primary.clientcert;
const long int ssl_version = conn_config->version;
const bool verifypeer = conn_config->verifypeer;
@@ -932,7 +933,7 @@ static CURLcode gskit_connect_step1(struct Curl_cfilter *cf, }
/* Error: rollback. */
- close_one(connssl, data, conn, sockindex);
+ close_one(cf, data);
return result;
}
@@ -1111,7 +1112,7 @@ static CURLcode gskit_connect_common(struct Curl_cfilter *cf, /* Handle handshake pipelining. */
if(!result)
- if(pipe_ssloverssl(cf, SOS_READ | SOS_WRITE) < 0)
+ if(pipe_ssloverssl(cf, data, SOS_READ | SOS_WRITE) < 0)
result = CURLE_SSL_CONNECT_ERROR;
/* Step 2: check if handshake is over. */
@@ -1130,7 +1131,7 @@ static CURLcode gskit_connect_common(struct Curl_cfilter *cf, /* Handle handshake pipelining. */
if(!result)
- if(pipe_ssloverssl(cf, SOS_READ | SOS_WRITE) < 0)
+ if(pipe_ssloverssl(cf, data, SOS_READ | SOS_WRITE) < 0)
result = CURLE_SSL_CONNECT_ERROR;
/* Step 3: gather certificate info, verify host. */
@@ -1138,7 +1139,7 @@ static CURLcode gskit_connect_common(struct Curl_cfilter *cf, result = gskit_connect_step3(cf, data);
if(result)
- close_one(connssl, data, conn, sockindex);
+ close_one(cf, data);
else if(connssl->connecting_state == ssl_connect_done) {
connssl->state = ssl_connection_complete;
connssl->connecting_state = ssl_connect_1;
@@ -1271,7 +1272,7 @@ static int gskit_check_cxn(struct Curl_cfilter *cf, err = 0;
errlen = sizeof(err);
- if(getsockopt(cxn->sock[FIRSTSOCKET], SOL_SOCKET, SO_ERROR,
+ if(getsockopt(Curl_conn_cf_get_socket(cf, data), SOL_SOCKET, SO_ERROR,
(unsigned char *) &err, &errlen) ||
errlen != sizeof(err) || err)
return 0; /* connection has been closed */
diff --git a/libs/libcurl/src/vtls/gtls.c b/libs/libcurl/src/vtls/gtls.c
index 72e2d5324d..c280cedc78 100644
--- a/libs/libcurl/src/vtls/gtls.c
+++ b/libs/libcurl/src/vtls/gtls.c
@@ -1252,7 +1252,7 @@ static CURLcode gtls_verifyserver(struct Curl_cfilter *cf, if(result)
goto out;
- if(cf->conn->bits.tls_enable_alpn) {
+ if(connssl->alpn) {
gnutls_datum_t proto;
int rc;
diff --git a/libs/libcurl/src/vtls/hostcheck.c b/libs/libcurl/src/vtls/hostcheck.c
index 4b4a364377..c2d1fb0ba1 100644
--- a/libs/libcurl/src/vtls/hostcheck.c
+++ b/libs/libcurl/src/vtls/hostcheck.c
@@ -71,7 +71,12 @@ static bool pmatch(const char *hostname, size_t hostlen, * apparent distinction between a name and an IP. We need to detect the use of
* an IP address and not wildcard match on such names.
*
+ * Only match on "*" being used for the leftmost label, not "a*", "a*b" nor
+ * "*b".
+ *
* Return TRUE on a match. FALSE if not.
+ *
+ * @unittest: 1397
*/
static bool hostmatch(const char *hostname,
@@ -79,53 +84,42 @@ static bool hostmatch(const char *hostname, const char *pattern,
size_t patternlen)
{
- const char *pattern_label_end, *wildcard, *hostname_label_end;
- size_t prefixlen, suffixlen;
+ const char *pattern_label_end;
- /* normalize pattern and hostname by stripping off trailing dots */
+ DEBUGASSERT(pattern);
DEBUGASSERT(patternlen);
+ DEBUGASSERT(hostname);
+ DEBUGASSERT(hostlen);
+
+ /* normalize pattern and hostname by stripping off trailing dots */
if(hostname[hostlen-1]=='.')
hostlen--;
if(pattern[patternlen-1]=='.')
patternlen--;
- wildcard = memchr(pattern, '*', patternlen);
- if(!wildcard)
+ if(strncmp(pattern, "*.", 2))
return pmatch(hostname, hostlen, pattern, patternlen);
/* detect IP address as hostname and fail the match if so */
- if(Curl_host_is_ipnum(hostname))
+ else if(Curl_host_is_ipnum(hostname))
return FALSE;
/* We require at least 2 dots in the pattern to avoid too wide wildcard
match. */
pattern_label_end = memchr(pattern, '.', patternlen);
if(!pattern_label_end ||
- (memrchr(pattern, '.', patternlen) == pattern_label_end) ||
- strncasecompare(pattern, "xn--", 4))
+ (memrchr(pattern, '.', patternlen) == pattern_label_end))
return pmatch(hostname, hostlen, pattern, patternlen);
-
- hostname_label_end = memchr(hostname, '.', hostlen);
- if(!hostname_label_end)
- return FALSE;
else {
- size_t skiphost = hostname_label_end - hostname;
- size_t skiplen = pattern_label_end - pattern;
- if(!pmatch(hostname_label_end, hostlen - skiphost,
- pattern_label_end, patternlen - skiplen))
- return FALSE;
+ const char *hostname_label_end = memchr(hostname, '.', hostlen);
+ if(hostname_label_end) {
+ size_t skiphost = hostname_label_end - hostname;
+ size_t skiplen = pattern_label_end - pattern;
+ return pmatch(hostname_label_end, hostlen - skiphost,
+ pattern_label_end, patternlen - skiplen);
+ }
}
- /* The wildcard must match at least one character, so the left-most
- label of the hostname is at least as large as the left-most label
- of the pattern. */
- if(hostname_label_end - hostname < pattern_label_end - pattern)
- return FALSE;
-
- prefixlen = wildcard - pattern;
- suffixlen = pattern_label_end - (wildcard + 1);
- return strncasecompare(pattern, hostname, prefixlen) &&
- strncasecompare(wildcard + 1, hostname_label_end - suffixlen,
- suffixlen) ? TRUE : FALSE;
+ return FALSE;
}
/*
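
The rewritten hostmatch() above narrows wildcard certificate matching: only a pattern whose leftmost label is exactly "*" qualifies (so "a*", "a*b" and "*b" never wildcard-match), the pattern needs at least two dots, and an IP address never wildcard-matches. The following stand-alone sketch restates those rules in simplified form (case handling and IP detection omitted); it is illustrative only and not the libcurl implementation.

#include <stdio.h>
#include <string.h>

static int wildcard_ok(const char *pattern, const char *hostname)
{
  const char *first_dot, *second_dot;
  if(strncmp(pattern, "*.", 2))
    return 0;                            /* only a leading "*." may match */
  first_dot = strchr(pattern, '.');
  second_dot = first_dot ? strchr(first_dot + 1, '.') : NULL;
  if(!second_dot)
    return 0;                            /* "*.com" is too broad */
  if(!strchr(hostname, '.'))
    return 0;                            /* hostname needs its own label */
  /* compare everything after the hostname's first label with the pattern
     after the "*" (case-insensitivity and IP detection omitted here) */
  return strcmp(strchr(hostname, '.'), first_dot) == 0;
}

int main(void)
{
  printf("%d\n", wildcard_ok("*.example.com", "www.example.com"));  /* 1 */
  printf("%d\n", wildcard_ok("*.com", "example.com"));              /* 0 */
  printf("%d\n", wildcard_ok("w*.example.com", "www.example.com")); /* 0 */
  return 0;
}
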
diff --git a/libs/libcurl/src/vtls/mbedtls.c b/libs/libcurl/src/vtls/mbedtls.c
index d68ed174a2..7d084af412 100644
--- a/libs/libcurl/src/vtls/mbedtls.c
+++ b/libs/libcurl/src/vtls/mbedtls.c
@@ -831,7 +831,7 @@ mbed_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data) result = Curl_pin_peer_pubkey(data,
pinnedpubkey,
&pubkey[PUB_DER_MAX_BYTES - size], size);
- pinnedpubkey_error:
+pinnedpubkey_error:
mbedtls_x509_crt_free(p);
free(p);
free(pubkey);
diff --git a/libs/libcurl/src/vtls/nss.c b/libs/libcurl/src/vtls/nss.c
index 2bbf96ab96..505853c61b 100644
--- a/libs/libcurl/src/vtls/nss.c
+++ b/libs/libcurl/src/vtls/nss.c
@@ -852,14 +852,13 @@ static void HandshakeCallback(PRFileDesc *sock, void *arg) struct Curl_cfilter *cf = (struct Curl_cfilter *)arg;
struct ssl_connect_data *connssl = cf->ctx;
struct Curl_easy *data = connssl->backend->data;
- struct connectdata *conn = cf->conn;
unsigned int buflenmax = 50;
unsigned char buf[50];
unsigned int buflen;
SSLNextProtoState state;
DEBUGASSERT(data);
- if(!conn->bits.tls_enable_alpn) {
+ if(!connssl->alpn) {
return;
}
@@ -2096,7 +2095,7 @@ static CURLcode nss_setup_connect(struct Curl_cfilter *cf, #ifdef SSL_ENABLE_ALPN
if(SSL_OptionSet(backend->handle, SSL_ENABLE_ALPN,
- cf->conn->bits.tls_enable_alpn ? PR_TRUE : PR_FALSE)
+ connssl->alpn ? PR_TRUE : PR_FALSE)
!= SECSuccess)
goto error;
#endif
diff --git a/libs/libcurl/src/vtls/openssl.c b/libs/libcurl/src/vtls/openssl.c
index c9cc52a184..470daedb2c 100644
--- a/libs/libcurl/src/vtls/openssl.c
+++ b/libs/libcurl/src/vtls/openssl.c
@@ -207,8 +207,10 @@ #if ((OPENSSL_VERSION_NUMBER >= 0x10101000L) && \
!defined(LIBRESSL_VERSION_NUMBER) && \
!defined(OPENSSL_IS_BORINGSSL))
-#define HAVE_SSL_CTX_SET_CIPHERSUITES
-#define HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH
+ #define HAVE_SSL_CTX_SET_CIPHERSUITES
+ #if !defined(OPENSSL_IS_AWSLC)
+ #define HAVE_SSL_CTX_SET_POST_HANDSHAKE_AUTH
+ #endif
#endif
/*
@@ -227,6 +229,8 @@ #define OSSL_PACKAGE "LibreSSL"
#elif defined(OPENSSL_IS_BORINGSSL)
#define OSSL_PACKAGE "BoringSSL"
+#elif defined(OPENSSL_IS_AWSLC)
+#define OSSL_PACKAGE "AWS-LC"
#else
#define OSSL_PACKAGE "OpenSSL"
#endif
@@ -257,7 +261,8 @@ #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && \
!(defined(LIBRESSL_VERSION_NUMBER) && \
LIBRESSL_VERSION_NUMBER < 0x2070100fL) && \
- !defined(OPENSSL_IS_BORINGSSL)
+ !defined(OPENSSL_IS_BORINGSSL) && \
+ !defined(OPENSSL_IS_AWSLC)
#define HAVE_OPENSSL_VERSION
#endif
@@ -360,8 +365,8 @@ static int asn1_object_dump(ASN1_OBJECT *a, char *buf, size_t len) }
static void X509V3_ext(struct Curl_easy *data,
- int certnum,
- CONST_EXTS STACK_OF(X509_EXTENSION) *exts)
+ int certnum,
+ CONST_EXTS STACK_OF(X509_EXTENSION) *exts)
{
int i;
@@ -393,7 +398,7 @@ static void X509V3_ext(struct Curl_easy *data, }
}
-#ifdef OPENSSL_IS_BORINGSSL
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
typedef size_t numcert_t;
#else
typedef int numcert_t;
@@ -617,7 +622,7 @@ CURLcode Curl_ossl_certchain(struct Curl_easy *data, SSL *ssl) FREE_PKEY_PARAM_BIGNUM(q);
FREE_PKEY_PARAM_BIGNUM(g);
FREE_PKEY_PARAM_BIGNUM(pub_key);
- }
+ }
break;
}
}
@@ -840,9 +845,9 @@ ossl_log_tls12_secret(const SSL *ssl, bool *keylog_done) if(!session || *keylog_done)
return;
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L && \
- !(defined(LIBRESSL_VERSION_NUMBER) && \
- LIBRESSL_VERSION_NUMBER < 0x20700000L)
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L && \
+ !(defined(LIBRESSL_VERSION_NUMBER) && \
+ LIBRESSL_VERSION_NUMBER < 0x20700000L)
/* ssl->s3 is not checked in openssl 1.1.0-pre6, but let's assume that
* we have a valid SSL context if we have a non-NULL session. */
SSL_get_client_random(ssl, client_random, SSL3_RANDOM_SIZE);
@@ -926,7 +931,7 @@ static char *ossl_strerror(unsigned long error, char *buf, size_t size) *buf = '\0';
}
-#ifdef OPENSSL_IS_BORINGSSL
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
ERR_error_string_n((uint32_t)error, buf, size);
#else
ERR_error_string_n(error, buf, size);
@@ -1148,7 +1153,7 @@ SSL_CTX_use_certificate_blob(SSL_CTX *ctx, const struct curl_blob *blob, }
ret = SSL_CTX_use_certificate(ctx, x);
- end:
+end:
X509_free(x);
BIO_free(in);
return ret;
@@ -1156,7 +1161,7 @@ SSL_CTX_use_certificate_blob(SSL_CTX *ctx, const struct curl_blob *blob, static int
SSL_CTX_use_PrivateKey_blob(SSL_CTX *ctx, const struct curl_blob *blob,
- int type, const char *key_passwd)
+ int type, const char *key_passwd)
{
int ret = 0;
EVP_PKEY *pkey = NULL;
@@ -1179,7 +1184,7 @@ SSL_CTX_use_PrivateKey_blob(SSL_CTX *ctx, const struct curl_blob *blob, }
ret = SSL_CTX_use_PrivateKey(ctx, pkey);
EVP_PKEY_free(pkey);
- end:
+end:
BIO_free(in);
return ret;
}
@@ -1190,8 +1195,8 @@ SSL_CTX_use_certificate_chain_blob(SSL_CTX *ctx, const struct curl_blob *blob, {
/* SSL_CTX_add1_chain_cert introduced in OpenSSL 1.0.2 */
#if (OPENSSL_VERSION_NUMBER >= 0x1000200fL) && /* OpenSSL 1.0.2 or later */ \
- !(defined(LIBRESSL_VERSION_NUMBER) && \
- (LIBRESSL_VERSION_NUMBER < 0x2090100fL)) /* LibreSSL 2.9.1 or later */
+ !(defined(LIBRESSL_VERSION_NUMBER) && \
+ (LIBRESSL_VERSION_NUMBER < 0x2090100fL)) /* LibreSSL 2.9.1 or later */
int ret = 0;
X509 *x = NULL;
void *passwd_callback_userdata = (void *)key_passwd;
@@ -1242,7 +1247,7 @@ SSL_CTX_use_certificate_chain_blob(SSL_CTX *ctx, const struct curl_blob *blob, ret = 0;
}
- end:
+end:
X509_free(x);
BIO_free(in);
return ret;
@@ -1310,7 +1315,7 @@ int cert_stuff(struct Curl_easy *data, cert_use_result = cert_blob ?
SSL_CTX_use_certificate_blob(ctx, cert_blob,
file_type, key_passwd) :
- SSL_CTX_use_certificate_file(ctx, cert_file, file_type);
+ SSL_CTX_use_certificate_file(ctx, cert_file, file_type);
if(cert_use_result != 1) {
failf(data,
"could not load ASN1 client certificate from %s, " OSSL_PACKAGE
@@ -1324,67 +1329,67 @@ int cert_stuff(struct Curl_easy *data, break;
case SSL_FILETYPE_ENGINE:
#if defined(USE_OPENSSL_ENGINE) && defined(ENGINE_CTRL_GET_CMD_FROM_NAME)
- {
- /* Implicitly use pkcs11 engine if none was provided and the
- * cert_file is a PKCS#11 URI */
- if(!data->state.engine) {
- if(is_pkcs11_uri(cert_file)) {
- if(ossl_set_engine(data, "pkcs11") != CURLE_OK) {
- return 0;
- }
+ {
+ /* Implicitly use pkcs11 engine if none was provided and the
+ * cert_file is a PKCS#11 URI */
+ if(!data->state.engine) {
+ if(is_pkcs11_uri(cert_file)) {
+ if(ossl_set_engine(data, "pkcs11") != CURLE_OK) {
+ return 0;
}
}
+ }
- if(data->state.engine) {
- const char *cmd_name = "LOAD_CERT_CTRL";
- struct {
- const char *cert_id;
- X509 *cert;
- } params;
-
- params.cert_id = cert_file;
- params.cert = NULL;
+ if(data->state.engine) {
+ const char *cmd_name = "LOAD_CERT_CTRL";
+ struct {
+ const char *cert_id;
+ X509 *cert;
+ } params;
- /* Does the engine supports LOAD_CERT_CTRL ? */
- if(!ENGINE_ctrl(data->state.engine, ENGINE_CTRL_GET_CMD_FROM_NAME,
- 0, (void *)cmd_name, NULL)) {
- failf(data, "ssl engine does not support loading certificates");
- return 0;
- }
+ params.cert_id = cert_file;
+ params.cert = NULL;
- /* Load the certificate from the engine */
- if(!ENGINE_ctrl_cmd(data->state.engine, cmd_name,
- 0, ¶ms, NULL, 1)) {
- failf(data, "ssl engine cannot load client cert with id"
- " '%s' [%s]", cert_file,
- ossl_strerror(ERR_get_error(), error_buffer,
- sizeof(error_buffer)));
- return 0;
- }
+ /* Does the engine supports LOAD_CERT_CTRL ? */
+ if(!ENGINE_ctrl(data->state.engine, ENGINE_CTRL_GET_CMD_FROM_NAME,
+ 0, (void *)cmd_name, NULL)) {
+ failf(data, "ssl engine does not support loading certificates");
+ return 0;
+ }
- if(!params.cert) {
- failf(data, "ssl engine didn't initialized the certificate "
- "properly.");
- return 0;
- }
+ /* Load the certificate from the engine */
+ if(!ENGINE_ctrl_cmd(data->state.engine, cmd_name,
+ 0, ¶ms, NULL, 1)) {
+ failf(data, "ssl engine cannot load client cert with id"
+ " '%s' [%s]", cert_file,
+ ossl_strerror(ERR_get_error(), error_buffer,
+ sizeof(error_buffer)));
+ return 0;
+ }
- if(SSL_CTX_use_certificate(ctx, params.cert) != 1) {
- failf(data, "unable to set client certificate [%s]",
- ossl_strerror(ERR_get_error(), error_buffer,
- sizeof(error_buffer)));
- return 0;
- }
- X509_free(params.cert); /* we don't need the handle any more... */
+ if(!params.cert) {
+ failf(data, "ssl engine didn't initialized the certificate "
+ "properly.");
+ return 0;
}
- else {
- failf(data, "crypto engine not set, can't load certificate");
+
+ if(SSL_CTX_use_certificate(ctx, params.cert) != 1) {
+ failf(data, "unable to set client certificate [%s]",
+ ossl_strerror(ERR_get_error(), error_buffer,
+ sizeof(error_buffer)));
return 0;
}
+ X509_free(params.cert); /* we don't need the handle any more... */
}
- break;
+ else {
+ failf(data, "crypto engine not set, can't load certificate");
+ return 0;
+ }
+ }
+ break;
#else
- failf(data, "file type ENG for certificate not implemented");
- return 0;
+ failf(data, "file type ENG for certificate not implemented");
+ return 0;
#endif
case SSL_FILETYPE_PKCS12:
@@ -1491,7 +1496,7 @@ int cert_stuff(struct Curl_easy *data, }
cert_done = 1;
- fail:
+fail:
EVP_PKEY_free(pri);
X509_free(x509);
sk_X509_pop_free(ca, X509_free);
@@ -1519,7 +1524,7 @@ int cert_stuff(struct Curl_easy *data, case SSL_FILETYPE_ASN1:
cert_use_result = key_blob ?
SSL_CTX_use_PrivateKey_blob(ctx, key_blob, file_type, key_passwd) :
- SSL_CTX_use_PrivateKey_file(ctx, key_file, file_type);
+ SSL_CTX_use_PrivateKey_file(ctx, key_file, file_type);
if(cert_use_result != 1) {
failf(data, "unable to set private key file: '%s' type %s",
key_file?key_file:"(memory blob)", key_type?key_type:"PEM");
@@ -1528,57 +1533,57 @@ int cert_stuff(struct Curl_easy *data, break;
case SSL_FILETYPE_ENGINE:
#ifdef USE_OPENSSL_ENGINE
- { /* XXXX still needs some work */
- EVP_PKEY *priv_key = NULL;
-
- /* Implicitly use pkcs11 engine if none was provided and the
- * key_file is a PKCS#11 URI */
- if(!data->state.engine) {
- if(is_pkcs11_uri(key_file)) {
- if(ossl_set_engine(data, "pkcs11") != CURLE_OK) {
- return 0;
- }
- }
- }
+ {
+ EVP_PKEY *priv_key = NULL;
- if(data->state.engine) {
- UI_METHOD *ui_method =
- UI_create_method((char *)"curl user interface");
- if(!ui_method) {
- failf(data, "unable do create " OSSL_PACKAGE
- " user-interface method");
- return 0;
- }
- UI_method_set_opener(ui_method, UI_method_get_opener(UI_OpenSSL()));
- UI_method_set_closer(ui_method, UI_method_get_closer(UI_OpenSSL()));
- UI_method_set_reader(ui_method, ssl_ui_reader);
- UI_method_set_writer(ui_method, ssl_ui_writer);
- /* the typecast below was added to please mingw32 */
- priv_key = (EVP_PKEY *)
- ENGINE_load_private_key(data->state.engine, key_file,
- ui_method,
- key_passwd);
- UI_destroy_method(ui_method);
- if(!priv_key) {
- failf(data, "failed to load private key from crypto engine");
- return 0;
- }
- if(SSL_CTX_use_PrivateKey(ctx, priv_key) != 1) {
- failf(data, "unable to set private key");
- EVP_PKEY_free(priv_key);
+ /* Implicitly use pkcs11 engine if none was provided and the
+ * key_file is a PKCS#11 URI */
+ if(!data->state.engine) {
+ if(is_pkcs11_uri(key_file)) {
+ if(ossl_set_engine(data, "pkcs11") != CURLE_OK) {
return 0;
}
- EVP_PKEY_free(priv_key); /* we don't need the handle any more... */
}
- else {
- failf(data, "crypto engine not set, can't load private key");
+ }
+
+ if(data->state.engine) {
+ UI_METHOD *ui_method =
+ UI_create_method((char *)"curl user interface");
+ if(!ui_method) {
+ failf(data, "unable do create " OSSL_PACKAGE
+ " user-interface method");
+ return 0;
+ }
+ UI_method_set_opener(ui_method, UI_method_get_opener(UI_OpenSSL()));
+ UI_method_set_closer(ui_method, UI_method_get_closer(UI_OpenSSL()));
+ UI_method_set_reader(ui_method, ssl_ui_reader);
+ UI_method_set_writer(ui_method, ssl_ui_writer);
+ /* the typecast below was added to please mingw32 */
+ priv_key = (EVP_PKEY *)
+ ENGINE_load_private_key(data->state.engine, key_file,
+ ui_method,
+ key_passwd);
+ UI_destroy_method(ui_method);
+ if(!priv_key) {
+ failf(data, "failed to load private key from crypto engine");
+ return 0;
+ }
+ if(SSL_CTX_use_PrivateKey(ctx, priv_key) != 1) {
+ failf(data, "unable to set private key");
+ EVP_PKEY_free(priv_key);
return 0;
}
+ EVP_PKEY_free(priv_key); /* we don't need the handle any more... */
}
- break;
+ else {
+ failf(data, "crypto engine not set, can't load private key");
+ return 0;
+ }
+ }
+ break;
#else
- failf(data, "file type ENG for private key not supported");
- return 0;
+ failf(data, "file type ENG for private key not supported");
+ return 0;
#endif
case SSL_FILETYPE_PKCS12:
if(!cert_done) {
@@ -1607,8 +1612,8 @@ int cert_stuff(struct Curl_easy *data, EVP_PKEY_free(pktmp);
}
-#if !defined(OPENSSL_NO_RSA) && !defined(OPENSSL_IS_BORINGSSL) && \
- !defined(OPENSSL_NO_DEPRECATED_3_0)
+#if !defined(OPENSSL_NO_RSA) && !defined(OPENSSL_IS_BORINGSSL) && \
+ !defined(OPENSSL_NO_DEPRECATED_3_0)
{
/* If RSA is used, don't check the private key if its flags indicate
* it doesn't support it. */
@@ -1746,8 +1751,8 @@ static int ossl_init(void) /* Global cleanup */
static void ossl_cleanup(void)
{
-#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && \
- !defined(LIBRESSL_VERSION_NUMBER)
+#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && \
+ !defined(LIBRESSL_VERSION_NUMBER)
/* OpenSSL 1.1 deprecates all these cleanup functions and
turns them into no-ops in OpenSSL 1.0 compatibility mode */
#else
@@ -1930,7 +1935,7 @@ static int ossl_shutdown(struct Curl_cfilter *cf, we do not send one. Let's hope other servers do the same... */
if(data->set.ftp_ccc == CURLFTPSSL_CCC_ACTIVE)
- (void)SSL_shutdown(backend->handle);
+ (void)SSL_shutdown(backend->handle);
#endif
if(backend->handle) {
@@ -2031,7 +2036,7 @@ static void ossl_close_all(struct Curl_easy *data) #else
(void)data;
#endif
-#if !defined(HAVE_ERR_REMOVE_THREAD_STATE_DEPRECATED) && \
+#if !defined(HAVE_ERR_REMOVE_THREAD_STATE_DEPRECATED) && \
defined(HAVE_ERR_REMOVE_THREAD_STATE)
/* OpenSSL 1.0.1 and 1.0.2 build an error queue that is stored per-thread
so we need to clean it here in case the thread will be killed. All OpenSSL
@@ -2059,7 +2064,7 @@ static bool subj_alt_hostcheck(struct Curl_easy *data, #endif
if(Curl_cert_hostcheck(match_pattern, matchlen, hostname, hostlen)) {
infof(data, " subjectAltName: host \"%s\" matched cert's \"%s\"",
- dispname, match_pattern);
+ dispname, match_pattern);
return TRUE;
}
return FALSE;
@@ -2147,7 +2152,7 @@ ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn, altnames = X509_get_ext_d2i(server_cert, NID_subject_alt_name, NULL, NULL);
if(altnames) {
-#ifdef OPENSSL_IS_BORINGSSL
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
size_t numalts;
size_t i;
#else
@@ -2303,7 +2308,7 @@ ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn, }
#if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \
- !defined(OPENSSL_NO_OCSP)
+ !defined(OPENSSL_NO_OCSP)
static CURLcode verifystatus(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
@@ -2477,81 +2482,81 @@ static const char *ssl_msg_type(int ssl_ver, int msg) #ifdef SSL2_VERSION_MAJOR
if(ssl_ver == SSL2_VERSION_MAJOR) {
switch(msg) {
- case SSL2_MT_ERROR:
- return "Error";
- case SSL2_MT_CLIENT_HELLO:
- return "Client hello";
- case SSL2_MT_CLIENT_MASTER_KEY:
- return "Client key";
- case SSL2_MT_CLIENT_FINISHED:
- return "Client finished";
- case SSL2_MT_SERVER_HELLO:
- return "Server hello";
- case SSL2_MT_SERVER_VERIFY:
- return "Server verify";
- case SSL2_MT_SERVER_FINISHED:
- return "Server finished";
- case SSL2_MT_REQUEST_CERTIFICATE:
- return "Request CERT";
- case SSL2_MT_CLIENT_CERTIFICATE:
- return "Client CERT";
+ case SSL2_MT_ERROR:
+ return "Error";
+ case SSL2_MT_CLIENT_HELLO:
+ return "Client hello";
+ case SSL2_MT_CLIENT_MASTER_KEY:
+ return "Client key";
+ case SSL2_MT_CLIENT_FINISHED:
+ return "Client finished";
+ case SSL2_MT_SERVER_HELLO:
+ return "Server hello";
+ case SSL2_MT_SERVER_VERIFY:
+ return "Server verify";
+ case SSL2_MT_SERVER_FINISHED:
+ return "Server finished";
+ case SSL2_MT_REQUEST_CERTIFICATE:
+ return "Request CERT";
+ case SSL2_MT_CLIENT_CERTIFICATE:
+ return "Client CERT";
}
}
else
#endif
if(ssl_ver == SSL3_VERSION_MAJOR) {
switch(msg) {
- case SSL3_MT_HELLO_REQUEST:
- return "Hello request";
- case SSL3_MT_CLIENT_HELLO:
- return "Client hello";
- case SSL3_MT_SERVER_HELLO:
- return "Server hello";
+ case SSL3_MT_HELLO_REQUEST:
+ return "Hello request";
+ case SSL3_MT_CLIENT_HELLO:
+ return "Client hello";
+ case SSL3_MT_SERVER_HELLO:
+ return "Server hello";
#ifdef SSL3_MT_NEWSESSION_TICKET
- case SSL3_MT_NEWSESSION_TICKET:
- return "Newsession Ticket";
-#endif
- case SSL3_MT_CERTIFICATE:
- return "Certificate";
- case SSL3_MT_SERVER_KEY_EXCHANGE:
- return "Server key exchange";
- case SSL3_MT_CLIENT_KEY_EXCHANGE:
- return "Client key exchange";
- case SSL3_MT_CERTIFICATE_REQUEST:
- return "Request CERT";
- case SSL3_MT_SERVER_DONE:
- return "Server finished";
- case SSL3_MT_CERTIFICATE_VERIFY:
- return "CERT verify";
- case SSL3_MT_FINISHED:
- return "Finished";
+ case SSL3_MT_NEWSESSION_TICKET:
+ return "Newsession Ticket";
+#endif
+ case SSL3_MT_CERTIFICATE:
+ return "Certificate";
+ case SSL3_MT_SERVER_KEY_EXCHANGE:
+ return "Server key exchange";
+ case SSL3_MT_CLIENT_KEY_EXCHANGE:
+ return "Client key exchange";
+ case SSL3_MT_CERTIFICATE_REQUEST:
+ return "Request CERT";
+ case SSL3_MT_SERVER_DONE:
+ return "Server finished";
+ case SSL3_MT_CERTIFICATE_VERIFY:
+ return "CERT verify";
+ case SSL3_MT_FINISHED:
+ return "Finished";
#ifdef SSL3_MT_CERTIFICATE_STATUS
- case SSL3_MT_CERTIFICATE_STATUS:
- return "Certificate Status";
+ case SSL3_MT_CERTIFICATE_STATUS:
+ return "Certificate Status";
#endif
#ifdef SSL3_MT_ENCRYPTED_EXTENSIONS
- case SSL3_MT_ENCRYPTED_EXTENSIONS:
- return "Encrypted Extensions";
+ case SSL3_MT_ENCRYPTED_EXTENSIONS:
+ return "Encrypted Extensions";
#endif
#ifdef SSL3_MT_SUPPLEMENTAL_DATA
- case SSL3_MT_SUPPLEMENTAL_DATA:
- return "Supplemental data";
+ case SSL3_MT_SUPPLEMENTAL_DATA:
+ return "Supplemental data";
#endif
#ifdef SSL3_MT_END_OF_EARLY_DATA
- case SSL3_MT_END_OF_EARLY_DATA:
- return "End of early data";
+ case SSL3_MT_END_OF_EARLY_DATA:
+ return "End of early data";
#endif
#ifdef SSL3_MT_KEY_UPDATE
- case SSL3_MT_KEY_UPDATE:
- return "Key update";
+ case SSL3_MT_KEY_UPDATE:
+ return "Key update";
#endif
#ifdef SSL3_MT_NEXT_PROTO
- case SSL3_MT_NEXT_PROTO:
- return "Next protocol";
+ case SSL3_MT_NEXT_PROTO:
+ return "Next protocol";
#endif
#ifdef SSL3_MT_MESSAGE_HASH
- case SSL3_MT_MESSAGE_HASH:
- return "Message hash";
+ case SSL3_MT_MESSAGE_HASH:
+ return "Message hash";
#endif
}
}
@@ -2596,7 +2601,7 @@ static void ossl_trace(int direction, int ssl_ver, int content_type, if(!data || !data->set.fdebug || (direction && direction != 1))
return;
- switch(ssl_ver) {
+ switch(ssl_ver) {
#ifdef SSL2_VERSION /* removed in recent versions */
case SSL2_VERSION:
verstr = "SSLv2";
@@ -2701,8 +2706,8 @@ static void ossl_trace(int direction, int ssl_ver, int content_type, /* Check for OpenSSL 1.0.2 which has ALPN support. */
#undef HAS_ALPN
-#if OPENSSL_VERSION_NUMBER >= 0x10002000L \
- && !defined(OPENSSL_NO_TLSEXT)
+#if OPENSSL_VERSION_NUMBER >= 0x10002000L \
+ && !defined(OPENSSL_NO_TLSEXT)
# define HAS_ALPN 1
#endif
@@ -2716,7 +2721,9 @@ set_ssl_version_min_max(struct Curl_cfilter *cf, SSL_CTX *ctx) long curl_ssl_version_max;
/* convert curl min SSL version option to OpenSSL constant */
-#if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER)
+#if (defined(OPENSSL_IS_BORINGSSL) || \
+ defined(OPENSSL_IS_AWSLC) || \
+ defined(LIBRESSL_VERSION_NUMBER))
uint16_t ossl_ssl_version_min = 0;
uint16_t ossl_ssl_version_max = 0;
#else
@@ -2724,22 +2731,22 @@ set_ssl_version_min_max(struct Curl_cfilter *cf, SSL_CTX *ctx) long ossl_ssl_version_max = 0;
#endif
switch(curl_ssl_version_min) {
- case CURL_SSLVERSION_TLSv1: /* TLS 1.x */
- case CURL_SSLVERSION_TLSv1_0:
- ossl_ssl_version_min = TLS1_VERSION;
- break;
- case CURL_SSLVERSION_TLSv1_1:
- ossl_ssl_version_min = TLS1_1_VERSION;
- break;
- case CURL_SSLVERSION_TLSv1_2:
- ossl_ssl_version_min = TLS1_2_VERSION;
- break;
- case CURL_SSLVERSION_TLSv1_3:
+ case CURL_SSLVERSION_TLSv1: /* TLS 1.x */
+ case CURL_SSLVERSION_TLSv1_0:
+ ossl_ssl_version_min = TLS1_VERSION;
+ break;
+ case CURL_SSLVERSION_TLSv1_1:
+ ossl_ssl_version_min = TLS1_1_VERSION;
+ break;
+ case CURL_SSLVERSION_TLSv1_2:
+ ossl_ssl_version_min = TLS1_2_VERSION;
+ break;
+ case CURL_SSLVERSION_TLSv1_3:
#ifdef TLS1_3_VERSION
- ossl_ssl_version_min = TLS1_3_VERSION;
- break;
+ ossl_ssl_version_min = TLS1_3_VERSION;
+ break;
#else
- return CURLE_NOT_BUILT_IN;
+ return CURLE_NOT_BUILT_IN;
#endif
}
@@ -2760,29 +2767,29 @@ set_ssl_version_min_max(struct Curl_cfilter *cf, SSL_CTX *ctx) /* convert curl max SSL version option to OpenSSL constant */
switch(curl_ssl_version_max) {
- case CURL_SSLVERSION_MAX_TLSv1_0:
- ossl_ssl_version_max = TLS1_VERSION;
- break;
- case CURL_SSLVERSION_MAX_TLSv1_1:
- ossl_ssl_version_max = TLS1_1_VERSION;
- break;
- case CURL_SSLVERSION_MAX_TLSv1_2:
- ossl_ssl_version_max = TLS1_2_VERSION;
- break;
+ case CURL_SSLVERSION_MAX_TLSv1_0:
+ ossl_ssl_version_max = TLS1_VERSION;
+ break;
+ case CURL_SSLVERSION_MAX_TLSv1_1:
+ ossl_ssl_version_max = TLS1_1_VERSION;
+ break;
+ case CURL_SSLVERSION_MAX_TLSv1_2:
+ ossl_ssl_version_max = TLS1_2_VERSION;
+ break;
#ifdef TLS1_3_VERSION
- case CURL_SSLVERSION_MAX_TLSv1_3:
- ossl_ssl_version_max = TLS1_3_VERSION;
- break;
+ case CURL_SSLVERSION_MAX_TLSv1_3:
+ ossl_ssl_version_max = TLS1_3_VERSION;
+ break;
#endif
- case CURL_SSLVERSION_MAX_NONE: /* none selected */
- case CURL_SSLVERSION_MAX_DEFAULT: /* max selected */
- default:
- /* SSL_CTX_set_max_proto_version states that:
- setting the maximum to 0 will enable
- protocol versions up to the highest version
- supported by the library */
- ossl_ssl_version_max = 0;
- break;
+ case CURL_SSLVERSION_MAX_NONE: /* none selected */
+ case CURL_SSLVERSION_MAX_DEFAULT: /* max selected */
+ default:
+ /* SSL_CTX_set_max_proto_version states that:
+ setting the maximum to 0 will enable
+ protocol versions up to the highest version
+ supported by the library */
+ ossl_ssl_version_max = 0;
+ break;
}
if(!SSL_CTX_set_max_proto_version(ctx, ossl_ssl_version_max)) {
@@ -2793,7 +2800,7 @@ set_ssl_version_min_max(struct Curl_cfilter *cf, SSL_CTX *ctx) }
#endif
-#ifdef OPENSSL_IS_BORINGSSL
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
typedef uint32_t ctx_option_t;
#elif OPENSSL_VERSION_NUMBER >= 0x30000000L
typedef uint64_t ctx_option_t;
@@ -2814,63 +2821,63 @@ set_ssl_version_min_max_legacy(ctx_option_t *ctx_options, (void) data; /* In case it's unused. */
switch(ssl_version) {
- case CURL_SSLVERSION_TLSv1_3:
+ case CURL_SSLVERSION_TLSv1_3:
#ifdef TLS1_3_VERSION
- {
- struct ssl_connect_data *connssl = cf->ctx;
- DEBUGASSERT(connssl->backend);
- SSL_CTX_set_max_proto_version(connssl->backend->ctx, TLS1_3_VERSION);
- *ctx_options |= SSL_OP_NO_TLSv1_2;
- }
+ {
+ struct ssl_connect_data *connssl = cf->ctx;
+ DEBUGASSERT(connssl->backend);
+ SSL_CTX_set_max_proto_version(connssl->backend->ctx, TLS1_3_VERSION);
+ *ctx_options |= SSL_OP_NO_TLSv1_2;
+ }
#else
- (void)ctx_options;
- failf(data, OSSL_PACKAGE " was built without TLS 1.3 support");
- return CURLE_NOT_BUILT_IN;
+ (void)ctx_options;
+ failf(data, OSSL_PACKAGE " was built without TLS 1.3 support");
+ return CURLE_NOT_BUILT_IN;
#endif
- /* FALLTHROUGH */
- case CURL_SSLVERSION_TLSv1_2:
+ /* FALLTHROUGH */
+ case CURL_SSLVERSION_TLSv1_2:
#if OPENSSL_VERSION_NUMBER >= 0x1000100FL
- *ctx_options |= SSL_OP_NO_TLSv1_1;
+ *ctx_options |= SSL_OP_NO_TLSv1_1;
#else
- failf(data, OSSL_PACKAGE " was built without TLS 1.2 support");
- return CURLE_NOT_BUILT_IN;
+ failf(data, OSSL_PACKAGE " was built without TLS 1.2 support");
+ return CURLE_NOT_BUILT_IN;
#endif
- /* FALLTHROUGH */
- case CURL_SSLVERSION_TLSv1_1:
+ /* FALLTHROUGH */
+ case CURL_SSLVERSION_TLSv1_1:
#if OPENSSL_VERSION_NUMBER >= 0x1000100FL
- *ctx_options |= SSL_OP_NO_TLSv1;
+ *ctx_options |= SSL_OP_NO_TLSv1;
#else
- failf(data, OSSL_PACKAGE " was built without TLS 1.1 support");
- return CURLE_NOT_BUILT_IN;
+ failf(data, OSSL_PACKAGE " was built without TLS 1.1 support");
+ return CURLE_NOT_BUILT_IN;
#endif
- /* FALLTHROUGH */
- case CURL_SSLVERSION_TLSv1_0:
- case CURL_SSLVERSION_TLSv1:
- break;
+ /* FALLTHROUGH */
+ case CURL_SSLVERSION_TLSv1_0:
+ case CURL_SSLVERSION_TLSv1:
+ break;
}
switch(ssl_version_max) {
- case CURL_SSLVERSION_MAX_TLSv1_0:
+ case CURL_SSLVERSION_MAX_TLSv1_0:
#if OPENSSL_VERSION_NUMBER >= 0x1000100FL
- *ctx_options |= SSL_OP_NO_TLSv1_1;
+ *ctx_options |= SSL_OP_NO_TLSv1_1;
#endif
- /* FALLTHROUGH */
- case CURL_SSLVERSION_MAX_TLSv1_1:
+ /* FALLTHROUGH */
+ case CURL_SSLVERSION_MAX_TLSv1_1:
#if OPENSSL_VERSION_NUMBER >= 0x1000100FL
- *ctx_options |= SSL_OP_NO_TLSv1_2;
+ *ctx_options |= SSL_OP_NO_TLSv1_2;
#endif
- /* FALLTHROUGH */
- case CURL_SSLVERSION_MAX_TLSv1_2:
+ /* FALLTHROUGH */
+ case CURL_SSLVERSION_MAX_TLSv1_2:
#ifdef TLS1_3_VERSION
- *ctx_options |= SSL_OP_NO_TLSv1_3;
+ *ctx_options |= SSL_OP_NO_TLSv1_3;
#endif
- break;
- case CURL_SSLVERSION_MAX_TLSv1_3:
+ break;
+ case CURL_SSLVERSION_MAX_TLSv1_3:
#ifdef TLS1_3_VERSION
- break;
+ break;
#else
- failf(data, OSSL_PACKAGE " was built without TLS 1.3 support");
- return CURLE_NOT_BUILT_IN;
+ failf(data, OSSL_PACKAGE " was built without TLS 1.3 support");
+ return CURLE_NOT_BUILT_IN;
#endif
}
return CURLE_OK;
@@ -3363,11 +3370,11 @@ CURLcode Curl_ssl_setup_x509_store(struct Curl_cfilter *cf, or no source is provided and we are falling back to openssl's built-in
default. */
cache_criteria_met = (data->set.general_ssl.ca_cache_timeout != 0) &&
- conn_config->verifypeer &&
- !conn_config->CApath &&
- !conn_config->ca_info_blob &&
- !ssl_config->primary.CRLfile &&
- !ssl_config->native_ca_store;
+ conn_config->verifypeer &&
+ !conn_config->CApath &&
+ !conn_config->ca_info_blob &&
+ !ssl_config->primary.CRLfile &&
+ !ssl_config->native_ca_store;
cached_store = get_cached_x509_store(cf, data);
if(cached_store && cache_criteria_met && X509_STORE_up_ref(cached_store)) {
@@ -3549,34 +3556,34 @@ static CURLcode ossl_connect_step1(struct Curl_cfilter *cf, #endif
switch(ssl_version) {
- case CURL_SSLVERSION_SSLv2:
- case CURL_SSLVERSION_SSLv3:
- return CURLE_NOT_BUILT_IN;
+ case CURL_SSLVERSION_SSLv2:
+ case CURL_SSLVERSION_SSLv3:
+ return CURLE_NOT_BUILT_IN;
/* "--tlsv<x.y>" options mean TLS >= version <x.y> */
- case CURL_SSLVERSION_DEFAULT:
- case CURL_SSLVERSION_TLSv1: /* TLS >= version 1.0 */
- case CURL_SSLVERSION_TLSv1_0: /* TLS >= version 1.0 */
- case CURL_SSLVERSION_TLSv1_1: /* TLS >= version 1.1 */
- case CURL_SSLVERSION_TLSv1_2: /* TLS >= version 1.2 */
- case CURL_SSLVERSION_TLSv1_3: /* TLS >= version 1.3 */
- /* asking for any TLS version as the minimum, means no SSL versions
- allowed */
- ctx_options |= SSL_OP_NO_SSLv2;
- ctx_options |= SSL_OP_NO_SSLv3;
+ case CURL_SSLVERSION_DEFAULT:
+ case CURL_SSLVERSION_TLSv1: /* TLS >= version 1.0 */
+ case CURL_SSLVERSION_TLSv1_0: /* TLS >= version 1.0 */
+ case CURL_SSLVERSION_TLSv1_1: /* TLS >= version 1.1 */
+ case CURL_SSLVERSION_TLSv1_2: /* TLS >= version 1.2 */
+ case CURL_SSLVERSION_TLSv1_3: /* TLS >= version 1.3 */
+ /* asking for any TLS version as the minimum, means no SSL versions
+ allowed */
+ ctx_options |= SSL_OP_NO_SSLv2;
+ ctx_options |= SSL_OP_NO_SSLv3;
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) /* 1.1.0 */
- result = set_ssl_version_min_max(cf, backend->ctx);
+ result = set_ssl_version_min_max(cf, backend->ctx);
#else
- result = set_ssl_version_min_max_legacy(&ctx_options, cf, data);
+ result = set_ssl_version_min_max_legacy(&ctx_options, cf, data);
#endif
- if(result != CURLE_OK)
- return result;
- break;
+ if(result != CURLE_OK)
+ return result;
+ break;
- default:
- failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION");
- return CURLE_SSL_CONNECT_ERROR;
+ default:
+ failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION");
+ return CURLE_SSL_CONNECT_ERROR;
}
SSL_CTX_set_options(backend->ctx, ctx_options);
@@ -3693,7 +3700,8 @@ static CURLcode ossl_connect_step1(struct Curl_cfilter *cf, * an internal session cache.
*/
SSL_CTX_set_session_cache_mode(backend->ctx,
- SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL);
+ SSL_SESS_CACHE_CLIENT |
+ SSL_SESS_CACHE_NO_INTERNAL);
SSL_CTX_sess_set_new_cb(backend->ctx, ossl_new_session_cb);
/* give application a chance to interfere with SSL set up. */
@@ -3720,12 +3728,13 @@ static CURLcode ossl_connect_step1(struct Curl_cfilter *cf, SSL_set_app_data(backend->handle, cf);
#if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \
- !defined(OPENSSL_NO_OCSP)
+ !defined(OPENSSL_NO_OCSP)
if(conn_config->verifystatus)
SSL_set_tlsext_status_type(backend->handle, TLSEXT_STATUSTYPE_ocsp);
#endif
-#if defined(OPENSSL_IS_BORINGSSL) && defined(ALLOW_RENEG)
+#if (defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)) && \
+ defined(ALLOW_RENEG)
SSL_set_renegotiate_mode(backend->handle, ssl_renegotiate_freely);
#endif
@@ -3884,17 +3893,19 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf, error_buffer */
strcpy(error_buffer, "SSL certificate verification failed");
}
-#if (OPENSSL_VERSION_NUMBER >= 0x10101000L && \
- !defined(LIBRESSL_VERSION_NUMBER) && \
- !defined(OPENSSL_IS_BORINGSSL))
+#if (OPENSSL_VERSION_NUMBER >= 0x10101000L && \
+ !defined(LIBRESSL_VERSION_NUMBER) && \
+ !defined(OPENSSL_IS_BORINGSSL) && \
+ !defined(OPENSSL_IS_AWSLC))
+
/* SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED is only available on
- OpenSSL version above v1.1.1, not LibreSSL nor BoringSSL */
+ OpenSSL version above v1.1.1, not LibreSSL, BoringSSL, or AWS-LC */
else if((lib == ERR_LIB_SSL) &&
(reason == SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED)) {
- /* If client certificate is required, communicate the
- error to client */
- result = CURLE_SSL_CLIENTCERT;
- ossl_strerror(errdetail, error_buffer, sizeof(error_buffer));
+ /* If client certificate is required, communicate the
+ error to client */
+ result = CURLE_SSL_CLIENTCERT;
+ ossl_strerror(errdetail, error_buffer, sizeof(error_buffer));
}
#endif
else {
@@ -3939,7 +3950,7 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf, /* Sets data and len to negotiated protocol, len is 0 if no protocol was
* negotiated
*/
- if(cf->conn->bits.tls_enable_alpn) {
+ if(connssl->alpn) {
const unsigned char *neg_protocol;
unsigned int len;
SSL_get0_alpn_selected(backend->handle, &neg_protocol, &len);
@@ -3978,7 +3989,7 @@ static CURLcode pkp_pin_peer_pubkey(struct Curl_easy *data, X509* cert, /* Thanks to Viktor Dukhovni on the OpenSSL mailing list */
/* https://groups.google.com/group/mailing.openssl.users/browse_thread
- /thread/d61858dae102c6c7 */
+ /thread/d61858dae102c6c7 */
len1 = i2d_X509_PUBKEY(X509_get_X509_PUBKEY(cert), NULL);
if(len1 < 1)
break; /* failed */
@@ -4199,7 +4210,7 @@ static CURLcode servercert(struct Curl_cfilter *cf, }
#if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \
- !defined(OPENSSL_NO_OCSP)
+ !defined(OPENSSL_NO_OCSP)
if(conn_config->verifystatus) {
result = verifystatus(cf, data);
if(result) {
@@ -4247,7 +4258,7 @@ static CURLcode ossl_connect_step3(struct Curl_cfilter *cf, */
result = servercert(cf, data, conn_config->verifypeer ||
- conn_config->verifyhost);
+ conn_config->verifyhost);
if(!result)
connssl->connecting_state = ssl_connect_done;
@@ -4433,35 +4444,35 @@ static ssize_t ossl_send(struct Curl_cfilter *cf, rc = -1;
goto out;
case SSL_ERROR_SYSCALL:
- {
- int sockerr = SOCKERRNO;
+ {
+ int sockerr = SOCKERRNO;
- if(backend->io_result == CURLE_AGAIN) {
- *curlcode = CURLE_AGAIN;
- rc = -1;
- goto out;
- }
- sslerror = ERR_get_error();
- if(sslerror)
- ossl_strerror(sslerror, error_buffer, sizeof(error_buffer));
- else if(sockerr)
- Curl_strerror(sockerr, error_buffer, sizeof(error_buffer));
- else {
- strncpy(error_buffer, SSL_ERROR_to_str(err), sizeof(error_buffer));
- error_buffer[sizeof(error_buffer) - 1] = '\0';
- }
- failf(data, OSSL_PACKAGE " SSL_write: %s, errno %d",
- error_buffer, sockerr);
- *curlcode = CURLE_SEND_ERROR;
+ if(backend->io_result == CURLE_AGAIN) {
+ *curlcode = CURLE_AGAIN;
rc = -1;
goto out;
}
+ sslerror = ERR_get_error();
+ if(sslerror)
+ ossl_strerror(sslerror, error_buffer, sizeof(error_buffer));
+ else if(sockerr)
+ Curl_strerror(sockerr, error_buffer, sizeof(error_buffer));
+ else {
+ strncpy(error_buffer, SSL_ERROR_to_str(err), sizeof(error_buffer));
+ error_buffer[sizeof(error_buffer) - 1] = '\0';
+ }
+ failf(data, OSSL_PACKAGE " SSL_write: %s, errno %d",
+ error_buffer, sockerr);
+ *curlcode = CURLE_SEND_ERROR;
+ rc = -1;
+ goto out;
+ }
case SSL_ERROR_SSL: {
/* A failure in the SSL library occurred, usually a protocol error.
The OpenSSL error queue contains more information on the error. */
struct Curl_cfilter *cf_ssl_next = Curl_ssl_cf_get_ssl(cf->next);
struct ssl_connect_data *connssl_next = cf_ssl_next?
- cf_ssl_next->ctx : NULL;
+ cf_ssl_next->ctx : NULL;
sslerror = ERR_get_error();
if(ERR_GET_LIB(sslerror) == ERR_LIB_SSL &&
ERR_GET_REASON(sslerror) == SSL_R_BIO_NOT_SET &&
@@ -4628,6 +4639,10 @@ static size_t ossl_version(char *buffer, size_t size) #else
return msnprintf(buffer, size, OSSL_PACKAGE);
#endif
+#elif defined(OPENSSL_IS_AWSLC)
+ return msnprintf(buffer, size, "%s/%s",
+ OSSL_PACKAGE,
+ AWSLC_VERSION_NUMBER_STRING);
#elif defined(HAVE_OPENSSL_VERSION) && defined(OPENSSL_VERSION_STRING)
return msnprintf(buffer, size, "%s/%s",
OSSL_PACKAGE, OpenSSL_version(OPENSSL_VERSION_STRING));
@@ -4714,7 +4729,7 @@ static CURLcode ossl_sha256sum(const unsigned char *tmp, /* input */ static bool ossl_cert_status_request(void)
{
#if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \
- !defined(OPENSSL_NO_OCSP)
+ !defined(OPENSSL_NO_OCSP)
return TRUE;
#else
return FALSE;
@@ -4728,7 +4743,7 @@ static void *ossl_get_internals(struct ssl_connect_data *connssl, struct ssl_backend_data *backend = connssl->backend;
DEBUGASSERT(backend);
return info == CURLINFO_TLS_SESSION ?
- (void *)backend->ctx : (void *)backend->handle;
+ (void *)backend->ctx : (void *)backend->handle;
}
static void ossl_free_multi_ssl_backend_data(
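
The OPENSSL_IS_AWSLC additions above mainly change which package name and version string libcurl reports for its OpenSSL-family TLS backend. An application can read that string back through curl_version_info(); the snippet assumes a working libcurl development setup and is otherwise independent of this diff.

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
  /* prints e.g. "OpenSSL/3.0.9", "BoringSSL" or "AWS-LC/<version>" */
  printf("TLS backend: %s\n", info->ssl_version ? info->ssl_version : "none");
  return 0;
}
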
diff --git a/libs/libcurl/src/vtls/rustls.c b/libs/libcurl/src/vtls/rustls.c
index 923795ff66..36e966eec5 100644
--- a/libs/libcurl/src/vtls/rustls.c
+++ b/libs/libcurl/src/vtls/rustls.c
@@ -102,6 +102,10 @@ read_cb(void *userdata, uint8_t *buf, uintptr_t len, uintptr_t *out_n) ret = EINVAL;
}
*out_n = (int)nread;
+ /*
+ DEBUGF(LOG_CF(io_ctx->data, io_ctx->cf, "cf->next recv(len=%zu) -> %zd, %d",
+ len, nread, result));
+ */
return ret;
}
@@ -121,48 +125,30 @@ write_cb(void *userdata, const uint8_t *buf, uintptr_t len, uintptr_t *out_n) ret = EINVAL;
}
*out_n = (int)nwritten;
+ /*
+ DEBUGF(LOG_CF(io_ctx->data, io_ctx->cf, "cf->next send(len=%zu) -> %zd, %d",
+ len, nwritten, result));
+ */
return ret;
}
-/*
- * On each run:
- * - Read a chunk of bytes from the socket into rustls' TLS input buffer.
- * - Tell rustls to process any new packets.
- * - Read out as many plaintext bytes from rustls as possible, until hitting
- * error, EOF, or EAGAIN/EWOULDBLOCK, or plainbuf/plainlen is filled up.
- *
- * It's okay to call this function with plainbuf == NULL and plainlen == 0.
- * In that case, it will copy bytes from the socket into rustls' TLS input
- * buffer, and process packets, but won't consume bytes from rustls' plaintext
- * output buffer.
- */
-static ssize_t
-cr_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
- char *plainbuf, size_t plainlen, CURLcode *err)
+static ssize_t tls_recv_more(struct Curl_cfilter *cf,
+ struct Curl_easy *data, CURLcode *err)
{
struct ssl_connect_data *const connssl = cf->ctx;
struct ssl_backend_data *const backend = connssl->backend;
- struct rustls_connection *rconn = NULL;
struct io_ctx io_ctx;
-
- size_t n = 0;
size_t tls_bytes_read = 0;
- size_t plain_bytes_copied = 0;
- rustls_result rresult = 0;
- char errorbuf[255];
- size_t errorlen;
rustls_io_result io_error;
-
- DEBUGASSERT(backend);
- rconn = backend->conn;
+ rustls_result rresult = 0;
io_ctx.cf = cf;
io_ctx.data = data;
-
- io_error = rustls_connection_read_tls(rconn, read_cb, &io_ctx,
+ io_error = rustls_connection_read_tls(backend->conn, read_cb, &io_ctx,
&tls_bytes_read);
if(io_error == EAGAIN || io_error == EWOULDBLOCK) {
- DEBUGF(LOG_CF(data, cf, "cr_recv: EAGAIN or EWOULDBLOCK"));
+ *err = CURLE_AGAIN;
+ return -1;
}
else if(io_error) {
char buffer[STRERROR_LEN];
@@ -172,10 +158,10 @@ cr_recv(struct Curl_cfilter *cf, struct Curl_easy *data, return -1;
}
- DEBUGF(LOG_CF(data, cf, "cr_recv: read %ld TLS bytes", tls_bytes_read));
-
- rresult = rustls_connection_process_new_packets(rconn);
+ rresult = rustls_connection_process_new_packets(backend->conn);
if(rresult != RUSTLS_RESULT_OK) {
+ char errorbuf[255];
+ size_t errorlen;
rustls_error(rresult, errorbuf, sizeof(errorbuf), &errorlen);
failf(data, "rustls_connection_process_new_packets: %.*s",
errorlen, errorbuf);
@@ -184,60 +170,102 @@ cr_recv(struct Curl_cfilter *cf, struct Curl_easy *data, }
backend->data_pending = TRUE;
+ *err = CURLE_OK;
+ return (ssize_t)tls_bytes_read;
+}
+
+/*
+ * On each run:
+ * - Read a chunk of bytes from the socket into rustls' TLS input buffer.
+ * - Tell rustls to process any new packets.
+ * - Read out as many plaintext bytes from rustls as possible, until hitting
+ * error, EOF, or EAGAIN/EWOULDBLOCK, or plainbuf/plainlen is filled up.
+ *
+ * It's okay to call this function with plainbuf == NULL and plainlen == 0.
+ * In that case, it will copy bytes from the socket into rustls' TLS input
+ * buffer, and process packets, but won't consume bytes from rustls' plaintext
+ * output buffer.
+ */
+static ssize_t
+cr_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
+ char *plainbuf, size_t plainlen, CURLcode *err)
+{
+ struct ssl_connect_data *const connssl = cf->ctx;
+ struct ssl_backend_data *const backend = connssl->backend;
+ struct rustls_connection *rconn = NULL;
+ size_t n = 0;
+ size_t plain_bytes_copied = 0;
+ rustls_result rresult = 0;
+ ssize_t nread;
+ bool eof = FALSE;
+
+ DEBUGASSERT(backend);
+ rconn = backend->conn;
while(plain_bytes_copied < plainlen) {
+ if(!backend->data_pending) {
+ if(tls_recv_more(cf, data, err) < 0) {
+ if(*err != CURLE_AGAIN) {
+ nread = -1;
+ goto out;
+ }
+ break;
+ }
+ }
+
rresult = rustls_connection_read(rconn,
(uint8_t *)plainbuf + plain_bytes_copied,
plainlen - plain_bytes_copied,
&n);
if(rresult == RUSTLS_RESULT_PLAINTEXT_EMPTY) {
- DEBUGF(LOG_CF(data, cf, "cr_recv: got PLAINTEXT_EMPTY. "
- "will try again later."));
backend->data_pending = FALSE;
- break;
}
else if(rresult == RUSTLS_RESULT_UNEXPECTED_EOF) {
failf(data, "rustls: peer closed TCP connection "
"without first closing TLS connection");
*err = CURLE_READ_ERROR;
- return -1;
+ nread = -1;
+ goto out;
}
else if(rresult != RUSTLS_RESULT_OK) {
/* n always equals 0 in this case, don't need to check it */
+ char errorbuf[255];
+ size_t errorlen;
rustls_error(rresult, errorbuf, sizeof(errorbuf), &errorlen);
failf(data, "rustls_connection_read: %.*s", errorlen, errorbuf);
*err = CURLE_READ_ERROR;
- return -1;
+ nread = -1;
+ goto out;
}
else if(n == 0) {
/* n == 0 indicates clean EOF, but we may have read some other
plaintext bytes before we reached this. Break out of the loop
so we can figure out whether to return success or EOF. */
+ eof = TRUE;
break;
}
else {
- DEBUGF(LOG_CF(data, cf, "cr_recv: got %ld plain bytes", n));
plain_bytes_copied += n;
}
}
if(plain_bytes_copied) {
*err = CURLE_OK;
- return plain_bytes_copied;
+ nread = (ssize_t)plain_bytes_copied;
}
-
- /* If we wrote out 0 plaintext bytes, that means either we hit a clean EOF,
- OR we got a RUSTLS_RESULT_PLAINTEXT_EMPTY.
- If the latter, return CURLE_AGAIN so curl doesn't treat this as EOF. */
- if(!backend->data_pending) {
+ else if(eof) {
+ *err = CURLE_OK;
+ nread = 0;
+ }
+ else {
*err = CURLE_AGAIN;
- return -1;
+ nread = -1;
}
- /* Zero bytes read, and no RUSTLS_RESULT_PLAINTEXT_EMPTY, means the TCP
- connection was cleanly closed (with a close_notify alert). */
- *err = CURLE_OK;
- return 0;
+out:
+ DEBUGF(LOG_CF(data, cf, "cf_recv(len=%zu) -> %zd, %d",
+ plainlen, nread, *err));
+ return nread;
}
/*
@@ -269,7 +297,10 @@ cr_send(struct Curl_cfilter *cf, struct Curl_easy *data, DEBUGASSERT(backend);
rconn = backend->conn;
- DEBUGF(LOG_CF(data, cf, "cr_send: %ld plain bytes", plainlen));
+ DEBUGF(LOG_CF(data, cf, "cf_send: %ld plain bytes", plainlen));
+
+ io_ctx.cf = cf;
+ io_ctx.data = data;
if(plainlen > 0) {
rresult = rustls_connection_write(rconn, plainbuf, plainlen,
@@ -287,14 +318,11 @@ cr_send(struct Curl_cfilter *cf, struct Curl_easy *data, }
}
- io_ctx.cf = cf;
- io_ctx.data = data;
-
while(rustls_connection_wants_write(rconn)) {
io_error = rustls_connection_write_tls(rconn, write_cb, &io_ctx,
&tlswritten);
if(io_error == EAGAIN || io_error == EWOULDBLOCK) {
- DEBUGF(LOG_CF(data, cf, "cr_send: EAGAIN after %zu bytes",
+ DEBUGF(LOG_CF(data, cf, "cf_send: EAGAIN after %zu bytes",
tlswritten_total));
*err = CURLE_AGAIN;
return -1;
@@ -311,7 +339,7 @@ cr_send(struct Curl_cfilter *cf, struct Curl_easy *data, *err = CURLE_WRITE_ERROR;
return -1;
}
- DEBUGF(LOG_CF(data, cf, "cr_send: wrote %zu TLS bytes", tlswritten));
+ DEBUGF(LOG_CF(data, cf, "cf_send: wrote %zu TLS bytes", tlswritten));
tlswritten_total += tlswritten;
}
@@ -538,13 +566,12 @@ cr_connect_nonblocking(struct Curl_cfilter *cf, if(wants_read) {
infof(data, "rustls_connection wants us to read_tls.");
- cr_recv(cf, data, NULL, 0, &tmperr);
- if(tmperr == CURLE_AGAIN) {
- infof(data, "reading would block");
- /* fall through */
- }
- else if(tmperr != CURLE_OK) {
- if(tmperr == CURLE_READ_ERROR) {
+ if(tls_recv_more(cf, data, &tmperr) < 0) {
+ if(tmperr == CURLE_AGAIN) {
+ infof(data, "reading would block");
+ /* fall through */
+ }
+ else if(tmperr == CURLE_READ_ERROR) {
return CURLE_SSL_CONNECT_ERROR;
}
else {
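
The reworked cr_recv()/tls_recv_more() split above settles on the usual curl recv contract: a positive count when plaintext was produced, zero only after a clean TLS EOF (close_notify), and -1 with the error set to "again" when nothing is ready yet. A stand-alone sketch of just that return convention; the enum below is a local stand-in, not the real CURLcode definition (its 81 merely mirrors CURLE_AGAIN).

#include <stdio.h>

enum sketch_err { ERR_OK = 0, ERR_AGAIN = 81 };  /* 81 mirrors CURLE_AGAIN */

static long recv_result(long copied, int clean_eof, enum sketch_err *err)
{
  if(copied > 0) {
    *err = ERR_OK;
    return copied;             /* plaintext was produced */
  }
  if(clean_eof) {
    *err = ERR_OK;
    return 0;                  /* close_notify seen: real end of data */
  }
  *err = ERR_AGAIN;
  return -1;                   /* nothing yet, caller should retry */
}

int main(void)
{
  enum sketch_err err;
  long n;
  n = recv_result(128, 0, &err);
  printf("%ld %d\n", n, err);  /* 128 0 */
  n = recv_result(0, 1, &err);
  printf("%ld %d\n", n, err);  /* 0 0 */
  n = recv_result(0, 0, &err);
  printf("%ld %d\n", n, err);  /* -1 81 */
  return 0;
}
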
diff --git a/libs/libcurl/src/vtls/schannel.c b/libs/libcurl/src/vtls/schannel.c
index 63f9b07690..68cead586b 100644
--- a/libs/libcurl/src/vtls/schannel.c
+++ b/libs/libcurl/src/vtls/schannel.c
@@ -1171,9 +1171,11 @@ schannel_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data) if(!backend->cred) {
char *snihost;
result = schannel_acquire_credential_handle(cf, data);
- if(result != CURLE_OK) {
+ if(result)
return result;
- }
+ /* schannel_acquire_credential_handle() sets backend->cred accordingly or
+ it returns error otherwise. */
+
/* A hostname associated with the credential is needed by
InitializeSecurityContext for SNI and other reasons. */
snihost = Curl_ssl_snihost(data, hostname, NULL);
@@ -2356,7 +2358,7 @@ schannel_recv(struct Curl_cfilter *cf, struct Curl_easy *data, "schannel: decrypted data buffer: offset %zu length %zu",
backend->decdata_offset, backend->decdata_length));
- cleanup:
+cleanup:
/* Warning- there is no guarantee the encdata state is valid at this point */
DEBUGF(infof(data, "schannel: schannel_recv cleanup"));
diff --git a/libs/libcurl/src/vtls/sectransp.c b/libs/libcurl/src/vtls/sectransp.c
index 81ab9ab061..b718c84adf 100644
--- a/libs/libcurl/src/vtls/sectransp.c
+++ b/libs/libcurl/src/vtls/sectransp.c
@@ -45,6 +45,11 @@ #pragma clang diagnostic ignored "-Wtautological-pointer-compare"
#endif /* __clang__ */
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Waddress"
+#pragma GCC diagnostic ignored "-Wundef"
+#endif
+
#include <limits.h>
#include <Security/Security.h>
@@ -234,7 +239,7 @@ struct st_cipher { insert in between existing items to appropriate place based on
cipher suite IANA number
*/
-const static struct st_cipher ciphertable[] = {
+static const struct st_cipher ciphertable[] = {
/* SSL version 3.0 and initial TLS 1.0 cipher suites.
Defined since SDK 10.2.8 */
CIPHER_DEF_SSLTLS(NULL_WITH_NULL_NULL, /* 0x0000 */
@@ -900,12 +905,12 @@ CF_INLINE const char *TLSCipherNameForNumber(SSLCipherSuite cipher) /* The first ciphers in the ciphertable are continuous. Here we do small
optimization and instead of loop directly get SSL name by cipher number.
*/
+ size_t i;
if(cipher <= SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA) {
return ciphertable[cipher].name;
}
/* Iterate through the rest of the ciphers */
- for(size_t i = SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA + 1;
- i < NUM_OF_CIPHERS;
+ for(i = SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA + 1; i < NUM_OF_CIPHERS;
++i) {
if(ciphertable[i].num == cipher) {
return ciphertable[i].name;
@@ -1429,7 +1434,8 @@ static CURLcode set_ssl_version_min_max(struct Curl_cfilter *cf, static bool is_cipher_suite_strong(SSLCipherSuite suite_num)
{
- for(size_t i = 0; i < NUM_OF_CIPHERS; ++i) {
+ size_t i;
+ for(i = 0; i < NUM_OF_CIPHERS; ++i) {
if(ciphertable[i].num == suite_num) {
return !ciphertable[i].weak;
}
@@ -1545,16 +1551,17 @@ static CURLcode sectransp_set_selected_ciphers(struct Curl_easy *data, size_t cipher_len = 0;
const char *cipher_end = NULL;
bool tls_name = FALSE;
+ size_t i;
/* Skip separators */
while(is_separator(*cipher_start))
- cipher_start++;
+ cipher_start++;
if(*cipher_start == '\0') {
break;
}
/* Find last position of a cipher in the ciphers string */
cipher_end = cipher_start;
- while (*cipher_end != '\0' && !is_separator(*cipher_end)) {
+ while(*cipher_end != '\0' && !is_separator(*cipher_end)) {
++cipher_end;
}
@@ -1568,7 +1575,7 @@ static CURLcode sectransp_set_selected_ciphers(struct Curl_easy *data, /* Iterate through the cipher table and look for the cipher, starting
the cipher number 0x01 because the 0x00 is not the real cipher */
cipher_len = cipher_end - cipher_start;
- for(size_t i = 1; i < NUM_OF_CIPHERS; ++i) {
+ for(i = 1; i < NUM_OF_CIPHERS; ++i) {
const char *table_cipher_name = NULL;
if(tls_name) {
table_cipher_name = ciphertable[i].name;
@@ -2712,7 +2719,7 @@ check_handshake: failf(data, "Peer rejected unexpected message");
break;
#if CURL_BUILD_MAC_10_11 || CURL_BUILD_IOS_9
- /* Treaing non-fatal error as fatal like before */
+ /* Treating non-fatal error as fatal like before */
case errSSLClientHelloReceived:
failf(data, "A non-fatal result for providing a server name "
"indication");
@@ -2796,7 +2803,7 @@ check_handshake: }
#if(CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1
- if(cf->conn->bits.tls_enable_alpn) {
+ if(connssl->alpn) {
if(__builtin_available(macOS 10.13.4, iOS 11, tvOS 11, *)) {
CFArrayRef alpnArr = NULL;
CFStringRef chosenProtocol = NULL;
@@ -3376,7 +3383,7 @@ static ssize_t sectransp_recv(struct Curl_cfilter *cf, DEBUGASSERT(backend);
- again:
+again:
*curlcode = CURLE_OK;
err = SSLRead(backend->ssl_ctx, buf, buffersize, &processed);
diff --git a/libs/libcurl/src/vtls/vtls.c b/libs/libcurl/src/vtls/vtls.c index 1f618d9aac..5068bc2aaf 100644 --- a/libs/libcurl/src/vtls/vtls.c +++ b/libs/libcurl/src/vtls/vtls.c @@ -130,6 +130,33 @@ static bool blobcmp(struct curl_blob *first, struct curl_blob *second) return !memcmp(first->data, second->data, first->len); /* same data */
}
+#ifdef USE_SSL
+static const struct alpn_spec ALPN_SPEC_H10 = {
+ { ALPN_HTTP_1_0 }, 1
+};
+static const struct alpn_spec ALPN_SPEC_H11 = {
+ { ALPN_HTTP_1_1 }, 1
+};
+#ifdef USE_HTTP2
+static const struct alpn_spec ALPN_SPEC_H2_H11 = {
+ { ALPN_H2, ALPN_HTTP_1_1 }, 2
+};
+#endif
+
+static const struct alpn_spec *alpn_get_spec(int httpwant, bool use_alpn)
+{
+ if(!use_alpn)
+ return NULL;
+ if(httpwant == CURL_HTTP_VERSION_1_0)
+ return &ALPN_SPEC_H10;
+#ifdef USE_HTTP2
+ if(httpwant >= CURL_HTTP_VERSION_2)
+ return &ALPN_SPEC_H2_H11;
+#endif
+ return &ALPN_SPEC_H11;
+}
+#endif /* USE_SSL */
+
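The relocated alpn_get_spec() keeps the previous selection rules: with ALPN disabled nothing is offered, an HTTP/1.0 preference offers only "http/1.0", a preference of HTTP/2 or better offers "h2" and "http/1.1", and everything else falls back to "http/1.1". A minimal standalone sketch of that mapping (illustration only, not part of the patch; it uses just the public CURL_HTTP_VERSION_* constants):

  #include <stdio.h>
  #include <curl/curl.h>

  /* mirrors the selection logic of alpn_get_spec(), for illustration only */
  static const char *alpn_offer(long httpwant, int use_alpn)
  {
    if(!use_alpn)
      return "(no ALPN)";
    if(httpwant == CURL_HTTP_VERSION_1_0)
      return "http/1.0";
    if(httpwant >= CURL_HTTP_VERSION_2_0)
      return "h2,http/1.1";
    return "http/1.1";
  }

  int main(void)
  {
    printf("%s\n", alpn_offer(CURL_HTTP_VERSION_2_0, 1)); /* h2,http/1.1 */
    printf("%s\n", alpn_offer(CURL_HTTP_VERSION_1_0, 1)); /* http/1.0 */
    return 0;
  }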
bool
Curl_ssl_config_matches(struct ssl_primary_config *data,
@@ -291,7 +318,7 @@ static bool ssl_prefs_check(struct Curl_easy *data) }
static struct ssl_connect_data *cf_ctx_new(struct Curl_easy *data,
- const struct alpn_spec *alpn)
+ const struct alpn_spec *alpn)
{
struct ssl_connect_data *ctx;
@@ -754,20 +781,6 @@ CURLcode Curl_ssl_push_certinfo_len(struct Curl_easy *data, return result;
}
-/*
- * This is a convenience function for push_certinfo_len that takes a zero
- * terminated value.
- */
-CURLcode Curl_ssl_push_certinfo(struct Curl_easy *data,
- int certnum,
- const char *label,
- const char *value)
-{
- size_t valuelen = strlen(value);
-
- return Curl_ssl_push_certinfo_len(data, certnum, label, value, valuelen);
-}
-
CURLcode Curl_ssl_random(struct Curl_easy *data,
unsigned char *entropy,
size_t length)
@@ -1581,8 +1594,15 @@ static ssize_t ssl_cf_recv(struct Curl_cfilter *cf, ssize_t nread;
CF_DATA_SAVE(save, cf, data);
- *err = CURLE_OK;
nread = Curl_ssl->recv_plain(cf, data, buf, len, err);
+ if(nread > 0) {
+ DEBUGASSERT((size_t)nread <= len);
+ }
+ else if(nread == 0) {
+ /* eof */
+ *err = CURLE_OK;
+ }
+ DEBUGF(LOG_CF(data, cf, "cf_recv(len=%zu) -> %zd, %d", len, nread, *err));
CF_DATA_RESTORE(cf, save);
return nread;
}
@@ -1726,7 +1746,8 @@ static CURLcode cf_ssl_create(struct Curl_cfilter **pcf, DEBUGASSERT(data->conn);
- ctx = cf_ctx_new(data, Curl_alpn_get_spec(data, conn));
+ ctx = cf_ctx_new(data, alpn_get_spec(data->state.httpwant,
+ conn->bits.tls_enable_alpn));
if(!ctx) {
result = CURLE_OUT_OF_MEMORY;
goto out;
@@ -1767,6 +1788,7 @@ CURLcode Curl_cf_ssl_insert_after(struct Curl_cfilter *cf_at, }
#ifndef CURL_DISABLE_PROXY
+
static CURLcode cf_ssl_proxy_create(struct Curl_cfilter **pcf,
struct Curl_easy *data,
struct connectdata *conn)
@@ -1774,8 +1796,17 @@ static CURLcode cf_ssl_proxy_create(struct Curl_cfilter **pcf, struct Curl_cfilter *cf = NULL;
struct ssl_connect_data *ctx;
CURLcode result;
+ bool use_alpn = conn->bits.tls_enable_alpn;
+ int httpwant = CURL_HTTP_VERSION_1_1;
+
+#ifdef USE_HTTP2
+ if(conn->http_proxy.proxytype == CURLPROXY_HTTPS2) {
+ use_alpn = TRUE;
+ httpwant = CURL_HTTP_VERSION_2;
+ }
+#endif
- ctx = cf_ctx_new(data, Curl_alpn_get_proxy_spec(data, conn));
+ ctx = cf_ctx_new(data, alpn_get_spec(httpwant, use_alpn));
if(!ctx) {
result = CURLE_OUT_OF_MEMORY;
goto out;
@@ -1789,19 +1820,6 @@ out: return result;
}
-CURLcode Curl_ssl_cfilter_proxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex)
-{
- struct Curl_cfilter *cf;
- CURLcode result;
-
- result = cf_ssl_proxy_create(&cf, data, conn);
- if(!result)
- Curl_conn_cf_add(data, conn, sockindex, cf);
- return result;
-}
-
CURLcode Curl_cf_ssl_proxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data)
{
@@ -1844,15 +1862,16 @@ void *Curl_ssl_get_internals(struct Curl_easy *data, int sockindex, CURLcode Curl_ssl_cfilter_remove(struct Curl_easy *data,
int sockindex)
{
- struct Curl_cfilter *cf = data->conn? data->conn->cfilter[sockindex] : NULL;
+ struct Curl_cfilter *cf, *head;
CURLcode result = CURLE_OK;
(void)data;
- for(; cf; cf = cf->next) {
+ head = data->conn? data->conn->cfilter[sockindex] : NULL;
+ for(cf = head; cf; cf = cf->next) {
if(cf->cft == &Curl_cft_ssl) {
if(Curl_ssl->shut_down(cf, data))
result = CURLE_SSL_SHUTDOWN_FAILED;
- Curl_conn_cf_discard(cf, data);
+ Curl_conn_cf_discard_sub(head, cf, data, FALSE);
break;
}
}
@@ -1914,19 +1933,6 @@ Curl_ssl_cf_get_primary_config(struct Curl_cfilter *cf) #endif
}
-struct ssl_primary_config *
-Curl_ssl_get_primary_config(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex)
-{
- struct Curl_cfilter *cf;
-
- (void)data;
- DEBUGASSERT(conn);
- cf = get_ssl_cf_engaged(conn, sockindex);
- return cf? Curl_ssl_cf_get_primary_config(cf) : NULL;
-}
-
struct Curl_cfilter *Curl_ssl_cf_get_ssl(struct Curl_cfilter *cf)
{
for(; cf; cf = cf->next) {
@@ -1936,42 +1942,6 @@ struct Curl_cfilter *Curl_ssl_cf_get_ssl(struct Curl_cfilter *cf) return NULL;
}
-static const struct alpn_spec ALPN_SPEC_H10 = {
- { ALPN_HTTP_1_0 }, 1
-};
-static const struct alpn_spec ALPN_SPEC_H11 = {
- { ALPN_HTTP_1_1 }, 1
-};
-#ifdef USE_HTTP2
-static const struct alpn_spec ALPN_SPEC_H2_H11 = {
- { ALPN_H2, ALPN_HTTP_1_1 }, 2
-};
-#endif
-
-const struct alpn_spec *
-Curl_alpn_get_spec(struct Curl_easy *data, struct connectdata *conn)
-{
- if(!conn->bits.tls_enable_alpn)
- return NULL;
- if(data->state.httpwant == CURL_HTTP_VERSION_1_0)
- return &ALPN_SPEC_H10;
-#ifdef USE_HTTP2
- if(data->state.httpwant >= CURL_HTTP_VERSION_2)
- return &ALPN_SPEC_H2_H11;
-#endif
- return &ALPN_SPEC_H11;
-}
-
-const struct alpn_spec *
-Curl_alpn_get_proxy_spec(struct Curl_easy *data, struct connectdata *conn)
-{
- if(!conn->bits.tls_enable_alpn)
- return NULL;
- if(data->state.httpwant == CURL_HTTP_VERSION_1_0)
- return &ALPN_SPEC_H10;
- return &ALPN_SPEC_H11;
-}
-
CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf,
const struct alpn_spec *spec)
{
@@ -2006,7 +1976,7 @@ CURLcode Curl_alpn_to_proto_str(struct alpn_proto_buf *buf, len = strlen(spec->entries[i]);
if(len >= ALPN_NAME_MAX)
return CURLE_FAILED_INIT;
- if(off + len + 2 >= (int)sizeof(buf->data))
+ if(off + len + 2 >= sizeof(buf->data))
return CURLE_FAILED_INIT;
if(off)
buf->data[off++] = ',';
@@ -2024,32 +1994,40 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf, size_t proto_len)
{
int can_multi = 0;
+ unsigned char *palpn =
+#ifndef CURL_DISABLE_PROXY
+ (cf->conn->bits.tunnel_proxy && Curl_ssl_cf_is_proxy(cf))?
+ &cf->conn->proxy_alpn : &cf->conn->alpn
+#else
+ &cf->conn->alpn
+#endif
+ ;
if(proto && proto_len) {
if(proto_len == ALPN_HTTP_1_1_LENGTH &&
- !memcmp(ALPN_HTTP_1_1, proto, ALPN_HTTP_1_1_LENGTH)) {
- cf->conn->alpn = CURL_HTTP_VERSION_1_1;
+ !memcmp(ALPN_HTTP_1_1, proto, ALPN_HTTP_1_1_LENGTH)) {
+ *palpn = CURL_HTTP_VERSION_1_1;
}
else if(proto_len == ALPN_HTTP_1_0_LENGTH &&
!memcmp(ALPN_HTTP_1_0, proto, ALPN_HTTP_1_0_LENGTH)) {
- cf->conn->alpn = CURL_HTTP_VERSION_1_0;
+ *palpn = CURL_HTTP_VERSION_1_0;
}
#ifdef USE_HTTP2
else if(proto_len == ALPN_H2_LENGTH &&
!memcmp(ALPN_H2, proto, ALPN_H2_LENGTH)) {
- cf->conn->alpn = CURL_HTTP_VERSION_2;
+ *palpn = CURL_HTTP_VERSION_2;
can_multi = 1;
}
#endif
#ifdef USE_HTTP3
else if(proto_len == ALPN_H3_LENGTH &&
- !memcmp(ALPN_H3, proto, ALPN_H3_LENGTH)) {
- cf->conn->alpn = CURL_HTTP_VERSION_3;
+ !memcmp(ALPN_H3, proto, ALPN_H3_LENGTH)) {
+ *palpn = CURL_HTTP_VERSION_3;
can_multi = 1;
}
#endif
else {
- cf->conn->alpn = CURL_HTTP_VERSION_NONE;
+ *palpn = CURL_HTTP_VERSION_NONE;
failf(data, "unsupported ALPN protocol: '%.*s'", (int)proto_len, proto);
/* TODO: do we want to fail this? Previous code just ignored it and
* some vtls backends even ignore the return code of this function. */
@@ -2059,12 +2037,14 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf, infof(data, VTLS_INFOF_ALPN_ACCEPTED_LEN_1STR, (int)proto_len, proto);
}
else {
- cf->conn->alpn = CURL_HTTP_VERSION_NONE;
+ *palpn = CURL_HTTP_VERSION_NONE;
infof(data, VTLS_INFOF_NO_ALPN);
}
out:
- Curl_multiuse_state(data, can_multi? BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE);
+ if(!Curl_ssl_cf_is_proxy(cf))
+ Curl_multiuse_state(data, can_multi?
+ BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE);
return CURLE_OK;
}
diff --git a/libs/libcurl/src/vtls/vtls.h b/libs/libcurl/src/vtls/vtls.h index bb59ecbe9c..2178e8834f 100644 --- a/libs/libcurl/src/vtls/vtls.h +++ b/libs/libcurl/src/vtls/vtls.h @@ -65,58 +65,6 @@ CURLsslset Curl_init_sslset_nolock(curl_sslbackend id, const char *name, #define CURL_SHA256_DIGEST_LENGTH 32 /* fixed size */
#endif
-/* see https://www.iana.org/assignments/tls-extensiontype-values/ */
-#define ALPN_HTTP_1_1_LENGTH 8
-#define ALPN_HTTP_1_1 "http/1.1"
-#define ALPN_HTTP_1_0_LENGTH 8
-#define ALPN_HTTP_1_0 "http/1.0"
-#define ALPN_H2_LENGTH 2
-#define ALPN_H2 "h2"
-#define ALPN_H3_LENGTH 2
-#define ALPN_H3 "h3"
-
-/* conservative sizes on the ALPN entries and count we are handling,
- * we can increase these if we ever feel the need or have to accommodate
- * ALPN strings from the "outside". */
-#define ALPN_NAME_MAX 10
-#define ALPN_ENTRIES_MAX 3
-#define ALPN_PROTO_BUF_MAX (ALPN_ENTRIES_MAX * (ALPN_NAME_MAX + 1))
-
-struct alpn_spec {
- const char entries[ALPN_ENTRIES_MAX][ALPN_NAME_MAX];
- size_t count; /* number of entries */
-};
-
-struct alpn_proto_buf {
- unsigned char data[ALPN_PROTO_BUF_MAX];
- int len;
-};
-
-CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf,
- const struct alpn_spec *spec);
-CURLcode Curl_alpn_to_proto_str(struct alpn_proto_buf *buf,
- const struct alpn_spec *spec);
-
-CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- const unsigned char *proto,
- size_t proto_len);
-
-/**
- * Get the ALPN specification to use for talking to remote host.
- * May return NULL if ALPN is disabled on the connection.
- */
-const struct alpn_spec *
-Curl_alpn_get_spec(struct Curl_easy *data, struct connectdata *conn);
-
-/**
- * Get the ALPN specification to use for talking to the proxy.
- * May return NULL if ALPN is disabled on the connection.
- */
-const struct alpn_spec *
-Curl_alpn_get_proxy_spec(struct Curl_easy *data, struct connectdata *conn);
-
-
char *Curl_ssl_snihost(struct Curl_easy *data, const char *host, size_t *olen);
bool Curl_ssl_config_matches(struct ssl_primary_config *data,
struct ssl_primary_config *needle);
@@ -207,9 +155,6 @@ CURLcode Curl_ssl_cfilter_remove(struct Curl_easy *data, int sockindex);
#ifndef CURL_DISABLE_PROXY
-CURLcode Curl_ssl_cfilter_proxy_add(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex);
CURLcode Curl_cf_ssl_proxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data);
#endif /* !CURL_DISABLE_PROXY */
@@ -227,20 +172,6 @@ struct ssl_config_data *Curl_ssl_get_config(struct Curl_easy *data, int sockindex);
/**
- * Get the primary SSL configuration from the connection.
- * This returns NULL if no SSL is configured.
- * Otherwise it returns the config of the first (highest) one that is
- * either connected, in handshake or about to start
- * (e.g. all filters below it are connected). If SSL filters are present,
- * but neither can start operating, return the config of the lowest one
- * that will first come into effect when connecting.
- */
-struct ssl_primary_config *
-Curl_ssl_get_primary_config(struct Curl_easy *data,
- struct connectdata *conn,
- int sockindex);
-
-/**
* True iff the underlying SSL implementation supports the option.
* Option is one of the defined SSLSUPP_* values.
* `data` maybe NULL for the features of the default implementation.
@@ -278,7 +209,6 @@ extern struct Curl_cftype Curl_cft_ssl_proxy; #define Curl_ssl_get_internals(a,b,c,d) NULL
#define Curl_ssl_supports(a,b) FALSE
#define Curl_ssl_cfilter_add(a,b,c) CURLE_NOT_BUILT_IN
-#define Curl_ssl_cfilter_proxy_add(a,b,c) CURLE_NOT_BUILT_IN
#define Curl_ssl_get_config(a,b) NULL
#define Curl_ssl_cfilter_remove(a,b) CURLE_OK
#endif
diff --git a/libs/libcurl/src/vtls/vtls_int.h b/libs/libcurl/src/vtls/vtls_int.h index a9b16f8b2c..31b57fd37a 100644 --- a/libs/libcurl/src/vtls/vtls_int.h +++ b/libs/libcurl/src/vtls/vtls_int.h @@ -29,17 +29,55 @@ #ifdef USE_SSL
+/* see https://www.iana.org/assignments/tls-extensiontype-values/ */
+#define ALPN_HTTP_1_1_LENGTH 8
+#define ALPN_HTTP_1_1 "http/1.1"
+#define ALPN_HTTP_1_0_LENGTH 8
+#define ALPN_HTTP_1_0 "http/1.0"
+#define ALPN_H2_LENGTH 2
+#define ALPN_H2 "h2"
+#define ALPN_H3_LENGTH 2
+#define ALPN_H3 "h3"
+
+/* conservative sizes on the ALPN entries and count we are handling,
+ * we can increase these if we ever feel the need or have to accommodate
+ * ALPN strings from the "outside". */
+#define ALPN_NAME_MAX 10
+#define ALPN_ENTRIES_MAX 3
+#define ALPN_PROTO_BUF_MAX (ALPN_ENTRIES_MAX * (ALPN_NAME_MAX + 1))
+
+struct alpn_spec {
+ const char entries[ALPN_ENTRIES_MAX][ALPN_NAME_MAX];
+ size_t count; /* number of entries */
+};
+
+struct alpn_proto_buf {
+ unsigned char data[ALPN_PROTO_BUF_MAX];
+ int len;
+};
+
+CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf,
+ const struct alpn_spec *spec);
+CURLcode Curl_alpn_to_proto_str(struct alpn_proto_buf *buf,
+ const struct alpn_spec *spec);
+
+CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const unsigned char *proto,
+ size_t proto_len);
+
/* Information in each SSL cfilter context: cf->ctx */
struct ssl_connect_data {
ssl_connection_state state;
ssl_connect_state connecting_state;
char *hostname; /* hostname for verification */
char *dispname; /* display version of hostname */
- int port; /* remote port at origin */
const struct alpn_spec *alpn; /* ALPN to use or NULL for none */
struct ssl_backend_data *backend; /* vtls backend specific props */
struct cf_call_data call_data; /* data handle used in current call */
struct curltime handshake_done; /* time when handshake finished */
+ int port; /* remote port at origin */
+ BIT(use_alpn); /* if ALPN shall be used in handshake */
};
diff --git a/libs/libcurl/src/vtls/wolfssl.c b/libs/libcurl/src/vtls/wolfssl.c index 8918e3554a..5ed483815b 100644 --- a/libs/libcurl/src/vtls/wolfssl.c +++ b/libs/libcurl/src/vtls/wolfssl.c @@ -854,7 +854,7 @@ wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data) }
#ifdef HAVE_ALPN
- if(cf->conn->bits.tls_enable_alpn) {
+ if(connssl->alpn) {
int rc;
char *protocol = NULL;
unsigned short protocol_len = 0;
diff --git a/libs/libcurl/src/vtls/x509asn1.c b/libs/libcurl/src/vtls/x509asn1.c index cf673b349f..58feb58226 100644 --- a/libs/libcurl/src/vtls/x509asn1.c +++ b/libs/libcurl/src/vtls/x509asn1.c @@ -172,7 +172,7 @@ static const struct Curl_OID OIDtable[] = { * It is intended to support certificate information gathering for SSL backends
* that offer a mean to get certificates as a whole, but do not supply
* entry points to get particular certificate sub-fields.
- * Please note there is no pretention here to rewrite a full SSL library.
+ * Please note there is no pretension here to rewrite a full SSL library.
*/
static const char *getASN1Element(struct Curl_asn1Element *elem,
@@ -918,6 +918,20 @@ static const char *dumpAlgo(struct Curl_asn1Element *param, return OID2str(oid.beg, oid.end, TRUE);
}
+/*
+ * This is a convenience function for push_certinfo_len that takes a zero
+ * terminated value.
+ */
+static CURLcode ssl_push_certinfo(struct Curl_easy *data,
+ int certnum,
+ const char *label,
+ const char *value)
+{
+ size_t valuelen = strlen(value);
+
+ return Curl_ssl_push_certinfo_len(data, certnum, label, value, valuelen);
+}
+
/* return 0 on success, 1 on error */
static int do_pubkey_field(struct Curl_easy *data, int certnum,
const char *label, struct Curl_asn1Element *elem)
@@ -930,7 +944,7 @@ static int do_pubkey_field(struct Curl_easy *data, int certnum, output = ASN1tostr(elem, 0);
if(output) {
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, label, output);
+ result = ssl_push_certinfo(data, certnum, label, output);
if(!certnum && !result)
infof(data, " %s: %s", label, output);
free((char *) output);
@@ -960,7 +974,7 @@ static int do_pubkey(struct Curl_easy *data, int certnum, if(data->set.ssl.certinfo) {
char q[sizeof(len) * 8 / 3 + 1];
(void)msnprintf(q, sizeof(q), "%lu", len);
- if(Curl_ssl_push_certinfo(data, certnum, "ECC Public Key", q))
+ if(ssl_push_certinfo(data, certnum, "ECC Public Key", q))
return 1;
}
return do_pubkey_field(data, certnum, "ecPublicKey", pubkey);
@@ -994,7 +1008,7 @@ static int do_pubkey(struct Curl_easy *data, int certnum, if(data->set.ssl.certinfo) {
char r[sizeof(len) * 8 / 3 + 1];
msnprintf(r, sizeof(r), "%lu", len);
- if(Curl_ssl_push_certinfo(data, certnum, "RSA Public Key", r))
+ if(ssl_push_certinfo(data, certnum, "RSA Public Key", r))
return 1;
}
/* Generate coefficients. */
@@ -1092,7 +1106,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo) {
- result = Curl_ssl_push_certinfo(data, certnum, "Subject", ccp);
+ result = ssl_push_certinfo(data, certnum, "Subject", ccp);
if(result)
return result;
}
@@ -1105,7 +1119,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo) {
- result = Curl_ssl_push_certinfo(data, certnum, "Issuer", ccp);
+ result = ssl_push_certinfo(data, certnum, "Issuer", ccp);
}
if(!certnum)
infof(data, " Issuer: %s", ccp);
@@ -1121,7 +1135,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, ccp = curl_maprintf("%x", version);
if(!ccp)
return CURLE_OUT_OF_MEMORY;
- result = Curl_ssl_push_certinfo(data, certnum, "Version", ccp);
+ result = ssl_push_certinfo(data, certnum, "Version", ccp);
free((char *) ccp);
if(result)
return result;
@@ -1134,7 +1148,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, "Serial Number", ccp);
+ result = ssl_push_certinfo(data, certnum, "Serial Number", ccp);
if(!certnum)
infof(data, " Serial Number: %s", ccp);
free((char *) ccp);
@@ -1147,7 +1161,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, "Signature Algorithm", ccp);
+ result = ssl_push_certinfo(data, certnum, "Signature Algorithm", ccp);
if(!certnum)
infof(data, " Signature Algorithm: %s", ccp);
free((char *) ccp);
@@ -1159,7 +1173,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, "Start Date", ccp);
+ result = ssl_push_certinfo(data, certnum, "Start Date", ccp);
if(!certnum)
infof(data, " Start Date: %s", ccp);
free((char *) ccp);
@@ -1171,7 +1185,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, "Expire Date", ccp);
+ result = ssl_push_certinfo(data, certnum, "Expire Date", ccp);
if(!certnum)
infof(data, " Expire Date: %s", ccp);
free((char *) ccp);
@@ -1184,7 +1198,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, "Public Key Algorithm",
+ result = ssl_push_certinfo(data, certnum, "Public Key Algorithm",
ccp);
if(!result) {
int ret;
@@ -1203,7 +1217,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, if(!ccp)
return CURLE_OUT_OF_MEMORY;
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, "Signature", ccp);
+ result = ssl_push_certinfo(data, certnum, "Signature", ccp);
if(!certnum)
infof(data, " Signature: %s", ccp);
free((char *) ccp);
@@ -1238,7 +1252,7 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data, cp2[i] = '\0';
free(cp1);
if(data->set.ssl.certinfo)
- result = Curl_ssl_push_certinfo(data, certnum, "Cert", cp2);
+ result = ssl_push_certinfo(data, certnum, "Cert", cp2);
if(!certnum)
infof(data, "%s", cp2);
free(cp2);
diff --git a/libs/libcurl/src/ws.c b/libs/libcurl/src/ws.c index 998f75af72..2d3b3bcf53 100644 --- a/libs/libcurl/src/ws.c +++ b/libs/libcurl/src/ws.c @@ -27,9 +27,11 @@ #ifdef USE_WEBSOCKETS
#include "urldata.h"
+#include "bufq.h"
#include "dynbuf.h"
#include "rand.h"
#include "curl_base64.h"
+#include "connect.h"
#include "sendf.h"
#include "multiif.h"
#include "ws.h"
@@ -42,6 +44,485 @@ #include "curl_memory.h"
#include "memdebug.h"
+
+#define WSBIT_FIN 0x80
+#define WSBIT_OPCODE_CONT 0
+#define WSBIT_OPCODE_TEXT (1)
+#define WSBIT_OPCODE_BIN (2)
+#define WSBIT_OPCODE_CLOSE (8)
+#define WSBIT_OPCODE_PING (9)
+#define WSBIT_OPCODE_PONG (0xa)
+#define WSBIT_OPCODE_MASK (0xf)
+
+#define WSBIT_MASK 0x80
+
+/* buffer dimensioning */
+#define WS_CHUNK_SIZE 65535
+#define WS_CHUNK_COUNT 2
+
+struct ws_frame_meta {
+ char proto_opcode;
+ int flags;
+ const char *name;
+};
+
+static struct ws_frame_meta WS_FRAMES[] = {
+ { WSBIT_OPCODE_CONT, CURLWS_CONT, "CONT" },
+ { WSBIT_OPCODE_TEXT, CURLWS_TEXT, "TEXT" },
+ { WSBIT_OPCODE_BIN, CURLWS_BINARY, "BIN" },
+ { WSBIT_OPCODE_CLOSE, CURLWS_CLOSE, "CLOSE" },
+ { WSBIT_OPCODE_PING, CURLWS_PING, "PING" },
+ { WSBIT_OPCODE_PONG, CURLWS_PONG, "PONG" },
+};
+
+static const char *ws_frame_name_of_op(unsigned char proto_opcode)
+{
+ unsigned char opcode = proto_opcode & WSBIT_OPCODE_MASK;
+ size_t i;
+ for(i = 0; i < sizeof(WS_FRAMES)/sizeof(WS_FRAMES[0]); ++i) {
+ if(WS_FRAMES[i].proto_opcode == opcode)
+ return WS_FRAMES[i].name;
+ }
+ return "???";
+}
+
+static int ws_frame_op2flags(unsigned char proto_opcode)
+{
+ unsigned char opcode = proto_opcode & WSBIT_OPCODE_MASK;
+ size_t i;
+ for(i = 0; i < sizeof(WS_FRAMES)/sizeof(WS_FRAMES[0]); ++i) {
+ if(WS_FRAMES[i].proto_opcode == opcode)
+ return WS_FRAMES[i].flags;
+ }
+ return 0;
+}
+
+static unsigned char ws_frame_flags2op(int flags)
+{
+ size_t i;
+ for(i = 0; i < sizeof(WS_FRAMES)/sizeof(WS_FRAMES[0]); ++i) {
+ if(WS_FRAMES[i].flags & flags)
+ return WS_FRAMES[i].proto_opcode;
+ }
+ return 0;
+}
+
+static void ws_dec_info(struct ws_decoder *dec, struct Curl_easy *data,
+ const char *msg)
+{
+ switch(dec->head_len) {
+ case 0:
+ break;
+ case 1:
+ infof(data, "WS-DEC: %s [%s%s]", msg,
+ ws_frame_name_of_op(dec->head[0]),
+ (dec->head[0] & WSBIT_FIN)? "" : " NON-FINAL");
+ break;
+ default:
+ if(dec->head_len < dec->head_total) {
+ infof(data, "WS-DEC: %s [%s%s](%d/%d)", msg,
+ ws_frame_name_of_op(dec->head[0]),
+ (dec->head[0] & WSBIT_FIN)? "" : " NON-FINAL",
+ dec->head_len, dec->head_total);
+ }
+ else {
+ infof(data, "WS-DEC: %s [%s%s payload=%zd/%zd]", msg,
+ ws_frame_name_of_op(dec->head[0]),
+ (dec->head[0] & WSBIT_FIN)? "" : " NON-FINAL",
+ dec->payload_offset, dec->payload_len);
+ }
+ break;
+ }
+}
+
+typedef ssize_t ws_write_payload(const unsigned char *buf, size_t buflen,
+ int frame_age, int frame_flags,
+ curl_off_t payload_offset,
+ curl_off_t payload_len,
+ void *userp,
+ CURLcode *err);
+
+
+static void ws_dec_reset(struct ws_decoder *dec)
+{
+ dec->frame_age = 0;
+ dec->frame_flags = 0;
+ dec->payload_offset = 0;
+ dec->payload_len = 0;
+ dec->head_len = dec->head_total = 0;
+ dec->state = WS_DEC_INIT;
+}
+
+static void ws_dec_init(struct ws_decoder *dec)
+{
+ ws_dec_reset(dec);
+}
+
+static CURLcode ws_dec_read_head(struct ws_decoder *dec,
+ struct Curl_easy *data,
+ struct bufq *inraw)
+{
+ const unsigned char *inbuf;
+ size_t inlen;
+
+ while(Curl_bufq_peek(inraw, &inbuf, &inlen)) {
+ if(dec->head_len == 0) {
+ dec->head[0] = *inbuf;
+ Curl_bufq_skip(inraw, 1);
+
+ dec->frame_flags = ws_frame_op2flags(dec->head[0]);
+ if(!dec->frame_flags) {
+ failf(data, "WS: unknown opcode: %x", dec->head[0]);
+ ws_dec_reset(dec);
+ return CURLE_RECV_ERROR;
+ }
+ dec->head_len = 1;
+ /* ws_dec_info(dec, data, "seeing opcode"); */
+ continue;
+ }
+ else if(dec->head_len == 1) {
+ dec->head[1] = *inbuf;
+ Curl_bufq_skip(inraw, 1);
+ dec->head_len = 2;
+
+ if(dec->head[1] & WSBIT_MASK) {
+ /* A client MUST close a connection if it detects a masked frame. */
+ failf(data, "WS: masked input frame");
+ ws_dec_reset(dec);
+ return CURLE_RECV_ERROR;
+ }
+ /* How long is the frame head? */
+ if(dec->head[1] == 126) {
+ dec->head_total = 4;
+ continue;
+ }
+ else if(dec->head[1] == 127) {
+ dec->head_total = 10;
+ continue;
+ }
+ else {
+ dec->head_total = 2;
+ }
+ }
+
+ if(dec->head_len < dec->head_total) {
+ dec->head[dec->head_len] = *inbuf;
+ Curl_bufq_skip(inraw, 1);
+ ++dec->head_len;
+ if(dec->head_len < dec->head_total) {
+ /* ws_dec_info(dec, data, "decoding head"); */
+ continue;
+ }
+ }
+ /* got the complete frame head */
+ DEBUGASSERT(dec->head_len == dec->head_total);
+ switch(dec->head_total) {
+ case 2:
+ dec->payload_len = dec->head[1];
+ break;
+ case 4:
+ dec->payload_len = (dec->head[2] << 8) | dec->head[3];
+ break;
+ case 10:
+ dec->payload_len = ((curl_off_t)dec->head[2] << 56) |
+ (curl_off_t)dec->head[3] << 48 |
+ (curl_off_t)dec->head[4] << 40 |
+ (curl_off_t)dec->head[5] << 32 |
+ (curl_off_t)dec->head[6] << 24 |
+ (curl_off_t)dec->head[7] << 16 |
+ (curl_off_t)dec->head[8] << 8 |
+ dec->head[9];
+ break;
+ default:
+ /* this should never happen */
+ DEBUGASSERT(0);
+ failf(data, "WS: unexpected frame header length");
+ return CURLE_RECV_ERROR;
+ }
+
+ dec->frame_age = 0;
+ dec->payload_offset = 0;
+ ws_dec_info(dec, data, "decoded");
+ return CURLE_OK;
+ }
+ return CURLE_AGAIN;
+}
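The head parser above follows RFC 6455: the second header byte carries a 7-bit length, and the special values 126 and 127 announce a 16-bit or 64-bit extended length field. A minimal sketch decoding one short, unmasked server frame header (the byte values are made up for illustration and are not from the patch):

  #include <stdio.h>

  int main(void)
  {
    /* 0x81 = FIN | TEXT opcode, 0x05 = payload length 5 (< 126, no extension) */
    unsigned char head[2] = { 0x81, 0x05 };
    int fin = (head[0] & 0x80) != 0;
    int opcode = head[0] & 0x0f;
    int masked = (head[1] & 0x80) != 0;  /* a frame from the server must not be masked */
    unsigned int len = head[1] & 0x7f;   /* 126 -> 2 extra length bytes, 127 -> 8 */
    printf("fin=%d opcode=0x%x masked=%d payload=%u\n", fin, opcode, masked, len);
    return 0;
  }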
+
+static CURLcode ws_dec_pass_payload(struct ws_decoder *dec,
+ struct Curl_easy *data,
+ struct bufq *inraw,
+ ws_write_payload *write_payload,
+ void *write_ctx)
+{
+ const unsigned char *inbuf;
+ size_t inlen;
+ ssize_t nwritten;
+ CURLcode result;
+ curl_off_t remain = dec->payload_len - dec->payload_offset;
+
+ (void)data;
+ while(remain && Curl_bufq_peek(inraw, &inbuf, &inlen)) {
+ if((curl_off_t)inlen > remain)
+ inlen = (size_t)remain;
+ nwritten = write_payload(inbuf, inlen, dec->frame_age, dec->frame_flags,
+ dec->payload_offset, dec->payload_len,
+ write_ctx, &result);
+ if(nwritten < 0)
+ return result;
+ Curl_bufq_skip(inraw, (size_t)nwritten);
+ dec->payload_offset += (curl_off_t)nwritten;
+ remain = dec->payload_len - dec->payload_offset;
+ /* infof(data, "WS-DEC: passed %zd bytes payload, %zd remain",
+ nwritten, remain); */
+ }
+
+ return remain? CURLE_AGAIN : CURLE_OK;
+}
+
+static CURLcode ws_dec_pass(struct ws_decoder *dec,
+ struct Curl_easy *data,
+ struct bufq *inraw,
+ ws_write_payload *write_payload,
+ void *write_ctx)
+{
+ CURLcode result;
+
+ if(Curl_bufq_is_empty(inraw))
+ return CURLE_AGAIN;
+
+ switch(dec->state) {
+ case WS_DEC_INIT:
+ ws_dec_reset(dec);
+ dec->state = WS_DEC_HEAD;
+ /* FALLTHROUGH */
+ case WS_DEC_HEAD:
+ result = ws_dec_read_head(dec, data, inraw);
+ if(result) {
+ if(result != CURLE_AGAIN) {
+ infof(data, "WS: decode error %d", (int)result);
+ break; /* real error */
+ }
+ /* incomplete ws frame head */
+ DEBUGASSERT(Curl_bufq_is_empty(inraw));
+ break;
+ }
+ /* head parsing done */
+ dec->state = WS_DEC_PAYLOAD;
+ if(dec->payload_len == 0) {
+ ssize_t nwritten;
+ const unsigned char tmp = '\0';
+ /* special case of a 0 length frame, need to write once */
+ nwritten = write_payload(&tmp, 0, dec->frame_age, dec->frame_flags,
+ 0, 0, write_ctx, &result);
+ if(nwritten < 0)
+ return result;
+ dec->state = WS_DEC_INIT;
+ break;
+ }
+ /* FALLTHROUGH */
+ case WS_DEC_PAYLOAD:
+ result = ws_dec_pass_payload(dec, data, inraw, write_payload, write_ctx);
+ ws_dec_info(dec, data, "passing");
+ if(result)
+ return result;
+ /* payload parsing done */
+ dec->state = WS_DEC_INIT;
+ break;
+ default:
+ /* we covered all enums above, but some code analyzers are wimps */
+ result = CURLE_FAILED_INIT;
+ }
+ return result;
+}
+
+static void update_meta(struct websocket *ws,
+ int frame_age, int frame_flags,
+ curl_off_t payload_offset,
+ curl_off_t payload_len,
+ size_t cur_len)
+{
+ ws->frame.age = frame_age;
+ ws->frame.flags = frame_flags;
+ ws->frame.offset = payload_offset;
+ ws->frame.len = cur_len;
+ ws->frame.bytesleft = (payload_len - payload_offset - cur_len);
+}
+
+static void ws_enc_info(struct ws_encoder *enc, struct Curl_easy *data,
+ const char *msg)
+{
+ infof(data, "WS-ENC: %s [%s%s%s payload=%zd/%zd]", msg,
+ ws_frame_name_of_op(enc->firstbyte),
+ (enc->firstbyte & WSBIT_OPCODE_MASK) == WSBIT_OPCODE_CONT ?
+ " CONT" : "",
+ (enc->firstbyte & WSBIT_FIN)? "" : " NON-FIN",
+ enc->payload_len - enc->payload_remain, enc->payload_len);
+}
+
+static void ws_enc_reset(struct ws_encoder *enc)
+{
+ enc->payload_remain = 0;
+ enc->xori = 0;
+ enc->contfragment = FALSE;
+}
+
+static void ws_enc_init(struct ws_encoder *enc)
+{
+ ws_enc_reset(enc);
+}
+
+/***
+ RFC 6455 Section 5.2
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-------+-+-------------+-------------------------------+
+ |F|R|R|R| opcode|M| Payload len | Extended payload length |
+ |I|S|S|S| (4) |A| (7) | (16/64) |
+ |N|V|V|V| |S| | (if payload len==126/127) |
+ | |1|2|3| |K| | |
+ +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
+ | Extended payload length continued, if payload len == 127 |
+ + - - - - - - - - - - - - - - - +-------------------------------+
+ | |Masking-key, if MASK set to 1 |
+ +-------------------------------+-------------------------------+
+ | Masking-key (continued) | Payload Data |
+ +-------------------------------- - - - - - - - - - - - - - - - +
+ : Payload Data continued ... :
+ + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
+ | Payload Data continued ... |
+ +---------------------------------------------------------------+
+*/
+
+static ssize_t ws_enc_write_head(struct Curl_easy *data,
+ struct ws_encoder *enc,
+ unsigned int flags,
+ curl_off_t payload_len,
+ struct bufq *out,
+ CURLcode *err)
+{
+ unsigned char firstbyte = 0;
+ unsigned char opcode;
+ unsigned char head[14];
+ size_t hlen;
+ ssize_t n;
+
+ if(enc->payload_remain > 0) {
+ /* trying to write a new frame before the previous one is finished */
+ failf(data, "WS: starting new frame with %zd bytes from last one"
+ "remaining to be sent", (ssize_t)enc->payload_remain);
+ *err = CURLE_SEND_ERROR;
+ return -1;
+ }
+
+ opcode = ws_frame_flags2op(flags);
+ if(!opcode) {
+ failf(data, "WS: provided flags not recognized '%x'", flags);
+ *err = CURLE_SEND_ERROR;
+ return -1;
+ }
+
+ if(!(flags & CURLWS_CONT)) {
+ if(!enc->contfragment)
+ /* not marked as continuing, this is the final fragment */
+ firstbyte |= WSBIT_FIN | opcode;
+ else
+ /* the previous fragment was marked as continuing, so this ends the
+ series; use the CONT opcode and set the FIN bit */
+ firstbyte |= WSBIT_FIN | WSBIT_OPCODE_CONT;
+
+ enc->contfragment = FALSE;
+ }
+ else if(enc->contfragment) {
+ /* the previous fragment was not a final one and this isn't either, keep a
+ CONT opcode and no FIN bit */
+ firstbyte |= WSBIT_OPCODE_CONT;
+ }
+ else {
+ firstbyte = opcode;
+ enc->contfragment = TRUE;
+ }
+
+ head[0] = enc->firstbyte = firstbyte;
+ if(payload_len > 65535) {
+ head[1] = 127 | WSBIT_MASK;
+ head[2] = (unsigned char)((payload_len >> 56) & 0xff);
+ head[3] = (unsigned char)((payload_len >> 48) & 0xff);
+ head[4] = (unsigned char)((payload_len >> 40) & 0xff);
+ head[5] = (unsigned char)((payload_len >> 32) & 0xff);
+ head[6] = (unsigned char)((payload_len >> 24) & 0xff);
+ head[7] = (unsigned char)((payload_len >> 16) & 0xff);
+ head[8] = (unsigned char)((payload_len >> 8) & 0xff);
+ head[9] = (unsigned char)(payload_len & 0xff);
+ hlen = 10;
+ }
+ else if(payload_len >= 126) {
+ head[1] = 126 | WSBIT_MASK;
+ head[2] = (unsigned char)((payload_len >> 8) & 0xff);
+ head[3] = (unsigned char)(payload_len & 0xff);
+ hlen = 4;
+ }
+ else {
+ head[1] = (unsigned char)payload_len | WSBIT_MASK;
+ hlen = 2;
+ }
+
+ enc->payload_remain = enc->payload_len = payload_len;
+ ws_enc_info(enc, data, "sending");
+
+ /* add 4 bytes mask */
+ memcpy(&head[hlen], &enc->mask, 4);
+ hlen += 4;
+ /* reset for payload to come */
+ enc->xori = 0;
+
+ n = Curl_bufq_write(out, head, hlen, err);
+ if(n < 0)
+ return -1;
+ if((size_t)n != hlen) {
+ /* We use a bufq with SOFT_LIMIT, writing should always succeed */
+ DEBUGASSERT(0);
+ *err = CURLE_SEND_ERROR;
+ return -1;
+ }
+ return n;
+}
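A client-side frame head is therefore 2, 4 or 10 bytes depending on the payload size, and is always followed by the 4-byte mask. A small sketch of that size calculation, mirroring the branches above for illustration only:

  #include <stdio.h>

  static size_t ws_head_size(unsigned long long payload_len)
  {
    size_t hlen;
    if(payload_len > 65535)
      hlen = 10;               /* 127 marker + 64-bit extended length */
    else if(payload_len >= 126)
      hlen = 4;                /* 126 marker + 16-bit extended length */
    else
      hlen = 2;                /* length fits in the second header byte */
    return hlen + 4;           /* a client always appends a 4-byte mask */
  }

  int main(void)
  {
    /* prints: 6 8 14 */
    printf("%zu %zu %zu\n", ws_head_size(5), ws_head_size(500), ws_head_size(70000));
    return 0;
  }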
+
+static ssize_t ws_enc_write_payload(struct ws_encoder *enc,
+ struct Curl_easy *data,
+ const unsigned char *buf, size_t buflen,
+ struct bufq *out, CURLcode *err)
+{
+ ssize_t n;
+ size_t i, len;
+
+ if(Curl_bufq_is_full(out)) {
+ *err = CURLE_AGAIN;
+ return -1;
+ }
+
+ /* not the most performant way to do this */
+ len = buflen;
+ if((curl_off_t)len > enc->payload_remain)
+ len = (size_t)enc->payload_remain;
+
+ for(i = 0; i < len; ++i) {
+ unsigned char c = buf[i] ^ enc->mask[enc->xori];
+ n = Curl_bufq_write(out, &c, 1, err);
+ if(n < 0) {
+ if((*err != CURLE_AGAIN) || !i)
+ return -1;
+ break;
+ }
+ enc->xori++;
+ enc->xori &= 3;
+ }
+ enc->payload_remain -= (curl_off_t)i;
+ ws_enc_info(enc, data, "buffered");
+ return (ssize_t)i;
+}
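The payload encoding is a plain byte-wise XOR with the 4-byte mask, cycled through `xori`; applying the same operation a second time restores the original bytes. A tiny sketch with an assumed mask value (real masks are random per connection):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
    unsigned char mask[4] = { 0x12, 0x34, 0x56, 0x78 };  /* assumed value */
    unsigned char buf[] = "hello";
    size_t i, len = strlen((char *)buf);
    for(i = 0; i < len; i++)
      buf[i] ^= mask[i & 3];   /* mask the payload */
    for(i = 0; i < len; i++)
      buf[i] ^= mask[i & 3];   /* the same XOR unmasks it again */
    printf("%s\n", buf);       /* prints: hello */
    return 0;
  }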
+
+
struct wsfield {
const char *name;
const char *val;
@@ -111,7 +592,6 @@ CURLcode Curl_ws_request(struct Curl_easy *data, REQTYPE *req) }
}
k->upgr101 = UPGR101_WS;
- Curl_dyn_init(&data->req.p.http->ws.buf, MAX_WS_SIZE * 2);
return result;
}
@@ -123,12 +603,27 @@ CURLcode Curl_ws_accept(struct Curl_easy *data, const char *mem, size_t nread)
{
struct SingleRequest *k = &data->req;
- struct HTTP *ws = data->req.p.http;
- struct connectdata *conn = data->conn;
- struct websocket *wsp = &data->req.p.http->ws;
- struct ws_conn *wsc = &conn->proto.ws;
+ struct websocket *ws;
CURLcode result;
+ DEBUGASSERT(data->conn);
+ ws = data->conn->proto.ws;
+ if(!ws) {
+ ws = calloc(1, sizeof(*ws));
+ if(!ws)
+ return CURLE_OUT_OF_MEMORY;
+ data->conn->proto.ws = ws;
+ Curl_bufq_init(&ws->recvbuf, WS_CHUNK_SIZE, WS_CHUNK_COUNT);
+ Curl_bufq_init2(&ws->sendbuf, WS_CHUNK_SIZE, WS_CHUNK_COUNT,
+ BUFQ_OPT_SOFT_LIMIT);
+ ws_dec_init(&ws->dec);
+ ws_enc_init(&ws->enc);
+ }
+ else {
+ Curl_bufq_reset(&ws->recvbuf);
+ ws_dec_reset(&ws->dec);
+ ws_enc_reset(&ws->enc);
+ }
/* Verify the Sec-WebSocket-Accept response.
The sent value is the base64 encoded version of a SHA-1 hash done on the
@@ -149,169 +644,74 @@ CURLcode Curl_ws_accept(struct Curl_easy *data, the WebSocket Connection. */
/* 4 bytes random */
- result = Curl_rand(data, (unsigned char *)&ws->ws.mask, sizeof(ws->ws.mask));
+
+ result = Curl_rand(data, (unsigned char *)&ws->enc.mask,
+ sizeof(ws->enc.mask));
if(result)
return result;
-
infof(data, "Received 101, switch to WebSocket; mask %02x%02x%02x%02x",
- ws->ws.mask[0], ws->ws.mask[1], ws->ws.mask[2], ws->ws.mask[3]);
- Curl_dyn_init(&wsc->early, data->set.buffer_size);
- if(nread) {
- result = Curl_dyn_addn(&wsc->early, mem, nread);
- if(result)
+ ws->enc.mask[0], ws->enc.mask[1], ws->enc.mask[2], ws->enc.mask[3]);
+
+ if(data->set.connect_only) {
+ ssize_t nwritten;
+ /* In CONNECT_ONLY setup, any payload bytes already in `mem` must be
+ * buffered so that curl_ws_recv() can deliver them after this transfer
+ * is already marked as DONE. */
+ nwritten = Curl_bufq_write(&ws->recvbuf, (const unsigned char *)mem,
+ nread, &result);
+ if(nwritten < 0)
return result;
infof(data, "%zu bytes websocket payload", nread);
- wsp->stillb = Curl_dyn_ptr(&wsc->early);
- wsp->stillblen = Curl_dyn_len(&wsc->early);
}
k->upgr101 = UPGR101_RECEIVED;
return result;
}
-#define WSBIT_FIN 0x80
-#define WSBIT_OPCODE_CONT 0
-#define WSBIT_OPCODE_TEXT (1)
-#define WSBIT_OPCODE_BIN (2)
-#define WSBIT_OPCODE_CLOSE (8)
-#define WSBIT_OPCODE_PING (9)
-#define WSBIT_OPCODE_PONG (0xa)
-#define WSBIT_OPCODE_MASK (0xf)
-
-#define WSBIT_MASK 0x80
-
-/* remove the spent bytes from the beginning of the buffer as that part has
- now been delivered to the application */
-static void ws_decode_shift(struct Curl_easy *data, size_t spent)
+static ssize_t ws_client_write(const unsigned char *buf, size_t buflen,
+ int frame_age, int frame_flags,
+ curl_off_t payload_offset,
+ curl_off_t payload_len,
+ void *userp,
+ CURLcode *err)
{
- struct websocket *wsp = &data->req.p.http->ws;
- size_t len = Curl_dyn_len(&wsp->buf);
- size_t keep = len - spent;
- DEBUGASSERT(len >= spent);
- Curl_dyn_tail(&wsp->buf, keep);
-}
-
-/* ws_decode() decodes a binary frame into structured WebSocket data,
-
- data - the transfer
- inbuf - incoming raw data. If NULL, work on the already buffered data.
- inlen - size of the provided data, perhaps too little, perhaps too much
- headlen - stored length of the frame header
- olen - stored length of the extracted data
- oleft - number of unread bytes pending to that belongs to this frame
- flags - stored bitmask about the frame
-
- Returns CURLE_AGAIN if there is only a partial frame in the buffer. Then it
- stores the first part in the ->extra buffer to be used in the next call
- when more data is provided.
-*/
-
-static CURLcode ws_decode(struct Curl_easy *data,
- unsigned char *inbuf, size_t inlen,
- size_t *headlen, size_t *olen,
- curl_off_t *oleft,
- unsigned int *flags)
-{
- bool fin;
- unsigned char opcode;
- curl_off_t total;
- size_t dataindex = 2;
- curl_off_t payloadsize;
-
- *olen = *headlen = 0;
-
- if(inlen < 2) {
- /* the smallest possible frame is two bytes */
- infof(data, "WS: plen == %u, EAGAIN", (int)inlen);
- return CURLE_AGAIN;
- }
-
- fin = inbuf[0] & WSBIT_FIN;
- opcode = inbuf[0] & WSBIT_OPCODE_MASK;
- infof(data, "WS:%d received FIN bit %u", __LINE__, (int)fin);
- *flags = 0;
- switch(opcode) {
- case WSBIT_OPCODE_CONT:
- if(!fin)
- *flags |= CURLWS_CONT;
- infof(data, "WS: received OPCODE CONT");
- break;
- case WSBIT_OPCODE_TEXT:
- infof(data, "WS: received OPCODE TEXT");
- *flags |= CURLWS_TEXT;
- break;
- case WSBIT_OPCODE_BIN:
- infof(data, "WS: received OPCODE BINARY");
- *flags |= CURLWS_BINARY;
- break;
- case WSBIT_OPCODE_CLOSE:
- infof(data, "WS: received OPCODE CLOSE");
- *flags |= CURLWS_CLOSE;
- break;
- case WSBIT_OPCODE_PING:
- infof(data, "WS: received OPCODE PING");
- *flags |= CURLWS_PING;
- break;
- case WSBIT_OPCODE_PONG:
- infof(data, "WS: received OPCODE PONG");
- *flags |= CURLWS_PONG;
- break;
- default:
- failf(data, "WS: unknown opcode: %x", opcode);
- return CURLE_RECV_ERROR;
+ struct Curl_easy *data = userp;
+ struct websocket *ws;
+ size_t wrote;
+ curl_off_t remain = (payload_len - (payload_offset + buflen));
+
+ (void)frame_age;
+ if(!data->conn || !data->conn->proto.ws) {
+ *err = CURLE_FAILED_INIT;
+ return -1;
}
-
- if(inbuf[1] & WSBIT_MASK) {
- /* A client MUST close a connection if it detects a masked frame. */
- failf(data, "WS: masked input frame");
- return CURLE_RECV_ERROR;
+ ws = data->conn->proto.ws;
+
+ if((frame_flags & CURLWS_PING) && !remain) {
+ /* auto-respond to PINGs, only works for single-frame payloads atm */
+ size_t bytes;
+ infof(data, "WS: auto-respond to PING with a PONG");
+ /* send back the exact same content as a PONG */
+ *err = curl_ws_send(data, buf, buflen, &bytes, 0, CURLWS_PONG);
+ if(*err)
+ return -1;
}
- payloadsize = inbuf[1];
- if(payloadsize == 126) {
- if(inlen < 4) {
- infof(data, "WS:%d plen == %u, EAGAIN", __LINE__, (int)inlen);
- return CURLE_AGAIN; /* not enough data available */
+ else if(buflen || !remain) {
+ /* deliver the decoded frame to the user callback. The application
+ * may invoke curl_ws_meta() to access frame information. */
+ update_meta(ws, frame_age, frame_flags, payload_offset,
+ payload_len, buflen);
+ Curl_set_in_callback(data, true);
+ wrote = data->set.fwrite_func((char *)buf, 1,
+ buflen, data->set.out);
+ Curl_set_in_callback(data, false);
+ if(wrote != buflen) {
+ *err = CURLE_RECV_ERROR;
+ return -1;
}
- payloadsize = (inbuf[2] << 8) | inbuf[3];
- dataindex += 2;
- }
- else if(payloadsize == 127) {
- /* 64 bit payload size */
- if(inlen < 10)
- return CURLE_AGAIN;
- if(inbuf[2] & 80) {
- failf(data, "WS: too large frame");
- return CURLE_RECV_ERROR;
- }
- dataindex += 8;
- payloadsize = ((curl_off_t)inbuf[2] << 56) |
- (curl_off_t)inbuf[3] << 48 |
- (curl_off_t)inbuf[4] << 40 |
- (curl_off_t)inbuf[5] << 32 |
- (curl_off_t)inbuf[6] << 24 |
- (curl_off_t)inbuf[7] << 16 |
- (curl_off_t)inbuf[8] << 8 |
- inbuf[9];
- }
-
- /* point to the payload */
- *headlen = dataindex;
- total = dataindex + payloadsize;
- if(total > (curl_off_t)inlen) {
- /* buffer contains partial frame */
- *olen = inlen - dataindex; /* bytes to write out */
- *oleft = total - inlen; /* bytes yet to come (for this frame) */
- payloadsize = total - dataindex;
- }
- else {
- /* we have the complete frame (`total` bytes) in buffer */
- *olen = payloadsize; /* bytes to write out */
- *oleft = 0; /* bytes yet to come (for this frame) */
}
-
- infof(data, "WS: received %Ou bytes payload (%Ou left, buflen was %zu)",
- payloadsize, *oleft, inlen);
- return CURLE_OK;
+ *err = CURLE_OK;
+ return (ssize_t)buflen;
}
/* Curl_ws_writecb() is the write callback for websocket traffic. The
@@ -321,98 +721,150 @@ static CURLcode ws_decode(struct Curl_easy *data, size_t Curl_ws_writecb(char *buffer, size_t size /* 1 */,
size_t nitems, void *userp)
{
- struct HTTP *ws = (struct HTTP *)userp;
- struct Curl_easy *data = ws->ws.data;
- struct websocket *wsp = &data->req.p.http->ws;
- void *writebody_ptr = data->set.out;
+ struct Curl_easy *data = userp;
+
if(data->set.ws_raw_mode)
- return data->set.fwrite_func(buffer, size, nitems, writebody_ptr);
+ return data->set.fwrite_func(buffer, size, nitems, data->set.out);
else if(nitems) {
- size_t wrote = 0, headlen;
+ struct websocket *ws;
CURLcode result;
+ if(!data->conn || !data->conn->proto.ws) {
+ failf(data, "WS: not a websocket transfer");
+ return nitems - 1;
+ }
+ ws = data->conn->proto.ws;
+
if(buffer) {
- result = Curl_dyn_addn(&wsp->buf, buffer, nitems);
- if(result) {
+ ssize_t nwritten;
+
+ nwritten = Curl_bufq_write(&ws->recvbuf, (const unsigned char *)buffer,
+ nitems, &result);
+ if(nwritten < 0) {
infof(data, "WS: error adding data to buffer %d", (int)result);
return nitems - 1;
}
buffer = NULL;
}
- while(Curl_dyn_len(&wsp->buf)) {
- unsigned char *wsbuf = Curl_dyn_uptr(&wsp->buf);
- size_t buflen = Curl_dyn_len(&wsp->buf);
- size_t write_len = 0;
- size_t consumed = 0;
-
- if(!ws->ws.frame.bytesleft) {
- unsigned int recvflags;
- curl_off_t fb_left;
-
- result = ws_decode(data, wsbuf, buflen,
- &headlen, &write_len, &fb_left, &recvflags);
- if(result == CURLE_AGAIN)
- /* insufficient amount of data, keep it for later.
- * we pretend to have written all since we have a copy */
- return nitems;
- else if(result) {
- infof(data, "WS: decode error %d", (int)result);
- return nitems - 1;
- }
- consumed += headlen;
- wsbuf += headlen;
- buflen -= headlen;
-
- /* New frame. store details about the frame to be reachable with
- curl_ws_meta() from within the write callback */
- ws->ws.frame.age = 0;
- ws->ws.frame.offset = 0;
- ws->ws.frame.flags = recvflags;
- ws->ws.frame.bytesleft = fb_left;
- }
- else {
- /* continuing frame */
- write_len = (size_t)ws->ws.frame.bytesleft;
- if(write_len > buflen)
- write_len = buflen;
- ws->ws.frame.offset += write_len;
- ws->ws.frame.bytesleft -= write_len;
- }
- if((ws->ws.frame.flags & CURLWS_PING) && !ws->ws.frame.bytesleft) {
- /* auto-respond to PINGs, only works for single-frame payloads atm */
- size_t bytes;
- infof(data, "WS: auto-respond to PING with a PONG");
- /* send back the exact same content as a PONG */
- result = curl_ws_send(data, wsbuf, write_len,
- &bytes, 0, CURLWS_PONG);
- if(result)
- return result;
- }
- else if(write_len || !wsp->frame.bytesleft) {
- /* deliver the decoded frame to the user callback */
- Curl_set_in_callback(data, true);
- wrote = data->set.fwrite_func((char *)wsbuf, 1,
- write_len, writebody_ptr);
- Curl_set_in_callback(data, false);
- if(wrote != write_len)
- return 0;
+ while(!Curl_bufq_is_empty(&ws->recvbuf)) {
+
+ result = ws_dec_pass(&ws->dec, data, &ws->recvbuf,
+ ws_client_write, data);
+ if(result == CURLE_AGAIN)
+ /* insufficient amount of data, keep it for later.
+ * we pretend to have written all since we have a copy */
+ return nitems;
+ else if(result) {
+ infof(data, "WS: decode error %d", (int)result);
+ return nitems - 1;
}
- /* get rid of the buffered data consumed */
- consumed += write_len;
- ws_decode_shift(data, consumed);
}
}
return nitems;
}
+struct ws_collect {
+ struct Curl_easy *data;
+ void *buffer;
+ size_t buflen;
+ size_t bufidx;
+ int frame_age;
+ int frame_flags;
+ curl_off_t payload_offset;
+ curl_off_t payload_len;
+ bool written;
+};
+
+static ssize_t ws_client_collect(const unsigned char *buf, size_t buflen,
+ int frame_age, int frame_flags,
+ curl_off_t payload_offset,
+ curl_off_t payload_len,
+ void *userp,
+ CURLcode *err)
+{
+ struct ws_collect *ctx = userp;
+ size_t nwritten;
+ curl_off_t remain = (payload_len - (payload_offset + buflen));
+
+ if(!ctx->bufidx) {
+ /* first write */
+ ctx->frame_age = frame_age;
+ ctx->frame_flags = frame_flags;
+ ctx->payload_offset = payload_offset;
+ ctx->payload_len = payload_len;
+ }
+
+ if((frame_flags & CURLWS_PING) && !remain) {
+ /* auto-respond to PINGs, only works for single-frame payloads atm */
+ size_t bytes;
+ infof(ctx->data, "WS: auto-respond to PING with a PONG");
+ /* send back the exact same content as a PONG */
+ *err = curl_ws_send(ctx->data, buf, buflen, &bytes, 0, CURLWS_PONG);
+ if(*err)
+ return -1;
+ nwritten = bytes;
+ }
+ else {
+ ctx->written = TRUE;
+ DEBUGASSERT(ctx->buflen >= ctx->bufidx);
+ nwritten = CURLMIN(buflen, ctx->buflen - ctx->bufidx);
+ if(!nwritten) {
+ if(!buflen) { /* 0 length write, we accept that */
+ *err = CURLE_OK;
+ return 0;
+ }
+ *err = CURLE_AGAIN; /* no more space */
+ return -1;
+ }
+ *err = CURLE_OK;
+ memcpy(ctx->buffer, buf, nwritten);
+ ctx->bufidx += nwritten;
+ }
+ return nwritten;
+}
+
+static ssize_t nw_in_recv(void *reader_ctx,
+ unsigned char *buf, size_t buflen,
+ CURLcode *err)
+{
+ struct Curl_easy *data = reader_ctx;
+ size_t nread;
+
+ *err = curl_easy_recv(data, buf, buflen, &nread);
+ if(*err)
+ return -1;
+ return (ssize_t)nread;
+}
+
CURL_EXTERN CURLcode curl_ws_recv(struct Curl_easy *data, void *buffer,
size_t buflen, size_t *nread,
struct curl_ws_frame **metap)
{
- CURLcode result;
- struct websocket *wsp = &data->req.p.http->ws;
+ struct connectdata *conn = data->conn;
+ struct websocket *ws;
bool done = FALSE; /* not filled passed buffer yet */
+ struct ws_collect ctx;
+ CURLcode result;
+
+ if(!conn) {
+ /* Unhappy hack with lifetimes of transfers and connection */
+ if(!data->set.connect_only) {
+ failf(data, "CONNECT_ONLY is required");
+ return CURLE_UNSUPPORTED_PROTOCOL;
+ }
+
+ Curl_getconnectinfo(data, &conn);
+ if(!conn) {
+ failf(data, "connection not found");
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+ }
+ ws = conn->proto.ws;
+ if(!ws) {
+ failf(data, "connection is not setup for websocket");
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
*nread = 0;
*metap = NULL;
@@ -421,221 +873,97 @@ CURL_EXTERN CURLcode curl_ws_recv(struct Curl_easy *data, void *buffer, if(result)
return result;
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.data = data;
+ ctx.buffer = buffer;
+ ctx.buflen = buflen;
+
while(!done) {
- size_t datalen;
- unsigned int recvflags;
-
- if(!wsp->stillblen) {
- /* try to get more data */
- size_t n;
- result = curl_easy_recv(data, data->state.buffer,
- data->set.buffer_size, &n);
- if(result)
+ /* receive more when our buffer is empty */
+ if(Curl_bufq_is_empty(&ws->recvbuf)) {
+ ssize_t n = Curl_bufq_slurp(&ws->recvbuf, nw_in_recv, data, &result);
+ if(n < 0) {
return result;
- if(!n) {
+ }
+ else if(n == 0) {
/* connection closed */
infof(data, "connection expectedly closed?");
return CURLE_GOT_NOTHING;
}
- wsp->stillb = data->state.buffer;
- wsp->stillblen = n;
+ DEBUGF(infof(data, "curl_ws_recv, added %zu bytes from network",
+ Curl_bufq_len(&ws->recvbuf)));
}
- infof(data, "WS: %u bytes left to decode", (int)wsp->stillblen);
- if(!wsp->frame.bytesleft) {
- size_t headlen;
- curl_off_t oleft;
- /* detect new frame */
- result = ws_decode(data, (unsigned char *)wsp->stillb, wsp->stillblen,
- &headlen, &datalen, &oleft, &recvflags);
- if(result == CURLE_AGAIN)
- /* a packet fragment only */
- break;
- else if(result)
- return result;
- if(datalen > buflen) {
- size_t diff = datalen - buflen;
- datalen = buflen;
- oleft += diff;
+ result = ws_dec_pass(&ws->dec, data, &ws->recvbuf,
+ ws_client_collect, &ctx);
+ if(result == CURLE_AGAIN) {
+ if(!ctx.written) {
+ ws_dec_info(&ws->dec, data, "need more input");
+ continue; /* nothing written, try more input */
}
- wsp->stillb += headlen;
- wsp->stillblen -= headlen;
- wsp->frame.offset = 0;
- wsp->frame.bytesleft = oleft;
- wsp->frame.flags = recvflags;
- }
- else {
- /* existing frame, remaining payload handling */
- datalen = wsp->frame.bytesleft;
- if(datalen > wsp->stillblen)
- datalen = wsp->stillblen;
- if(datalen > buflen)
- datalen = buflen;
-
- wsp->frame.offset += wsp->frame.len;
- wsp->frame.bytesleft -= datalen;
+ done = TRUE;
+ break;
}
- wsp->frame.len = datalen;
-
- /* auto-respond to PINGs */
- if((wsp->frame.flags & CURLWS_PING) && !wsp->frame.bytesleft) {
- size_t nsent = 0;
- infof(data, "WS: auto-respond to PING with a PONG, %zu bytes payload",
- datalen);
- /* send back the exact same content as a PONG */
- result = curl_ws_send(data, wsp->stillb, datalen, &nsent, 0,
- CURLWS_PONG);
- if(result)
- return result;
- infof(data, "WS: bytesleft %zu datalen %zu",
- wsp->frame.bytesleft, datalen);
- /* we handled the data part of the PING, advance over that */
- wsp->stillb += nsent;
- wsp->stillblen -= nsent;
+ else if(result) {
+ return result;
}
- else if(datalen) {
- /* copy the payload to the user buffer */
- memcpy(buffer, wsp->stillb, datalen);
- *nread = datalen;
+ else if(ctx.written) {
+ /* The decoded frame is passed back to our caller.
+ * There are frames, like PING, which we auto-respond to and
+ * that we do not return. For these `ctx.written` is not set. */
done = TRUE;
-
- wsp->stillblen -= datalen;
- if(wsp->stillblen)
- wsp->stillb += datalen;
- else {
- wsp->stillb = NULL;
- }
+ break;
}
}
- *metap = &wsp->frame;
- return CURLE_OK;
-}
-static void ws_xor(struct Curl_easy *data,
- const unsigned char *source,
- unsigned char *dest,
- size_t len)
-{
- struct websocket *wsp = &data->req.p.http->ws;
- size_t i;
- /* append payload after the mask, XOR appropriately */
- for(i = 0; i < len; i++) {
- dest[i] = source[i] ^ wsp->mask[wsp->xori];
- wsp->xori++;
- wsp->xori &= 3;
- }
+ /* update frame information to be passed back */
+ update_meta(ws, ctx.frame_age, ctx.frame_flags, ctx.payload_offset,
+ ctx.payload_len, ctx.bufidx);
+ *metap = &ws->frame;
+ *nread = ws->frame.len;
+ /* infof(data, "curl_ws_recv(len=%zu) -> %zu bytes (frame at %zd, %zd left)",
+ buflen, *nread, ws->frame.offset, ws->frame.bytesleft); */
+ return CURLE_OK;
}
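From the application side the receive API is unchanged: in CONNECT_ONLY mode the handle is connected and upgraded first, then curl_ws_recv() pulls frames and the returned meta describes them. A hedged usage sketch (error handling trimmed, placeholder URL, matching the non-const metap prototype in this source):

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    char buf[256];
    size_t nread = 0;
    struct curl_ws_frame *meta = NULL;

    curl_easy_setopt(curl, CURLOPT_URL, "wss://example.org/ws");  /* placeholder */
    curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 2L);             /* WebSocket upgrade only */
    if(curl_easy_perform(curl) == CURLE_OK &&
       curl_ws_recv(curl, buf, sizeof(buf), &nread, &meta) == CURLE_OK)
      printf("got %zu bytes, flags 0x%x, %ld left in frame\n",
             nread, meta->flags, (long)meta->bytesleft);
    curl_easy_cleanup(curl);
    return 0;
  }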
-/***
- RFC 6455 Section 5.2
-
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-------+-+-------------+-------------------------------+
- |F|R|R|R| opcode|M| Payload len | Extended payload length |
- |I|S|S|S| (4) |A| (7) | (16/64) |
- |N|V|V|V| |S| | (if payload len==126/127) |
- | |1|2|3| |K| | |
- +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
- | Extended payload length continued, if payload len == 127 |
- + - - - - - - - - - - - - - - - +-------------------------------+
- | |Masking-key, if MASK set to 1 |
- +-------------------------------+-------------------------------+
- | Masking-key (continued) | Payload Data |
- +-------------------------------- - - - - - - - - - - - - - - - +
- : Payload Data continued ... :
- + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
- | Payload Data continued ... |
- +---------------------------------------------------------------+
-*/
-
-static size_t ws_packethead(struct Curl_easy *data,
- size_t len, unsigned int flags)
+static CURLcode ws_flush(struct Curl_easy *data, struct websocket *ws,
+ bool complete)
{
- struct HTTP *ws = data->req.p.http;
- unsigned char *out = (unsigned char *)data->state.ulbuf;
- unsigned char firstbyte = 0;
- int outi;
- unsigned char opcode;
- if(flags & CURLWS_TEXT) {
- opcode = WSBIT_OPCODE_TEXT;
- infof(data, "WS: send OPCODE TEXT");
- }
- else if(flags & CURLWS_CLOSE) {
- opcode = WSBIT_OPCODE_CLOSE;
- infof(data, "WS: send OPCODE CLOSE");
- }
- else if(flags & CURLWS_PING) {
- opcode = WSBIT_OPCODE_PING;
- infof(data, "WS: send OPCODE PING");
- }
- else if(flags & CURLWS_PONG) {
- opcode = WSBIT_OPCODE_PONG;
- infof(data, "WS: send OPCODE PONG");
- }
- else {
- opcode = WSBIT_OPCODE_BIN;
- infof(data, "WS: send OPCODE BINARY");
- }
-
- if(!(flags & CURLWS_CONT)) {
- if(!ws->ws.contfragment)
- /* not marked as continuing, this is the final fragment */
- firstbyte |= WSBIT_FIN | opcode;
- else
- /* marked as continuing, this is the final fragment; set CONT
- opcode and FIN bit */
- firstbyte |= WSBIT_FIN | WSBIT_OPCODE_CONT;
+ if(!Curl_bufq_is_empty(&ws->sendbuf)) {
+ CURLcode result;
+ const unsigned char *out;
+ size_t outlen;
+ ssize_t n;
- ws->ws.contfragment = FALSE;
- infof(data, "WS: set FIN");
- }
- else if(ws->ws.contfragment) {
- /* the previous fragment was not a final one and this isn't either, keep a
- CONT opcode and no FIN bit */
- firstbyte |= WSBIT_OPCODE_CONT;
- infof(data, "WS: keep CONT, no FIN");
- }
- else {
- firstbyte = opcode;
- ws->ws.contfragment = TRUE;
- infof(data, "WS: set CONT, no FIN");
- }
- out[0] = firstbyte;
- if(len > 65535) {
- out[1] = 127 | WSBIT_MASK;
- out[2] = (len >> 8) & 0xff;
- out[3] = len & 0xff;
- outi = 10;
- }
- else if(len > 126) {
- out[1] = 126 | WSBIT_MASK;
- out[2] = (len >> 8) & 0xff;
- out[3] = len & 0xff;
- outi = 4;
- }
- else {
- out[1] = (unsigned char)len | WSBIT_MASK;
- outi = 2;
+ while(Curl_bufq_peek(&ws->sendbuf, &out, &outlen)) {
+ if(data->set.connect_only)
+ result = Curl_senddata(data, out, outlen, &n);
+ else
+ result = Curl_write(data, data->conn->writesockfd, out, outlen, &n);
+ if(result) {
+ if(result == CURLE_AGAIN) {
+ if(!complete) {
+ infof(data, "WS: flush EAGAIN, %zu bytes remain in buffer",
+ Curl_bufq_len(&ws->sendbuf));
+ return result;
+ }
+ /* TODO: the current design does not allow for buffered writes.
+ * We need to flush the buffer now. There is no ws_flush() later */
+ n = 0;
+ continue;
+ }
+ else if(result) {
+ failf(data, "WS: flush, write error %d", result);
+ return result;
+ }
+ }
+ else {
+ infof(data, "WS: flushed %zu bytes", (size_t)n);
+ Curl_bufq_skip(&ws->sendbuf, (size_t)n);
+ }
+ }
}
-
- infof(data, "WS: send FIN bit %u (byte %02x)",
- firstbyte & WSBIT_FIN ? 1 : 0,
- firstbyte);
- infof(data, "WS: send payload len %u", (int)len);
-
- /* 4 bytes mask */
- memcpy(&out[outi], &ws->ws.mask, 4);
-
- if(data->set.upload_buffer_size < (len + 10))
- return 0;
-
- /* pass over the mask */
- outi += 4;
-
- ws->ws.xori = 0;
- /* return packet size */
- return outi;
+ return CURLE_OK;
}
CURL_EXTERN CURLcode curl_ws_send(struct Curl_easy *data, const void *buffer,
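The ws_packethead() logic removed above and the ws_enc_write_head() call used further down both serialize the client frame header drawn in the deleted ASCII diagram: a 2-byte base header, an optional 2- or 8-byte extended length, and the mandatory 4-byte client mask, so 14 bytes is the largest header a client frame can need. That is also why the rewritten curl_ws_send() refuses to encode a new frame unless at least 14 bytes of send-buffer space are free. A minimal standalone illustration of that layout follows; the function and names are invented for this sketch and are not curl internals.

  #include <stdint.h>
  #include <string.h>
  #include <stddef.h>

  /* Sketch only: build a client-side RFC 6455 frame header.
   * 2 bytes for payloads up to 125 bytes, 4 bytes up to 65535,
   * 10 bytes beyond that, plus the 4-byte client masking key:
   * at most 14 bytes in total. */
  static size_t frame_head(unsigned char *out, uint64_t payload_len,
                           unsigned char opcode, int fin,
                           const unsigned char mask[4])
  {
    size_t i = 2;
    out[0] = (unsigned char)((fin ? 0x80 : 0) | (opcode & 0x0f));
    if(payload_len < 126)
      out[1] = (unsigned char)payload_len;
    else if(payload_len <= 0xffff) {
      out[1] = 126;
      out[2] = (unsigned char)(payload_len >> 8);
      out[3] = (unsigned char)(payload_len & 0xff);
      i = 4;
    }
    else {
      int shift;
      out[1] = 127;
      for(shift = 56, i = 2; shift >= 0; shift -= 8, i++)
        out[i] = (unsigned char)((payload_len >> shift) & 0xff);
    }
    out[1] |= 0x80;           /* client frames are always masked */
    memcpy(&out[i], mask, 4); /* masking key follows the length field */
    return i + 4;             /* header size, 6, 8 or 14 bytes */
  }
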
@@ -643,109 +971,114 @@ CURL_EXTERN CURLcode curl_ws_send(struct Curl_easy *data, const void *buffer,
size_t buflen, size_t *sent,
curl_off_t totalsize,
unsigned int sendflags)
{
+ struct websocket *ws;
+ ssize_t nwritten, n;
+ size_t space;
CURLcode result;
- size_t headlen;
- char *out;
- ssize_t written;
- struct websocket *wsp = &data->req.p.http->ws;
- if(!data->set.ws_raw_mode) {
- result = Curl_get_upload_buffer(data);
+ *sent = 0;
+ if(!data->conn && data->set.connect_only) {
+ result = Curl_connect_only_attach(data);
if(result)
return result;
}
- else {
- if(totalsize || sendflags)
- return CURLE_BAD_FUNCTION_ARGUMENT;
+ if(!data->conn) {
+ failf(data, "No associated connection");
+ return CURLE_SEND_ERROR;
}
+ if(!data->conn->proto.ws) {
+ failf(data, "Not a websocket transfer on connection #%ld",
+ data->conn->connection_id);
+ return CURLE_SEND_ERROR;
+ }
+ ws = data->conn->proto.ws;
if(data->set.ws_raw_mode) {
+ if(totalsize || sendflags)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
if(!buflen)
/* nothing to do */
return CURLE_OK;
/* raw mode sends exactly what was requested, and this is from within
the write callback */
if(Curl_is_in_callback(data)) {
- if(!data->conn) {
- failf(data, "No associated connection");
- return CURLE_SEND_ERROR;
- }
result = Curl_write(data, data->conn->writesockfd, buffer, buflen,
- &written);
+ &nwritten);
}
else
- result = Curl_senddata(data, buffer, buflen, &written);
+ result = Curl_senddata(data, buffer, buflen, &nwritten);
infof(data, "WS: wanted to send %zu bytes, sent %zu bytes",
- buflen, written);
- *sent = written;
+ buflen, nwritten);
+ *sent = (nwritten >= 0)? (size_t)nwritten : 0;
return result;
}
- if(buflen > (data->set.upload_buffer_size - 10))
- /* don't do more than this in one go */
- buflen = data->set.upload_buffer_size - 10;
+ /* Not RAW mode, but we do the frame encoding */
+ result = ws_flush(data, ws, FALSE);
+ if(result)
+ return result;
+
+ /* TODO: the current design does not allow partial writes, afaict.
+ * It is not clear how the application is supposed to react. */
+ space = Curl_bufq_space(&ws->sendbuf);
+ DEBUGF(infof(data, "curl_ws_send(len=%zu), sendbuf len=%zu space %zu",
+ buflen, Curl_bufq_len(&ws->sendbuf), space));
+ if(space < 14)
+ return CURLE_AGAIN;
if(sendflags & CURLWS_OFFSET) {
if(totalsize) {
/* a frame series 'totalsize' bytes big, this is the first */
- headlen = ws_packethead(data, totalsize, sendflags);
- wsp->sleft = totalsize - buflen;
+ n = ws_enc_write_head(data, &ws->enc, sendflags, totalsize,
+ &ws->sendbuf, &result);
+ if(n < 0)
+ return result;
}
else {
- headlen = 0;
- if((curl_off_t)buflen > wsp->sleft) {
- infof(data, "WS: unaligned frame size (sending %zu instead of %zu)",
- buflen, wsp->sleft);
- wsp->sleft = 0;
+ if((curl_off_t)buflen > ws->enc.payload_remain) {
+ infof(data, "WS: unaligned frame size (sending %zu instead of %zd)",
+ buflen, ws->enc.payload_remain);
}
- else
- wsp->sleft -= buflen;
}
}
- else
- headlen = ws_packethead(data, buflen, sendflags);
-
- /* headlen is the size of the frame header */
- out = data->state.ulbuf;
- if(buflen)
- /* for PING and PONG etc there might not be a payload */
- ws_xor(data, buffer, (unsigned char *)out + headlen, buflen);
+ else if(!ws->enc.payload_remain) {
+ n = ws_enc_write_head(data, &ws->enc, sendflags, (curl_off_t)buflen,
+ &ws->sendbuf, &result);
+ if(n < 0)
+ return result;
+ }
- if(data->set.connect_only)
- result = Curl_senddata(data, out, buflen + headlen, &written);
- else
- result = Curl_write(data, data->conn->writesockfd, out,
- buflen + headlen, &written);
+ n = ws_enc_write_payload(&ws->enc, data,
+ buffer, buflen, &ws->sendbuf, &result);
+ if(n < 0)
+ return result;
- infof(data, "WS: wanted to send %zu bytes, sent %zu bytes",
- headlen + buflen, written);
+ *sent = (size_t)n;
+ return ws_flush(data, ws, TRUE);
+}
- if(!result) {
- /* the *sent number only counts "payload", excluding the header */
- if((size_t)written > headlen)
- *sent = written - headlen;
- else
- *sent = 0;
+static void ws_free(struct connectdata *conn)
+{
+ if(conn && conn->proto.ws) {
+ Curl_bufq_free(&conn->proto.ws->recvbuf);
+ Curl_bufq_free(&conn->proto.ws->sendbuf);
+ Curl_safefree(conn->proto.ws);
}
- return result;
}
void Curl_ws_done(struct Curl_easy *data)
{
- struct websocket *wsp = &data->req.p.http->ws;
- DEBUGASSERT(wsp);
- Curl_dyn_free(&wsp->buf);
+ (void)data;
}
CURLcode Curl_ws_disconnect(struct Curl_easy *data,
struct connectdata *conn,
bool dead_connection)
{
- struct ws_conn *wsc = &conn->proto.ws;
(void)data;
(void)dead_connection;
- Curl_dyn_free(&wsc->early);
+ ws_free(conn);
return CURLE_OK;
}
@@ -753,9 +1086,9 @@ CURL_EXTERN struct curl_ws_frame *curl_ws_meta(struct Curl_easy *data) {
/* we only return something for websocket, called from within the callback
when not using raw mode */
- if(GOOD_EASY_HANDLE(data) && Curl_is_in_callback(data) && data->req.p.http &&
- !data->set.ws_raw_mode)
- return &data->req.p.http->ws.frame;
+ if(GOOD_EASY_HANDLE(data) && Curl_is_in_callback(data) && data->conn &&
+ data->conn->proto.ws && !data->set.ws_raw_mode)
+ return &data->conn->proto.ws->frame;
return NULL;
}
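With this rewrite, curl_ws_send() encodes the frame into ws->sendbuf and drains it through ws_flush(); when fewer than 14 bytes of buffer space remain it returns CURLE_AGAIN and the caller is expected to retry. A rough application-side usage sketch of the public API under that behaviour follows; the URL is a placeholder and the retry loop is illustrative, not taken from the curl sources.

  #include <curl/curl.h>
  #include <stdio.h>
  #include <string.h>

  /* Sketch: send one TEXT frame over a connect-only websocket handle
   * and retry when curl_ws_send() reports CURLE_AGAIN. */
  int main(void)
  {
    CURL *curl = curl_easy_init();
    CURLcode rc = CURLE_OK;
    const char *msg = "hello";
    size_t sent = 0;

    if(!curl)
      return 1;
    curl_easy_setopt(curl, CURLOPT_URL, "wss://example.com/chat");
    curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 2L); /* websocket mode */
    rc = curl_easy_perform(curl);
    if(!rc) {
      do {
        /* a real application would wait for the socket to become
         * writable instead of spinning on CURLE_AGAIN */
        rc = curl_ws_send(curl, msg, strlen(msg), &sent, 0, CURLWS_TEXT);
      } while(rc == CURLE_AGAIN);
    }
    fprintf(stderr, "curl_ws_send: %s (sent %zu bytes)\n",
            curl_easy_strerror(rc), sent);
    curl_easy_cleanup(curl);
    return (int)rc;
  }
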
diff --git a/libs/libcurl/src/ws.h b/libs/libcurl/src/ws.h index 22ae51f2bb..20b9103ac6 100644 --- a/libs/libcurl/src/ws.h +++ b/libs/libcurl/src/ws.h @@ -33,28 +33,44 @@ #define REQTYPE struct dynbuf
#endif
-/* this is the largest single fragment size we support */
-#define MAX_WS_SIZE 65535
+/* a client-side WS frame decoder, parsing frame headers and
+ * payload, keeping track of current position and stats */
+enum ws_dec_state {
+ WS_DEC_INIT,
+ WS_DEC_HEAD,
+ WS_DEC_PAYLOAD
+};
-/* part of 'struct HTTP', when used in the 'struct SingleRequest' in the
- Curl_easy struct */
-struct websocket {
- bool contfragment; /* set TRUE if the previous fragment sent was not final */
- unsigned char mask[4]; /* 32 bit mask for this connection */
- struct Curl_easy *data; /* used for write callback handling */
- struct dynbuf buf;
- size_t usedbuf; /* number of leading bytes in 'buf' the most recent complete
- websocket frame uses */
- struct curl_ws_frame frame; /* the struct used for frame state */
- size_t stillblen; /* number of bytes left in the buffer to deliver in
- the next curl_ws_recv() call */
- const char *stillb; /* the stillblen pending bytes are here */
- curl_off_t sleft; /* outstanding number of payload bytes left to send */
+struct ws_decoder {
+ int frame_age; /* zero */
+ int frame_flags; /* See the CURLWS_* defines */
+ curl_off_t payload_offset; /* the offset parsing is at */
+ curl_off_t payload_len;
+ unsigned char head[10];
+ int head_len, head_total;
+ enum ws_dec_state state;
+};
+
+/* a client-side WS frame encoder, generating frame headers and
+ * converting payloads, tracking remaining data in current frame */
+struct ws_encoder {
+ curl_off_t payload_len; /* payload length of current frame */
+ curl_off_t payload_remain; /* remaining payload of current */
unsigned int xori; /* xor index */
+ unsigned char mask[4]; /* 32 bit mask for this connection */
+ unsigned char firstbyte; /* first byte of frame we encode */
+ bool contfragment; /* set TRUE if the previous fragment sent was not final */
};
-struct ws_conn {
- struct dynbuf early; /* data already read when switching to ws */
+/* A websocket connection with an encoder and a decoder that handle frames
+ * and keep track of their boundaries. */
+struct websocket {
+ struct Curl_easy *data; /* used for write callback handling */
+ struct ws_decoder dec; /* decoder of incoming ws frames */
+ struct ws_encoder enc; /* encoder of outgoing ws frames */
+ struct bufq recvbuf; /* raw data from the server */
+ struct bufq sendbuf; /* raw data to be sent to the server */
+ struct curl_ws_frame frame; /* the current WS FRAME received */
};
CURLcode Curl_ws_request(struct Curl_easy *data, REQTYPE *req);
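The new ws_encoder keeps the 4-byte masking key together with the xori index so that payload masking can continue across multiple write chunks of the same frame. A small illustrative sketch of that per-byte XOR masking follows; the struct and function names are made up for the example and this is not the curl implementation.

  #include <stddef.h>

  /* Sketch: client-side payload masking with a key index that
   * survives across calls, mirroring the role of mask[4]/xori. */
  struct mask_ctx {
    unsigned char mask[4]; /* masking key also sent in the frame header */
    unsigned int xori;     /* next index into mask[], kept across chunks */
  };

  static void mask_payload(struct mask_ctx *ctx, const unsigned char *in,
                           unsigned char *out, size_t len)
  {
    size_t i;
    for(i = 0; i < len; i++) {
      out[i] = in[i] ^ ctx->mask[ctx->xori];
      ctx->xori = (ctx->xori + 1) & 3; /* wrap after the 4th key byte */
    }
  }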