diff --git a/.cargo-vendor/async-channel/.cargo-checksum.json b/.cargo-vendor/async-channel/.cargo-checksum.json new file mode 100644 index 0000000000..00e408c26f --- /dev/null +++ b/.cargo-vendor/async-channel/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"66b58b243aec6330c358a94ac4f4cc7ce818f5cad43f66d7202d28f41b935b4a","Cargo.toml":"61d9483ba8c1e45bbecb46f57199b4703f5914152bbf6237a8e44034cb2cdd0e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"a9ae50c2a6df06ff476c369d23553a7bbcb4c47f25a3353d168e05289acbb4ed","src/lib.rs":"709b1282da2ae839944e8a9243db1c3ab7052818fae7571a24e76aec1746cf52","tests/bounded.rs":"8a23685f6a1cbf1ac8b838b0aedde43ce2169c3180d09d4c73457f0eb22331d4","tests/unbounded.rs":"1bed2b00f11495247f4a5084f553016bf4cbf6136ad159a776dcbd9a8bc29f3c"},"package":"89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"} \ No newline at end of file diff --git a/.cargo-vendor/async-channel/CHANGELOG.md b/.cargo-vendor/async-channel/CHANGELOG.md new file mode 100644 index 0000000000..610bdae0b9 --- /dev/null +++ b/.cargo-vendor/async-channel/CHANGELOG.md @@ -0,0 +1,98 @@ +# Version 2.3.1 + +- Use the correct version of `async-channel` in our manifest. (#93) + +# Version 2.3.0 + +- Add `force_send` for sending items over the channel that displace other items. (#89) + +# Version 2.2.1 + +- Fix the CI badge in the `crates.io` page. (#84) + +# Version 2.2.0 + +- Bump `event-listener` to v5.0.0. (#79) +- Bump MSRV to 1.60. (#80) + +# Version 2.1.1 + +- Bump `event-listener` to v4.0.0. (#73) + +# Version 2.1.0 + +- Bump `futures-lite` to its latest version. (#70) + +# Version 2.0.0 + +- **Breaking:** Make `Send`, `Recv` and `Receiver` `!Unpin`. This enables more efficient event notification strategies. (#59) +- **Breaking:** Add an `std` enabled-by-default feature that enables parts of the API that require `std`. 
(#59) +- Add support for the `wasm32` target. (#67) + +# Version 1.9.0 + +- Fix a bug where `WeakSender/WeakReceiver` could incorrectly return `Some` even if the channel is already closed (#60) +- Remove the unnecessary `T: Clone` bound from `WeakSender/WeakReceiver`'s `Clone` implementation (#62) + +# Version 1.8.0 + +- Prevent deadlock if sender/receiver is forgotten (#49) +- Add weak sender and receiver (#51) +- Update `concurrent-queue` to v2 (#50) + +# Version 1.7.1 + +- Work around MSRV increase due to a cargo bug. + +# Version 1.7.0 + +- Add `send_blocking` and `recv_blocking` (#47) + +# Version 1.6.1 + +- Make `send` return `Send` (#34) + +# Version 1.6.0 + +- Added `Send` and `Recv` futures (#33) +- impl `FusedStream` for `Receiver` (#30) + +# Version 1.5.1 + +- Fix typos in the docs. + +# Version 1.5.0 + +- Add `receiver_count()` and `sender_count()`. + +# Version 1.4.2 + +- Fix a bug that would sometime cause 100% CPU usage. + +# Version 1.4.1 + +- Update dependencies. + +# Version 1.4.0 + +- Update dependencies. + +# Version 1.3.0 + +- Add `Sender::is_closed()` and `Receiver::is_closed()`. + +# Version 1.2.0 + +- Add `Sender::close()` and `Receiver::close()`. + +# Version 1.1.1 + +- Replace `usize::MAX` with `std::usize::MAX`. + +# Version 1.1.0 + +- Add methods to error types. + +# Version 1.0.0 + +- Initial version diff --git a/.cargo-vendor/async-channel/Cargo.toml b/.cargo-vendor/async-channel/Cargo.toml new file mode 100644 index 0000000000..49fffb45f3 --- /dev/null +++ b/.cargo-vendor/async-channel/Cargo.toml @@ -0,0 +1,64 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). 
+# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "async-channel" +version = "2.3.1" +authors = ["Stjepan Glavina "] +exclude = ["/.*"] +description = "Async multi-producer multi-consumer channel" +readme = "README.md" +keywords = [ + "mpmc", + "mpsc", + "spmc", + "chan", + "futures", +] +categories = [ + "asynchronous", + "concurrency", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/async-channel" + +[dependencies.concurrent-queue] +version = "2.5" +default-features = false + +[dependencies.event-listener-strategy] +version = "0.5.2" +default-features = false + +[dependencies.futures-core] +version = "0.3.5" +default-features = false + +[dependencies.pin-project-lite] +version = "0.2.11" + +[dev-dependencies.easy-parallel] +version = "3" + +[dev-dependencies.futures-lite] +version = "2" + +[features] +default = ["std"] +std = [ + "concurrent-queue/std", + "event-listener-strategy/std", +] + +[target."cfg(target_family = \"wasm\")".dev-dependencies.wasm-bindgen-test] +version = "0.3.37" diff --git a/.cargo-vendor/async-channel/LICENSE-APACHE b/.cargo-vendor/async-channel/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/.cargo-vendor/async-channel/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo-vendor/async-channel/LICENSE-MIT b/.cargo-vendor/async-channel/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/.cargo-vendor/async-channel/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo-vendor/async-channel/README.md b/.cargo-vendor/async-channel/README.md new file mode 100644 index 0000000000..8809b27c46 --- /dev/null +++ b/.cargo-vendor/async-channel/README.md @@ -0,0 +1,51 @@ +# async-channel + +[![Build](https://github.com/smol-rs/async-channel/actions/workflows/ci.yml/badge.svg)]( +https://github.com/smol-rs/async-channel/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/async-channel) +[![Cargo](https://img.shields.io/crates/v/async-channel.svg)]( +https://crates.io/crates/async-channel) +[![Documentation](https://docs.rs/async-channel/badge.svg)]( +https://docs.rs/async-channel) + +An async multi-producer multi-consumer channel, where each message can be received by only +one of all existing consumers. + +There are two kinds of channels: + +1. Bounded channel with limited capacity. +2. Unbounded channel with unlimited capacity. + +A channel has the `Sender` and `Receiver` side. Both sides are cloneable and can be shared +among multiple threads. + +When all `Sender`s or all `Receiver`s are dropped, the channel becomes closed. When a +channel is closed, no more messages can be sent, but remaining messages can still be received. + +The channel can also be closed manually by calling `Sender::close()` or +`Receiver::close()`. + +## Examples + +```rust +let (s, r) = async_channel::unbounded(); + +assert_eq!(s.send("Hello").await, Ok(())); +assert_eq!(r.recv().await, Ok("Hello")); +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. 
+ +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/.cargo-vendor/async-channel/src/lib.rs b/.cargo-vendor/async-channel/src/lib.rs new file mode 100644 index 0000000000..245a2166d7 --- /dev/null +++ b/.cargo-vendor/async-channel/src/lib.rs @@ -0,0 +1,1262 @@ +//! An async multi-producer multi-consumer channel, where each message can be received by only +//! one of all existing consumers. +//! +//! There are two kinds of channels: +//! +//! 1. [Bounded][`bounded()`] channel with limited capacity. +//! 2. [Unbounded][`unbounded()`] channel with unlimited capacity. +//! +//! A channel has the [`Sender`] and [`Receiver`] side. Both sides are cloneable and can be shared +//! among multiple threads. +//! +//! When all [`Sender`]s or all [`Receiver`]s are dropped, the channel becomes closed. When a +//! channel is closed, no more messages can be sent, but remaining messages can still be received. +//! +//! The channel can also be closed manually by calling [`Sender::close()`] or +//! [`Receiver::close()`]. +//! +//! # Examples +//! +//! ``` +//! # futures_lite::future::block_on(async { +//! let (s, r) = async_channel::unbounded(); +//! +//! assert_eq!(s.send("Hello").await, Ok(())); +//! assert_eq!(r.recv().await, Ok("Hello")); +//! # }); +//! 
``` + +#![cfg_attr(not(feature = "std"), no_std)] +#![forbid(unsafe_code)] +#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +extern crate alloc; + +use core::fmt; +use core::future::Future; +use core::marker::PhantomPinned; +use core::pin::Pin; +use core::sync::atomic::{AtomicUsize, Ordering}; +use core::task::{Context, Poll}; + +use alloc::sync::Arc; + +use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError}; +use event_listener_strategy::{ + easy_wrapper, + event_listener::{Event, EventListener}, + EventListenerFuture, Strategy, +}; +use futures_core::ready; +use futures_core::stream::Stream; +use pin_project_lite::pin_project; + +struct Channel { + /// Inner message queue. + queue: ConcurrentQueue, + + /// Send operations waiting while the channel is full. + send_ops: Event, + + /// Receive operations waiting while the channel is empty and not closed. + recv_ops: Event, + + /// Stream operations while the channel is empty and not closed. + stream_ops: Event, + + /// The number of currently active `Sender`s. + sender_count: AtomicUsize, + + /// The number of currently active `Receivers`s. + receiver_count: AtomicUsize, +} + +impl Channel { + /// Closes the channel and notifies all blocked operations. + /// + /// Returns `true` if this call has closed the channel and it was not closed already. + fn close(&self) -> bool { + if self.queue.close() { + // Notify all send operations. + self.send_ops.notify(usize::MAX); + + // Notify all receive and stream operations. + self.recv_ops.notify(usize::MAX); + self.stream_ops.notify(usize::MAX); + + true + } else { + false + } + } +} + +/// Creates a bounded channel. 
+/// +/// The created channel has space to hold at most `cap` messages at a time. +/// +/// # Panics +/// +/// Capacity must be a positive number. If `cap` is zero, this function will panic. +/// +/// # Examples +/// +/// ``` +/// # futures_lite::future::block_on(async { +/// use async_channel::{bounded, TryRecvError, TrySendError}; +/// +/// let (s, r) = bounded(1); +/// +/// assert_eq!(s.send(10).await, Ok(())); +/// assert_eq!(s.try_send(20), Err(TrySendError::Full(20))); +/// +/// assert_eq!(r.recv().await, Ok(10)); +/// assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +/// # }); +/// ``` +pub fn bounded(cap: usize) -> (Sender, Receiver) { + assert!(cap > 0, "capacity cannot be zero"); + + let channel = Arc::new(Channel { + queue: ConcurrentQueue::bounded(cap), + send_ops: Event::new(), + recv_ops: Event::new(), + stream_ops: Event::new(), + sender_count: AtomicUsize::new(1), + receiver_count: AtomicUsize::new(1), + }); + + let s = Sender { + channel: channel.clone(), + }; + let r = Receiver { + listener: None, + channel, + _pin: PhantomPinned, + }; + (s, r) +} + +/// Creates an unbounded channel. +/// +/// The created channel can hold an unlimited number of messages. 
+/// +/// # Examples +/// +/// ``` +/// # futures_lite::future::block_on(async { +/// use async_channel::{unbounded, TryRecvError}; +/// +/// let (s, r) = unbounded(); +/// +/// assert_eq!(s.send(10).await, Ok(())); +/// assert_eq!(s.send(20).await, Ok(())); +/// +/// assert_eq!(r.recv().await, Ok(10)); +/// assert_eq!(r.recv().await, Ok(20)); +/// assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +/// # }); +/// ``` +pub fn unbounded() -> (Sender, Receiver) { + let channel = Arc::new(Channel { + queue: ConcurrentQueue::unbounded(), + send_ops: Event::new(), + recv_ops: Event::new(), + stream_ops: Event::new(), + sender_count: AtomicUsize::new(1), + receiver_count: AtomicUsize::new(1), + }); + + let s = Sender { + channel: channel.clone(), + }; + let r = Receiver { + listener: None, + channel, + _pin: PhantomPinned, + }; + (s, r) +} + +/// The sending side of a channel. +/// +/// Senders can be cloned and shared among threads. When all senders associated with a channel are +/// dropped, the channel becomes closed. +/// +/// The channel can also be closed manually by calling [`Sender::close()`]. +pub struct Sender { + /// Inner channel state. + channel: Arc>, +} + +impl Sender { + /// Attempts to send a message into the channel. + /// + /// If the channel is full or closed, this method returns an error. + /// + /// # Examples + /// + /// ``` + /// use async_channel::{bounded, TrySendError}; + /// + /// let (s, r) = bounded(1); + /// + /// assert_eq!(s.try_send(1), Ok(())); + /// assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); + /// + /// drop(r); + /// assert_eq!(s.try_send(3), Err(TrySendError::Closed(3))); + /// ``` + pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { + match self.channel.queue.push(msg) { + Ok(()) => { + // Notify a blocked receive operation. If the notified operation gets canceled, + // it will notify another blocked receive operation. + self.channel.recv_ops.notify_additional(1); + + // Notify all blocked streams. 
+ self.channel.stream_ops.notify(usize::MAX); + + Ok(()) + } + Err(PushError::Full(msg)) => Err(TrySendError::Full(msg)), + Err(PushError::Closed(msg)) => Err(TrySendError::Closed(msg)), + } + } + + /// Sends a message into the channel. + /// + /// If the channel is full, this method waits until there is space for a message. + /// + /// If the channel is closed, this method returns an error. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{unbounded, SendError}; + /// + /// let (s, r) = unbounded(); + /// + /// assert_eq!(s.send(1).await, Ok(())); + /// drop(r); + /// assert_eq!(s.send(2).await, Err(SendError(2))); + /// # }); + /// ``` + pub fn send(&self, msg: T) -> Send<'_, T> { + Send::_new(SendInner { + sender: self, + msg: Some(msg), + listener: None, + _pin: PhantomPinned, + }) + } + + /// Sends a message into this channel using the blocking strategy. + /// + /// If the channel is full, this method will block until there is room. + /// If the channel is closed, this method returns an error. + /// + /// # Blocking + /// + /// Rather than using asynchronous waiting, like the [`send`](Self::send) method, + /// this method will block the current thread until the message is sent. + /// + /// This method should not be used in an asynchronous context. It is intended + /// to be used such that a channel can be used in both asynchronous and synchronous contexts. + /// Calling this method in an asynchronous context may result in deadlocks. + /// + /// # Examples + /// + /// ``` + /// use async_channel::{unbounded, SendError}; + /// + /// let (s, r) = unbounded(); + /// + /// assert_eq!(s.send_blocking(1), Ok(())); + /// drop(r); + /// assert_eq!(s.send_blocking(2), Err(SendError(2))); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + pub fn send_blocking(&self, msg: T) -> Result<(), SendError> { + self.send(msg).wait() + } + + /// Forcefully push a message into this channel. 
+ /// + /// If the channel is full, this method will replace an existing message in the + /// channel and return it as `Ok(Some(value))`. If the channel is closed, this + /// method will return an error. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{bounded, SendError}; + /// + /// let (s, r) = bounded(3); + /// + /// assert_eq!(s.send(1).await, Ok(())); + /// assert_eq!(s.send(2).await, Ok(())); + /// assert_eq!(s.force_send(3), Ok(None)); + /// assert_eq!(s.force_send(4), Ok(Some(1))); + /// + /// assert_eq!(r.recv().await, Ok(2)); + /// assert_eq!(r.recv().await, Ok(3)); + /// assert_eq!(r.recv().await, Ok(4)); + /// # }); + /// ``` + pub fn force_send(&self, msg: T) -> Result, SendError> { + match self.channel.queue.force_push(msg) { + Ok(backlog) => { + // Notify a blocked receive operation. If the notified operation gets canceled, + // it will notify another blocked receive operation. + self.channel.recv_ops.notify_additional(1); + + // Notify all blocked streams. + self.channel.stream_ops.notify(usize::MAX); + + Ok(backlog) + } + + Err(ForcePushError(reject)) => Err(SendError(reject)), + } + } + + /// Closes the channel. + /// + /// Returns `true` if this call has closed the channel and it was not closed already. + /// + /// The remaining messages can still be received. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{unbounded, RecvError}; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(s.send(1).await, Ok(())); + /// assert!(s.close()); + /// + /// assert_eq!(r.recv().await, Ok(1)); + /// assert_eq!(r.recv().await, Err(RecvError)); + /// # }); + /// ``` + pub fn close(&self) -> bool { + self.channel.close() + } + + /// Returns `true` if the channel is closed. 
+ /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{unbounded, RecvError}; + /// + /// let (s, r) = unbounded::<()>(); + /// assert!(!s.is_closed()); + /// + /// drop(r); + /// assert!(s.is_closed()); + /// # }); + /// ``` + pub fn is_closed(&self) -> bool { + self.channel.queue.is_closed() + } + + /// Returns `true` if the channel is empty. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// + /// assert!(s.is_empty()); + /// s.send(1).await; + /// assert!(!s.is_empty()); + /// # }); + /// ``` + pub fn is_empty(&self) -> bool { + self.channel.queue.is_empty() + } + + /// Returns `true` if the channel is full. + /// + /// Unbounded channels are never full. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::bounded; + /// + /// let (s, r) = bounded(1); + /// + /// assert!(!s.is_full()); + /// s.send(1).await; + /// assert!(s.is_full()); + /// # }); + /// ``` + pub fn is_full(&self) -> bool { + self.channel.queue.is_full() + } + + /// Returns the number of messages in the channel. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(s.len(), 0); + /// + /// s.send(1).await; + /// s.send(2).await; + /// assert_eq!(s.len(), 2); + /// # }); + /// ``` + pub fn len(&self) -> usize { + self.channel.queue.len() + } + + /// Returns the channel capacity if it's bounded. 
+ /// + /// # Examples + /// + /// ``` + /// use async_channel::{bounded, unbounded}; + /// + /// let (s, r) = bounded::(5); + /// assert_eq!(s.capacity(), Some(5)); + /// + /// let (s, r) = unbounded::(); + /// assert_eq!(s.capacity(), None); + /// ``` + pub fn capacity(&self) -> Option { + self.channel.queue.capacity() + } + + /// Returns the number of receivers for the channel. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded::<()>(); + /// assert_eq!(s.receiver_count(), 1); + /// + /// let r2 = r.clone(); + /// assert_eq!(s.receiver_count(), 2); + /// # }); + /// ``` + pub fn receiver_count(&self) -> usize { + self.channel.receiver_count.load(Ordering::SeqCst) + } + + /// Returns the number of senders for the channel. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded::<()>(); + /// assert_eq!(s.sender_count(), 1); + /// + /// let s2 = s.clone(); + /// assert_eq!(s.sender_count(), 2); + /// # }); + /// ``` + pub fn sender_count(&self) -> usize { + self.channel.sender_count.load(Ordering::SeqCst) + } + + /// Downgrade the sender to a weak reference. + pub fn downgrade(&self) -> WeakSender { + WeakSender { + channel: self.channel.clone(), + } + } +} + +impl Drop for Sender { + fn drop(&mut self) { + // Decrement the sender count and close the channel if it drops down to zero. + if self.channel.sender_count.fetch_sub(1, Ordering::AcqRel) == 1 { + self.channel.close(); + } + } +} + +impl fmt::Debug for Sender { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Sender {{ .. }}") + } +} + +impl Clone for Sender { + fn clone(&self) -> Sender { + let count = self.channel.sender_count.fetch_add(1, Ordering::Relaxed); + + // Make sure the count never overflows, even if lots of sender clones are leaked. 
+ if count > usize::MAX / 2 { + abort(); + } + + Sender { + channel: self.channel.clone(), + } + } +} + +pin_project! { + /// The receiving side of a channel. + /// + /// Receivers can be cloned and shared among threads. When all receivers associated with a channel + /// are dropped, the channel becomes closed. + /// + /// The channel can also be closed manually by calling [`Receiver::close()`]. + /// + /// Receivers implement the [`Stream`] trait. + pub struct Receiver { + // Inner channel state. + channel: Arc>, + + // Listens for a send or close event to unblock this stream. + listener: Option, + + // Keeping this type `!Unpin` enables future optimizations. + #[pin] + _pin: PhantomPinned + } + + impl PinnedDrop for Receiver { + fn drop(this: Pin<&mut Self>) { + let this = this.project(); + + // Decrement the receiver count and close the channel if it drops down to zero. + if this.channel.receiver_count.fetch_sub(1, Ordering::AcqRel) == 1 { + this.channel.close(); + } + } + } +} + +impl Receiver { + /// Attempts to receive a message from the channel. + /// + /// If the channel is empty, or empty and closed, this method returns an error. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{unbounded, TryRecvError}; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(s.send(1).await, Ok(())); + /// + /// assert_eq!(r.try_recv(), Ok(1)); + /// assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + /// + /// drop(s); + /// assert_eq!(r.try_recv(), Err(TryRecvError::Closed)); + /// # }); + /// ``` + pub fn try_recv(&self) -> Result { + match self.channel.queue.pop() { + Ok(msg) => { + // Notify a blocked send operation. If the notified operation gets canceled, it + // will notify another blocked send operation. 
+ self.channel.send_ops.notify_additional(1); + + Ok(msg) + } + Err(PopError::Empty) => Err(TryRecvError::Empty), + Err(PopError::Closed) => Err(TryRecvError::Closed), + } + } + + /// Receives a message from the channel. + /// + /// If the channel is empty, this method waits until there is a message. + /// + /// If the channel is closed, this method receives a message or returns an error if there are + /// no more messages. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{unbounded, RecvError}; + /// + /// let (s, r) = unbounded(); + /// + /// assert_eq!(s.send(1).await, Ok(())); + /// drop(s); + /// + /// assert_eq!(r.recv().await, Ok(1)); + /// assert_eq!(r.recv().await, Err(RecvError)); + /// # }); + /// ``` + pub fn recv(&self) -> Recv<'_, T> { + Recv::_new(RecvInner { + receiver: self, + listener: None, + _pin: PhantomPinned, + }) + } + + /// Receives a message from the channel using the blocking strategy. + /// + /// If the channel is empty, this method waits until there is a message. + /// If the channel is closed, this method receives a message or returns an error if there are + /// no more messages. + /// + /// # Blocking + /// + /// Rather than using asynchronous waiting, like the [`recv`](Self::recv) method, + /// this method will block the current thread until the message is sent. + /// + /// This method should not be used in an asynchronous context. It is intended + /// to be used such that a channel can be used in both asynchronous and synchronous contexts. + /// Calling this method in an asynchronous context may result in deadlocks. 
+ /// + /// # Examples + /// + /// ``` + /// use async_channel::{unbounded, RecvError}; + /// + /// let (s, r) = unbounded(); + /// + /// assert_eq!(s.send_blocking(1), Ok(())); + /// drop(s); + /// + /// assert_eq!(r.recv_blocking(), Ok(1)); + /// assert_eq!(r.recv_blocking(), Err(RecvError)); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + pub fn recv_blocking(&self) -> Result { + self.recv().wait() + } + + /// Closes the channel. + /// + /// Returns `true` if this call has closed the channel and it was not closed already. + /// + /// The remaining messages can still be received. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{unbounded, RecvError}; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(s.send(1).await, Ok(())); + /// + /// assert!(r.close()); + /// assert_eq!(r.recv().await, Ok(1)); + /// assert_eq!(r.recv().await, Err(RecvError)); + /// # }); + /// ``` + pub fn close(&self) -> bool { + self.channel.close() + } + + /// Returns `true` if the channel is closed. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::{unbounded, RecvError}; + /// + /// let (s, r) = unbounded::<()>(); + /// assert!(!r.is_closed()); + /// + /// drop(s); + /// assert!(r.is_closed()); + /// # }); + /// ``` + pub fn is_closed(&self) -> bool { + self.channel.queue.is_closed() + } + + /// Returns `true` if the channel is empty. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// + /// assert!(s.is_empty()); + /// s.send(1).await; + /// assert!(!s.is_empty()); + /// # }); + /// ``` + pub fn is_empty(&self) -> bool { + self.channel.queue.is_empty() + } + + /// Returns `true` if the channel is full. + /// + /// Unbounded channels are never full. 
+ /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::bounded; + /// + /// let (s, r) = bounded(1); + /// + /// assert!(!r.is_full()); + /// s.send(1).await; + /// assert!(r.is_full()); + /// # }); + /// ``` + pub fn is_full(&self) -> bool { + self.channel.queue.is_full() + } + + /// Returns the number of messages in the channel. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded(); + /// assert_eq!(r.len(), 0); + /// + /// s.send(1).await; + /// s.send(2).await; + /// assert_eq!(r.len(), 2); + /// # }); + /// ``` + pub fn len(&self) -> usize { + self.channel.queue.len() + } + + /// Returns the channel capacity if it's bounded. + /// + /// # Examples + /// + /// ``` + /// use async_channel::{bounded, unbounded}; + /// + /// let (s, r) = bounded::(5); + /// assert_eq!(r.capacity(), Some(5)); + /// + /// let (s, r) = unbounded::(); + /// assert_eq!(r.capacity(), None); + /// ``` + pub fn capacity(&self) -> Option { + self.channel.queue.capacity() + } + + /// Returns the number of receivers for the channel. + /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded::<()>(); + /// assert_eq!(r.receiver_count(), 1); + /// + /// let r2 = r.clone(); + /// assert_eq!(r.receiver_count(), 2); + /// # }); + /// ``` + pub fn receiver_count(&self) -> usize { + self.channel.receiver_count.load(Ordering::SeqCst) + } + + /// Returns the number of senders for the channel. 
+ /// + /// # Examples + /// + /// ``` + /// # futures_lite::future::block_on(async { + /// use async_channel::unbounded; + /// + /// let (s, r) = unbounded::<()>(); + /// assert_eq!(r.sender_count(), 1); + /// + /// let s2 = s.clone(); + /// assert_eq!(r.sender_count(), 2); + /// # }); + /// ``` + pub fn sender_count(&self) -> usize { + self.channel.sender_count.load(Ordering::SeqCst) + } + + /// Downgrade the receiver to a weak reference. + pub fn downgrade(&self) -> WeakReceiver { + WeakReceiver { + channel: self.channel.clone(), + } + } +} + +impl fmt::Debug for Receiver { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Receiver {{ .. }}") + } +} + +impl Clone for Receiver { + fn clone(&self) -> Receiver { + let count = self.channel.receiver_count.fetch_add(1, Ordering::Relaxed); + + // Make sure the count never overflows, even if lots of receiver clones are leaked. + if count > usize::MAX / 2 { + abort(); + } + + Receiver { + channel: self.channel.clone(), + listener: None, + _pin: PhantomPinned, + } + } +} + +impl Stream for Receiver { + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + // If this stream is listening for events, first wait for a notification. + { + let this = self.as_mut().project(); + if let Some(listener) = this.listener.as_mut() { + ready!(Pin::new(listener).poll(cx)); + *this.listener = None; + } + } + + loop { + // Attempt to receive a message. + match self.try_recv() { + Ok(msg) => { + // The stream is not blocked on an event - drop the listener. + let this = self.as_mut().project(); + *this.listener = None; + return Poll::Ready(Some(msg)); + } + Err(TryRecvError::Closed) => { + // The stream is not blocked on an event - drop the listener. + let this = self.as_mut().project(); + *this.listener = None; + return Poll::Ready(None); + } + Err(TryRecvError::Empty) => {} + } + + // Receiving failed - now start listening for notifications or wait for one. 
+ let this = self.as_mut().project(); + if this.listener.is_some() { + // Go back to the outer loop to wait for a notification. + break; + } else { + *this.listener = Some(this.channel.stream_ops.listen()); + } + } + } + } +} + +impl futures_core::stream::FusedStream for Receiver { + fn is_terminated(&self) -> bool { + self.channel.queue.is_closed() && self.channel.queue.is_empty() + } +} + +/// A [`Sender`] that prevents the channel from not being closed. +/// +/// This is created through the [`Sender::downgrade`] method. In order to use it, it needs +/// to be upgraded into a [`Sender`] through the `upgrade` method. +pub struct WeakSender { + channel: Arc>, +} + +impl WeakSender { + /// Upgrade the [`WeakSender`] into a [`Sender`]. + pub fn upgrade(&self) -> Option> { + if self.channel.queue.is_closed() { + None + } else { + match self.channel.sender_count.fetch_update( + Ordering::Relaxed, + Ordering::Relaxed, + |count| if count == 0 { None } else { Some(count + 1) }, + ) { + Err(_) => None, + Ok(new_value) if new_value > usize::MAX / 2 => { + // Make sure the count never overflows, even if lots of sender clones are leaked. + abort(); + } + Ok(_) => Some(Sender { + channel: self.channel.clone(), + }), + } + } + } +} + +impl Clone for WeakSender { + fn clone(&self) -> Self { + WeakSender { + channel: self.channel.clone(), + } + } +} + +impl fmt::Debug for WeakSender { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "WeakSender {{ .. }}") + } +} + +/// A [`Receiver`] that prevents the channel from not being closed. +/// +/// This is created through the [`Receiver::downgrade`] method. In order to use it, it needs +/// to be upgraded into a [`Receiver`] through the `upgrade` method. +pub struct WeakReceiver { + channel: Arc>, +} + +impl WeakReceiver { + /// Upgrade the [`WeakReceiver`] into a [`Receiver`]. 
+ pub fn upgrade(&self) -> Option> { + if self.channel.queue.is_closed() { + None + } else { + match self.channel.receiver_count.fetch_update( + Ordering::Relaxed, + Ordering::Relaxed, + |count| if count == 0 { None } else { Some(count + 1) }, + ) { + Err(_) => None, + Ok(new_value) if new_value > usize::MAX / 2 => { + // Make sure the count never overflows, even if lots of receiver clones are leaked. + abort(); + } + Ok(_) => Some(Receiver { + channel: self.channel.clone(), + listener: None, + _pin: PhantomPinned, + }), + } + } + } +} + +impl Clone for WeakReceiver { + fn clone(&self) -> Self { + WeakReceiver { + channel: self.channel.clone(), + } + } +} + +impl fmt::Debug for WeakReceiver { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "WeakReceiver {{ .. }}") + } +} + +/// An error returned from [`Sender::send()`]. +/// +/// Received because the channel is closed. +#[derive(PartialEq, Eq, Clone, Copy)] +pub struct SendError(pub T); + +impl SendError { + /// Unwraps the message that couldn't be sent. + pub fn into_inner(self) -> T { + self.0 + } +} + +#[cfg(feature = "std")] +impl std::error::Error for SendError {} + +impl fmt::Debug for SendError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SendError(..)") + } +} + +impl fmt::Display for SendError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "sending into a closed channel") + } +} + +/// An error returned from [`Sender::try_send()`]. +#[derive(PartialEq, Eq, Clone, Copy)] +pub enum TrySendError { + /// The channel is full but not closed. + Full(T), + + /// The channel is closed. + Closed(T), +} + +impl TrySendError { + /// Unwraps the message that couldn't be sent. + pub fn into_inner(self) -> T { + match self { + TrySendError::Full(t) => t, + TrySendError::Closed(t) => t, + } + } + + /// Returns `true` if the channel is full but not closed. 
+ pub fn is_full(&self) -> bool { + match self { + TrySendError::Full(_) => true, + TrySendError::Closed(_) => false, + } + } + + /// Returns `true` if the channel is closed. + pub fn is_closed(&self) -> bool { + match self { + TrySendError::Full(_) => false, + TrySendError::Closed(_) => true, + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for TrySendError {} + +impl fmt::Debug for TrySendError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + TrySendError::Full(..) => write!(f, "Full(..)"), + TrySendError::Closed(..) => write!(f, "Closed(..)"), + } + } +} + +impl fmt::Display for TrySendError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + TrySendError::Full(..) => write!(f, "sending into a full channel"), + TrySendError::Closed(..) => write!(f, "sending into a closed channel"), + } + } +} + +/// An error returned from [`Receiver::recv()`]. +/// +/// Received because the channel is empty and closed. +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct RecvError; + +#[cfg(feature = "std")] +impl std::error::Error for RecvError {} + +impl fmt::Display for RecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "receiving from an empty and closed channel") + } +} + +/// An error returned from [`Receiver::try_recv()`]. +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub enum TryRecvError { + /// The channel is empty but not closed. + Empty, + + /// The channel is empty and closed. + Closed, +} + +impl TryRecvError { + /// Returns `true` if the channel is empty but not closed. + pub fn is_empty(&self) -> bool { + match self { + TryRecvError::Empty => true, + TryRecvError::Closed => false, + } + } + + /// Returns `true` if the channel is empty and closed. 
+ pub fn is_closed(&self) -> bool { + match self { + TryRecvError::Empty => false, + TryRecvError::Closed => true, + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for TryRecvError {} + +impl fmt::Display for TryRecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + TryRecvError::Empty => write!(f, "receiving from an empty channel"), + TryRecvError::Closed => write!(f, "receiving from an empty and closed channel"), + } + } +} + +easy_wrapper! { + /// A future returned by [`Sender::send()`]. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct Send<'a, T>(SendInner<'a, T> => Result<(), SendError>); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + pub(crate) wait(); +} + +pin_project! { + #[derive(Debug)] + #[project(!Unpin)] + struct SendInner<'a, T> { + // Reference to the original sender. + sender: &'a Sender, + + // The message to send. + msg: Option, + + // Listener waiting on the channel. + listener: Option, + + // Keeping this type `!Unpin` enables future optimizations. + #[pin] + _pin: PhantomPinned + } +} + +impl<'a, T> EventListenerFuture for SendInner<'a, T> { + type Output = Result<(), SendError>; + + /// Run this future with the given `Strategy`. + fn poll_with_strategy<'x, S: Strategy<'x>>( + self: Pin<&mut Self>, + strategy: &mut S, + context: &mut S::Context, + ) -> Poll>> { + let this = self.project(); + + loop { + let msg = this.msg.take().unwrap(); + // Attempt to send a message. + match this.sender.try_send(msg) { + Ok(()) => return Poll::Ready(Ok(())), + Err(TrySendError::Closed(msg)) => return Poll::Ready(Err(SendError(msg))), + Err(TrySendError::Full(m)) => *this.msg = Some(m), + } + + // Sending failed - now start listening for notifications or wait for one. 
+ if this.listener.is_some() { + // Poll using the given strategy + ready!(S::poll(strategy, &mut *this.listener, context)); + } else { + *this.listener = Some(this.sender.channel.send_ops.listen()); + } + } + } +} + +easy_wrapper! { + /// A future returned by [`Receiver::recv()`]. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct Recv<'a, T>(RecvInner<'a, T> => Result); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + pub(crate) wait(); +} + +pin_project! { + #[derive(Debug)] + #[project(!Unpin)] + struct RecvInner<'a, T> { + // Reference to the receiver. + receiver: &'a Receiver, + + // Listener waiting on the channel. + listener: Option, + + // Keeping this type `!Unpin` enables future optimizations. + #[pin] + _pin: PhantomPinned + } +} + +impl<'a, T> EventListenerFuture for RecvInner<'a, T> { + type Output = Result; + + /// Run this future with the given `Strategy`. + fn poll_with_strategy<'x, S: Strategy<'x>>( + self: Pin<&mut Self>, + strategy: &mut S, + cx: &mut S::Context, + ) -> Poll> { + let this = self.project(); + + loop { + // Attempt to receive a message. + match this.receiver.try_recv() { + Ok(msg) => return Poll::Ready(Ok(msg)), + Err(TryRecvError::Closed) => return Poll::Ready(Err(RecvError)), + Err(TryRecvError::Empty) => {} + } + + // Receiving failed - now start listening for notifications or wait for one. + if this.listener.is_some() { + // Poll using the given strategy + ready!(S::poll(strategy, &mut *this.listener, cx)); + } else { + *this.listener = Some(this.receiver.channel.recv_ops.listen()); + } + } + } +} + +#[cfg(feature = "std")] +use std::process::abort; + +#[cfg(not(feature = "std"))] +fn abort() -> ! 
{ + struct PanicOnDrop; + + impl Drop for PanicOnDrop { + fn drop(&mut self) { + panic!("Panic while panicking to abort"); + } + } + + let _bomb = PanicOnDrop; + panic!("Panic while panicking to abort") +} diff --git a/.cargo-vendor/async-channel/tests/bounded.rs b/.cargo-vendor/async-channel/tests/bounded.rs new file mode 100644 index 0000000000..460cb5523c --- /dev/null +++ b/.cargo-vendor/async-channel/tests/bounded.rs @@ -0,0 +1,529 @@ +#![allow(clippy::bool_assert_comparison, unused_imports)] + +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; + +use async_channel::{bounded, RecvError, SendError, TryRecvError, TrySendError}; +use easy_parallel::Parallel; +use futures_lite::{future, prelude::*}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[cfg(not(target_family = "wasm"))] +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke() { + let (s, r) = bounded(1); + + future::block_on(s.send(7)).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + future::block_on(s.send(8)).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +#[test] +fn smoke_blocking() { + let (s, r) = bounded(1); + + s.send_blocking(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + s.send_blocking(8).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + + future::block_on(s.send(9)).unwrap(); + assert_eq!(r.recv_blocking(), Ok(9)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[test] +fn capacity() { + for i in 1..10 { + let (s, r) = bounded::<()>(i); + assert_eq!(s.capacity(), Some(i)); + assert_eq!(r.capacity(), Some(i)); + } +} + +#[test] +fn len_empty_full() { + let (s, r) = bounded(2); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + 
assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + future::block_on(s.send(())).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); + + future::block_on(s.send(())).unwrap(); + + assert_eq!(s.len(), 2); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), true); + assert_eq!(r.len(), 2); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), true); + + future::block_on(r.recv()).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn try_recv() { + let (s, r) = bounded(100); + + Parallel::new() + .add(move || { + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + sleep(ms(1500)); + assert_eq!(r.try_recv(), Ok(7)); + sleep(ms(500)); + assert_eq!(r.try_recv(), Err(TryRecvError::Closed)); + }) + .add(move || { + sleep(ms(1000)); + future::block_on(s.send(7)).unwrap(); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn recv() { + let (s, r) = bounded(100); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(r.recv()), Ok(7)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + sleep(ms(1500)); + future::block_on(s.send(7)).unwrap(); + future::block_on(s.send(8)).unwrap(); + future::block_on(s.send(9)).unwrap(); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn try_send() { + let (s, r) = bounded(1); + + Parallel::new() + .add(move || { + assert_eq!(s.try_send(1), Ok(())); + assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); + sleep(ms(1500)); + assert_eq!(s.try_send(3), 
Ok(())); + sleep(ms(500)); + assert_eq!(s.try_send(4), Err(TrySendError::Closed(4))); + }) + .add(move || { + sleep(ms(1000)); + assert_eq!(r.try_recv(), Ok(1)); + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + assert_eq!(future::block_on(r.recv()), Ok(3)); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn send() { + let (s, r) = bounded(1); + + Parallel::new() + .add(|| { + future::block_on(s.send(7)).unwrap(); + sleep(ms(1000)); + future::block_on(s.send(8)).unwrap(); + sleep(ms(1000)); + future::block_on(s.send(9)).unwrap(); + sleep(ms(1000)); + future::block_on(s.send(10)).unwrap(); + }) + .add(|| { + sleep(ms(1500)); + assert_eq!(future::block_on(r.recv()), Ok(7)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn force_send() { + let (s, r) = bounded(1); + + Parallel::new() + .add(|| { + s.force_send(7).unwrap(); + sleep(ms(1000)); + s.force_send(8).unwrap(); + sleep(ms(1000)); + s.force_send(9).unwrap(); + sleep(ms(1000)); + s.force_send(10).unwrap(); + }) + .add(|| { + sleep(ms(1500)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + assert_eq!(future::block_on(r.recv()), Ok(10)); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn send_after_close() { + let (s, r) = bounded(100); + + future::block_on(s.send(1)).unwrap(); + future::block_on(s.send(2)).unwrap(); + future::block_on(s.send(3)).unwrap(); + + drop(r); + + assert_eq!(future::block_on(s.send(4)), Err(SendError(4))); + assert_eq!(s.try_send(5), Err(TrySendError::Closed(5))); + assert_eq!(future::block_on(s.send(6)), Err(SendError(6))); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn recv_after_close() { + let (s, r) = bounded(100); + + future::block_on(s.send(1)).unwrap(); + future::block_on(s.send(2)).unwrap(); + future::block_on(s.send(3)).unwrap(); + + drop(s); + + 
assert_eq!(future::block_on(r.recv()), Ok(1)); + assert_eq!(future::block_on(r.recv()), Ok(2)); + assert_eq!(future::block_on(r.recv()), Ok(3)); + assert_eq!(future::block_on(r.recv()), Err(RecvError)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn len() { + const COUNT: usize = 25_000; + const CAP: usize = 1000; + + let (s, r) = bounded(CAP); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for _ in 0..CAP / 10 { + for i in 0..50 { + future::block_on(s.send(i)).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for i in 0..50 { + future::block_on(r.recv()).unwrap(); + assert_eq!(r.len(), 50 - i - 1); + } + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for i in 0..CAP { + future::block_on(s.send(i)).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for _ in 0..CAP { + future::block_on(r.recv()).unwrap(); + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + assert_eq!(future::block_on(r.recv()), Ok(i)); + let len = r.len(); + assert!(len <= CAP); + } + }) + .add(|| { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + let len = s.len(); + assert!(len <= CAP); + } + }) + .run(); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); +} + +#[test] +fn receiver_count() { + let (s, r) = bounded::<()>(5); + let receiver_clones: Vec<_> = (0..20).map(|_| r.clone()).collect(); + + assert_eq!(s.receiver_count(), 21); + assert_eq!(r.receiver_count(), 21); + + drop(receiver_clones); + + assert_eq!(s.receiver_count(), 1); + assert_eq!(r.receiver_count(), 1); +} + +#[test] +fn sender_count() { + let (s, r) = bounded::<()>(5); + let sender_clones: Vec<_> = (0..20).map(|_| s.clone()).collect(); + + assert_eq!(s.sender_count(), 21); + assert_eq!(r.sender_count(), 21); + + drop(sender_clones); + + assert_eq!(s.receiver_count(), 1); + assert_eq!(r.receiver_count(), 1); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn close_wakes_sender() { + let (s, r) = bounded(1); + + 
Parallel::new() + .add(move || { + assert_eq!(future::block_on(s.send(())), Ok(())); + assert_eq!(future::block_on(s.send(())), Err(SendError(()))); + }) + .add(move || { + sleep(ms(1000)); + drop(r); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn close_wakes_receiver() { + let (s, r) = bounded::<()>(1); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + sleep(ms(1000)); + drop(s); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn forget_blocked_sender() { + let (s1, r) = bounded(2); + let s2 = s1.clone(); + + Parallel::new() + .add(move || { + assert!(future::block_on(s1.send(3)).is_ok()); + assert!(future::block_on(s1.send(7)).is_ok()); + let s1_fut = s1.send(13); + futures_lite::pin!(s1_fut); + // Poll but keep the future alive. + assert_eq!(future::block_on(future::poll_once(s1_fut)), None); + sleep(ms(500)); + }) + .add(move || { + sleep(ms(100)); + assert!(future::block_on(s2.send(42)).is_ok()); + }) + .add(move || { + sleep(ms(200)); + assert_eq!(future::block_on(r.recv()), Ok(3)); + assert_eq!(future::block_on(r.recv()), Ok(7)); + sleep(ms(100)); + assert_eq!(r.try_recv(), Ok(42)); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn forget_blocked_receiver() { + let (s, r1) = bounded(2); + let r2 = r1.clone(); + + Parallel::new() + .add(move || { + let r1_fut = r1.recv(); + // Poll but keep the future alive. 
+ futures_lite::pin!(r1_fut); + assert_eq!(future::block_on(future::poll_once(&mut r1_fut)), None); + sleep(ms(500)); + }) + .add(move || { + sleep(ms(100)); + assert_eq!(future::block_on(r2.recv()), Ok(3)); + }) + .add(move || { + sleep(ms(200)); + assert!(future::block_on(s.send(3)).is_ok()); + assert!(future::block_on(s.send(7)).is_ok()); + sleep(ms(100)); + assert!(s.try_send(42).is_ok()); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = 100_000; + + let (s, r) = bounded(3); + + Parallel::new() + .add(move || { + for i in 0..COUNT { + assert_eq!(future::block_on(r.recv()), Ok(i)); + } + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = bounded::(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = future::block_on(r.recv()).unwrap(); + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc_stream() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = bounded::(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + let v = &v; + + Parallel::new() + .each(0..THREADS, { + let r = r; + move |_| { + futures_lite::pin!(r); + for _ in 0..COUNT { + let n = future::block_on(r.next()).unwrap(); + v[n].fetch_add(1, Ordering::SeqCst); + } + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } 
+} + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +#[test] +fn weak() { + let (s, r) = bounded::(3); + + // Create a weak sender/receiver pair. + let (weak_s, weak_r) = (s.downgrade(), r.downgrade()); + + // Upgrade and send. + { + let s = weak_s.upgrade().unwrap(); + s.send_blocking(3).unwrap(); + let r = weak_r.upgrade().unwrap(); + assert_eq!(r.recv_blocking(), Ok(3)); + } + + // Drop the original sender/receiver pair. + drop((s, r)); + + // Try to upgrade again. + { + assert!(weak_s.upgrade().is_none()); + assert!(weak_r.upgrade().is_none()); + } +} diff --git a/.cargo-vendor/async-channel/tests/unbounded.rs b/.cargo-vendor/async-channel/tests/unbounded.rs new file mode 100644 index 0000000000..90cb375844 --- /dev/null +++ b/.cargo-vendor/async-channel/tests/unbounded.rs @@ -0,0 +1,356 @@ +#![allow(clippy::bool_assert_comparison, unused_imports)] + +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; + +use async_channel::{unbounded, RecvError, SendError, TryRecvError, TrySendError}; +use easy_parallel::Parallel; +use futures_lite::{future, prelude::*}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[cfg(not(target_family = "wasm"))] +fn ms(ms: u64) -> Duration { + Duration::from_millis(ms) +} + +#[test] +fn smoke() { + let (s, r) = unbounded(); + + s.try_send(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + future::block_on(s.send(8)).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +#[test] +fn smoke_blocking() { + let (s, r) = unbounded(); + + s.send_blocking(7).unwrap(); + assert_eq!(r.try_recv(), Ok(7)); + + s.send_blocking(8).unwrap(); + assert_eq!(future::block_on(r.recv()), Ok(8)); + + future::block_on(s.send(9)).unwrap(); + assert_eq!(r.recv_blocking(), Ok(9)); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); +} + 
+#[test] +fn capacity() { + let (s, r) = unbounded::<()>(); + assert_eq!(s.capacity(), None); + assert_eq!(r.capacity(), None); +} + +#[test] +fn len_empty_full() { + let (s, r) = unbounded(); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); + + future::block_on(s.send(())).unwrap(); + + assert_eq!(s.len(), 1); + assert_eq!(s.is_empty(), false); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 1); + assert_eq!(r.is_empty(), false); + assert_eq!(r.is_full(), false); + + future::block_on(r.recv()).unwrap(); + + assert_eq!(s.len(), 0); + assert_eq!(s.is_empty(), true); + assert_eq!(s.is_full(), false); + assert_eq!(r.len(), 0); + assert_eq!(r.is_empty(), true); + assert_eq!(r.is_full(), false); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn try_recv() { + let (s, r) = unbounded(); + + Parallel::new() + .add(move || { + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + sleep(ms(1500)); + assert_eq!(r.try_recv(), Ok(7)); + sleep(ms(500)); + assert_eq!(r.try_recv(), Err(TryRecvError::Closed)); + }) + .add(move || { + sleep(ms(1000)); + future::block_on(s.send(7)).unwrap(); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn recv() { + let (s, r) = unbounded(); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(r.recv()), Ok(7)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(8)); + sleep(ms(1000)); + assert_eq!(future::block_on(r.recv()), Ok(9)); + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + sleep(ms(1500)); + future::block_on(s.send(7)).unwrap(); + future::block_on(s.send(8)).unwrap(); + future::block_on(s.send(9)).unwrap(); + }) + .run(); +} + +#[test] +fn try_send() { + let (s, r) = unbounded(); + for i in 0..1000 { + assert_eq!(s.try_send(i), Ok(())); + } + + drop(r); + assert_eq!(s.try_send(777), 
Err(TrySendError::Closed(777))); +} + +#[test] +fn send() { + let (s, r) = unbounded(); + for i in 0..1000 { + assert_eq!(future::block_on(s.send(i)), Ok(())); + } + + drop(r); + assert_eq!(future::block_on(s.send(777)), Err(SendError(777))); +} + +#[test] +fn send_after_close() { + let (s, r) = unbounded(); + + future::block_on(s.send(1)).unwrap(); + future::block_on(s.send(2)).unwrap(); + future::block_on(s.send(3)).unwrap(); + + drop(r); + + assert_eq!(future::block_on(s.send(4)), Err(SendError(4))); + assert_eq!(s.try_send(5), Err(TrySendError::Closed(5))); +} + +#[test] +fn recv_after_close() { + let (s, r) = unbounded(); + + future::block_on(s.send(1)).unwrap(); + future::block_on(s.send(2)).unwrap(); + future::block_on(s.send(3)).unwrap(); + + drop(s); + + assert_eq!(future::block_on(r.recv()), Ok(1)); + assert_eq!(future::block_on(r.recv()), Ok(2)); + assert_eq!(future::block_on(r.recv()), Ok(3)); + assert_eq!(future::block_on(r.recv()), Err(RecvError)); +} + +#[test] +fn len() { + let (s, r) = unbounded(); + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); + + for i in 0..50 { + future::block_on(s.send(i)).unwrap(); + assert_eq!(s.len(), i + 1); + } + + for i in 0..50 { + future::block_on(r.recv()).unwrap(); + assert_eq!(r.len(), 50 - i - 1); + } + + assert_eq!(s.len(), 0); + assert_eq!(r.len(), 0); +} + +#[test] +fn receiver_count() { + let (s, r) = unbounded::<()>(); + let receiver_clones: Vec<_> = (0..20).map(|_| r.clone()).collect(); + + assert_eq!(s.receiver_count(), 21); + assert_eq!(r.receiver_count(), 21); + + drop(receiver_clones); + + assert_eq!(s.receiver_count(), 1); + assert_eq!(r.receiver_count(), 1); +} + +#[test] +fn sender_count() { + let (s, r) = unbounded::<()>(); + let sender_clones: Vec<_> = (0..20).map(|_| s.clone()).collect(); + + assert_eq!(s.sender_count(), 21); + assert_eq!(r.sender_count(), 21); + + drop(sender_clones); + + assert_eq!(s.receiver_count(), 1); + assert_eq!(r.receiver_count(), 1); +} + +#[cfg(not(target_family = 
"wasm"))] +#[test] +fn close_wakes_receiver() { + let (s, r) = unbounded::<()>(); + + Parallel::new() + .add(move || { + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + sleep(ms(1000)); + drop(s); + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = 100_000; + + let (s, r) = unbounded(); + + Parallel::new() + .add(move || { + for i in 0..COUNT { + assert_eq!(future::block_on(r.recv()), Ok(i)); + } + assert_eq!(future::block_on(r.recv()), Err(RecvError)); + }) + .add(move || { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = unbounded::(); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = future::block_on(r.recv()).unwrap(); + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + } + }) + .run(); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc_stream() { + const COUNT: usize = 25_000; + const THREADS: usize = 4; + + let (s, r) = unbounded::(); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + let v = &v; + + Parallel::new() + .each(0..THREADS, { + let r = r.clone(); + move |_| { + futures_lite::pin!(r); + for _ in 0..COUNT { + let n = future::block_on(r.next()).unwrap(); + v[n].fetch_add(1, Ordering::SeqCst); + } + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + future::block_on(s.send(i)).unwrap(); + } + }) + .run(); + + assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(all(feature = "std", 
not(target_family = "wasm")))] +#[test] +fn weak() { + let (s, r) = unbounded::(); + + // Create a weak sender/receiver pair. + let (weak_s, weak_r) = (s.downgrade(), r.downgrade()); + + // Upgrade and send. + { + let s = weak_s.upgrade().unwrap(); + s.send_blocking(3).unwrap(); + let r = weak_r.upgrade().unwrap(); + assert_eq!(r.recv_blocking(), Ok(3)); + } + + // Drop the original sender/receiver pair. + drop((s, r)); + + // Try to upgrade again. + { + assert!(weak_s.upgrade().is_none()); + assert!(weak_r.upgrade().is_none()); + } +} diff --git a/.cargo-vendor/axum-0.6.20/.cargo-checksum.json b/.cargo-vendor/axum-0.6.20/.cargo-checksum.json new file mode 100644 index 0000000000..6664b81fa6 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"80dbfc358bb9cea214f573bebb357878969c7d02e469be5c86b9432495860850","Cargo.toml":"a64509897a25122183449245546d644616ada3f0b3ecc667c43a9fb23ae1df4f","LICENSE":"c14b6ed9d732322af9bae1551f6ca373b3893d7ce6e9d46429fc378478d00dfb","README.md":"30bdfac17f1b03d7de708981458d1ceae159ba89e4927b92c27277d7efb34d18","benches/benches.rs":"7075f086f3150ab1ab61f20c7bd1859ce067b4bec51f2c9a19db880ffff448f9","build.rs":"3bad731d51fd3a32b235ebd437bc9a2788b35bcae4779237cd26696e13840bcb","src/body/mod.rs":"6ff16a4400248a601d68ee976bfdde784027db67ba0e9917432ea8184dd0db03","src/body/stream_body.rs":"7e6bccdcb868692e39597973fbf8589758e146349a1bbebdfe816608a999d294","src/boxed.rs":"6a24163de5a9a386c5a1626493922ea0f9bd36550829d2716733b5ef81828193","src/docs/debugging_handler_type_errors.md":"37c05a2aac5ae2cb9824cda699657c9d0876b7dfa5ef2a5aef5ed224943ab051","src/docs/error_handling.md":"9380e158554d390fa272e64d79c20c000b7125b3b6b618833620573d12140757","src/docs/extract.md":"1fa27266b4709b353abfe3968016472292735b524dd5286d4a94296aa88299e0","src/docs/handlers_intro.md":"44be7d8c2087380d3e512b6724cba41c9637dd7177c9b545d45cda244b6ec861","src/docs/method_routing/fallback.md":"c13a5fe4998bf86917fd1e2caed8090e
bedd1e3b8e4d32ae6cc1907384e7ce08","src/docs/method_routing/layer.md":"b8c95e2777123f2aa99cbd7e0f11127f612ca0dab56b4959965f01b2e2433873","src/docs/method_routing/merge.md":"2e39d29f0a829609033213aaf670d67193f11cc1baf7d76c5d7ae9daf7b0020d","src/docs/method_routing/route_layer.md":"35a47515bd7405ceb9cd44cf789dc3a2b4fcb239dda89aa568d2de5c2b2a794a","src/docs/middleware.md":"8fe5565535722c833bc7b41b104e8370494f471ae6d6e139454c28af32d6669f","src/docs/response.md":"9846185ad98b58e4dce723cdd473a3793e7568b31f04790e5e2a17167f811c18","src/docs/routing/fallback.md":"545232927a5070a593492808ee888a4a145a67b7a22cce56848fed02abf0c321","src/docs/routing/into_make_service_with_connect_info.md":"6fd508c749946433d879def8d4d38865c086e601b038690ee95a044d2cb26c2b","src/docs/routing/layer.md":"574b295f64d4571df834ca0140c385755b7f67b334e887fc92f96801df9956c6","src/docs/routing/merge.md":"ad49169d947274bf02fd60bb075040ee2ead934cfc1bc748f0533ef9876ff58a","src/docs/routing/nest.md":"81ebb477536150dba95db8315471ab31af8bec1814e6b5c7e99c82826a92f1ff","src/docs/routing/route.md":"d4389881c31f721ad23ede7287c0ef2c8f2aa7ad505044fac6ce63700f52d018","src/docs/routing/route_layer.md":"4d3d2ed962f218d61d87db22339d26f94453a0501433e483794893d58991f1fa","src/docs/routing/route_service.md":"dcd44e5453475a775cca8ffdb1253c85bbd6a7785e32acdfbd1190285b4366c6","src/docs/routing/with_state.md":"6568e11a4388bba3a45221d1c6a707ebc9d8ac6e3a765b7df5381c76bce563d6","src/error_handling/mod.rs":"cada6274f59087113e40856d9e1ff7ff323ca2b2eaafe37f95eb12ac3b0d081a","src/extension.rs":"719cae16da05818ed770c6f8339db8beda05f5d00f53a72ec08a1a92f977978f","src/extract/connect_info.rs":"fd8fc767544fe0370eefd04fd0b36e2777596ed1bc108e66763cbb70a9fba3f2","src/extract/host.rs":"442742ba452085b6dd54f46f50543f9d8f6971c056851b8956477dc3dc0240cc","src/extract/matched_path.rs":"2620141b0c40eedd18956326f6b7f9c202a6e0e103f6b3aeb0da9a230f6fc105","src/extract/mod.rs":"17472a21a6d00ce6d7896fa0e294ea5e7e346f777948607dcdc312b55ee0d044","src/extract/m
ultipart.rs":"64f10470b701df9b87b3b29c50a03b3c1c773045ba23c9892fe92c95c77231f2","src/extract/path/de.rs":"18687adc341b7d692d354429701847e40e3d3ae251f7e44517e65dede9aacc4e","src/extract/path/mod.rs":"f5ca11744b5bca18d29eb0bba1da3cc7d5d9133ee9c29b8a3d1186242219bf6b","src/extract/query.rs":"89e3371946f784576d3ebfc3317c5114c31859eaf945f52e0c7b8a8b11ffb1cb","src/extract/raw_form.rs":"d2bad0683f9312d79f62b84ae0cb33ff869a2b4beb2a7d99fb8472cf56755e0c","src/extract/raw_query.rs":"a09431e77038b90152d0ea7fdbdfdc0ff890f3b1f2317707136ba6b0d3e8409d","src/extract/rejection.rs":"5d92312fc8ccee8b3b9eb2408d6b00979ec43a990e5c047a928063ae557e79ba","src/extract/request_parts.rs":"ba5de7430e93f20056f5f25d72d971f30eeee8a94808530abc70f8e7317660de","src/extract/state.rs":"6abacbc54b559cf4a6c08ca965aa0bc192c1cdee49ebb65405fb7e73cdc410de","src/extract/ws.rs":"eca2d282443413661155fd30ac75389e50ceaabf95bc5edc9ad3b15a1bc7171d","src/form.rs":"9ac7535d25d54923d2749e699c7a3b3ba5399625b39373233de35e1b8b9e51bd","src/handler/future.rs":"c9dbf8e313c87437fd83fadd09ae4585eb1b573aabefa420a8eec0afda6560de","src/handler/into_service_state_in_extension.rs":"ea5fd70b1b56b3287416e92814f9f02fbca11dbc4969c355c89ade61cebad6a0","src/handler/mod.rs":"0d30a48a4d5f12d1ca79b14b4b3ee0c767682d489657913856b8fa83200af46c","src/handler/service.rs":"941977b1c66fba5670a8a24b0f755d881988b7cd05bb285001d89cad83ae5e3b","src/json.rs":"5df4e4f3e6de94b0619f7c4c12257275035f51b9d0b8129a7d53ab3d2cd6c901","src/lib.rs":"32b7910b38d6571043deb6dc24b01dea61bdb365f34de7201faafcb7315b5311","src/macros.rs":"6567e7ecaeef04071ced390360f314eb3d8b4d01aae603611039e799009db59e","src/middleware/from_extractor.rs":"ffe078c788ea8142d4b0cc992400008cee0958409db1410fcae0c68fac648a6a","src/middleware/from_fn.rs":"32084741acf5fe99aae5f114d3694b6ff618b1062157fbfd3cb83bf459aca57b","src/middleware/map_request.rs":"6ae78a54bc6090f441ba99eb0e8972b1bbb75ecafa8920c145477e376856e573","src/middleware/map_response.rs":"775d8d932aaa48c6f378ef7282c2c659232ba08a3ae0160
fb8582a44a9817094","src/middleware/mod.rs":"5b7fccd72e7dc87e321516f9656995f701efc9eacffc322861d931c4de21629e","src/response/mod.rs":"c6537b8aa8f4b2da441409370934b108551cbe844081b7229cf6b828dff30497","src/response/redirect.rs":"b94d9118e86de3224ad8d198f374a768cb49b3e9ed4cf5b069fbd059bb4dce01","src/response/sse.rs":"3e1892a5dceb9a08ebdda24c029d2458bc80ddbf72cc8113ed613d14c9e08908","src/routing/future.rs":"c0610a9b104f64f02deec5fdf79e148c09e72e6f325b62afc174f42b36f295c5","src/routing/into_make_service.rs":"eb259a904f471078cf4942573a99ab11a3f9836a85c7d0daf5a92bcb8d69fda6","src/routing/method_filter.rs":"f4eef4a2f3b06b711ffeeee826c3e817e41fc6bf1b3a2bd38117e5d9c7a584fb","src/routing/method_routing.rs":"564b9095988f1de20473132b03b153af04e12f4c44a6998509a2954b26cf293e","src/routing/mod.rs":"c1e8181edd462c8d8911cb249104eccce47cafb71e2772bf74577e0da73eb7b5","src/routing/not_found.rs":"14145bf876006a603a1819d28454a7e8c2d45fdbc23046ea041b4e62cda4d2a4","src/routing/path_router.rs":"64c50cebebf570b7747a330c28c66ac489393451d76a9d616010f174cbec89f3","src/routing/route.rs":"7f8e8e61ac95cf2c9d7bd3b714e37ef71056f2b7162f5ef60026d2cae29d01a5","src/routing/strip_prefix.rs":"437a82ee5bfa3112058032cbf511b4abd11c3fc2ebc38d92f4c54e903ad3a906","src/routing/tests/fallback.rs":"644fa72a277a09a4309fbc1abda7d3d74d48ff60dd33e22a1c23f900020653b8","src/routing/tests/get_to_head.rs":"85659b88e83ecf829efc5293bedc9243eaffdb0dc568b3f79a125e12f0c68b21","src/routing/tests/handle_error.rs":"b56b085f9ee666a3c3ca669971e9c0250f786433e046261e05b3253869103690","src/routing/tests/merge.rs":"76b09c9c3554676624b7ef98b23bc9d0a74494a5b0fb7bdc21d1a13e3a0b7870","src/routing/tests/mod.rs":"dce57b4a04f63b78ad989fb640a37604cfeb6b1f38eee87db07bd94efd2b3892","src/routing/tests/nest.rs":"b84f3e708f2c8c6eef930c40d5bc6655eb652cef6b9b7651d7274e8b3084171c","src/routing/url_params.rs":"c9f1d2ce2044bd61df4a4db704990e6b0b10c9f809e516cf63eb116d2b0cb73b","src/service_ext.rs":"cacb02bae694edd667a176a9ffd5621434997bf1395ee635a407d9d98
787265b","src/test_helpers/mod.rs":"288076f58a86276a31e8652f41f6960c446acfbe74169ab3cc05705e7c6edc3e","src/test_helpers/test_client.rs":"0ed24f8a249e728a9d28c93eb767c0bfe0cab9dca2b4252a879c92db2455cbe6","src/test_helpers/tracing_helpers.rs":"d664f080c7827137e82236c9dc94b87f0eb9f1fe0a8dc373ae5f82202a94eff2","src/typed_header.rs":"441ea68bdee820f5f126fed2da9eb9fb7bbfe14bd65395b52ab85dadc72a9486","src/util.rs":"bcc9ffb205e84d5ef265d2703a84e91cfb73fa0e9846d078d38ea78fd04b7131"},"package":"3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"} \ No newline at end of file diff --git a/.cargo-vendor/axum-0.6.20/CHANGELOG.md b/.cargo-vendor/axum-0.6.20/CHANGELOG.md new file mode 100644 index 0000000000..7bb8bd590b --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/CHANGELOG.md @@ -0,0 +1,2041 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +# Unreleased + +- None. + +# 0.6.20 (03. August, 2023) + +- **added:** `WebSocketUpgrade::write_buffer_size` and `WebSocketUpgrade::max_write_buffer_size` +- **changed:** Deprecate `WebSocketUpgrade::max_send_queue` +- **change:** Update tokio-tungstenite to 0.20 +- **added:** Implement `Handler` for `T: IntoResponse` ([#2140]) + +[#2140]: https://github.com/tokio-rs/axum/pull/2140 + +# 0.6.19 (17. 
July, 2023) + +- **added:** Add `axum::extract::Query::try_from_uri` ([#2058]) +- **added:** Implement `IntoResponse` for `Box` and `Box<[u8]>` ([#2035]) +- **fixed:** Fix bugs around merging routers with nested fallbacks ([#2096]) +- **fixed:** Fix `.source()` of composite rejections ([#2030]) +- **fixed:** Allow unreachable code in `#[debug_handler]` ([#2014]) +- **change:** Update tokio-tungstenite to 0.19 ([#2021]) +- **change:** axum's MSRV is now 1.63 ([#2021]) + +[#2014]: https://github.com/tokio-rs/axum/pull/2014 +[#2021]: https://github.com/tokio-rs/axum/pull/2021 +[#2030]: https://github.com/tokio-rs/axum/pull/2030 +[#2035]: https://github.com/tokio-rs/axum/pull/2035 +[#2058]: https://github.com/tokio-rs/axum/pull/2058 +[#2096]: https://github.com/tokio-rs/axum/pull/2096 + +# 0.6.18 (30. April, 2023) + +- **fixed:** Don't remove the `Sec-WebSocket-Key` header in `WebSocketUpgrade` ([#1972]) + +[#1972]: https://github.com/tokio-rs/axum/pull/1972 + +# 0.6.17 (25. April, 2023) + +- **fixed:** Fix fallbacks causing a panic on `CONNECT` requests ([#1958]) + +[#1958]: https://github.com/tokio-rs/axum/pull/1958 + +# 0.6.16 (18. April, 2023) + +- **fixed:** Don't allow extracting `MatchedPath` in fallbacks ([#1934]) +- **fixed:** Fix panic if `Router` with something nested at `/` was used as a fallback ([#1934]) +- **added:** Document that `Router::new().fallback(...)` isn't optimal ([#1940]) + +[#1934]: https://github.com/tokio-rs/axum/pull/1934 +[#1940]: https://github.com/tokio-rs/axum/pull/1940 + +# 0.6.15 (12. April, 2023) + +- **fixed:** Removed additional leftover debug messages ([#1927]) + +[#1927]: https://github.com/tokio-rs/axum/pull/1927 + +# 0.6.14 (11. April, 2023) + +- **fixed:** Removed leftover "path_router hit" debug message ([#1925]) + +[#1925]: https://github.com/tokio-rs/axum/pull/1925 + +# 0.6.13 (11. 
April, 2023) + +- **added:** Log rejections from built-in extractors with the + `axum::rejection=trace` target ([#1890]) +- **fixed:** Fixed performance regression with `Router::nest` introduced in + 0.6.0. `nest` now flattens the routes which performs better ([#1711]) +- **fixed:** Extracting `MatchedPath` in nested handlers now gives the full + matched path, including the nested path ([#1711]) +- **added:** Implement `Deref` and `DerefMut` for built-in extractors ([#1922]) + +[#1711]: https://github.com/tokio-rs/axum/pull/1711 +[#1890]: https://github.com/tokio-rs/axum/pull/1890 +[#1922]: https://github.com/tokio-rs/axum/pull/1922 + +# 0.6.12 (22. March, 2023) + +- **added:** Implement `IntoResponse` for `MultipartError` ([#1861]) +- **fixed:** More clearly document what wildcards matches ([#1873]) + +[#1861]: https://github.com/tokio-rs/axum/pull/1861 +[#1873]: https://github.com/tokio-rs/axum/pull/1873 + +# 0.6.11 (13. March, 2023) + +- **fixed:** Don't require `S: Debug` for `impl Debug for Router` ([#1836]) +- **fixed:** Clone state a bit less when handling requests ([#1837]) +- **fixed:** Unpin itoa dependency ([#1815]) + +[#1815]: https://github.com/tokio-rs/axum/pull/1815 +[#1836]: https://github.com/tokio-rs/axum/pull/1836 +[#1837]: https://github.com/tokio-rs/axum/pull/1837 + +# 0.6.10 (03. March, 2023) + +- **fixed:** Add `#[must_use]` attributes to types that do nothing unless used ([#1809]) +- **fixed:** Gracefully handle missing headers in the `TypedHeader` extractor ([#1810]) +- **fixed:** Fix routing issues when loading a `Router` via a dynamic library ([#1806]) + +[#1806]: https://github.com/tokio-rs/axum/pull/1806 +[#1809]: https://github.com/tokio-rs/axum/pull/1809 +[#1810]: https://github.com/tokio-rs/axum/pull/1810 + +# 0.6.9 (24. February, 2023) + +- **changed:** Update to tower-http 0.4. 
axum is still compatible with tower-http 0.3 ([#1783]) + +[#1783]: https://github.com/tokio-rs/axum/pull/1783 + +# 0.6.8 (24. February, 2023) + +- **fixed:** Fix `Allow` missing from routers with middleware ([#1773]) +- **added:** Add `KeepAlive::event` for customizing the event sent for SSE keep alive ([#1729]) + +[#1729]: https://github.com/tokio-rs/axum/pull/1729 +[#1773]: https://github.com/tokio-rs/axum/pull/1773 + +# 0.6.7 (17. February, 2023) + +- **added:** Add `FormRejection::FailedToDeserializeFormBody` which is returned + if the request body couldn't be deserialized into the target type, as opposed + to `FailedToDeserializeForm` which is only for query parameters ([#1683]) +- **added:** Add `MockConnectInfo` for setting `ConnectInfo` during tests ([#1767]) + +[#1683]: https://github.com/tokio-rs/axum/pull/1683 +[#1767]: https://github.com/tokio-rs/axum/pull/1767 + +# 0.6.6 (12. February, 2023) + +- **fixed:** Enable passing `MethodRouter` to `Router::fallback` ([#1730]) + +[#1730]: https://github.com/tokio-rs/axum/pull/1730 + +# 0.6.5 (11. February, 2023) + +- **fixed:** Fix `#[debug_handler]` sometimes giving wrong borrow related suggestions ([#1710]) +- Document gotchas related to using `impl IntoResponse` as the return type from handler functions ([#1736]) + +[#1710]: https://github.com/tokio-rs/axum/pull/1710 +[#1736]: https://github.com/tokio-rs/axum/pull/1736 + +# 0.6.4 (22. January, 2023) + +- Depend on axum-macros 0.3.2 + +# 0.6.3 (20. January, 2023) + +- **added:** Implement `IntoResponse` for `&'static [u8; N]` and `[u8; N]` ([#1690]) +- **fixed:** Make `Path` support types using `serde::Deserializer::deserialize_any` ([#1693]) +- **added:** Add `RawPathParams` ([#1713]) +- **added:** Implement `Clone` and `Service` for `axum::middleware::Next` ([#1712]) +- **fixed:** Document required tokio features to run "Hello, World!" 
example ([#1715]) + +[#1690]: https://github.com/tokio-rs/axum/pull/1690 +[#1693]: https://github.com/tokio-rs/axum/pull/1693 +[#1712]: https://github.com/tokio-rs/axum/pull/1712 +[#1713]: https://github.com/tokio-rs/axum/pull/1713 +[#1715]: https://github.com/tokio-rs/axum/pull/1715 + +# 0.6.2 (9. January, 2023) + +- **added:** Add `body_text` and `status` methods to built-in rejections ([#1612]) +- **added:** Enable the `runtime` feature of `hyper` when using `tokio` ([#1671]) + +[#1612]: https://github.com/tokio-rs/axum/pull/1612 +[#1671]: https://github.com/tokio-rs/axum/pull/1671 + +# 0.6.1 (29. November, 2022) + +- **added:** Expand the docs for `Router::with_state` ([#1580]) + +[#1580]: https://github.com/tokio-rs/axum/pull/1580 + +# 0.6.0 (25. November, 2022) + +## Routing + +- **fixed:** Nested routers are now allowed to have fallbacks ([#1521]): + + ```rust + let api_router = Router::new() + .route("/users", get(|| { ... })) + .fallback(api_fallback); + + let app = Router::new() + // this would panic in 0.5 but in 0.6 it just works + // + // requests starting with `/api` but not handled by `api_router` + // will go to `api_fallback` + .nest("/api", api_router); + ``` + + The outer router's fallback will still apply if a nested router doesn't have + its own fallback: + + ```rust + // this time without a fallback + let api_router = Router::new().route("/users", get(|| { ... })); + + let app = Router::new() + .nest("/api", api_router) + // `api_router` will inherit this fallback + .fallback(app_fallback); + ``` + +- **breaking:** The request `/foo/` no longer matches `/foo/*rest`. 
If you want + to match `/foo/` you have to add a route specifically for that ([#1086]) + + For example: + + ```rust + use axum::{Router, routing::get, extract::Path}; + + let app = Router::new() + // this will match `/foo/bar/baz` + .route("/foo/*rest", get(handler)) + // this will match `/foo/` + .route("/foo/", get(handler)) + // if you want `/foo` to match you must also add an explicit route for it + .route("/foo", get(handler)); + + async fn handler( + // use an `Option` because `/foo/` and `/foo` don't have any path params + params: Option>, + ) {} + ``` + +- **breaking:** Path params for wildcard routes no longer include the prefix + `/`. e.g. `/foo.js` will match `/*filepath` with a value of `foo.js`, _not_ + `/foo.js` ([#1086]) + + For example: + + ```rust + use axum::{Router, routing::get, extract::Path}; + + let app = Router::new().route("/foo/*rest", get(handler)); + + async fn handler( + Path(params): Path, + ) { + // for the request `/foo/bar/baz` the value of `params` will be `bar/baz` + // + // on 0.5 it would be `/bar/baz` + } + ``` + +- **fixed:** Routes like `/foo` and `/*rest` are no longer considered + overlapping. `/foo` will take priority ([#1086]) + + For example: + + ```rust + use axum::{Router, routing::get}; + + let app = Router::new() + // this used to not be allowed but now just works + .route("/foo/*rest", get(foo)) + .route("/foo/bar", get(bar)); + + async fn foo() {} + + async fn bar() {} + ``` + +- **breaking:** Automatic trailing slash redirects have been removed. 
+ Previously if you added a route for `/foo`, axum would redirect calls to + `/foo/` to `/foo` (or vice versa for `/foo/`): + + ```rust + use axum::{Router, routing::get}; + + let app = Router::new() + // a request to `GET /foo/` will now get `404 Not Found` + // whereas in 0.5 axum would redirect to `/foo` + // + // same goes the other way if you had the route `/foo/` + // axum will no longer redirect from `/foo` to `/foo/` + .route("/foo", get(handler)); + + async fn handler() {} + ``` + + Either explicitly add routes for `/foo` and `/foo/` or use + `axum_extra::routing::RouterExt::route_with_tsr` if you want the old behavior + ([#1119]) + +- **breaking:** `Router::fallback` now only accepts `Handler`s (similarly to + what `get`, `post`, etc. accept). Use the new `Router::fallback_service` for + setting any `Service` as the fallback ([#1155]) + + This fallback on 0.5: + + ```rust + use axum::{Router, handler::Handler}; + + let app = Router::new().fallback(fallback.into_service()); + + async fn fallback() {} + ``` + + Becomes this in 0.6 + + ```rust + use axum::Router; + + let app = Router::new().fallback(fallback); + + async fn fallback() {} + ``` + +- **breaking:** It is no longer supported to `nest` twice at the same path, i.e. + `.nest("/foo", a).nest("/foo", b)` will panic. Instead use `.nest("/foo", a.merge(b))` +- **breaking:** It is no longer supported to `nest` a router and add a route at + the same path, such as `.nest("/a", _).route("/a", _)`. Instead use + `.nest("/a/", _).route("/a", _)`. +- **changed:** `Router::nest` now only accepts `Router`s, the general-purpose + `Service` nesting method has been renamed to `nest_service` ([#1368]) +- **breaking:** Allow `Error: Into` for `Route::{layer, route_layer}` ([#924]) +- **breaking:** `MethodRouter` now panics on overlapping routes ([#1102]) +- **breaking:** `Router::route` now only accepts `MethodRouter`s created with + `get`, `post`, etc. 
Use the new `Router::route_service` for routing to + any `Service`s ([#1155]) +- **breaking:** Adding a `.route_layer` onto a `Router` or `MethodRouter` + without any routes will now result in a panic. Previously, this just did + nothing. [#1327] +- **breaking:** `RouterService` has been removed since `Router` now implements + `Service` when the state is `()`. Use `Router::with_state` to provide the + state and get a `Router<()>`. Note that `RouterService` only existed in the + pre-releases, not 0.5 ([#1552]) + +## Extractors + +- **added:** Added new type safe `State` extractor. This can be used with + `Router::with_state` and gives compile errors for missing states, whereas + `Extension` would result in runtime errors ([#1155]) + + We recommend migrating from `Extension` to `State` for sharing application state since that is more type + safe and faster. That is done by using `Router::with_state` and `State`. + + This setup in 0.5 + + ```rust + use axum::{routing::get, Extension, Router}; + + let app = Router::new() + .route("/", get(handler)) + .layer(Extension(AppState {})); + + async fn handler(Extension(app_state): Extension) {} + + #[derive(Clone)] + struct AppState {} + ``` + + Becomes this in 0.6 using `State`: + + ```rust + use axum::{routing::get, extract::State, Router}; + + let app = Router::new() + .route("/", get(handler)) + .with_state(AppState {}); + + async fn handler(State(app_state): State) {} + + #[derive(Clone)] + struct AppState {} + ``` + + If you have multiple extensions, you can use fields on `AppState` and implement + `FromRef`: + + ```rust + use axum::{extract::{State, FromRef}, routing::get, Router}; + + let state = AppState { + client: HttpClient {}, + database: Database {}, + }; + + let app = Router::new().route("/", get(handler)).with_state(state); + + async fn handler( + State(client): State, + State(database): State, + ) {} + + // the derive requires enabling the "macros" feature + #[derive(Clone, FromRef)] + struct AppState { + 
client: HttpClient, + database: Database, + } + + #[derive(Clone)] + struct HttpClient {} + + #[derive(Clone)] + struct Database {} + ``` + +- **breaking:** It is now only possible for one extractor per handler to consume + the request body. In 0.5 doing so would result in runtime errors but in 0.6 it + is a compile error ([#1272]) + + axum enforces this by only allowing the _last_ extractor to consume the + request. + + For example: + + ```rust + use axum::{Json, http::HeaderMap}; + + // This wont compile on 0.6 because both `Json` and `String` need to consume + // the request body. You can use either `Json` or `String`, but not both. + async fn handler_1( + json: Json, + string: String, + ) {} + + // This won't work either since `Json` is not the last extractor. + async fn handler_2( + json: Json, + headers: HeaderMap, + ) {} + + // This works! + async fn handler_3( + headers: HeaderMap, + json: Json, + ) {} + ``` + + This is done by reworking the `FromRequest` trait and introducing a new + `FromRequestParts` trait. + + If your extractor needs to consume the request body then you should implement + `FromRequest`, otherwise implement `FromRequestParts`. + + This extractor in 0.5: + + ```rust + struct MyExtractor { /* ... */ } + + #[async_trait] + impl FromRequest for MyExtractor + where + B: Send, + { + type Rejection = StatusCode; + + async fn from_request(req: &mut RequestParts) -> Result { + // ... + } + } + ``` + + Becomes this in 0.6: + + ```rust + use axum::{ + extract::{FromRequest, FromRequestParts}, + http::{StatusCode, Request, request::Parts}, + async_trait, + }; + + struct MyExtractor { /* ... */ } + + // implement `FromRequestParts` if you don't need to consume the request body + #[async_trait] + impl FromRequestParts for MyExtractor + where + S: Send + Sync, + { + type Rejection = StatusCode; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + // ... 
+ } + } + + // implement `FromRequest` if you do need to consume the request body + #[async_trait] + impl FromRequest for MyExtractor + where + S: Send + Sync, + B: Send + 'static, + { + type Rejection = StatusCode; + + async fn from_request(req: Request, state: &S) -> Result { + // ... + } + } + ``` + + For an example of how to write an extractor that accepts different + `Content-Types` see the [`parse-body-based-on-content-type`] example. + +- **added:** `FromRequest` and `FromRequestParts` derive macro re-exports from + [`axum-macros`] behind the `macros` feature ([#1352]) +- **added:** Add `RequestExt` and `RequestPartsExt` which adds convenience + methods for running extractors to `http::Request` and `http::request::Parts` ([#1301]) +- **added**: `JsonRejection` now displays the path at which a deserialization + error occurred ([#1371]) +- **added:** Add `extract::RawForm` for accessing raw urlencoded query bytes or request body ([#1487]) +- **fixed:** Used `400 Bad Request` for `FailedToDeserializeQueryString` + rejections, instead of `422 Unprocessable Entity` ([#1387]) +- **changed**: The inner error of a `JsonRejection` is now + `serde_path_to_error::Error`. Previously it was + `serde_json::Error` ([#1371]) +- **changed:** The default body limit now applies to the `Multipart` extractor ([#1420]) +- **breaking:** `ContentLengthLimit` has been removed. Use `DefaultBodyLimit` instead ([#1400]) +- **breaking:** `RequestParts` has been removed as part of the `FromRequest` + rework ([#1272]) +- **breaking:** `BodyAlreadyExtracted` has been removed ([#1272]) +- **breaking:** The following types or traits have a new `S` type param + which represents the state ([#1155]): + - `Router`, defaults to `()` + - `MethodRouter`, defaults to `()` + - `FromRequest`, no default + - `Handler`, no default +- **breaking:** `MatchedPath` can now no longer be extracted in middleware for + nested routes. 
In previous versions it returned invalid data when extracted + from a middleware applied to a nested router. `MatchedPath` can still be + extracted from handlers and middleware that aren't on nested routers ([#1462]) +- **breaking:** Rename `FormRejection::FailedToDeserializeQueryString` to + `FormRejection::FailedToDeserializeForm` ([#1496]) + +## Middleware + +- **added:** Support running extractors on `middleware::from_fn` functions ([#1088]) +- **added**: Add `middleware::from_fn_with_state` to enable running extractors that require + state ([#1342]) +- **added:** Add `middleware::from_extractor_with_state` ([#1396]) +- **added:** Add `map_request`, `map_request_with_state` for transforming the + request with an async function ([#1408]) +- **added:** Add `map_response`, `map_response_with_state` for transforming the + response with an async function ([#1414]) +- **added:** Support any middleware response that implements `IntoResponse` ([#1152]) +- **breaking:** Remove `extractor_middleware` which was previously deprecated. + Use `axum::middleware::from_extractor` instead ([#1077]) +- **breaking:** Require middleware added with `Handler::layer` to have + `Infallible` as the error type ([#1152]) + +## Misc + +- **added:** Support compiling to WASM. 
See the `simple-router-wasm` example + for more details ([#1382]) +- **added:** Add `ServiceExt` with methods for turning any `Service` into a + `MakeService` similarly to `Router::into_make_service` ([#1302]) +- **added:** String and binary `From` impls have been added to `extract::ws::Message` + to be more inline with `tungstenite` ([#1421]) +- **added:** Add `#[derive(axum::extract::FromRef)]` ([#1430]) +- **added:** Add `accept_unmasked_frames` setting in WebSocketUpgrade ([#1529]) +- **added:** Add `WebSocketUpgrade::on_failed_upgrade` to customize what to do + when upgrading a connection fails ([#1539]) +- **fixed:** Annotate panicking functions with `#[track_caller]` so the error + message points to where the user added the invalid route, rather than + somewhere internally in axum ([#1248]) +- **changed:** axum's MSRV is now 1.60 ([#1239]) +- **changed:** For methods that accept some `S: Service`, the bounds have been + relaxed so the response type must implement `IntoResponse` rather than being a + literal `Response` +- **breaking:** New `tokio` default feature needed for WASM support. 
If you + don't need WASM support but have `default_features = false` for other reasons + you likely need to re-enable the `tokio` feature ([#1382]) +- **breaking:** `handler::{WithState, IntoService}` are merged into one type, + named `HandlerService` ([#1418]) + +[#924]: https://github.com/tokio-rs/axum/pull/924 +[#1077]: https://github.com/tokio-rs/axum/pull/1077 +[#1086]: https://github.com/tokio-rs/axum/pull/1086 +[#1088]: https://github.com/tokio-rs/axum/pull/1088 +[#1102]: https://github.com/tokio-rs/axum/pull/1102 +[#1119]: https://github.com/tokio-rs/axum/pull/1119 +[#1152]: https://github.com/tokio-rs/axum/pull/1152 +[#1155]: https://github.com/tokio-rs/axum/pull/1155 +[#1239]: https://github.com/tokio-rs/axum/pull/1239 +[#1248]: https://github.com/tokio-rs/axum/pull/1248 +[#1272]: https://github.com/tokio-rs/axum/pull/1272 +[#1301]: https://github.com/tokio-rs/axum/pull/1301 +[#1302]: https://github.com/tokio-rs/axum/pull/1302 +[#1327]: https://github.com/tokio-rs/axum/pull/1327 +[#1342]: https://github.com/tokio-rs/axum/pull/1342 +[#1346]: https://github.com/tokio-rs/axum/pull/1346 +[#1352]: https://github.com/tokio-rs/axum/pull/1352 +[#1368]: https://github.com/tokio-rs/axum/pull/1368 +[#1371]: https://github.com/tokio-rs/axum/pull/1371 +[#1382]: https://github.com/tokio-rs/axum/pull/1382 +[#1387]: https://github.com/tokio-rs/axum/pull/1387 +[#1389]: https://github.com/tokio-rs/axum/pull/1389 +[#1396]: https://github.com/tokio-rs/axum/pull/1396 +[#1397]: https://github.com/tokio-rs/axum/pull/1397 +[#1400]: https://github.com/tokio-rs/axum/pull/1400 +[#1408]: https://github.com/tokio-rs/axum/pull/1408 +[#1414]: https://github.com/tokio-rs/axum/pull/1414 +[#1418]: https://github.com/tokio-rs/axum/pull/1418 +[#1420]: https://github.com/tokio-rs/axum/pull/1420 +[#1421]: https://github.com/tokio-rs/axum/pull/1421 +[#1430]: 
https://github.com/tokio-rs/axum/pull/1430 +[#1462]: https://github.com/tokio-rs/axum/pull/1462 +[#1487]: https://github.com/tokio-rs/axum/pull/1487 +[#1496]: https://github.com/tokio-rs/axum/pull/1496 +[#1521]: https://github.com/tokio-rs/axum/pull/1521 +[#1529]: https://github.com/tokio-rs/axum/pull/1529 +[#1532]: https://github.com/tokio-rs/axum/pull/1532 +[#1539]: https://github.com/tokio-rs/axum/pull/1539 +[#1552]: https://github.com/tokio-rs/axum/pull/1552 +[`axum-macros`]: https://docs.rs/axum-macros/latest/axum_macros/ +[`parse-body-based-on-content-type`]: https://github.com/tokio-rs/axum/blob/main/examples/parse-body-based-on-content-type/src/main.rs + +
+ +# 0.6.0 Pre-Releases + +# 0.6.0-rc.5 (18. November, 2022) + +- **breaking:** `Router::with_state` is no longer a constructor. It is instead + used to convert the router into a `RouterService` ([#1532]) + + This nested router on 0.6.0-rc.4 + + ```rust + Router::with_state(state).route(...); + ``` + + Becomes this in 0.6.0-rc.5 + + ```rust + Router::new().route(...).with_state(state); + ``` + +- **breaking:** `Router::inherit_state` has been removed. Use + `Router::with_state` instead ([#1532]) +- **breaking:** `Router::nest` and `Router::merge` now only support nesting + routers that use the same state type as the router they're being merged into. + Use `FromRef` for substates ([#1532]) + +- **added:** Add `accept_unmasked_frames` setting in WebSocketUpgrade ([#1529]) +- **fixed:** Nested routers will now inherit fallbacks from outer routers ([#1521]) +- **added:** Add `WebSocketUpgrade::on_failed_upgrade` to customize what to do + when upgrading a connection fails ([#1539]) + +[#1521]: https://github.com/tokio-rs/axum/pull/1521 +[#1529]: https://github.com/tokio-rs/axum/pull/1529 +[#1532]: https://github.com/tokio-rs/axum/pull/1532 +[#1539]: https://github.com/tokio-rs/axum/pull/1539 + +# 0.6.0-rc.4 (9. November, 2022) + +- **changed**: The inner error of a `JsonRejection` is now + `serde_path_to_error::Error`. 
Previously it was + `serde_json::Error` ([#1371]) +- **added**: `JsonRejection` now displays the path at which a deserialization + error occurred ([#1371]) +- **fixed:** Support streaming/chunked requests in `ContentLengthLimit` ([#1389]) +- **fixed:** Used `400 Bad Request` for `FailedToDeserializeQueryString` + rejections, instead of `422 Unprocessable Entity` ([#1387]) +- **added:** Add `middleware::from_extractor_with_state` ([#1396]) +- **added:** Add `DefaultBodyLimit::max` for changing the default body limit ([#1397]) +- **added:** Add `map_request`, `map_request_with_state` for transforming the + request with an async function ([#1408]) +- **added:** Add `map_response`, `map_response_with_state` for transforming the + response with an async function ([#1414]) +- **breaking:** `ContentLengthLimit` has been removed. Use `DefaultBodyLimit` instead ([#1400]) +- **changed:** `Router` no longer implements `Service`, call `.into_service()` + on it to obtain a `RouterService` that does ([#1368]) +- **added:** Add `Router::inherit_state`, which creates a `Router` with an + arbitrary state type without actually supplying the state; such a `Router` + can't be turned into a service directly (`.into_service()` will panic), but + can be nested or merged into a `Router` with the same state type ([#1368]) +- **changed:** `Router::nest` now only accepts `Router`s, the general-purpose + `Service` nesting method has been renamed to `nest_service` ([#1368]) +- **added:** Support compiling to WASM. See the `simple-router-wasm` example + for more details ([#1382]) +- **breaking:** New `tokio` default feature needed for WASM support. 
If you + don't need WASM support but have `default_features = false` for other reasons + you likely need to re-enable the `tokio` feature ([#1382]) +- **breaking:** `handler::{WithState, IntoService}` are merged into one type, + named `HandlerService` ([#1418]) +- **changed:** The default body limit now applies to the `Multipart` extractor ([#1420]) +- **added:** String and binary `From` impls have been added to `extract::ws::Message` + to be more inline with `tungstenite` ([#1421]) +- **added:** Add `#[derive(axum::extract::FromRef)]` ([#1430]) +- **added:** `FromRequest` and `FromRequestParts` derive macro re-exports from + [`axum-macros`] behind the `macros` feature ([#1352]) +- **breaking:** `MatchedPath` can now no longer be extracted in middleware for + nested routes ([#1462]) +- **added:** Add `extract::RawForm` for accessing raw urlencoded query bytes or request body ([#1487]) +- **breaking:** Rename `FormRejection::FailedToDeserializeQueryString` to + `FormRejection::FailedToDeserializeForm` ([#1496]) + +[#1352]: https://github.com/tokio-rs/axum/pull/1352 +[#1368]: https://github.com/tokio-rs/axum/pull/1368 +[#1371]: https://github.com/tokio-rs/axum/pull/1371 +[#1382]: https://github.com/tokio-rs/axum/pull/1382 +[#1387]: https://github.com/tokio-rs/axum/pull/1387 +[#1389]: https://github.com/tokio-rs/axum/pull/1389 +[#1396]: https://github.com/tokio-rs/axum/pull/1396 +[#1397]: https://github.com/tokio-rs/axum/pull/1397 +[#1400]: https://github.com/tokio-rs/axum/pull/1400 +[#1408]: https://github.com/tokio-rs/axum/pull/1408 +[#1414]: https://github.com/tokio-rs/axum/pull/1414 +[#1418]: https://github.com/tokio-rs/axum/pull/1418 +[#1420]: https://github.com/tokio-rs/axum/pull/1420 +[#1421]: https://github.com/tokio-rs/axum/pull/1421 +[#1430]: https://github.com/tokio-rs/axum/pull/1430 +[#1462]: https://github.com/tokio-rs/axum/pull/1462 +[#1487]: https://github.com/tokio-rs/axum/pull/1487 
+[#1496]: https://github.com/tokio-rs/axum/pull/1496 + +# 0.6.0-rc.3 (8. November, 2022) + +Yanked, as it didn't compile in release mode. + +# 0.6.0-rc.2 (10. September, 2022) + +## Security + +- **breaking:** Added default limit to how much data `Bytes::from_request` will + consume. Previously it would attempt to consume the entire request body + without checking its length. This meant if a malicious peer sent an large (or + infinite) request body your server might run out of memory and crash. + + The default limit is at 2 MB and can be disabled by adding the new + `DefaultBodyLimit::disable()` middleware. See its documentation for more + details. + + This also applies to these extractors which used `Bytes::from_request` + internally: + - `Form` + - `Json` + - `String` + + ([#1346]) + +## Routing + +- **breaking:** Adding a `.route_layer` onto a `Router` or `MethodRouter` + without any routes will now result in a panic. Previously, this just did + nothing. [#1327] + + +[`axum-macros`]: https://docs.rs/axum-macros/latest/axum_macros/ + +## Middleware + +- **added**: Add `middleware::from_fn_with_state` and + `middleware::from_fn_with_state_arc` to enable running extractors that require + state ([#1342]) + +[#1327]: https://github.com/tokio-rs/axum/pull/1327 +[#1342]: https://github.com/tokio-rs/axum/pull/1342 +[#1346]: https://github.com/tokio-rs/axum/pull/1346 + +# 0.6.0-rc.1 (23. August, 2022) + +## Routing + +- **breaking:** Nested `Router`s will no longer delegate to the outer `Router`'s + fallback. 
Instead you must explicitly set a fallback on the inner `Router` ([#1086]) + + This nested router on 0.5: + + ```rust + use axum::{Router, handler::Handler}; + + let api_routes = Router::new(); + + let app = Router::new() + .nest("/api", api_routes) + .fallback(fallback.into_service()); + + async fn fallback() {} + ``` + + Becomes this in 0.6: + + ```rust + use axum::Router; + + let api_routes = Router::new() + // we have to explicitly set the fallback here + // since nested routers no longer delegate to the outer + // router's fallback + .fallback(fallback); + + let app = Router::new() + .nest("/api", api_routes) + .fallback(fallback); + + async fn fallback() {} + ``` + +- **breaking:** The request `/foo/` no longer matches `/foo/*rest`. If you want + to match `/foo/` you have to add a route specifically for that ([#1086]) + + For example: + + ```rust + use axum::{Router, routing::get, extract::Path}; + + let app = Router::new() + // this will match `/foo/bar/baz` + .route("/foo/*rest", get(handler)) + // this will match `/foo/` + .route("/foo/", get(handler)) + // if you want `/foo` to match you must also add an explicit route for it + .route("/foo", get(handler)); + + async fn handler( + // use an `Option` because `/foo/` and `/foo` don't have any path params + params: Option>, + ) {} + ``` + +- **breaking:** Path params for wildcard routes no longer include the prefix + `/`. e.g. `/foo.js` will match `/*filepath` with a value of `foo.js`, _not_ + `/foo.js` ([#1086]) + + For example: + + ```rust + use axum::{Router, routing::get, extract::Path}; + + let app = Router::new().route("/foo/*rest", get(handler)); + + async fn handler( + Path(params): Path, + ) { + // for the request `/foo/bar/baz` the value of `params` will be `bar/baz` + // + // on 0.5 it would be `/bar/baz` + } + ``` + +- **fixed:** Routes like `/foo` and `/*rest` are no longer considered + overlapping. 
`/foo` will take priority ([#1086]) + + For example: + + ```rust + use axum::{Router, routing::get}; + + let app = Router::new() + // this used to not be allowed but now just works + .route("/foo/*rest", get(foo)) + .route("/foo/bar", get(bar)); + + async fn foo() {} + + async fn bar() {} + ``` + +- **breaking:** Trailing slash redirects have been removed. Previously if you + added a route for `/foo`, axum would redirect calls to `/foo/` to `/foo` (or + vice versa for `/foo/`). That is no longer supported and such requests will + now be sent to the fallback. Consider using + `axum_extra::routing::RouterExt::route_with_tsr` if you want the old behavior + ([#1119]) + + For example: + + ```rust + use axum::{Router, routing::get}; + + let app = Router::new() + // a request to `GET /foo/` will now get `404 Not Found` + // whereas in 0.5 axum would redirect to `/foo` + // + // same goes the other way if you had the route `/foo/` + // axum will no longer redirect from `/foo` to `/foo/` + .route("/foo", get(handler)); + + async fn handler() {} + ``` + +- **breaking:** `Router::fallback` now only accepts `Handler`s (similarly to + what `get`, `post`, etc accept). Use the new `Router::fallback_service` for + setting any `Service` as the fallback ([#1155]) + + This fallback on 0.5: + + ```rust + use axum::{Router, handler::Handler}; + + let app = Router::new().fallback(fallback.into_service()); + + async fn fallback() {} + ``` + + Becomes this in 0.6 + + ```rust + use axum::Router; + + let app = Router::new().fallback(fallback); + + async fn fallback() {} + ``` + +- **breaking:** Allow `Error: Into` for `Route::{layer, route_layer}` ([#924]) +- **breaking:** `MethodRouter` now panics on overlapping routes ([#1102]) +- **breaking:** `Router::route` now only accepts `MethodRouter`s created with + `get`, `post`, etc. Use the new `Router::route_service` for routing to + any `Service`s ([#1155]) + +## Extractors + +- **added:** Added new type safe `State` extractor. 
This can be used with + `Router::with_state` and gives compile errors for missing states, whereas + `Extension` would result in runtime errors ([#1155]) + + We recommend migrating from `Extension` to `State` since that is more type + safe and faster. That is done by using `Router::with_state` and `State`. + + This setup in 0.5 + + ```rust + use axum::{routing::get, Extension, Router}; + + let app = Router::new() + .route("/", get(handler)) + .layer(Extension(AppState {})); + + async fn handler(Extension(app_state): Extension) {} + + #[derive(Clone)] + struct AppState {} + ``` + + Becomes this in 0.6 using `State`: + + ```rust + use axum::{routing::get, extract::State, Router}; + + let app = Router::with_state(AppState {}) + .route("/", get(handler)); + + async fn handler(State(app_state): State) {} + + #[derive(Clone)] + struct AppState {} + ``` + + If you have multiple extensions you can use fields on `AppState` and implement + `FromRef`: + + ```rust + use axum::{extract::{State, FromRef}, routing::get, Router}; + + let state = AppState { + client: HttpClient {}, + database: Database {}, + }; + + let app = Router::with_state(state).route("/", get(handler)); + + async fn handler( + State(client): State, + State(database): State, + ) {} + + #[derive(Clone)] + struct AppState { + client: HttpClient, + database: Database, + } + + #[derive(Clone)] + struct HttpClient {} + + impl FromRef for HttpClient { + fn from_ref(state: &AppState) -> Self { + state.client.clone() + } + } + + #[derive(Clone)] + struct Database {} + + impl FromRef for Database { + fn from_ref(state: &AppState) -> Self { + state.database.clone() + } + } + ``` +- **breaking:** It is now only possible for one extractor per handler to consume + the request body. In 0.5 doing so would result in runtime errors but in 0.6 it + is a compile error ([#1272]) + + axum enforces this by only allowing the _last_ extractor to consume the + request. 
+ + For example: + + ```rust + use axum::{Json, http::HeaderMap}; + + // This wont compile on 0.6 because both `Json` and `String` need to consume + // the request body. You can use either `Json` or `String`, but not both. + async fn handler_1( + json: Json, + string: String, + ) {} + + // This won't work either since `Json` is not the last extractor. + async fn handler_2( + json: Json, + headers: HeaderMap, + ) {} + + // This works! + async fn handler_3( + headers: HeaderMap, + json: Json, + ) {} + ``` + + This is done by reworking the `FromRequest` trait and introducing a new + `FromRequestParts` trait. + + If your extractor needs to consume the request body then you should implement + `FromRequest`, otherwise implement `FromRequestParts`. + + This extractor in 0.5: + + ```rust + struct MyExtractor { /* ... */ } + + #[async_trait] + impl FromRequest for MyExtractor + where + B: Send, + { + type Rejection = StatusCode; + + async fn from_request(req: &mut RequestParts) -> Result { + // ... + } + } + ``` + + Becomes this in 0.6: + + ```rust + use axum::{ + extract::{FromRequest, FromRequestParts}, + http::{StatusCode, Request, request::Parts}, + async_trait, + }; + + struct MyExtractor { /* ... */ } + + // implement `FromRequestParts` if you don't need to consume the request body + #[async_trait] + impl FromRequestParts for MyExtractor + where + S: Send + Sync, + { + type Rejection = StatusCode; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + // ... + } + } + + // implement `FromRequest` if you do need to consume the request body + #[async_trait] + impl FromRequest for MyExtractor + where + S: Send + Sync, + B: Send + 'static, + { + type Rejection = StatusCode; + + async fn from_request(req: Request, state: &S) -> Result { + // ... 
+ } + } + ``` + +- **breaking:** `RequestParts` has been removed as part of the `FromRequest` + rework ([#1272]) +- **breaking:** `BodyAlreadyExtracted` has been removed ([#1272]) +- **breaking:** The following types or traits have a new `S` type param + which represents the state ([#1155]): + - `Router`, defaults to `()` + - `MethodRouter`, defaults to `()` + - `FromRequest`, no default + - `Handler`, no default +- **added:** Add `RequestExt` and `RequestPartsExt` which adds convenience + methods for running extractors to `http::Request` and `http::request::Parts` ([#1301]) + +## Middleware + +- **breaking:** Remove `extractor_middleware` which was previously deprecated. + Use `axum::middleware::from_extractor` instead ([#1077]) +- **added:** Support running extractors on `middleware::from_fn` functions ([#1088]) +- **added:** Support any middleware response that implements `IntoResponse` ([#1152]) +- **breaking:** Require middleware added with `Handler::layer` to have + `Infallible` as the error type ([#1152]) + +## Misc + +- **changed:** axum's MSRV is now 1.60 ([#1239]) +- **changed:** For methods that accept some `S: Service`, the bounds have been + relaxed so the response type must implement `IntoResponse` rather than being a + literal `Response` +- **fixed:** Annotate panicking functions with `#[track_caller]` so the error + message points to where the user added the invalid route, rather than + somewhere internally in axum ([#1248]) +- **added:** Add `ServiceExt` with methods for turning any `Service` into a + `MakeService` similarly to `Router::into_make_service` ([#1302]) + +[#1077]: https://github.com/tokio-rs/axum/pull/1077 +[#1086]: https://github.com/tokio-rs/axum/pull/1086 +[#1088]: https://github.com/tokio-rs/axum/pull/1088 +[#1102]: https://github.com/tokio-rs/axum/pull/1102 +[#1119]: https://github.com/tokio-rs/axum/pull/1119 +[#1152]: https://github.com/tokio-rs/axum/pull/1152 +[#1155]: 
https://github.com/tokio-rs/axum/pull/1155 +[#1239]: https://github.com/tokio-rs/axum/pull/1239 +[#1248]: https://github.com/tokio-rs/axum/pull/1248 +[#1272]: https://github.com/tokio-rs/axum/pull/1272 +[#1301]: https://github.com/tokio-rs/axum/pull/1301 +[#1302]: https://github.com/tokio-rs/axum/pull/1302 +[#924]: https://github.com/tokio-rs/axum/pull/924 + +
+ +# 0.5.16 (10. September, 2022) + +## Security + +- **breaking:** Added default limit to how much data `Bytes::from_request` will + consume. Previously it would attempt to consume the entire request body + without checking its length. This meant if a malicious peer sent an large (or + infinite) request body your server might run out of memory and crash. + + The default limit is at 2 MB and can be disabled by adding the new + `DefaultBodyLimit::disable()` middleware. See its documentation for more + details. + + This also applies to these extractors which used `Bytes::from_request` + internally: + - `Form` + - `Json` + - `String` + + ([#1346]) + +[#1346]: https://github.com/tokio-rs/axum/pull/1346 + +# 0.5.15 (9. August, 2022) + +- **fixed:** Don't expose internal type names in `QueryRejection` response. ([#1171]) +- **fixed:** Improve performance of JSON serialization ([#1178]) +- **fixed:** Improve build times by generating less IR ([#1192]) + +[#1171]: https://github.com/tokio-rs/axum/pull/1171 +[#1178]: https://github.com/tokio-rs/axum/pull/1178 +[#1192]: https://github.com/tokio-rs/axum/pull/1192 + +# 0.5.14 (25. July, 2022) + +Yanked, as it contained an accidental breaking change. + +# 0.5.13 (15. July, 2022) + +- **fixed:** If `WebSocketUpgrade` cannot upgrade the connection it will return a + `WebSocketUpgradeRejection::ConnectionNotUpgradable` rejection ([#1135]) +- **changed:** `WebSocketUpgradeRejection` has a new variant `ConnectionNotUpgradable` + variant ([#1135]) + +[#1135]: https://github.com/tokio-rs/axum/pull/1135 + +# 0.5.12 (10. July, 2022) + +- **added:** Added `debug_handler` which is an attribute macro that improves + type errors when applied to handler function. It is re-exported from + `axum-macros` ([#1144]) + +[#1144]: https://github.com/tokio-rs/axum/pull/1144 + +# 0.5.11 (02. 
July, 2022) + +- **added:** Implement `TryFrom` for `MethodFilter` and use new + `NoMatchingMethodFilter` error in case of failure ([#1130]) +- **added:** Document how to run extractors from middleware ([#1140]) + +[#1130]: https://github.com/tokio-rs/axum/pull/1130 +[#1140]: https://github.com/tokio-rs/axum/pull/1140 + +# 0.5.10 (28. June, 2022) + +- **fixed:** Make `Router` cheaper to clone ([#1123]) +- **fixed:** Fix possible panic when doing trailing slash redirect ([#1124]) + +[#1123]: https://github.com/tokio-rs/axum/pull/1123 +[#1124]: https://github.com/tokio-rs/axum/pull/1124 + +# 0.5.9 (20. June, 2022) + +- **fixed:** Fix compile error when the `headers` is enabled and the `form` + feature is disabled ([#1107]) + +[#1107]: https://github.com/tokio-rs/axum/pull/1107 + +# 0.5.8 (18. June, 2022) + +- **added:** Support resolving host name via `Forwarded` header in `Host` + extractor ([#1078]) +- **added:** Implement `IntoResponse` for `Form` ([#1095]) +- **changed:** axum's MSRV is now 1.56 ([#1098]) + +[#1078]: https://github.com/tokio-rs/axum/pull/1078 +[#1095]: https://github.com/tokio-rs/axum/pull/1095 +[#1098]: https://github.com/tokio-rs/axum/pull/1098 + +# 0.5.7 (08. June, 2022) + +- **added:** Implement `Default` for `Extension` ([#1043]) +- **fixed:** Support deserializing `Vec<(String, String)>` in `extract::Path<_>` to get vector of + key/value pairs ([#1059]) +- **added:** Add `extract::ws::close_code` which contains constants for close codes ([#1067]) +- **fixed:** Use `impl IntoResponse` less in docs ([#1049]) + +[#1043]: https://github.com/tokio-rs/axum/pull/1043 +[#1049]: https://github.com/tokio-rs/axum/pull/1049 +[#1059]: https://github.com/tokio-rs/axum/pull/1059 +[#1067]: https://github.com/tokio-rs/axum/pull/1067 + +# 0.5.6 (15. May, 2022) + +- **added:** Add `WebSocket::protocol` to return the selected WebSocket subprotocol, if there is one. 
([#1022]) +- **fixed:** Improve error message for `PathRejection::WrongNumberOfParameters` to hint at using + `Path<(String, String)>` or `Path` ([#1023]) +- **fixed:** `PathRejection::WrongNumberOfParameters` now uses `500 Internal Server Error` since + it's a programmer error and not a client error ([#1023]) +- **fixed:** Fix `InvalidFormContentType` mentioning the wrong content type + +[#1022]: https://github.com/tokio-rs/axum/pull/1022 +[#1023]: https://github.com/tokio-rs/axum/pull/1023 + +# 0.5.5 (10. May, 2022) + +- **fixed:** Correctly handle `GET`, `HEAD`, and `OPTIONS` requests in `ContentLengthLimit`. + Request with these methods are now accepted if they _do not_ have a `Content-Length` header, and + the request body will not be checked. If they do have a `Content-Length` header they'll be + rejected. This allows `ContentLengthLimit` to be used as middleware around several routes, + including `GET` routes ([#989]) +- **added:** Add `MethodRouter::{into_make_service, into_make_service_with_connect_info}` ([#1010]) + +[#989]: https://github.com/tokio-rs/axum/pull/989 +[#1010]: https://github.com/tokio-rs/axum/pull/1010 + +# 0.5.4 (26. April, 2022) + +- **added:** Add `response::ErrorResponse` and `response::Result` for + `IntoResponse`-based error handling ([#921]) +- **added:** Add `middleware::from_extractor` and deprecate `extract::extractor_middleware` ([#957]) +- **changed:** Update to tower-http 0.3 ([#965]) + +[#921]: https://github.com/tokio-rs/axum/pull/921 +[#957]: https://github.com/tokio-rs/axum/pull/957 +[#965]: https://github.com/tokio-rs/axum/pull/965 + +# 0.5.3 (19. 
April, 2022) + +- **added:** Add `AppendHeaders` for appending headers to a response rather than overriding them ([#927]) +- **added:** Add `axum::extract::multipart::Field::chunk` method for streaming a single chunk from + the field ([#901]) +- **fixed:** Fix trailing slash redirection with query parameters ([#936]) + +[#901]: https://github.com/tokio-rs/axum/pull/901 +[#927]: https://github.com/tokio-rs/axum/pull/927 +[#936]: https://github.com/tokio-rs/axum/pull/936 + +# 0.5.2 (19. April, 2022) + +Yanked, as it contained an accidental breaking change. + +# 0.5.1 (03. April, 2022) + +- **added:** Add `RequestParts::extract` which allows applying an extractor as a method call ([#897]) + +[#897]: https://github.com/tokio-rs/axum/pull/897 + +# 0.5.0 (31. March, 2022) + +- **added:** Document sharing state between handler and middleware ([#783]) +- **added:** `Extension<_>` can now be used in tuples for building responses, and will set an + extension on the response ([#797]) +- **added:** `extract::Host` for extracting the hostname of a request ([#827]) +- **added:** Add `IntoResponseParts` trait which allows defining custom response + types for adding headers or extensions to responses ([#797]) +- **added:** `TypedHeader` implements the new `IntoResponseParts` trait so they + can be returned from handlers as parts of a response ([#797]) +- **changed:** `Router::merge` now accepts `Into<Router>` ([#819]) +- **breaking:** `sse::Event` now accepts types implementing `AsRef<str>` instead of `Into<String>` + as field values. +- **breaking:** `sse::Event` now panics if a setter method is called twice instead of silently + overwriting old values. +- **breaking:** Require `Output = ()` on `WebSocketStream::on_upgrade` ([#644]) +- **breaking:** Make `TypedHeaderRejectionReason` `#[non_exhaustive]` ([#665]) +- **breaking:** Using `HeaderMap` as an extractor will no longer remove the headers and thus + they'll still be accessible to other extractors, such as `axum::extract::Json`. 
Instead + `HeaderMap` will clone the headers. You should prefer to use `TypedHeader` to extract only the + headers you need ([#698]) + + This includes these breaking changes: + - `RequestParts::take_headers` has been removed. + - `RequestParts::headers` returns `&HeaderMap`. + - `RequestParts::headers_mut` returns `&mut HeaderMap`. + - `HeadersAlreadyExtracted` has been removed. + - The `HeadersAlreadyExtracted` variant has been removed from these rejections: + - `RequestAlreadyExtracted` + - `RequestPartsAlreadyExtracted` + - `JsonRejection` + - `FormRejection` + - `ContentLengthLimitRejection` + - `WebSocketUpgradeRejection` + - `>::Rejection` has been changed to `std::convert::Infallible`. +- **breaking:** `axum::http::Extensions` is no longer an extractor (ie it + doesn't implement `FromRequest`). The `axum::extract::Extension` extractor is + _not_ impacted by this and works the same. This change makes it harder to + accidentally remove all extensions which would result in confusing errors + elsewhere ([#699]) + This includes these breaking changes: + - `RequestParts::take_extensions` has been removed. + - `RequestParts::extensions` returns `&Extensions`. + - `RequestParts::extensions_mut` returns `&mut Extensions`. + - `RequestAlreadyExtracted` has been removed. + - `::Rejection` is now `BodyAlreadyExtracted`. + - `::Rejection` is now `Infallible`. + - `ExtensionsAlreadyExtracted` has been removed. + - The `ExtensionsAlreadyExtracted` removed variant has been removed from these rejections: + - `ExtensionRejection` + - `PathRejection` + - `MatchedPathRejection` + - `WebSocketUpgradeRejection` +- **breaking:** `Redirect::found` has been removed ([#800]) +- **breaking:** `AddExtensionLayer` has been removed. Use `Extension` instead. 
It now implements + `tower::Layer` ([#807]) +- **breaking:** `AddExtension` has been moved from the root module to `middleware` +- **breaking:** `.nest("/foo/", Router::new().route("/bar", _))` now does the right thing and + results in a route at `/foo/bar` instead of `/foo//bar` ([#824]) +- **breaking:** Routes are now required to start with `/`. Previously routes such as `:foo` would + be accepted but most likely result in bugs ([#823]) +- **breaking:** `Headers` has been removed. Arrays of tuples directly implement + `IntoResponseParts` so `([("x-foo", "foo")], response)` now works ([#797]) +- **breaking:** `InvalidJsonBody` has been replaced with `JsonDataError` to clearly signal that the + request body was syntactically valid JSON but couldn't be deserialized into the target type +- **breaking:** `Handler` is no longer an `#[async_trait]` but instead has an + associated `Future` type. That allows users to build their own `Handler` types + without paying the cost of `#[async_trait]` ([#879]) +- **changed:** New `JsonSyntaxError` variant added to `JsonRejection`. This is returned when the + request body contains syntactically invalid JSON +- **fixed:** Correctly set the `Content-Length` header for response to `HEAD` + requests ([#734]) +- **fixed:** Fix wrong `content-length` for `HEAD` requests to endpoints that returns chunked + responses ([#755]) +- **fixed:** Fixed several routing bugs related to nested "opaque" tower services (i.e. 
+ non-`Router` services) ([#841] and [#842]) +- **changed:** Update to tokio-tungstenite 0.17 ([#791]) +- **breaking:** `Redirect::{to, temporary, permanent}` now accept `&str` instead + of `Uri` ([#889]) +- **breaking:** Remove second type parameter from `Router::into_make_service_with_connect_info` + and `Handler::into_make_service_with_connect_info` to support `MakeService`s + that accept multiple targets ([#892]) + +[#644]: https://github.com/tokio-rs/axum/pull/644 +[#665]: https://github.com/tokio-rs/axum/pull/665 +[#698]: https://github.com/tokio-rs/axum/pull/698 +[#699]: https://github.com/tokio-rs/axum/pull/699 +[#734]: https://github.com/tokio-rs/axum/pull/734 +[#755]: https://github.com/tokio-rs/axum/pull/755 +[#783]: https://github.com/tokio-rs/axum/pull/783 +[#791]: https://github.com/tokio-rs/axum/pull/791 +[#797]: https://github.com/tokio-rs/axum/pull/797 +[#800]: https://github.com/tokio-rs/axum/pull/800 +[#807]: https://github.com/tokio-rs/axum/pull/807 +[#819]: https://github.com/tokio-rs/axum/pull/819 +[#823]: https://github.com/tokio-rs/axum/pull/823 +[#824]: https://github.com/tokio-rs/axum/pull/824 +[#827]: https://github.com/tokio-rs/axum/pull/827 +[#841]: https://github.com/tokio-rs/axum/pull/841 +[#842]: https://github.com/tokio-rs/axum/pull/842 +[#879]: https://github.com/tokio-rs/axum/pull/879 +[#889]: https://github.com/tokio-rs/axum/pull/889 +[#892]: https://github.com/tokio-rs/axum/pull/892 + +# 0.4.8 (2. March, 2022) + +- Use correct path for `AddExtensionLayer` and `AddExtension::layer` deprecation + notes ([#812]) + +[#812]: https://github.com/tokio-rs/axum/pull/812 + +# 0.4.7 (1. March, 2022) + +- **added:** Implement `tower::Layer` for `Extension` ([#801]) +- **changed:** Deprecate `AddExtensionLayer`. 
Use `Extension` instead ([#805]) + +[#801]: https://github.com/tokio-rs/axum/pull/801 +[#805]: https://github.com/tokio-rs/axum/pull/805 + +# 0.4.6 (22. February, 2022) + +- **added:** `middleware::from_fn` for creating middleware from async functions. + This previously lived in axum-extra but has been moved to axum ([#719]) +- **fixed:** Set `Allow` header when responding with `405 Method Not Allowed` ([#733]) + +[#719]: https://github.com/tokio-rs/axum/pull/719 +[#733]: https://github.com/tokio-rs/axum/pull/733 + +# 0.4.5 (31. January, 2022) + +- Reference [axum-macros] instead of [axum-debug]. The latter has been superseded by + axum-macros and is deprecated ([#738]) + +[#738]: https://github.com/tokio-rs/axum/pull/738 +[axum-debug]: https://docs.rs/axum-debug +[axum-macros]: https://docs.rs/axum-macros + +# 0.4.4 (13. January, 2022) + +- **fixed:** Fix using incorrect path prefix when nesting `Router`s at `/` ([#691]) +- **fixed:** Make `nest("", service)` work and mean the same as `nest("/", service)` ([#691]) +- **fixed:** Replace response code `301` with `308` for trailing slash redirects. Also deprecates + `Redirect::found` (`302`) in favor of `Redirect::temporary` (`307`) or `Redirect::to` (`303`). + This is to prevent clients from changing non-`GET` requests to `GET` requests ([#682]) + +[#691]: https://github.com/tokio-rs/axum/pull/691 +[#682]: https://github.com/tokio-rs/axum/pull/682 + +# 0.4.3 (21. 
December, 2021) + +- **added:** `axum::AddExtension::layer` ([#607]) +- **added:** Re-export the headers crate when the headers feature is active ([#630]) +- **fixed:** `sse::Event` will no longer drop the leading space of data, event ID and name values + that have it ([#600]) +- **fixed:** `sse::Event` is more strict about what field values it supports, disallowing any SSE + events that break the specification (such as field values containing carriage returns) ([#599]) +- **fixed:** Improve documentation of `sse::Event` ([#601]) +- **fixed:** Make `Path` fail with `ExtensionsAlreadyExtracted` if another extractor (such as + `Request`) has previously taken the request extensions. Thus `PathRejection` now contains a + variant with `ExtensionsAlreadyExtracted`. This is not a breaking change since `PathRejection` is + marked as `#[non_exhaustive]` ([#619]) +- **fixed:** Fix misleading error message for `PathRejection` if extensions had + previously been extracted ([#619]) +- **fixed:** Use `AtomicU32` internally, rather than `AtomicU64`, to improve portability ([#616]) + +[#599]: https://github.com/tokio-rs/axum/pull/599 +[#600]: https://github.com/tokio-rs/axum/pull/600 +[#601]: https://github.com/tokio-rs/axum/pull/601 +[#607]: https://github.com/tokio-rs/axum/pull/607 +[#616]: https://github.com/tokio-rs/axum/pull/616 +[#619]: https://github.com/tokio-rs/axum/pull/619 +[#630]: https://github.com/tokio-rs/axum/pull/630 + +# 0.4.2 (06. December, 2021) + +- **fix:** Depend on the correct version of `axum-core` ([#592]) + +[#592]: https://github.com/tokio-rs/axum/pull/592 + +# 0.4.1 (06. December, 2021) + +- **added:** `axum::response::Response` now exists as a shorthand for writing `Response<BoxBody>` ([#590]) + +[#590]: https://github.com/tokio-rs/axum/pull/590 + +# 0.4.0 (02. 
December, 2021) + +- **breaking:** New `MethodRouter` that works similarly to `Router`: + - Route to handlers and services with the same type + - Add middleware to some routes more easily with `MethodRouter::layer` and + `MethodRouter::route_layer`. + - Merge method routers with `MethodRouter::merge` + - Customize response for unsupported methods with `MethodRouter::fallback` +- **breaking:** The default for the type parameter in `FromRequest` and + `RequestParts` has been removed. Use `FromRequest<axum::body::Body>` and + `RequestParts<axum::body::Body>` to get the previous behavior ([#564]) +- **added:** `FromRequest` and `IntoResponse` are now defined in a new crate called + `axum-core`. This crate is intended for library authors to depend on, rather + than `axum` itself, if possible. `axum-core` has a smaller API and will thus + receive fewer breaking changes. `FromRequest` and `IntoResponse` are + re-exported from `axum` in the same location so nothing is changed for `axum` + users ([#564]) +- **breaking:** The previously deprecated `axum::body::box_body` function has + been removed. Use `axum::body::boxed` instead. +- **fixed:** Adding the same route with different methods now works ie + `.route("/", get(_)).route("/", post(_))`. +- **breaking:** `routing::handler_method_router` and + `routing::service_method_router` has been removed in favor of + `routing::{get, get_service, ..., MethodRouter}`. +- **breaking:** `HandleErrorExt` has been removed in favor of + `MethodRouter::handle_error`. +- **breaking:** `HandleErrorLayer` now requires the handler function to be + `async` ([#534]) +- **added:** `HandleErrorLayer` now supports running extractors. +- **breaking:** The `Handler` trait is now defined as `Handler<T, B = Body>`. That is the type parameters have been swapped and `B` defaults to + `axum::body::Body` ([#527]) +- **breaking:** `Router::merge` will panic if both routers have fallbacks. 
+ Previously the left side fallback would be silently discarded ([#529]) +- **breaking:** `Router::nest` will panic if the nested router has a fallback. + Previously it would be silently discarded ([#529]) +- Update WebSockets to use tokio-tungstenite 0.16 ([#525]) +- **added:** Default to return `charset=utf-8` for text content type. ([#554]) +- **breaking:** The `Body` and `BodyError` associated types on the + `IntoResponse` trait have been removed - instead, `.into_response()` will now + always return `Response` ([#571]) +- **breaking:** `PathParamsRejection` has been renamed to `PathRejection` and its + variants renamed to `FailedToDeserializePathParams` and `MissingPathParams`. This + makes it more consistent with the rest of axum ([#574]) +- **added:** `Path`'s rejection type now provides data about exactly which part of + the path couldn't be deserialized ([#574]) + +[#525]: https://github.com/tokio-rs/axum/pull/525 +[#527]: https://github.com/tokio-rs/axum/pull/527 +[#529]: https://github.com/tokio-rs/axum/pull/529 +[#534]: https://github.com/tokio-rs/axum/pull/534 +[#554]: https://github.com/tokio-rs/axum/pull/554 +[#564]: https://github.com/tokio-rs/axum/pull/564 +[#571]: https://github.com/tokio-rs/axum/pull/571 +[#574]: https://github.com/tokio-rs/axum/pull/574 + +# 0.3.4 (13. November, 2021) + +- **changed:** `box_body` has been renamed to `boxed`. `box_body` still exists + but is deprecated ([#530]) + +[#530]: https://github.com/tokio-rs/axum/pull/530 + +# 0.3.3 (13. November, 2021) + +- Implement `FromRequest` for [`http::request::Parts`] so it can be used an + extractor ([#489]) +- Implement `IntoResponse` for `http::response::Parts` ([#490]) + +[#489]: https://github.com/tokio-rs/axum/pull/489 +[#490]: https://github.com/tokio-rs/axum/pull/490 +[`http::request::Parts`]: https://docs.rs/http/latest/http/request/struct.Parts.html + +# 0.3.2 (08. 
November, 2021) + +- **added:** Add `Router::route_layer` for applying middleware that + will only run on requests that match a route. This is useful for middleware + that return early, such as authorization ([#474]) + +[#474]: https://github.com/tokio-rs/axum/pull/474 + +# 0.3.1 (06. November, 2021) + +- **fixed:** Implement `Clone` for `IntoMakeServiceWithConnectInfo` ([#471]) + +[#471]: https://github.com/tokio-rs/axum/pull/471 + +# 0.3.0 (02. November, 2021) + +- Overall: + - **fixed:** All known compile time issues are resolved, including those with + `boxed` and those introduced by Rust 1.56 ([#404]) + - **breaking:** The router's type is now always `Router` regardless of how many routes or + middleware are applied ([#404]) + + This means router types are all always nameable: + + ```rust + fn my_routes() -> Router { + Router::new().route( + "/users", + post(|| async { "Hello, World!" }), + ) + } + ``` + - **breaking:** Added feature flags for HTTP1 and JSON. This enables removing a + few dependencies if your app only uses HTTP2 or doesn't use JSON. This is only a + breaking change if you depend on axum with `default_features = false`. ([#286]) + - **breaking:** `Route::boxed` and `BoxRoute` have been removed as they're no longer + necessary ([#404]) + - **breaking:** `Nested`, `Or` types are now private. They no longer had to be + public because `Router` is internally boxed ([#404]) + - **breaking:** Remove `routing::Layered` as it didn't actually do anything and + thus wasn't necessary + - **breaking:** Vendor `AddExtensionLayer` and `AddExtension` to reduce public + dependencies + - **breaking:** `body::BoxBody` is now a type alias for + `http_body::combinators::UnsyncBoxBody` and thus is no longer `Sync`. This + is because bodies are streams and requiring streams to be `Sync` is + unnecessary. + - **added:** Implement `IntoResponse` for `http_body::combinators::UnsyncBoxBody`. 
+ - **added:** Add `Handler::into_make_service` for serving a handler without a + `Router`. + - **added:** Add `Handler::into_make_service_with_connect_info` for serving a + handler without a `Router`, and storing info about the incoming connection. + - **breaking:** axum's minimum supported rust version is now 1.56 +- Routing: + - Big internal refactoring of routing leading to several improvements ([#363]) + - **added:** Wildcard routes like `.route("/api/users/*rest", service)` are now supported. + - **fixed:** The order routes are added in no longer matters. + - **fixed:** Adding a conflicting route will now cause a panic instead of silently making + a route unreachable. + - **fixed:** Route matching is faster as number of routes increases. + - **breaking:** Handlers for multiple HTTP methods must be added in the same + `Router::route` call. So `.route("/", get(get_handler).post(post_handler))` and + _not_ `.route("/", get(get_handler)).route("/", post(post_handler))`. + - **fixed:** Correctly handle trailing slashes in routes: + - If a route with a trailing slash exists and a request without a trailing + slash is received, axum will send a 301 redirection to the route with the + trailing slash. + - Or vice versa if a route without a trailing slash exists and a request + with a trailing slash is received. + - This can be overridden by explicitly defining two routes: One with and one + without a trailing slash. + - **breaking:** Method routing for handlers has been moved from `axum::handler` + to `axum::routing`. So `axum::handler::get` now lives at `axum::routing::get` + ([#405]) + - **breaking:** Method routing for services has been moved from `axum::service` + to `axum::routing::service_method_routing`. So `axum::service::get` now lives at + `axum::routing::service_method_routing::get`, etc. ([#405]) + - **breaking:** `Router::or` renamed to `Router::merge` and will now panic on + overlapping routes. It now only accepts `Router`s and not general `Service`s. 
+ Use `Router::fallback` for adding fallback routes ([#408]) + - **added:** `Router::fallback` for adding handlers for requests that didn't + match any routes. `Router::fallback` must be used instead of `nest("/", _)` ([#408]) + - **breaking:** `EmptyRouter` has been renamed to `MethodNotAllowed` as it's only + used in method routers and not in path routers (`Router`) + - **breaking:** Remove support for routing based on the `CONNECT` method. An + example of combining axum with an HTTP proxy can be found [here][proxy] ([#428]) +- Extractors: + - **fixed:** Expand accepted content types for JSON requests ([#378]) + - **fixed:** Support deserializing `i128` and `u128` in `extract::Path` + - **breaking:** Automatically do percent decoding in `extract::Path` + ([#272]) + - **breaking:** Change `Connected::connect_info` to return `Self` and remove + the associated type `ConnectInfo` ([#396]) + - **added:** Add `extract::MatchedPath` for accessing path in router that + matched the request ([#412]) +- Error handling: + - **breaking:** Simplify error handling model ([#402]): + - All services part of the router are now required to be infallible. + - Error handling utilities have been moved to an `error_handling` module. + - `Router::check_infallible` has been removed since routers are always + infallible with the error handling changes. + - Error handling closures must now handle all errors and thus always return + something that implements `IntoResponse`. 
+ + With these changes handling errors from fallible middleware is done like so: + + ```rust,no_run + use axum::{ + routing::get, + http::StatusCode, + error_handling::HandleErrorLayer, + response::IntoResponse, + Router, BoxError, + }; + use tower::ServiceBuilder; + use std::time::Duration; + + let middleware_stack = ServiceBuilder::new() + // Handle errors from middleware + // + // This middleware most be added above any fallible + // ones if you're using `ServiceBuilder`, due to how ordering works + .layer(HandleErrorLayer::new(handle_error)) + // Return an error after 30 seconds + .timeout(Duration::from_secs(30)); + + let app = Router::new() + .route("/", get(|| async { /* ... */ })) + .layer(middleware_stack); + + fn handle_error(_error: BoxError) -> impl IntoResponse { + StatusCode::REQUEST_TIMEOUT + } + ``` + + And handling errors from fallible leaf services is done like so: + + ```rust + use axum::{ + Router, service, + body::Body, + routing::service_method_routing::get, + response::IntoResponse, + http::{Request, Response}, + error_handling::HandleErrorExt, // for `.handle_error` + }; + use std::{io, convert::Infallible}; + use tower::service_fn; + + let app = Router::new() + .route( + "/", + get(service_fn(|_req: Request| async { + let contents = tokio::fs::read_to_string("some_file").await?; + Ok::<_, io::Error>(Response::new(Body::from(contents))) + })) + .handle_error(handle_io_error), + ); + + fn handle_io_error(error: io::Error) -> impl IntoResponse { + // ... 
+ } + ``` +- Misc: + - `InvalidWebsocketVersionHeader` has been renamed to `InvalidWebSocketVersionHeader` ([#416]) + - `WebsocketKeyHeaderMissing` has been renamed to `WebSocketKeyHeaderMissing` ([#416]) + +[#339]: https://github.com/tokio-rs/axum/pull/339 +[#286]: https://github.com/tokio-rs/axum/pull/286 +[#272]: https://github.com/tokio-rs/axum/pull/272 +[#378]: https://github.com/tokio-rs/axum/pull/378 +[#363]: https://github.com/tokio-rs/axum/pull/363 +[#396]: https://github.com/tokio-rs/axum/pull/396 +[#402]: https://github.com/tokio-rs/axum/pull/402 +[#404]: https://github.com/tokio-rs/axum/pull/404 +[#405]: https://github.com/tokio-rs/axum/pull/405 +[#408]: https://github.com/tokio-rs/axum/pull/408 +[#412]: https://github.com/tokio-rs/axum/pull/412 +[#416]: https://github.com/tokio-rs/axum/pull/416 +[#428]: https://github.com/tokio-rs/axum/pull/428 +[proxy]: https://github.com/tokio-rs/axum/blob/main/examples/http-proxy/src/main.rs + +# 0.2.8 (07. October, 2021) + +- Document debugging handler type errors with "axum-debug" ([#372]) + +[#372]: https://github.com/tokio-rs/axum/pull/372 + +# 0.2.7 (06. October, 2021) + +- Bump minimum version of async-trait ([#370]) + +[#370]: https://github.com/tokio-rs/axum/pull/370 + +# 0.2.6 (02. October, 2021) + +- Clarify that `handler::any` and `service::any` only accepts standard HTTP + methods ([#337]) +- Document how to customize error responses from extractors ([#359]) + +[#337]: https://github.com/tokio-rs/axum/pull/337 +[#359]: https://github.com/tokio-rs/axum/pull/359 + +# 0.2.5 (18. September, 2021) + +- Add accessors for `TypedHeaderRejection` fields ([#317]) +- Improve docs for extractors ([#327]) + +[#317]: https://github.com/tokio-rs/axum/pull/317 +[#327]: https://github.com/tokio-rs/axum/pull/327 + +# 0.2.4 (10. 
September, 2021) + +- Document using `StreamExt::split` with `WebSocket` ([#291]) +- Document adding middleware to multiple groups of routes ([#293]) + +[#291]: https://github.com/tokio-rs/axum/pull/291 +[#293]: https://github.com/tokio-rs/axum/pull/293 + +# 0.2.3 (26. August, 2021) + +- **fixed:** Fix accidental breaking change introduced by internal refactor. + `BoxRoute` used to be `Sync` but was accidental made `!Sync` ([#273](https://github.com/tokio-rs/axum/pull/273)) + +# 0.2.2 (26. August, 2021) + +- **fixed:** Fix URI captures matching empty segments. This means requests with + URI `/` will no longer be matched by `/:key` ([#264](https://github.com/tokio-rs/axum/pull/264)) +- **fixed:** Remove needless trait bounds from `Router::boxed` ([#269](https://github.com/tokio-rs/axum/pull/269)) + +# 0.2.1 (24. August, 2021) + +- **added:** Add `Redirect::to` constructor ([#255](https://github.com/tokio-rs/axum/pull/255)) +- **added:** Document how to implement `IntoResponse` for custom error type ([#258](https://github.com/tokio-rs/axum/pull/258)) + +# 0.2.0 (23. August, 2021) + +- Overall: + - **fixed:** Overall compile time improvements. If you're having issues with compile time + please file an issue! ([#184](https://github.com/tokio-rs/axum/pull/184)) ([#198](https://github.com/tokio-rs/axum/pull/198)) ([#220](https://github.com/tokio-rs/axum/pull/220)) + - **changed:** Remove `prelude`. Explicit imports are now required ([#195](https://github.com/tokio-rs/axum/pull/195)) +- Routing: + - **added:** Add dedicated `Router` to replace the `RoutingDsl` trait ([#214](https://github.com/tokio-rs/axum/pull/214)) + - **added:** Add `Router::or` for combining routes ([#108](https://github.com/tokio-rs/axum/pull/108)) + - **fixed:** Support matching different HTTP methods for the same route that aren't defined + together. So `Router::new().route("/", get(...)).route("/", post(...))` now + accepts both `GET` and `POST`. 
Previously only `POST` would be accepted ([#224](https://github.com/tokio-rs/axum/pull/224)) + - **fixed:** `get` routes will now also be called for `HEAD` requests but will always have + the response body removed ([#129](https://github.com/tokio-rs/axum/pull/129)) + - **changed:** Replace `axum::route(...)` with `axum::Router::new().route(...)`. This means + there is now only one way to create a new router. Same goes for + `axum::routing::nest`. ([#215](https://github.com/tokio-rs/axum/pull/215)) + - **changed:** Implement `routing::MethodFilter` via [`bitflags`](https://crates.io/crates/bitflags) ([#158](https://github.com/tokio-rs/axum/pull/158)) + - **changed:** Move `handle_error` from `ServiceExt` to `service::OnMethod` ([#160](https://github.com/tokio-rs/axum/pull/160)) + + With these changes this app using 0.1: + + ```rust + use axum::{extract::Extension, prelude::*, routing::BoxRoute, AddExtensionLayer}; + + let app = route("/", get(|| async { "hi" })) + .nest("/api", api_routes()) + .layer(AddExtensionLayer::new(state)); + + fn api_routes() -> BoxRoute { + route( + "/users", + post(|Extension(state): Extension| async { "hi from nested" }), + ) + .boxed() + } + ``` + + Becomes this in 0.2: + + ```rust + use axum::{ + extract::Extension, + handler::{get, post}, + routing::BoxRoute, + Router, + }; + + let app = Router::new() + .route("/", get(|| async { "hi" })) + .nest("/api", api_routes()); + + fn api_routes() -> Router { + Router::new() + .route( + "/users", + post(|Extension(state): Extension| async { "hi from nested" }), + ) + .boxed() + } + ``` +- Extractors: + - **added:** Make `FromRequest` default to being generic over `body::Body` ([#146](https://github.com/tokio-rs/axum/pull/146)) + - **added:** Implement `std::error::Error` for all rejections ([#153](https://github.com/tokio-rs/axum/pull/153)) + - **added:** Add `OriginalUri` for extracting original request URI in nested services 
([#197](https://github.com/tokio-rs/axum/pull/197)) + - **added:** Implement `FromRequest` for `http::Extensions` ([#169](https://github.com/tokio-rs/axum/pull/169)) + - **added:** Make `RequestParts::{new, try_into_request}` public so extractors can be used outside axum ([#194](https://github.com/tokio-rs/axum/pull/194)) + - **added:** Implement `FromRequest` for `axum::body::Body` ([#241](https://github.com/tokio-rs/axum/pull/241)) + - **changed:** Removed `extract::UrlParams` and `extract::UrlParamsMap`. Use `extract::Path` instead ([#154](https://github.com/tokio-rs/axum/pull/154)) + - **changed:** `extractor_middleware` now requires `RequestBody: Default` ([#167](https://github.com/tokio-rs/axum/pull/167)) + - **changed:** Convert `RequestAlreadyExtracted` to an enum with each possible error variant ([#167](https://github.com/tokio-rs/axum/pull/167)) + - **changed:** `extract::BodyStream` is no longer generic over the request body ([#234](https://github.com/tokio-rs/axum/pull/234)) + - **changed:** `extract::Body` has been renamed to `extract::RawBody` to avoid conflicting with `body::Body` ([#233](https://github.com/tokio-rs/axum/pull/233)) + - **changed:** `RequestParts` changes ([#153](https://github.com/tokio-rs/axum/pull/153)) + - `method` new returns an `&http::Method` + - `method_mut` new returns an `&mut http::Method` + - `take_method` has been removed + - `uri` new returns an `&http::Uri` + - `uri_mut` new returns an `&mut http::Uri` + - `take_uri` has been removed + - **changed:** Remove several rejection types that were no longer used ([#153](https://github.com/tokio-rs/axum/pull/153)) ([#154](https://github.com/tokio-rs/axum/pull/154)) +- Responses: + - **added:** Add `Headers` for easily customizing headers on a response ([#193](https://github.com/tokio-rs/axum/pull/193)) + - **added:** Add `Redirect` response ([#192](https://github.com/tokio-rs/axum/pull/192)) + - **added:** Add 
`body::StreamBody` for easily responding with a stream of byte chunks ([#237](https://github.com/tokio-rs/axum/pull/237)) + - **changed:** Add associated `Body` and `BodyError` types to `IntoResponse`. This is + required for returning responses with bodies other than `hyper::Body` from + handlers. See the docs for advice on how to implement `IntoResponse` ([#86](https://github.com/tokio-rs/axum/pull/86)) + - **changed:** `tower::util::Either` no longer implements `IntoResponse` ([#229](https://github.com/tokio-rs/axum/pull/229)) + + This `IntoResponse` from 0.1: + ```rust + use axum::{http::Response, prelude::*, response::IntoResponse}; + + struct MyResponse; + + impl IntoResponse for MyResponse { + fn into_response(self) -> Response { + Response::new(Body::empty()) + } + } + ``` + + Becomes this in 0.2: + ```rust + use axum::{body::Body, http::Response, response::IntoResponse}; + + struct MyResponse; + + impl IntoResponse for MyResponse { + type Body = Body; + type BodyError = ::Error; + + fn into_response(self) -> Response { + Response::new(Body::empty()) + } + } + ``` +- SSE: + - **added:** Add `response::sse::Sse`. This implements SSE using a response rather than a service ([#98](https://github.com/tokio-rs/axum/pull/98)) + - **changed:** Remove `axum::sse`. 
It has been replaced by `axum::response::sse` ([#98](https://github.com/tokio-rs/axum/pull/98)) + + Handler using SSE in 0.1: + ```rust + use axum::{ + prelude::*, + sse::{sse, Event}, + }; + use std::convert::Infallible; + + let app = route( + "/", + sse(|| async { + let stream = futures::stream::iter(vec![Ok::<_, Infallible>( + Event::default().data("hi there!"), + )]); + Ok::<_, Infallible>(stream) + }), + ); + ``` + + Becomes this in 0.2: + + ```rust + use axum::{ + handler::get, + response::sse::{Event, Sse}, + Router, + }; + use std::convert::Infallible; + + let app = Router::new().route( + "/", + get(|| async { + let stream = futures::stream::iter(vec![Ok::<_, Infallible>( + Event::default().data("hi there!"), + )]); + Sse::new(stream) + }), + ); + ``` +- WebSockets: + - **changed:** Change WebSocket API to use an extractor plus a response ([#121](https://github.com/tokio-rs/axum/pull/121)) + - **changed:** Make WebSocket `Message` an enum ([#116](https://github.com/tokio-rs/axum/pull/116)) + - **changed:** `WebSocket` now uses `Error` as its error type ([#150](https://github.com/tokio-rs/axum/pull/150)) + + Handler using WebSockets in 0.1: + + ```rust + use axum::{ + prelude::*, + ws::{ws, WebSocket}, + }; + + let app = route( + "/", + ws(|socket: WebSocket| async move { + // do stuff with socket + }), + ); + ``` + + Becomes this in 0.2: + + ```rust + use axum::{ + extract::ws::{WebSocket, WebSocketUpgrade}, + handler::get, + Router, + }; + + let app = Router::new().route( + "/", + get(|ws: WebSocketUpgrade| async move { + ws.on_upgrade(|socket: WebSocket| async move { + // do stuff with socket + }) + }), + ); + ``` +- Misc + - **added:** Add default feature `tower-log` which exposes `tower`'s `log` feature. 
([#218](https://github.com/tokio-rs/axum/pull/218)) + - **changed:** Replace `body::BoxStdError` with `axum::Error`, which supports downcasting ([#150](https://github.com/tokio-rs/axum/pull/150)) + - **changed:** `EmptyRouter` now requires the response body to implement `Send + Sync + 'static'` ([#108](https://github.com/tokio-rs/axum/pull/108)) + - **changed:** `Router::check_infallible` now returns a `CheckInfallible` service. This + is to improve compile times ([#198](https://github.com/tokio-rs/axum/pull/198)) + - **changed:** `Router::into_make_service` now returns `routing::IntoMakeService` rather than + `tower::make::Shared` ([#229](https://github.com/tokio-rs/axum/pull/229)) + - **changed:** All usage of `tower::BoxError` has been replaced with `axum::BoxError` ([#229](https://github.com/tokio-rs/axum/pull/229)) + - **changed:** Several response future types have been moved into dedicated + `future` modules ([#133](https://github.com/tokio-rs/axum/pull/133)) + - **changed:** `EmptyRouter`, `ExtractorMiddleware`, `ExtractorMiddlewareLayer`, + and `QueryStringMissing` no longer implement `Copy` ([#132](https://github.com/tokio-rs/axum/pull/132)) + - **changed:** `service::OnMethod`, `handler::OnMethod`, and `routing::Nested` have new response future types ([#157](https://github.com/tokio-rs/axum/pull/157)) + +# 0.1.3 (06. August, 2021) + +- Fix stripping prefix when nesting services at `/` ([#91](https://github.com/tokio-rs/axum/pull/91)) +- Add support for WebSocket protocol negotiation ([#83](https://github.com/tokio-rs/axum/pull/83)) +- Use `pin-project-lite` instead of `pin-project` ([#95](https://github.com/tokio-rs/axum/pull/95)) +- Re-export `http` crate and `hyper::Server` ([#110](https://github.com/tokio-rs/axum/pull/110)) +- Fix `Query` and `Form` extractors giving bad request error when query string is empty. ([#117](https://github.com/tokio-rs/axum/pull/117)) +- Add `Path` extractor. 
([#124](https://github.com/tokio-rs/axum/pull/124)) +- Fixed the implementation of `IntoResponse` of `(HeaderMap, T)` and `(StatusCode, HeaderMap, T)`, which would ignore headers from `T` ([#137](https://github.com/tokio-rs/axum/pull/137)) +- Deprecate `extract::UrlParams` and `extract::UrlParamsMap`. Use `extract::Path` instead ([#138](https://github.com/tokio-rs/axum/pull/138)) + +# 0.1.2 (01. August, 2021) + +- Implement `Stream` for `WebSocket` ([#52](https://github.com/tokio-rs/axum/pull/52)) +- Implement `Sink` for `WebSocket` ([#52](https://github.com/tokio-rs/axum/pull/52)) +- Implement `Deref` for most extractors ([#56](https://github.com/tokio-rs/axum/pull/56)) +- Return `405 Method Not Allowed` for unsupported method for route ([#63](https://github.com/tokio-rs/axum/pull/63)) +- Add extractor for remote connection info ([#55](https://github.com/tokio-rs/axum/pull/55)) +- Improve error message of `MissingExtension` rejections ([#72](https://github.com/tokio-rs/axum/pull/72)) +- Improve documentation for routing ([#71](https://github.com/tokio-rs/axum/pull/71)) +- Clarify required response body type when routing to `tower::Service`s ([#69](https://github.com/tokio-rs/axum/pull/69)) +- Add `axum::body::box_body` for converting an `http_body::Body` to `axum::body::BoxBody` ([#69](https://github.com/tokio-rs/axum/pull/69)) +- Add `axum::sse` for Server-Sent Events ([#75](https://github.com/tokio-rs/axum/pull/75)) +- Mention required dependencies in docs ([#77](https://github.com/tokio-rs/axum/pull/77)) +- Fix WebSockets failing on Firefox ([#76](https://github.com/tokio-rs/axum/pull/76)) + +# 0.1.1 (30. July, 2021) + +- Misc readme fixes. + +# 0.1.0 (30. July, 2021) + +- Initial release. 
diff --git a/.cargo-vendor/axum-0.6.20/Cargo.toml b/.cargo-vendor/axum-0.6.20/Cargo.toml new file mode 100644 index 0000000000..e6bc8c5004 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/Cargo.toml @@ -0,0 +1,371 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.63" +name = "axum" +version = "0.6.20" +description = "Web framework that focuses on ergonomics and modularity" +homepage = "https://github.com/tokio-rs/axum" +readme = "README.md" +keywords = [ + "http", + "web", + "framework", +] +categories = [ + "asynchronous", + "network-programming", + "web-programming::http-server", +] +license = "MIT" +repository = "https://github.com/tokio-rs/axum" + +[package.metadata.cargo-public-api-crates] +allowed = [ + "async_trait", + "axum_core", + "axum_macros", + "bytes", + "futures_core", + "futures_sink", + "futures_util", + "headers", + "headers_core", + "http", + "http_body", + "hyper", + "serde", + "serde_json", + "tower_layer", + "tower_service", +] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[package.metadata.playground] +features = [ + "http1", + "http2", + "json", + "multipart", + "ws", +] + +[[bench]] +name = "benches" +harness = false + +[dependencies.async-trait] +version = "0.1.67" + +[dependencies.axum-core] +version = "0.3.4" + +[dependencies.axum-macros] +version = "0.3.8" +optional = true + +[dependencies.base64] +version = "0.21.0" +optional = true + +[dependencies.bitflags] +version = "1.0" + +[dependencies.bytes] +version = 
"1.0" + +[dependencies.futures-util] +version = "0.3" +features = ["alloc"] +default-features = false + +[dependencies.headers] +version = "0.3.7" +optional = true + +[dependencies.http] +version = "0.2.9" + +[dependencies.http-body] +version = "0.4.4" + +[dependencies.hyper] +version = "0.14.24" +features = ["stream"] + +[dependencies.itoa] +version = "1.0.5" + +[dependencies.matchit] +version = "0.7" + +[dependencies.memchr] +version = "2.4.1" + +[dependencies.mime] +version = "0.3.16" + +[dependencies.multer] +version = "2.0.0" +optional = true + +[dependencies.percent-encoding] +version = "2.1" + +[dependencies.pin-project-lite] +version = "0.2.7" + +[dependencies.serde] +version = "1.0" + +[dependencies.serde_json] +version = "1.0" +features = ["raw_value"] +optional = true + +[dependencies.serde_path_to_error] +version = "0.1.8" +optional = true + +[dependencies.serde_urlencoded] +version = "0.7" +optional = true + +[dependencies.sha1] +version = "0.10" +optional = true + +[dependencies.sync_wrapper] +version = "0.1.1" + +[dependencies.tokio] +version = "1.25.0" +features = ["time"] +optional = true +package = "tokio" + +[dependencies.tokio-tungstenite] +version = "0.20" +optional = true + +[dependencies.tower] +version = "0.4.13" +features = ["util"] +default-features = false + +[dependencies.tower-http] +version = "0.4" +features = [ + "add-extension", + "auth", + "catch-panic", + "compression-br", + "compression-deflate", + "compression-gzip", + "cors", + "decompression-br", + "decompression-deflate", + "decompression-gzip", + "follow-redirect", + "fs", + "limit", + "map-request-body", + "map-response-body", + "metrics", + "normalize-path", + "propagate-header", + "redirect", + "request-id", + "sensitive-headers", + "set-header", + "set-status", + "timeout", + "trace", + "util", + "validate-request", +] +optional = true + +[dependencies.tower-layer] +version = "0.3.2" + +[dependencies.tower-service] +version = "0.3" + +[dependencies.tracing] +version = 
"0.1" +optional = true +default-features = false + +[dev-dependencies.anyhow] +version = "1.0" + +[dev-dependencies.axum-macros] +version = "0.3.8" +features = ["__private"] + +[dev-dependencies.quickcheck] +version = "1.0" + +[dev-dependencies.quickcheck_macros] +version = "1.0" + +[dev-dependencies.reqwest] +version = "0.11.14" +features = [ + "json", + "stream", + "multipart", +] +default-features = false + +[dev-dependencies.rustversion] +version = "1.0.9" + +[dev-dependencies.serde] +version = "1.0" +features = ["derive"] + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.time] +version = "0.3" +features = ["serde-human-readable"] + +[dev-dependencies.tokio] +version = "1.25.0" +features = [ + "macros", + "rt", + "rt-multi-thread", + "net", + "test-util", +] +package = "tokio" + +[dev-dependencies.tokio-stream] +version = "0.1" + +[dev-dependencies.tower] +version = "0.4.10" +features = [ + "util", + "timeout", + "limit", + "load-shed", + "steer", + "filter", +] +package = "tower" + +[dev-dependencies.tower-http] +version = "0.4" +features = [ + "add-extension", + "auth", + "catch-panic", + "compression-br", + "compression-deflate", + "compression-gzip", + "cors", + "decompression-br", + "decompression-deflate", + "decompression-gzip", + "follow-redirect", + "fs", + "limit", + "map-request-body", + "map-response-body", + "metrics", + "normalize-path", + "propagate-header", + "redirect", + "request-id", + "sensitive-headers", + "set-header", + "set-status", + "timeout", + "trace", + "util", + "validate-request", +] + +[dev-dependencies.tracing] +version = "0.1" + +[dev-dependencies.tracing-subscriber] +version = "0.3" +features = ["json"] + +[dev-dependencies.uuid] +version = "1.0" +features = [ + "serde", + "v4", +] + +[build-dependencies.rustversion] +version = "1.0.9" + +[features] +__private_docs = [ + "tower/full", + "dep:tower-http", +] +default = [ + "form", + "http1", + "json", + "matched-path", + "original-uri", + "query", + 
"tokio", + "tower-log", +] +form = ["dep:serde_urlencoded"] +http1 = ["hyper/http1"] +http2 = ["hyper/http2"] +json = [ + "dep:serde_json", + "dep:serde_path_to_error", +] +macros = ["dep:axum-macros"] +matched-path = [] +multipart = ["dep:multer"] +original-uri = [] +query = ["dep:serde_urlencoded"] +tokio = [ + "dep:tokio", + "hyper/server", + "hyper/tcp", + "hyper/runtime", + "tower/make", +] +tower-log = ["tower/log"] +tracing = [ + "dep:tracing", + "axum-core/tracing", +] +ws = [ + "tokio", + "dep:tokio-tungstenite", + "dep:sha1", + "dep:base64", +] diff --git a/.cargo-vendor/axum-0.6.20/LICENSE b/.cargo-vendor/axum-0.6.20/LICENSE new file mode 100644 index 0000000000..11598b4b4d --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 Axum Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo-vendor/axum-0.6.20/README.md b/.cargo-vendor/axum-0.6.20/README.md new file mode 100644 index 0000000000..32e7ebf232 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/README.md @@ -0,0 +1,161 @@ +# axum + +`axum` is a web application framework that focuses on ergonomics and modularity. + +[![Build status](https://github.com/tokio-rs/axum/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/tokio-rs/axum/actions/workflows/CI.yml) +[![Crates.io](https://img.shields.io/crates/v/axum)](https://crates.io/crates/axum) +[![Documentation](https://docs.rs/axum/badge.svg)](https://docs.rs/axum) + +More information about this crate can be found in the [crate documentation][docs]. + +## High level features + +- Route requests to handlers with a macro free API. +- Declaratively parse requests using extractors. +- Simple and predictable error handling model. +- Generate responses with minimal boilerplate. +- Take full advantage of the [`tower`] and [`tower-http`] ecosystem of + middleware, services, and utilities. + +In particular the last point is what sets `axum` apart from other frameworks. +`axum` doesn't have its own middleware system but instead uses +[`tower::Service`]. This means `axum` gets timeouts, tracing, compression, +authorization, and more, for free. It also enables you to share middleware with +applications written using [`hyper`] or [`tonic`]. 
+ +## Usage example + +```rust +use axum::{ + routing::{get, post}, + http::StatusCode, + response::IntoResponse, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use std::net::SocketAddr; + +#[tokio::main] +async fn main() { + // initialize tracing + tracing_subscriber::fmt::init(); + + // build our application with a route + let app = Router::new() + // `GET /` goes to `root` + .route("/", get(root)) + // `POST /users` goes to `create_user` + .route("/users", post(create_user)); + + // run our app with hyper + // `axum::Server` is a re-export of `hyper::Server` + let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); + tracing::debug!("listening on {}", addr); + axum::Server::bind(&addr) + .serve(app.into_make_service()) + .await + .unwrap(); +} + +// basic handler that responds with a static string +async fn root() -> &'static str { + "Hello, World!" +} + +async fn create_user( + // this argument tells axum to parse the request body + // as JSON into a `CreateUser` type + Json(payload): Json, +) -> (StatusCode, Json) { + // insert your application logic here + let user = User { + id: 1337, + username: payload.username, + }; + + // this will be converted into a JSON response + // with a status code of `201 Created` + (StatusCode::CREATED, Json(user)) +} + +// the input to our `create_user` handler +#[derive(Deserialize)] +struct CreateUser { + username: String, +} + +// the output to our `create_user` handler +#[derive(Serialize)] +struct User { + id: u64, + username: String, +} +``` + +You can find this [example][readme-example] as well as other example projects in +the [example directory][examples]. + +See the [crate documentation][docs] for way more examples. + +## Performance + +`axum` is a relatively thin layer on top of [`hyper`] and adds very little +overhead. So `axum`'s performance is comparable to [`hyper`]. 
You can find +benchmarks [here](https://github.com/programatik29/rust-web-benchmarks) and +[here](https://web-frameworks-benchmark.netlify.app/result?l=rust). + +## Safety + +This crate uses `#![forbid(unsafe_code)]` to ensure everything is implemented in +100% safe Rust. + +## Minimum supported Rust version + +axum's MSRV is 1.63. + +## Examples + +The [examples] folder contains various examples of how to use `axum`. The +[docs] also provide lots of code snippets and examples. For full-fledged examples, check out community-maintained [showcases] or [tutorials]. + +## Getting Help + +In the `axum`'s repo we also have a [number of examples][examples] showing how +to put everything together. Community-maintained [showcases] and [tutorials] also demonstrate how to use `axum` for real-world applications. You're also welcome to ask in the [Discord channel][chat] or open a [discussion] with your question. + +## Community projects + +See [here][ecosystem] for a list of community maintained crates and projects +built with `axum`. + +## Contributing + +🎈 Thanks for your help improving the project! We are so happy to have +you! We have a [contributing guide][contributing] to help you get involved in the +`axum` project. + +## License + +This project is licensed under the [MIT license][license]. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `axum` by you, shall be licensed as MIT, without any +additional terms or conditions. 
+ +[readme-example]: https://github.com/tokio-rs/axum/tree/main/examples/readme +[examples]: https://github.com/tokio-rs/axum/tree/main/examples +[docs]: https://docs.rs/axum +[`tower`]: https://crates.io/crates/tower +[`hyper`]: https://crates.io/crates/hyper +[`tower-http`]: https://crates.io/crates/tower-http +[`tonic`]: https://crates.io/crates/tonic +[contributing]: https://github.com/tokio-rs/axum/blob/main/CONTRIBUTING.md +[chat]: https://discord.gg/tokio +[discussion]: https://github.com/tokio-rs/axum/discussions/new?category=q-a +[`tower::Service`]: https://docs.rs/tower/latest/tower/trait.Service.html +[ecosystem]: https://github.com/tokio-rs/axum/blob/main/ECOSYSTEM.md +[showcases]: https://github.com/tokio-rs/axum/blob/main/ECOSYSTEM.md#project-showcase +[tutorials]: https://github.com/tokio-rs/axum/blob/main/ECOSYSTEM.md#tutorials +[license]: https://github.com/tokio-rs/axum/blob/main/axum/LICENSE diff --git a/.cargo-vendor/axum-0.6.20/benches/benches.rs b/.cargo-vendor/axum-0.6.20/benches/benches.rs new file mode 100644 index 0000000000..c3b9c19e34 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/benches/benches.rs @@ -0,0 +1,253 @@ +use axum::{ + extract::State, + routing::{get, post}, + Extension, Json, Router, Server, +}; +use hyper::server::conn::AddrIncoming; +use serde::{Deserialize, Serialize}; +use std::{ + io::BufRead, + process::{Command, Stdio}, +}; + +fn main() { + if on_ci() { + install_rewrk(); + } else { + ensure_rewrk_is_installed(); + } + + benchmark("minimal").run(Router::new); + + benchmark("basic") + .path("/a/b/c") + .run(|| Router::new().route("/a/b/c", get(|| async { "Hello, World!" }))); + + benchmark("basic-merge").path("/a/b/c").run(|| { + let inner = Router::new().route("/a/b/c", get(|| async { "Hello, World!" })); + Router::new().merge(inner) + }); + + benchmark("basic-nest").path("/a/b/c").run(|| { + let c = Router::new().route("/c", get(|| async { "Hello, World!" 
})); + let b = Router::new().nest("/b", c); + Router::new().nest("/a", b) + }); + + benchmark("routing").path("/foo/bar/baz").run(|| { + let mut app = Router::new(); + for a in 0..10 { + for b in 0..10 { + for c in 0..10 { + app = app.route(&format!("/foo-{a}/bar-{b}/baz-{c}"), get(|| async {})); + } + } + } + app.route("/foo/bar/baz", get(|| async {})) + }); + + benchmark("receive-json") + .method("post") + .headers(&[("content-type", "application/json")]) + .body(r#"{"n": 123, "s": "hi there", "b": false}"#) + .run(|| Router::new().route("/", post(|_: Json| async {}))); + + benchmark("send-json").run(|| { + Router::new().route( + "/", + get(|| async { + Json(Payload { + n: 123, + s: "hi there".to_owned(), + b: false, + }) + }), + ) + }); + + let state = AppState { + _string: "aaaaaaaaaaaaaaaaaa".to_owned(), + _vec: Vec::from([ + "aaaaaaaaaaaaaaaaaa".to_owned(), + "bbbbbbbbbbbbbbbbbb".to_owned(), + "cccccccccccccccccc".to_owned(), + ]), + }; + + benchmark("extension").run(|| { + Router::new() + .route("/", get(|_: Extension| async {})) + .layer(Extension(state.clone())) + }); + + benchmark("state").run(|| { + Router::new() + .route("/", get(|_: State| async {})) + .with_state(state.clone()) + }); +} + +#[derive(Clone)] +struct AppState { + _string: String, + _vec: Vec, +} + +#[derive(Deserialize, Serialize)] +struct Payload { + n: u32, + s: String, + b: bool, +} + +fn benchmark(name: &'static str) -> BenchmarkBuilder { + BenchmarkBuilder { + name, + path: None, + method: None, + headers: None, + body: None, + } +} + +struct BenchmarkBuilder { + name: &'static str, + path: Option<&'static str>, + method: Option<&'static str>, + headers: Option<&'static [(&'static str, &'static str)]>, + body: Option<&'static str>, +} + +macro_rules! 
config_method { + ($name:ident, $ty:ty) => { + fn $name(mut self, $name: $ty) -> Self { + self.$name = Some($name); + self + } + }; +} + +impl BenchmarkBuilder { + config_method!(path, &'static str); + config_method!(method, &'static str); + config_method!(headers, &'static [(&'static str, &'static str)]); + config_method!(body, &'static str); + + fn run(self, f: F) + where + F: FnOnce() -> Router<()>, + { + // support only running some benchmarks with + // ``` + // cargo bench -- routing send-json + // ``` + let args = std::env::args().collect::>(); + if args.len() != 1 { + let names = &args[1..args.len() - 1]; + if !names.is_empty() && !names.contains(&self.name.to_owned()) { + return; + } + } + + let app = f(); + + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + + let listener = rt + .block_on(tokio::net::TcpListener::bind("0.0.0.0:0")) + .unwrap(); + let addr = listener.local_addr().unwrap(); + + std::thread::spawn(move || { + rt.block_on(async move { + let incoming = AddrIncoming::from_listener(listener).unwrap(); + Server::builder(incoming) + .serve(app.into_make_service()) + .await + .unwrap(); + }); + }); + + let mut cmd = Command::new("rewrk"); + cmd.stdout(Stdio::piped()); + + cmd.arg("--host"); + cmd.arg(format!("http://{}{}", addr, self.path.unwrap_or(""))); + + cmd.args(["--connections", "10"]); + cmd.args(["--threads", "10"]); + + if on_ci() { + // don't slow down CI by running the benchmarks for too long + // but do run them for a bit + cmd.args(["--duration", "1s"]); + } else { + cmd.args(["--duration", "10s"]); + } + + if let Some(method) = self.method { + cmd.args(["--method", method]); + } + + for (key, value) in self.headers.into_iter().flatten() { + cmd.arg("--header"); + cmd.arg(format!("{key}: {value}")); + } + + if let Some(body) = self.body { + cmd.args(["--body", body]); + } + + eprintln!("Running {:?} benchmark", self.name); + + // indent output from `rewrk` so its easier to read when running 
multiple benchmarks + let mut child = cmd.spawn().unwrap(); + let stdout = child.stdout.take().unwrap(); + let stdout = std::io::BufReader::new(stdout); + for line in stdout.lines() { + let line = line.unwrap(); + println!(" {line}"); + } + + let status = child.wait().unwrap(); + + if !status.success() { + eprintln!("`rewrk` command failed"); + std::process::exit(status.code().unwrap()); + } + } +} + +fn install_rewrk() { + println!("installing rewrk"); + let mut cmd = Command::new("cargo"); + cmd.args([ + "install", + "rewrk", + "--git", + "https://github.com/ChillFish8/rewrk.git", + ]); + let status = cmd + .status() + .unwrap_or_else(|_| panic!("failed to install rewrk")); + if !status.success() { + panic!("failed to install rewrk"); + } +} + +fn ensure_rewrk_is_installed() { + let mut cmd = Command::new("rewrk"); + cmd.arg("--help"); + cmd.stdout(Stdio::null()); + cmd.stderr(Stdio::null()); + cmd.status().unwrap_or_else(|_| { + panic!("rewrk is not installed. See https://github.com/lnx-search/rewrk") + }); +} + +fn on_ci() -> bool { + std::env::var("GITHUB_ACTIONS").is_ok() +} diff --git a/.cargo-vendor/axum-0.6.20/build.rs b/.cargo-vendor/axum-0.6.20/build.rs new file mode 100644 index 0000000000..b52885c626 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/build.rs @@ -0,0 +1,7 @@ +#[rustversion::nightly] +fn main() { + println!("cargo:rustc-cfg=nightly_error_messages"); +} + +#[rustversion::not(nightly)] +fn main() {} diff --git a/.cargo-vendor/axum-0.6.20/src/body/mod.rs b/.cargo-vendor/axum-0.6.20/src/body/mod.rs new file mode 100644 index 0000000000..4eceec0ced --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/body/mod.rs @@ -0,0 +1,17 @@ +//! HTTP body utilities. 
+ +mod stream_body; + +pub use self::stream_body::StreamBody; + +#[doc(no_inline)] +pub use http_body::{Body as HttpBody, Empty, Full}; + +#[doc(no_inline)] +pub use hyper::body::Body; + +#[doc(no_inline)] +pub use bytes::Bytes; + +#[doc(inline)] +pub use axum_core::body::{boxed, BoxBody}; diff --git a/.cargo-vendor/axum/src/body/stream_body.rs b/.cargo-vendor/axum-0.6.20/src/body/stream_body.rs similarity index 100% rename from .cargo-vendor/axum/src/body/stream_body.rs rename to .cargo-vendor/axum-0.6.20/src/body/stream_body.rs diff --git a/.cargo-vendor/axum-0.6.20/src/boxed.rs b/.cargo-vendor/axum-0.6.20/src/boxed.rs new file mode 100644 index 0000000000..f8191f2e26 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/boxed.rs @@ -0,0 +1,193 @@ +use std::{convert::Infallible, fmt}; + +use http::Request; +use tower::Service; + +use crate::{ + body::HttpBody, + handler::Handler, + routing::{future::RouteFuture, Route}, + Router, +}; + +pub(crate) struct BoxedIntoRoute(Box>); + +impl BoxedIntoRoute +where + S: Clone + Send + Sync + 'static, + B: Send + 'static, +{ + pub(crate) fn from_handler(handler: H) -> Self + where + H: Handler, + T: 'static, + B: HttpBody, + { + Self(Box::new(MakeErasedHandler { + handler, + into_route: |handler, state| Route::new(Handler::with_state(handler, state)), + })) + } +} + +impl BoxedIntoRoute { + pub(crate) fn map(self, f: F) -> BoxedIntoRoute + where + S: 'static, + B: 'static, + E: 'static, + F: FnOnce(Route) -> Route + Clone + Send + 'static, + B2: HttpBody + 'static, + E2: 'static, + { + BoxedIntoRoute(Box::new(Map { + inner: self.0, + layer: Box::new(f), + })) + } + + pub(crate) fn into_route(self, state: S) -> Route { + self.0.into_route(state) + } +} + +impl Clone for BoxedIntoRoute { + fn clone(&self) -> Self { + Self(self.0.clone_box()) + } +} + +impl fmt::Debug for BoxedIntoRoute { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("BoxedIntoRoute").finish() + } +} + +pub(crate) trait 
ErasedIntoRoute: Send { + fn clone_box(&self) -> Box>; + + fn into_route(self: Box, state: S) -> Route; + + fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture; +} + +pub(crate) struct MakeErasedHandler { + pub(crate) handler: H, + pub(crate) into_route: fn(H, S) -> Route, +} + +impl ErasedIntoRoute for MakeErasedHandler +where + H: Clone + Send + 'static, + S: 'static, + B: HttpBody + 'static, +{ + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn into_route(self: Box, state: S) -> Route { + (self.into_route)(self.handler, state) + } + + fn call_with_state( + self: Box, + request: Request, + state: S, + ) -> RouteFuture { + self.into_route(state).call(request) + } +} + +impl Clone for MakeErasedHandler +where + H: Clone, +{ + fn clone(&self) -> Self { + Self { + handler: self.handler.clone(), + into_route: self.into_route, + } + } +} + +pub(crate) struct MakeErasedRouter { + pub(crate) router: Router, + pub(crate) into_route: fn(Router, S) -> Route, +} + +impl ErasedIntoRoute for MakeErasedRouter +where + S: Clone + Send + Sync + 'static, + B: HttpBody + Send + 'static, +{ + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn into_route(self: Box, state: S) -> Route { + (self.into_route)(self.router, state) + } + + fn call_with_state( + mut self: Box, + request: Request, + state: S, + ) -> RouteFuture { + self.router.call_with_state(request, state) + } +} + +impl Clone for MakeErasedRouter +where + S: Clone, +{ + fn clone(&self) -> Self { + Self { + router: self.router.clone(), + into_route: self.into_route, + } + } +} + +pub(crate) struct Map { + pub(crate) inner: Box>, + pub(crate) layer: Box>, +} + +impl ErasedIntoRoute for Map +where + S: 'static, + B: 'static, + E: 'static, + B2: HttpBody + 'static, + E2: 'static, +{ + fn clone_box(&self) -> Box> { + Box::new(Self { + inner: self.inner.clone_box(), + layer: self.layer.clone_box(), + }) + } + + fn into_route(self: Box, state: S) -> Route { + 
(self.layer)(self.inner.into_route(state)) + } + + fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture { + (self.layer)(self.inner.into_route(state)).call(request) + } +} + +pub(crate) trait LayerFn: FnOnce(Route) -> Route + Send { + fn clone_box(&self) -> Box>; +} + +impl LayerFn for F +where + F: FnOnce(Route) -> Route + Clone + Send + 'static, +{ + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/docs/debugging_handler_type_errors.md b/.cargo-vendor/axum-0.6.20/src/docs/debugging_handler_type_errors.md new file mode 100644 index 0000000000..d9a5b45d14 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/debugging_handler_type_errors.md @@ -0,0 +1,37 @@ +## Debugging handler type errors + +For a function to be used as a handler it must implement the [`Handler`] trait. +axum provides blanket implementations for functions that: + +- Are `async fn`s. +- Take no more than 16 arguments that all implement [`FromRequest`]. +- Returns something that implements [`IntoResponse`]. +- If a closure is used it must implement `Clone + Send` and be +`'static`. +- Returns a future that is `Send`. The most common way to accidentally make a +future `!Send` is to hold a `!Send` type across an await. + +Unfortunately Rust gives poor error messages if you try to use a function +that doesn't quite match what's required by [`Handler`]. + +You might get an error like this: + +```not_rust +error[E0277]: the trait bound `fn(bool) -> impl Future {handler}: Handler<_, _>` is not satisfied + --> src/main.rs:13:44 + | +13 | let app = Router::new().route("/", get(handler)); + | ^^^^^^^ the trait `Handler<_, _>` is not implemented for `fn(bool) -> impl Future {handler}` + | + ::: axum/src/handler/mod.rs:116:8 + | +116 | H: Handler, + | ------------- required by this bound in `axum::routing::get` +``` + +This error doesn't tell you _why_ your function doesn't implement +[`Handler`]. 
It's possible to improve the error with the [`debug_handler`] +proc-macro from the [axum-macros] crate. + +[axum-macros]: https://docs.rs/axum-macros +[`debug_handler`]: https://docs.rs/axum-macros/latest/axum_macros/attr.debug_handler.html diff --git a/.cargo-vendor/axum-0.6.20/src/docs/error_handling.md b/.cargo-vendor/axum-0.6.20/src/docs/error_handling.md new file mode 100644 index 0000000000..d230a24f53 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/error_handling.md @@ -0,0 +1,192 @@ +Error handling model and utilities + +# Table of contents + +- [axum's error handling model](#axums-error-handling-model) +- [Routing to fallible services](#routing-to-fallible-services) +- [Applying fallible middleware](#applying-fallible-middleware) +- [Running extractors for error handling](#running-extractors-for-error-handling) + +# axum's error handling model + +axum is based on [`tower::Service`] which bundles errors through its associated +`Error` type. If you have a [`Service`] that produces an error and that error +makes it all the way up to hyper, the connection will be terminated _without_ +sending a response. This is generally not desirable so axum makes sure you +always produce a response by relying on the type system. + +axum does this by requiring all services have [`Infallible`] as their error +type. `Infallible` is the error type for errors that can never happen. + +This means if you define a handler like: + +```rust +use axum::http::StatusCode; + +async fn handler() -> Result { + # todo!() + // ... +} +``` + +While it looks like it might fail with a `StatusCode` this actually isn't an +"error". If this handler returns `Err(some_status_code)` that will still be +converted into a [`Response`] and sent back to the client. This is done +through `StatusCode`'s [`IntoResponse`] implementation. + +It doesn't matter whether you return `Err(StatusCode::NOT_FOUND)` or +`Err(StatusCode::INTERNAL_SERVER_ERROR)`. These are not considered errors in +axum. 
+ +Instead of a direct `StatusCode`, it makes sense to use intermediate error type +that can ultimately be converted to `Response`. This allows using `?` operator +in handlers. See those examples: + +* [`anyhow-error-response`][anyhow] for generic boxed errors +* [`error-handling-and-dependency-injection`][ehdi] for application-specific detailed errors + +[anyhow]: https://github.com/tokio-rs/axum/blob/main/examples/anyhow-error-response/src/main.rs +[ehdi]: https://github.com/tokio-rs/axum/blob/main/examples/error-handling-and-dependency-injection/src/main.rs + +This also applies to extractors. If an extractor doesn't match the request the +request will be rejected and a response will be returned without calling your +handler. See [`extract`](crate::extract) to learn more about handling extractor +failures. + +# Routing to fallible services + +You generally don't have to think about errors if you're only using async +functions as handlers. However if you're embedding general `Service`s or +applying middleware, which might produce errors you have to tell axum how to +convert those errors into responses. + +```rust +use axum::{ + Router, + body::Body, + http::{Request, Response, StatusCode}, + error_handling::HandleError, +}; + +async fn thing_that_might_fail() -> Result<(), anyhow::Error> { + # Ok(()) + // ... +} + +// this service might fail with `anyhow::Error` +let some_fallible_service = tower::service_fn(|_req| async { + thing_that_might_fail().await?; + Ok::<_, anyhow::Error>(Response::new(Body::empty())) +}); + +let app = Router::new().route_service( + "/", + // we cannot route to `some_fallible_service` directly since it might fail. + // we have to use `handle_error` which converts its errors into responses + // and changes its error type from `anyhow::Error` to `Infallible`. 
+ HandleError::new(some_fallible_service, handle_anyhow_error), +); + +// handle errors by converting them into something that implements +// `IntoResponse` +async fn handle_anyhow_error(err: anyhow::Error) -> (StatusCode, String) { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Something went wrong: {}", err), + ) +} +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Applying fallible middleware + +Similarly axum requires you to handle errors from middleware. That is done with +[`HandleErrorLayer`]: + +```rust +use axum::{ + Router, + BoxError, + routing::get, + http::StatusCode, + error_handling::HandleErrorLayer, +}; +use std::time::Duration; +use tower::ServiceBuilder; + +let app = Router::new() + .route("/", get(|| async {})) + .layer( + ServiceBuilder::new() + // `timeout` will produce an error if the handler takes + // too long so we must handle those + .layer(HandleErrorLayer::new(handle_timeout_error)) + .timeout(Duration::from_secs(30)) + ); + +async fn handle_timeout_error(err: BoxError) -> (StatusCode, String) { + if err.is::() { + ( + StatusCode::REQUEST_TIMEOUT, + "Request took too long".to_string(), + ) + } else { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Unhandled internal error: {}", err), + ) + } +} +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Running extractors for error handling + +`HandleErrorLayer` also supports running extractors: + +```rust +use axum::{ + Router, + BoxError, + routing::get, + http::{StatusCode, Method, Uri}, + error_handling::HandleErrorLayer, +}; +use std::time::Duration; +use tower::ServiceBuilder; + +let app = Router::new() + .route("/", get(|| async {})) + .layer( + ServiceBuilder::new() + // `timeout` will produce an error if the handler takes + // too long so we must handle those + .layer(HandleErrorLayer::new(handle_timeout_error)) + 
.timeout(Duration::from_secs(30)) + ); + +async fn handle_timeout_error( + // `Method` and `Uri` are extractors so they can be used here + method: Method, + uri: Uri, + // the last argument must be the error itself + err: BoxError, +) -> (StatusCode, String) { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("`{} {}` failed with {}", method, uri, err), + ) +} +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +[`tower::Service`]: `tower::Service` +[`Infallible`]: std::convert::Infallible +[`Response`]: crate::response::Response +[`IntoResponse`]: crate::response::IntoResponse diff --git a/.cargo-vendor/axum-0.6.20/src/docs/extract.md b/.cargo-vendor/axum-0.6.20/src/docs/extract.md new file mode 100644 index 0000000000..1e78d5719b --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/extract.md @@ -0,0 +1,871 @@ +Types and traits for extracting data from requests. + +# Table of contents + +- [Intro](#intro) +- [Common extractors](#common-extractors) +- [Applying multiple extractors](#applying-multiple-extractors) +- [The order of extractors](#the-order-of-extractors) +- [Optional extractors](#optional-extractors) +- [Customizing extractor responses](#customizing-extractor-responses) +- [Accessing inner errors](#accessing-inner-errors) +- [Defining custom extractors](#defining-custom-extractors) +- [Accessing other extractors in `FromRequest` or `FromRequestParts` implementations](#accessing-other-extractors-in-fromrequest-or-fromrequestparts-implementations) +- [Request body limits](#request-body-limits) +- [Request body extractors](#request-body-extractors) +- [Running extractors from middleware](#running-extractors-from-middleware) +- [Wrapping extractors](#wrapping-extractors) +- [Logging rejections](#logging-rejections) + +# Intro + +A handler function is an async function that takes any number of +"extractors" as arguments. 
An extractor is a type that implements +[`FromRequest`](crate::extract::FromRequest) +or [`FromRequestParts`](crate::extract::FromRequestParts). + +For example, [`Json`] is an extractor that consumes the request body and +deserializes it as JSON into some target type: + +```rust,no_run +use axum::{ + extract::Json, + routing::post, + handler::Handler, + Router, +}; +use serde::Deserialize; + +#[derive(Deserialize)] +struct CreateUser { + email: String, + password: String, +} + +async fn create_user(Json(payload): Json) { + // ... +} + +let app = Router::new().route("/users", post(create_user)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Common extractors + +Some commonly used extractors are: + +```rust,no_run +use axum::{ + extract::{Json, TypedHeader, Path, Extension, Query}, + routing::post, + headers::UserAgent, + http::{Request, header::HeaderMap}, + body::{Bytes, Body}, + Router, +}; +use serde_json::Value; +use std::collections::HashMap; + +// `Path` gives you the path parameters and deserializes them. See its docs for +// more details +async fn path(Path(user_id): Path) {} + +// `Query` gives you the query parameters and deserializes them. 
+async fn query(Query(params): Query>) {} + +// `HeaderMap` gives you all the headers +async fn headers(headers: HeaderMap) {} + +// `TypedHeader` can be used to extract a single header +// note this requires you've enabled axum's `headers` feature +async fn user_agent(TypedHeader(user_agent): TypedHeader) {} + +// `String` consumes the request body and ensures it is valid utf-8 +async fn string(body: String) {} + +// `Bytes` gives you the raw request body +async fn bytes(body: Bytes) {} + +// We've already seen `Json` for parsing the request body as json +async fn json(Json(payload): Json) {} + +// `Request` gives you the whole request for maximum control +async fn request(request: Request) {} + +// `Extension` extracts data from "request extensions" +// This is commonly used to share state with handlers +async fn extension(Extension(state): Extension) {} + +#[derive(Clone)] +struct State { /* ... */ } + +let app = Router::new() + .route("/path/:user_id", post(path)) + .route("/query", post(query)) + .route("/user_agent", post(user_agent)) + .route("/headers", post(headers)) + .route("/string", post(string)) + .route("/bytes", post(bytes)) + .route("/json", post(json)) + .route("/request", post(request)) + .route("/extension", post(extension)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Applying multiple extractors + +You can also apply multiple extractors: + +```rust,no_run +use axum::{ + extract::{Path, Query}, + routing::get, + Router, +}; +use uuid::Uuid; +use serde::Deserialize; + +let app = Router::new().route("/users/:id/things", get(get_user_things)); + +#[derive(Deserialize)] +struct Pagination { + page: usize, + per_page: usize, +} + +impl Default for Pagination { + fn default() -> Self { + Self { page: 1, per_page: 30 } + } +} + +async fn get_user_things( + Path(user_id): Path, + pagination: Option>, +) { + let Query(pagination) = pagination.unwrap_or_default(); + + // ... 
+} +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# The order of extractors + +Extractors always run in the order of the function parameters that is from +left to right. + +The request body is an asynchronous stream that can only be consumed once. +Therefore you can only have one extractor that consumes the request body. axum +enforces this by requiring such extractors to be the _last_ argument your +handler takes. + +For example + +```rust +use axum::{extract::State, http::{Method, HeaderMap}}; +# +# #[derive(Clone)] +# struct AppState { +# } + +async fn handler( + // `Method` and `HeaderMap` don't consume the request body so they can + // put anywhere in the argument list (but before `body`) + method: Method, + headers: HeaderMap, + // `State` is also an extractor so it needs to be before `body` + State(state): State, + // `String` consumes the request body and thus must be the last extractor + body: String, +) { + // ... +} +# +# let _: axum::routing::MethodRouter = axum::routing::get(handler); +``` + +We get a compile error if `String` isn't the last extractor: + +```rust,compile_fail +use axum::http::Method; + +async fn handler( + // this doesn't work since `String` must be the last argument + body: String, + method: Method, +) { + // ... +} +# +# let _: axum::routing::MethodRouter = axum::routing::get(handler); +``` + +This also means you cannot consume the request body twice: + +```rust,compile_fail +use axum::Json; +use serde::Deserialize; + +#[derive(Deserialize)] +struct Payload {} + +async fn handler( + // `String` and `Json` both consume the request body + // so they cannot both be used + string_body: String, + json_body: Json, +) { + // ... +} +# +# let _: axum::routing::MethodRouter = axum::routing::get(handler); +``` + +axum enforces this by requiring the last extractor implements [`FromRequest`] +and all others implement [`FromRequestParts`]. 
+ +# Optional extractors + +All extractors defined in axum will reject the request if it doesn't match. +If you wish to make an extractor optional you can wrap it in `Option`: + +```rust,no_run +use axum::{ + extract::Json, + routing::post, + Router, +}; +use serde_json::Value; + +async fn create_user(payload: Option>) { + if let Some(payload) = payload { + // We got a valid JSON payload + } else { + // Payload wasn't valid JSON + } +} + +let app = Router::new().route("/users", post(create_user)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +Wrapping extractors in `Result` makes them optional and gives you the reason +the extraction failed: + +```rust,no_run +use axum::{ + extract::{Json, rejection::JsonRejection}, + routing::post, + Router, +}; +use serde_json::Value; + +async fn create_user(payload: Result, JsonRejection>) { + match payload { + Ok(payload) => { + // We got a valid JSON payload + } + Err(JsonRejection::MissingJsonContentType(_)) => { + // Request didn't have `Content-Type: application/json` + // header + } + Err(JsonRejection::JsonDataError(_)) => { + // Couldn't deserialize the body into the target type + } + Err(JsonRejection::JsonSyntaxError(_)) => { + // Syntax error in the body + } + Err(JsonRejection::BytesRejection(_)) => { + // Failed to extract the request body + } + Err(_) => { + // `JsonRejection` is marked `#[non_exhaustive]` so match must + // include a catch-all case. + } + } +} + +let app = Router::new().route("/users", post(create_user)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Customizing extractor responses + +If an extractor fails it will return a response with the error and your +handler will not be called. To customize the error response you have a two +options: + +1. Use `Result` as your extractor like shown in ["Optional + extractors"](#optional-extractors). 
This works well if you're only using + the extractor in a single handler. +2. Create your own extractor that in its [`FromRequest`] implemention calls + one of axum's built in extractors but returns a different response for + rejections. See the [customize-extractor-error] example for more details. + +# Accessing inner errors + +axum's built-in extractors don't directly expose the inner error. This gives us +more flexibility and allows us to change internal implementations without +breaking the public API. + +For example that means while [`Json`] is implemented using [`serde_json`] it +doesn't directly expose the [`serde_json::Error`] thats contained in +[`JsonRejection::JsonDataError`]. However it is still possible to access via +methods from [`std::error::Error`]: + +```rust +use std::error::Error; +use axum::{ + extract::{Json, rejection::JsonRejection}, + response::IntoResponse, + http::StatusCode, +}; +use serde_json::{json, Value}; + +async fn handler( + result: Result, JsonRejection>, +) -> Result, (StatusCode, String)> { + match result { + // if the client sent valid JSON then we're good + Ok(Json(payload)) => Ok(Json(json!({ "payload": payload }))), + + Err(err) => match err { + JsonRejection::JsonDataError(err) => { + Err(serde_json_error_response(err)) + } + JsonRejection::JsonSyntaxError(err) => { + Err(serde_json_error_response(err)) + } + // handle other rejections from the `Json` extractor + JsonRejection::MissingJsonContentType(_) => Err(( + StatusCode::BAD_REQUEST, + "Missing `Content-Type: application/json` header".to_string(), + )), + JsonRejection::BytesRejection(_) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed to buffer request body".to_string(), + )), + // we must provide a catch-all case since `JsonRejection` is marked + // `#[non_exhaustive]` + _ => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + "Unknown error".to_string(), + )), + }, + } +} + +// attempt to extract the inner `serde_path_to_error::Error`, +// if that succeeds we can 
provide a more specific error. +// +// `Json` uses `serde_path_to_error` so the error will be wrapped in `serde_path_to_error::Error`. +fn serde_json_error_response(err: E) -> (StatusCode, String) +where + E: Error + 'static, +{ + if let Some(err) = find_error_source::>(&err) { + let serde_json_err = err.inner(); + ( + StatusCode::BAD_REQUEST, + format!( + "Invalid JSON at line {} column {}", + serde_json_err.line(), + serde_json_err.column() + ), + ) + } else { + (StatusCode::BAD_REQUEST, "Unknown error".to_string()) + } +} + +// attempt to downcast `err` into a `T` and if that fails recursively try and +// downcast `err`'s source +fn find_error_source<'a, T>(err: &'a (dyn Error + 'static)) -> Option<&'a T> +where + T: Error + 'static, +{ + if let Some(err) = err.downcast_ref::() { + Some(err) + } else if let Some(source) = err.source() { + find_error_source(source) + } else { + None + } +} +# +# #[tokio::main] +# async fn main() { +# use axum::extract::FromRequest; +# +# let req = axum::http::Request::builder() +# .header("content-type", "application/json") +# .body(axum::body::Body::from("{")) +# .unwrap(); +# +# let err = match Json::::from_request(req, &()).await.unwrap_err() { +# JsonRejection::JsonSyntaxError(err) => err, +# _ => panic!(), +# }; +# +# let (_, body) = serde_json_error_response(err); +# assert_eq!(body, "Invalid JSON at line 1 column 1"); +# } +``` + +Note that while this approach works it might break in the future if axum changes +its implementation to use a different error type internally. Such changes might +happen without major breaking versions. + +# Defining custom extractors + +You can also define your own extractors by implementing either +[`FromRequestParts`] or [`FromRequest`]. 
+ +## Implementing `FromRequestParts` + +Implement `FromRequestParts` if your extractor doesn't need access to the +request body: + +```rust,no_run +use axum::{ + async_trait, + extract::FromRequestParts, + routing::get, + Router, + http::{ + StatusCode, + header::{HeaderValue, USER_AGENT}, + request::Parts, + }, +}; + +struct ExtractUserAgent(HeaderValue); + +#[async_trait] +impl FromRequestParts for ExtractUserAgent +where + S: Send + Sync, +{ + type Rejection = (StatusCode, &'static str); + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + if let Some(user_agent) = parts.headers.get(USER_AGENT) { + Ok(ExtractUserAgent(user_agent.clone())) + } else { + Err((StatusCode::BAD_REQUEST, "`User-Agent` header is missing")) + } + } +} + +async fn handler(ExtractUserAgent(user_agent): ExtractUserAgent) { + // ... +} + +let app = Router::new().route("/foo", get(handler)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +## Implementing `FromRequest` + +If your extractor needs to consume the request body you must implement [`FromRequest`] + +```rust,no_run +use axum::{ + async_trait, + extract::FromRequest, + response::{Response, IntoResponse}, + body::Bytes, + routing::get, + Router, + http::{ + StatusCode, + header::{HeaderValue, USER_AGENT}, + Request, + }, +}; + +struct ValidatedBody(Bytes); + +#[async_trait] +impl FromRequest for ValidatedBody +where + Bytes: FromRequest, + B: Send + 'static, + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request(req: Request, state: &S) -> Result { + let body = Bytes::from_request(req, state) + .await + .map_err(IntoResponse::into_response)?; + + // do validation... + + Ok(Self(body)) + } +} + +async fn handler(ValidatedBody(body): ValidatedBody) { + // ... 
+} + +let app = Router::new().route("/foo", get(handler)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +## Cannot implement both `FromRequest` and `FromRequestParts` + +Note that you will make your extractor unusable by implementing both +`FromRequest` and `FromRequestParts` directly for the same type, unless it is +wrapping another extractor: + +```rust,compile_fail +use axum::{ + Router, + routing::get, + extract::{FromRequest, FromRequestParts}, + http::{Request, request::Parts}, + async_trait, +}; +use std::convert::Infallible; + +// Some extractor that doesn't wrap another extractor +struct MyExtractor; + +// `MyExtractor` implements both `FromRequest` +#[async_trait] +impl FromRequest for MyExtractor +where + S: Send + Sync, + B: Send + 'static, +{ + type Rejection = Infallible; + + async fn from_request(req: Request, state: &S) -> Result { + // ... + # todo!() + } +} + +// and `FromRequestParts` +#[async_trait] +impl FromRequestParts for MyExtractor +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + // ... + # todo!() + } +} + +let app = Router::new().route( + "/", + // This fails when we go to actually use `MyExtractor` in a handler function. + // This is due to a limit in Rust's type system. + // + // The workaround is to implement either `FromRequest` or `FromRequestParts` + // but not both, if your extractor doesn't wrap another extractor. + // + // See "Wrapping extractors" for how to wrap other extractors. + get(|_: MyExtractor| async {}), +); +# let _: Router = app; +``` + +# Accessing other extractors in `FromRequest` or `FromRequestParts` implementations + +When defining custom extractors you often need to access another extractors +in your implementation. 
+ +```rust +use axum::{ + async_trait, + extract::{Extension, FromRequestParts, TypedHeader}, + headers::{authorization::Bearer, Authorization}, + http::{StatusCode, request::Parts}, + response::{IntoResponse, Response}, + routing::get, + Router, +}; + +#[derive(Clone)] +struct State { + // ... +} + +struct AuthenticatedUser { + // ... +} + +#[async_trait] +impl FromRequestParts for AuthenticatedUser +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + // You can either call them directly... + let TypedHeader(Authorization(token)) = + TypedHeader::>::from_request_parts(parts, state) + .await + .map_err(|err| err.into_response())?; + + // ... or use `extract` / `extract_with_state` from `RequestExt` / `RequestPartsExt` + use axum::RequestPartsExt; + let Extension(state) = parts.extract::>() + .await + .map_err(|err| err.into_response())?; + + unimplemented!("actually perform the authorization") + } +} + +async fn handler(user: AuthenticatedUser) { + // ... +} + +let state = State { /* ... */ }; + +let app = Router::new().route("/", get(handler)).layer(Extension(state)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Request body limits + +For security reasons, [`Bytes`] will, by default, not accept bodies larger than +2MB. This also applies to extractors that uses [`Bytes`] internally such as +`String`, [`Json`], and [`Form`]. + +For more details, including how to disable this limit, see [`DefaultBodyLimit`]. + +# Request body extractors + +Most of the time your request body type will be [`body::Body`] (a re-export +of [`hyper::Body`]), which is directly supported by all extractors. 
+ +However if you're applying a tower middleware that changes the request body type +you might have to apply a different body type to some extractors: + +```rust +use std::{ + task::{Context, Poll}, + pin::Pin, +}; +use tower_http::map_request_body::MapRequestBodyLayer; +use axum::{ + extract::{self, BodyStream}, + body::{Body, HttpBody}, + routing::get, + http::{header::HeaderMap, Request}, + Router, +}; + +struct MyBody(B); + +impl HttpBody for MyBody +where + B: HttpBody + Unpin, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_data( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + Pin::new(&mut self.0).poll_data(cx) + } + + fn poll_trailers( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Pin::new(&mut self.0).poll_trailers(cx) + } +} + +let app = Router::new() + .route( + "/string", + // `String` works directly with any body type + get(|_: String| async {}) + ) + .route( + "/body", + // `extract::Body` defaults to `axum::body::Body` + // but can be customized + get(|_: extract::RawBody>| async {}) + ) + .route( + "/body-stream", + // same for `extract::BodyStream` + get(|_: extract::BodyStream| async {}), + ) + .route( + // and `Request<_>` + "/request", + get(|_: Request>| async {}) + ) + // middleware that changes the request body type + .layer(MapRequestBodyLayer::new(MyBody)); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Running extractors from middleware + +Extractors can also be run from middleware: + +```rust +use axum::{ + middleware::{self, Next}, + extract::{TypedHeader, FromRequestParts}, + http::{Request, StatusCode}, + response::Response, + headers::authorization::{Authorization, Bearer}, + RequestPartsExt, Router, +}; + +async fn auth_middleware( + request: Request, + next: Next, +) -> Result +where + B: Send, +{ + // running extractors requires a `axum::http::request::Parts` + let (mut parts, body) = 
request.into_parts(); + + // `TypedHeader>` extracts the auth token + let auth: TypedHeader> = parts.extract() + .await + .map_err(|_| StatusCode::UNAUTHORIZED)?; + + if !token_is_valid(auth.token()) { + return Err(StatusCode::UNAUTHORIZED); + } + + // reconstruct the request + let request = Request::from_parts(parts, body); + + Ok(next.run(request).await) +} + +fn token_is_valid(token: &str) -> bool { + // ... + # false +} + +let app = Router::new().layer(middleware::from_fn(auth_middleware)); +# let _: Router<()> = app; +``` + +# Wrapping extractors + +If you want write an extractor that generically wraps another extractor (that +may or may not consume the request body) you should implement both +[`FromRequest`] and [`FromRequestParts`]: + +```rust +use axum::{ + Router, + routing::get, + extract::{FromRequest, FromRequestParts}, + http::{Request, HeaderMap, request::Parts}, + async_trait, +}; +use std::time::{Instant, Duration}; + +// an extractor that wraps another and measures how long time it takes to run +struct Timing { + extractor: E, + duration: Duration, +} + +// we must implement both `FromRequestParts` +#[async_trait] +impl FromRequestParts for Timing +where + S: Send + Sync, + T: FromRequestParts, +{ + type Rejection = T::Rejection; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + let start = Instant::now(); + let extractor = T::from_request_parts(parts, state).await?; + let duration = start.elapsed(); + Ok(Timing { + extractor, + duration, + }) + } +} + +// and `FromRequest` +#[async_trait] +impl FromRequest for Timing +where + B: Send + 'static, + S: Send + Sync, + T: FromRequest, +{ + type Rejection = T::Rejection; + + async fn from_request(req: Request, state: &S) -> Result { + let start = Instant::now(); + let extractor = T::from_request(req, state).await?; + let duration = start.elapsed(); + Ok(Timing { + extractor, + duration, + }) + } +} + +async fn handler( + // this uses the `FromRequestParts` impl + _: Timing, + 
// this uses the `FromRequest` impl + _: Timing, +) {} +# let _: axum::routing::MethodRouter = axum::routing::get(handler); +``` + +# Logging rejections + +All built-in extractors will log rejections for easier debugging. To see the +logs, enable the `tracing` feature for axum and the `axum::rejection=trace` +tracing target, for example with `RUST_LOG=info,axum::rejection=trace cargo +run`. + +[`body::Body`]: crate::body::Body +[`Bytes`]: crate::body::Bytes +[customize-extractor-error]: https://github.com/tokio-rs/axum/blob/main/examples/customize-extractor-error/src/main.rs +[`HeaderMap`]: https://docs.rs/http/latest/http/header/struct.HeaderMap.html +[`Request`]: https://docs.rs/http/latest/http/struct.Request.html +[`RequestParts::body_mut`]: crate::extract::RequestParts::body_mut +[`JsonRejection::JsonDataError`]: rejection::JsonRejection::JsonDataError diff --git a/.cargo-vendor/axum-0.6.20/src/docs/handlers_intro.md b/.cargo-vendor/axum-0.6.20/src/docs/handlers_intro.md new file mode 100644 index 0000000000..4b42d4204e --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/handlers_intro.md @@ -0,0 +1,8 @@ +In axum a "handler" is an async function that accepts zero or more +["extractors"](crate::extract) as arguments and returns something that +can be converted [into a response](crate::response). + +Handlers are where your application logic lives and axum applications are built +by routing between handlers. + +[`debug_handler`]: https://docs.rs/axum-macros/latest/axum_macros/attr.debug_handler.html diff --git a/.cargo-vendor/axum-0.6.20/src/docs/method_routing/fallback.md b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/fallback.md new file mode 100644 index 0000000000..906cbb3b5d --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/fallback.md @@ -0,0 +1,60 @@ +Add a fallback service to the router. + +This service will be called if no routes matches the incoming request. 
+ +```rust +use axum::{ + Router, + routing::get, + handler::Handler, + response::IntoResponse, + http::{StatusCode, Method, Uri}, +}; + +let handler = get(|| async {}).fallback(fallback); + +let app = Router::new().route("/", handler); + +async fn fallback(method: Method, uri: Uri) -> (StatusCode, String) { + (StatusCode::NOT_FOUND, format!("`{}` not allowed for {}", method, uri)) +} +# async { +# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +## When used with `MethodRouter::merge` + +Two routers that both have a fallback cannot be merged. Doing so results in a +panic: + +```rust,should_panic +use axum::{ + routing::{get, post}, + handler::Handler, + response::IntoResponse, + http::{StatusCode, Uri}, +}; + +let one = get(|| async {}).fallback(fallback_one); + +let two = post(|| async {}).fallback(fallback_two); + +let method_route = one.merge(two); + +async fn fallback_one() -> impl IntoResponse { /* ... */ } +async fn fallback_two() -> impl IntoResponse { /* ... */ } +# let app = axum::Router::new().route("/", method_route); +# async { +# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +## Setting the `Allow` header + +By default `MethodRouter` will set the `Allow` header when returning `405 Method +Not Allowed`. This is also done when the fallback is used unless the response +generated by the fallback already sets the `Allow` header. + +This means if you use `fallback` to accept additional methods, you should make +sure you set the `Allow` header correctly. diff --git a/.cargo-vendor/axum-0.6.20/src/docs/method_routing/layer.md b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/layer.md new file mode 100644 index 0000000000..cdf6f93342 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/layer.md @@ -0,0 +1,29 @@ +Apply a [`tower::Layer`] to all routes in the router. 
+ +This can be used to add additional processing to a request for a group +of routes. + +Note that the middleware is only applied to existing routes. So you have to +first add your routes (and / or fallback) and then call `layer` afterwards. Additional +routes added after `layer` is called will not have the middleware added. + +Works similarly to [`Router::layer`](super::Router::layer). See that method for +more details. + +# Example + +```rust +use axum::{routing::get, Router}; +use tower::limit::ConcurrencyLimitLayer; + +async fn hander() {} + +let app = Router::new().route( + "/", + // All requests to `GET /` will be sent through `ConcurrencyLimitLayer` + get(hander).layer(ConcurrencyLimitLayer::new(64)), +); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` diff --git a/.cargo-vendor/axum-0.6.20/src/docs/method_routing/merge.md b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/merge.md new file mode 100644 index 0000000000..39d74d048a --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/merge.md @@ -0,0 +1,25 @@ +Merge two routers into one. + +This is useful for breaking routers into smaller pieces and combining them +into one. + +```rust +use axum::{ + routing::{get, post}, + Router, +}; + +let get = get(|| async {}); +let post = post(|| async {}); + +let merged = get.merge(post); + +let app = Router::new().route("/", merged); + +// Our app now accepts +// - GET / +// - POST / +# async { +# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` diff --git a/.cargo-vendor/axum-0.6.20/src/docs/method_routing/route_layer.md b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/route_layer.md new file mode 100644 index 0000000000..f497e8b102 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/method_routing/route_layer.md @@ -0,0 +1,34 @@ +Apply a [`tower::Layer`] to the router that will only run if the request matches +a route. 
+ +Note that the middleware is only applied to existing routes. So you have to +first add your routes (and / or fallback) and then call `layer` afterwards. Additional +routes added after `layer` is called will not have the middleware added. + +This works similarly to [`MethodRouter::layer`] except the middleware will only run if +the request matches a route. This is useful for middleware that return early +(such as authorization) which might otherwise convert a `405 Method Not Allowed` into a +`401 Unauthorized`. + +# Example + +```rust +use axum::{ + routing::get, + Router, +}; +use tower_http::validate_request::ValidateRequestHeaderLayer; + +let app = Router::new().route( + "/foo", + get(|| async {}) + .route_layer(ValidateRequestHeaderLayer::bearer("password")) +); + +// `GET /foo` with a valid token will receive `200 OK` +// `GET /foo` with a invalid token will receive `401 Unauthorized` +// `POST /FOO` with a invalid token will receive `405 Method Not Allowed` +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` diff --git a/.cargo-vendor/axum-0.6.20/src/docs/middleware.md b/.cargo-vendor/axum-0.6.20/src/docs/middleware.md new file mode 100644 index 0000000000..fd601fcfe8 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/middleware.md @@ -0,0 +1,593 @@ +# Table of contents + +- [Intro](#intro) +- [Applying middleware](#applying-middleware) +- [Commonly used middleware](#commonly-used-middleware) +- [Ordering](#ordering) +- [Writing middleware](#writing-middleware) +- [Routing to services/middleware and backpressure](#routing-to-servicesmiddleware-and-backpressure) +- [Accessing state in middleware](#accessing-state-in-middleware) +- [Passing state from middleware to handlers](#passing-state-from-middleware-to-handlers) +- [Rewriting request URI in middleware](#rewriting-request-uri-in-middleware) + +# Intro + +axum is unique in that it doesn't have its own bespoke middleware system and +instead 
integrates with [`tower`]. This means the ecosystem of [`tower`] and +[`tower-http`] middleware all work with axum. + +While its not necessary to fully understand tower to write or use middleware +with axum, having at least a basic understanding of tower's concepts is +recommended. See [tower's guides][tower-guides] for a general introduction. +Reading the documentation for [`tower::ServiceBuilder`] is also recommended. + +# Applying middleware + +axum allows you to add middleware just about anywhere + +- To entire routers with [`Router::layer`] and [`Router::route_layer`]. +- To method routers with [`MethodRouter::layer`] and [`MethodRouter::route_layer`]. +- To individual handlers with [`Handler::layer`]. + +## Applying multiple middleware + +Its recommended to use [`tower::ServiceBuilder`] to apply multiple middleware at +once, instead of calling `layer` (or `route_layer`) repeatedly: + +```rust +use axum::{ + routing::get, + Extension, + Router, +}; +use tower_http::{trace::TraceLayer}; +use tower::ServiceBuilder; + +async fn handler() {} + +#[derive(Clone)] +struct State {} + +let app = Router::new() + .route("/", get(handler)) + .layer( + ServiceBuilder::new() + .layer(TraceLayer::new_for_http()) + .layer(Extension(State {})) + ); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Commonly used middleware + +Some commonly used middleware are: + +- [`TraceLayer`](tower_http::trace) for high level tracing/logging. +- [`CorsLayer`](tower_http::cors) for handling CORS. +- [`CompressionLayer`](tower_http::compression) for automatic compression of + responses. +- [`RequestIdLayer`](tower_http::request_id) and + [`PropagateRequestIdLayer`](tower_http::request_id) set and propagate request + ids. +- [`TimeoutLayer`](tower::timeout::TimeoutLayer) for timeouts. Note this + requires using [`HandleErrorLayer`](crate::error_handling::HandleErrorLayer) + to convert timeouts to responses. 
+ +# Ordering + +When you add middleware with [`Router::layer`] (or similar) all previously added +routes will be wrapped in the middleware. Generally speaking, this results in +middleware being executed from bottom to top. + +So if you do this: + +```rust +use axum::{routing::get, Router}; + +async fn handler() {} + +# let layer_one = axum::Extension(()); +# let layer_two = axum::Extension(()); +# let layer_three = axum::Extension(()); +# +let app = Router::new() + .route("/", get(handler)) + .layer(layer_one) + .layer(layer_two) + .layer(layer_three); +# let _: Router<(), axum::body::Body> = app; +``` + +Think of the middleware as being layered like an onion where each new layer +wraps all previous layers: + +```not_rust + requests + | + v ++----- layer_three -----+ +| +---- layer_two ----+ | +| | +-- layer_one --+ | | +| | | | | | +| | | handler | | | +| | | | | | +| | +-- layer_one --+ | | +| +---- layer_two ----+ | ++----- layer_three -----+ + | + v + responses +``` + +That is: + +- First `layer_three` receives the request +- It then does its thing and passes the request onto `layer_two` +- Which passes the request onto `layer_one` +- Which passes the request onto `handler` where a response is produced +- That response is then passed to `layer_one` +- Then to `layer_two` +- And finally to `layer_three` where it's returned out of your app + +It's a little more complicated in practice because any middleware is free to +return early and not call the next layer, for example if a request cannot be +authorized, but its a useful mental model to have. 
+ +As previously mentioned its recommended to add multiple middleware using +`tower::ServiceBuilder`, however this impacts ordering: + +```rust +use tower::ServiceBuilder; +use axum::{routing::get, Router}; + +async fn handler() {} + +# let layer_one = axum::Extension(()); +# let layer_two = axum::Extension(()); +# let layer_three = axum::Extension(()); +# +let app = Router::new() + .route("/", get(handler)) + .layer( + ServiceBuilder::new() + .layer(layer_one) + .layer(layer_two) + .layer(layer_three), + ); +# let _: Router<(), axum::body::Body> = app; +``` + +`ServiceBuilder` works by composing all layers into one such that they run top +to bottom. So with the previous code `layer_one` would receive the request +first, then `layer_two`, then `layer_three`, then `handler`, and then the +response would bubble back up through `layer_three`, then `layer_two`, and +finally `layer_one`. + +Executing middleware top to bottom is generally easier to understand and follow +mentally which is one of the reasons `ServiceBuilder` is recommended. + +# Writing middleware + +axum offers many ways of writing middleware, at different levels of abstraction +and with different pros and cons. + +## `axum::middleware::from_fn` + +Use [`axum::middleware::from_fn`] to write your middleware when: + +- You're not comfortable with implementing your own futures and would rather use + the familiar `async`/`await` syntax. +- You don't intend to publish your middleware as a crate for others to use. + Middleware written like this are only compatible with axum. + +## `axum::middleware::from_extractor` + +Use [`axum::middleware::from_extractor`] to write your middleware when: + +- You have a type that you sometimes want to use as an extractor and sometimes + as a middleware. If you only need your type as a middleware prefer + [`middleware::from_fn`]. + +## tower's combinators + +tower has several utility combinators that can be used to perform simple +modifications to requests or responses. 
The most commonly used ones are + +- [`ServiceBuilder::map_request`] +- [`ServiceBuilder::map_response`] +- [`ServiceBuilder::then`] +- [`ServiceBuilder::and_then`] + +You should use these when + +- You want to perform a small ad hoc operation, such as adding a header. +- You don't intend to publish your middleware as a crate for others to use. + +## `tower::Service` and `Pin>` + +For maximum control (and a more low level API) you can write you own middleware +by implementing [`tower::Service`]: + +Use [`tower::Service`] with `Pin>` to write your middleware when: + +- Your middleware needs to be configurable for example via builder methods on + your [`tower::Layer`] such as [`tower_http::trace::TraceLayer`]. +- You do intend to publish your middleware as a crate for others to use. +- You're not comfortable with implementing your own futures. + +A decent template for such a middleware could be: + +```rust +use axum::{ + response::Response, + body::Body, + http::Request, +}; +use futures_util::future::BoxFuture; +use tower::{Service, Layer}; +use std::task::{Context, Poll}; + +#[derive(Clone)] +struct MyLayer; + +impl Layer for MyLayer { + type Service = MyMiddleware; + + fn layer(&self, inner: S) -> Self::Service { + MyMiddleware { inner } + } +} + +#[derive(Clone)] +struct MyMiddleware { + inner: S, +} + +impl Service> for MyMiddleware +where + S: Service, Response = Response> + Send + 'static, + S::Future: Send + 'static, +{ + type Response = S::Response; + type Error = S::Error; + // `BoxFuture` is a type alias for `Pin>` + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, request: Request) -> Self::Future { + let future = self.inner.call(request); + Box::pin(async move { + let response: Response = future.await?; + Ok(response) + }) + } +} +``` + +## `tower::Service` and custom futures + +If you're comfortable implementing your own futures (or want to 
learn it) and +need as much control as possible then using `tower::Service` without boxed +futures is the way to go. + +Use [`tower::Service`] with manual futures to write your middleware when: + +- You want your middleware to have the lowest possible overhead. +- Your middleware needs to be configurable for example via builder methods on + your [`tower::Layer`] such as [`tower_http::trace::TraceLayer`]. +- You do intend to publish your middleware as a crate for others to use, perhaps + as part of tower-http. +- You're comfortable with implementing your own futures, or want to learn how + the lower levels of async Rust works. + +tower's ["Building a middleware from scratch"][tower-from-scratch-guide] +guide is a good place to learn how to do this. + +# Error handling for middleware + +axum's error handling model requires handlers to always return a response. +However middleware is one possible way to introduce errors into an application. +If hyper receives an error the connection will be closed without sending a +response. Thus axum requires those errors to be handled gracefully: + +```rust +use axum::{ + routing::get, + error_handling::HandleErrorLayer, + http::StatusCode, + BoxError, + Router, +}; +use tower::{ServiceBuilder, timeout::TimeoutLayer}; +use std::time::Duration; + +async fn handler() {} + +let app = Router::new() + .route("/", get(handler)) + .layer( + ServiceBuilder::new() + // this middleware goes above `TimeoutLayer` because it will receive + // errors returned by `TimeoutLayer` + .layer(HandleErrorLayer::new(|_: BoxError| async { + StatusCode::REQUEST_TIMEOUT + })) + .layer(TimeoutLayer::new(Duration::from_secs(10))) + ); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +See [`error_handling`](crate::error_handling) for more details on axum's error +handling model. 
+ +# Routing to services/middleware and backpressure + +Generally routing to one of multiple services and backpressure doesn't mix +well. Ideally you would want to ensure a service is ready to receive a request +before calling it. However, in order to know which service to call, you need +the request... + +One approach is to not consider the router service itself ready until all +destination services are ready. That is the approach used by +[`tower::steer::Steer`]. + +Another approach is to always consider all services ready (always return +`Poll::Ready(Ok(()))`) from `Service::poll_ready` and then actually drive +readiness inside the response future returned by `Service::call`. This works +well when your services don't care about backpressure and are always ready +anyway. + +axum expects that all services used in your app won't care about +backpressure and so it uses the latter strategy. However that means you +should avoid routing to a service (or using a middleware) that _does_ care +about backpressure. At the very least you should [load shed] so requests are +dropped quickly and don't keep piling up. + +It also means that if `poll_ready` returns an error then that error will be +returned in the response future from `call` and _not_ from `poll_ready`. In +that case, the underlying service will _not_ be discarded and will continue +to be used for future requests. Services that expect to be discarded if +`poll_ready` fails should _not_ be used with axum. + +One possible approach is to only apply backpressure sensitive middleware +around your entire app. This is possible because axum applications are +themselves services: + +```rust +use axum::{ + routing::get, + Router, +}; +use tower::ServiceBuilder; +# let some_backpressure_sensitive_middleware = +# tower::layer::util::Identity::new(); + +async fn handler() { /* ...
*/ } + +let app = Router::new().route("/", get(handler)); + +let app = ServiceBuilder::new() + .layer(some_backpressure_sensitive_middleware) + .service(app); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +However when applying middleware around your whole application in this way +you have to take care that errors are still being handled +appropriately. + +Also note that handlers created from async functions don't care about +backpressure and are always ready. So if you're not using any Tower +middleware you don't have to worry about any of this. + +# Accessing state in middleware + +How to make state available to middleware depends on how the middleware is +written. + +## Accessing state in `axum::middleware::from_fn` + +Use [`axum::middleware::from_fn_with_state`](crate::middleware::from_fn_with_state). + +## Accessing state in custom `tower::Layer`s + +```rust +use axum::{ + Router, + routing::get, + middleware::{self, Next}, + response::Response, + extract::State, + http::Request, +}; +use tower::{Layer, Service}; +use std::task::{Context, Poll}; + +#[derive(Clone)] +struct AppState {} + +#[derive(Clone)] +struct MyLayer { + state: AppState, +} + +impl Layer for MyLayer { + type Service = MyService; + + fn layer(&self, inner: S) -> Self::Service { + MyService { + inner, + state: self.state.clone(), + } + } +} + +#[derive(Clone)] +struct MyService { + inner: S, + state: AppState, +} + +impl Service> for MyService +where + S: Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + // Do something with `self.state`. + // + // See `axum::RequestExt` for how to run extractors directly from + // a `Request`.
+ + self.inner.call(req) + } +} + +async fn handler(_: State) {} + +let state = AppState {}; + +let app = Router::new() + .route("/", get(handler)) + .layer(MyLayer { state: state.clone() }) + .with_state(state); +# let _: axum::Router = app; +``` + +# Passing state from middleware to handlers + +State can be passed from middleware to handlers using [request extensions]: + +```rust +use axum::{ + Router, + http::{Request, StatusCode}, + routing::get, + response::{IntoResponse, Response}, + middleware::{self, Next}, + extract::Extension, +}; + +#[derive(Clone)] +struct CurrentUser { /* ... */ } + +async fn auth(mut req: Request, next: Next) -> Result { + let auth_header = req.headers() + .get(http::header::AUTHORIZATION) + .and_then(|header| header.to_str().ok()); + + let auth_header = if let Some(auth_header) = auth_header { + auth_header + } else { + return Err(StatusCode::UNAUTHORIZED); + }; + + if let Some(current_user) = authorize_current_user(auth_header).await { + // insert the current user into a request extension so the handler can + // extract it + req.extensions_mut().insert(current_user); + Ok(next.run(req).await) + } else { + Err(StatusCode::UNAUTHORIZED) + } +} + +async fn authorize_current_user(auth_token: &str) -> Option { + // ... + # unimplemented!() +} + +async fn handler( + // extract the current user, set by the middleware + Extension(current_user): Extension, +) { + // ... +} + +let app = Router::new() + .route("/", get(handler)) + .route_layer(middleware::from_fn(auth)); +# let _: Router<()> = app; +``` + +[Response extensions] can also be used but note that request extensions are not +automatically moved to response extensions. You need to manually do that for the +extensions you need. + +# Rewriting request URI in middleware + +Middleware added with [`Router::layer`] will run after routing. That means it +cannot be used to run middleware that rewrites the request URI. By the time the +middleware runs the routing is already done. 
+ +The workaround is to wrap the middleware around the entire `Router` (this works +because `Router` implements [`Service`]): + +```rust +use tower::Layer; +use axum::{ + Router, + ServiceExt, // for `into_make_service` + response::Response, + middleware::Next, + http::Request, +}; + +async fn rewrite_request_uri(req: Request, next: Next) -> Response { + // ... + # next.run(req).await +} + +// this can be any `tower::Layer` +let middleware = axum::middleware::from_fn(rewrite_request_uri); + +let app = Router::new(); + +// apply the layer around the whole `Router` +// this way the middleware will run before `Router` receives the request +let app_with_middleware = middleware.layer(app); + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(app_with_middleware.into_make_service()) + .await + .unwrap(); +# }; +``` + +[`tower`]: https://crates.io/crates/tower +[`tower-http`]: https://crates.io/crates/tower-http +[tower-guides]: https://github.com/tower-rs/tower/tree/master/guides +[`axum::middleware::from_fn`]: fn@crate::middleware::from_fn +[`middleware::from_fn`]: fn@crate::middleware::from_fn +[tower-from-scratch-guide]: https://github.com/tower-rs/tower/blob/master/guides/building-a-middleware-from-scratch.md +[`ServiceBuilder::map_request`]: tower::ServiceBuilder::map_request +[`ServiceBuilder::map_response`]: tower::ServiceBuilder::map_response +[`ServiceBuilder::then`]: tower::ServiceBuilder::then +[`ServiceBuilder::and_then`]: tower::ServiceBuilder::and_then +[`axum::middleware::from_extractor`]: fn@crate::middleware::from_extractor +[`Handler::layer`]: crate::handler::Handler::layer +[`Router::layer`]: crate::routing::Router::layer +[`MethodRouter::layer`]: crate::routing::MethodRouter::layer +[`Router::route_layer`]: crate::routing::Router::route_layer +[`MethodRouter::route_layer`]: crate::routing::MethodRouter::route_layer +[request extensions]: https://docs.rs/http/latest/http/request/struct.Request.html#method.extensions 
+[Response extensions]: https://docs.rs/http/latest/http/response/struct.Response.html#method.extensions +[`State`]: crate::extract::State +[`Service`]: tower::Service diff --git a/.cargo-vendor/axum-0.6.20/src/docs/response.md b/.cargo-vendor/axum-0.6.20/src/docs/response.md new file mode 100644 index 0000000000..2afe476046 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/response.md @@ -0,0 +1,326 @@ +Types and traits for generating responses. + +# Table of contents + +- [Building responses](#building-responses) +- [Returning different response types](#returning-different-response-types) +- [Regarding `impl IntoResponse`](#regarding-impl-intoresponse) + +# Building responses + +Anything that implements [`IntoResponse`] can be returned from a handler. axum +provides implementations for common types: + +```rust,no_run +use axum::{ + Json, + response::{Html, IntoResponse}, + http::{StatusCode, Uri, header::{self, HeaderMap, HeaderName}}, +}; + +// `()` gives an empty response +async fn empty() {} + +// String will get a `text/plain; charset=utf-8` content-type +async fn plain_text(uri: Uri) -> String { + format!("Hi from {}", uri.path()) +} + +// Bytes will get a `application/octet-stream` content-type +async fn bytes() -> Vec { + vec![1, 2, 3, 4] +} + +// `Json` will get a `application/json` content-type and work with anything that +// implements `serde::Serialize` +async fn json() -> Json> { + Json(vec!["foo".to_owned(), "bar".to_owned()]) +} + +// `Html` will get a `text/html` content-type +async fn html() -> Html<&'static str> { + Html("
<h1>Hello, World!</h1>
") +} + +// `StatusCode` gives an empty response with that status code +async fn status() -> StatusCode { + StatusCode::NOT_FOUND +} + +// `HeaderMap` gives an empty response with some headers +async fn headers() -> HeaderMap { + let mut headers = HeaderMap::new(); + headers.insert(header::SERVER, "axum".parse().unwrap()); + headers +} + +// An array of tuples also gives headers +async fn array_headers() -> [(HeaderName, &'static str); 2] { + [ + (header::SERVER, "axum"), + (header::CONTENT_TYPE, "text/plain") + ] +} + +// Use `impl IntoResponse` to avoid writing the whole type +async fn impl_trait() -> impl IntoResponse { + [ + (header::SERVER, "axum"), + (header::CONTENT_TYPE, "text/plain") + ] +} +``` + +Additionally you can return tuples to build more complex responses from +individual parts. + +```rust,no_run +use axum::{ + Json, + response::IntoResponse, + http::{StatusCode, HeaderMap, Uri, header}, + extract::Extension, +}; + +// `(StatusCode, impl IntoResponse)` will override the status code of the response +async fn with_status(uri: Uri) -> (StatusCode, String) { + (StatusCode::NOT_FOUND, format!("Not Found: {}", uri.path())) +} + +// Use `impl IntoResponse` to avoid having to type the whole type +async fn impl_trait(uri: Uri) -> impl IntoResponse { + (StatusCode::NOT_FOUND, format!("Not Found: {}", uri.path())) +} + +// `(HeaderMap, impl IntoResponse)` to add additional headers +async fn with_headers() -> impl IntoResponse { + let mut headers = HeaderMap::new(); + headers.insert(header::CONTENT_TYPE, "text/plain".parse().unwrap()); + (headers, "foo") +} + +// Or an array of tuples to more easily build the headers +async fn with_array_headers() -> impl IntoResponse { + ([(header::CONTENT_TYPE, "text/plain")], "foo") +} + +// Use string keys for custom headers +async fn with_array_headers_custom() -> impl IntoResponse { + ([("x-custom", "custom")], "foo") +} + +// `(StatusCode, headers, impl IntoResponse)` to set status and add headers +// `headers` can be 
either a `HeaderMap` or an array of tuples +async fn with_status_and_array_headers() -> impl IntoResponse { + ( + StatusCode::NOT_FOUND, + [(header::CONTENT_TYPE, "text/plain")], + "foo", + ) +} + +// `(Extension<_>, impl IntoResponse)` to set response extensions +async fn with_status_extensions() -> impl IntoResponse { + ( + Extension(Foo("foo")), + "foo", + ) +} + +struct Foo(&'static str); + +// Or mix and match all the things +async fn all_the_things(uri: Uri) -> impl IntoResponse { + let mut header_map = HeaderMap::new(); + if uri.path() == "/" { + header_map.insert(header::SERVER, "axum".parse().unwrap()); + } + + ( + // set status code + StatusCode::NOT_FOUND, + // headers with an array + [("x-custom", "custom")], + // some extensions + Extension(Foo("foo")), + Extension(Foo("bar")), + // more headers, built dynamically + header_map, + // and finally the body + "foo", + ) +} +``` + +In general you can return tuples like: + +- `(StatusCode, impl IntoResponse)` +- `(Parts, impl IntoResponse)` +- `(Response<()>, impl IntoResponse)` +- `(T1, .., Tn, impl IntoResponse)` where `T1` to `Tn` all implement [`IntoResponseParts`]. +- `(StatusCode, T1, .., Tn, impl IntoResponse)` where `T1` to `Tn` all implement [`IntoResponseParts`]. +- `(Parts, T1, .., Tn, impl IntoResponse)` where `T1` to `Tn` all implement [`IntoResponseParts`]. +- `(Response<()>, T1, .., Tn, impl IntoResponse)` where `T1` to `Tn` all implement [`IntoResponseParts`]. + +This means you cannot accidentally override the status or body as [`IntoResponseParts`] only allows +setting headers and extensions. 
+ +Use [`Response`](crate::response::Response) for more low level control: + +```rust,no_run +use axum::{ + Json, + response::{IntoResponse, Response}, + body::{Full, Bytes}, + http::StatusCode, +}; + +async fn response() -> Response> { + Response::builder() + .status(StatusCode::NOT_FOUND) + .header("x-foo", "custom header") + .body(Full::from("not found")) + .unwrap() +} +``` + +# Returning different response types + +If you need to return multiple response types, and `Result` isn't appropriate, you can call +`.into_response()` to turn things into `axum::response::Response`: + +```rust +use axum::{ + response::{IntoResponse, Redirect, Response}, + http::StatusCode, +}; + +async fn handle() -> Response { + if something() { + "All good!".into_response() + } else if something_else() { + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Something went wrong...", + ).into_response() + } else { + Redirect::to("/").into_response() + } +} + +fn something() -> bool { + // ... + # true +} + +fn something_else() -> bool { + // ... + # true +} +``` + +# Regarding `impl IntoResponse` + +You can use `impl IntoResponse` as the return type from handlers to avoid +typing large types. For example + +```rust +use axum::http::StatusCode; + +async fn handler() -> (StatusCode, [(&'static str, &'static str); 1], &'static str) { + (StatusCode::OK, [("x-foo", "bar")], "Hello, World!") +} +``` + +Becomes easier using `impl IntoResponse`: + +```rust +use axum::{http::StatusCode, response::IntoResponse}; + +async fn impl_into_response() -> impl IntoResponse { + (StatusCode::OK, [("x-foo", "bar")], "Hello, World!") +} +``` + +However `impl IntoResponse` has a few limitations. Firstly it can only be used +to return a single type: + +```rust,compile_fail +use axum::{http::StatusCode, response::IntoResponse}; + +async fn handler() -> impl IntoResponse { + if check_something() { + StatusCode::NOT_FOUND + } else { + "Hello, World!" + } +} + +fn check_something() -> bool { + # false + // ... 
+} +``` + +This function returns either a `StatusCode` or a `&'static str` which `impl +Trait` doesn't allow. + +Secondly `impl IntoResponse` can lead to type inference issues when used with +`Result` and `?`: + +```rust,compile_fail +use axum::{http::StatusCode, response::IntoResponse}; + +async fn handler() -> impl IntoResponse { + create_thing()?; + Ok(StatusCode::CREATED) +} + +fn create_thing() -> Result<(), StatusCode> { + # Ok(()) + // ... +} +``` + +This is because `?` supports using the [`From`] trait to convert to a different +error type but it doesn't know which type to convert to, because we only +specified `impl IntoResponse` as the return type. + +`Result` doesn't always work either: + +```rust,compile_fail +use axum::{http::StatusCode, response::IntoResponse}; + +async fn handler() -> Result { + create_thing()?; + Ok(StatusCode::CREATED) +} + +fn create_thing() -> Result<(), StatusCode> { + # Ok(()) + // ... +} +``` + +The solution is to use a concrete error type, such as `Result`: + +```rust +use axum::{http::StatusCode, response::IntoResponse}; + +async fn handler() -> Result { + create_thing()?; + Ok(StatusCode::CREATED) +} + +fn create_thing() -> Result<(), StatusCode> { + # Ok(()) + // ... +} +``` + +Because of this it is generally not recommended to use `impl IntoResponse` +unless you're familiar with the details of how `impl Trait` works. + +[`IntoResponse`]: crate::response::IntoResponse +[`IntoResponseParts`]: crate::response::IntoResponseParts +[`StatusCode`]: http::StatusCode diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/fallback.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/fallback.md new file mode 100644 index 0000000000..11b25896ef --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/fallback.md @@ -0,0 +1,63 @@ +Add a fallback [`Handler`] to the router. + +This service will be called if no routes matches the incoming request. 
+ +```rust +use axum::{ + Router, + routing::get, + handler::Handler, + response::IntoResponse, + http::{StatusCode, Uri}, +}; + +let app = Router::new() + .route("/foo", get(|| async { /* ... */ })) + .fallback(fallback); + +async fn fallback(uri: Uri) -> (StatusCode, String) { + (StatusCode::NOT_FOUND, format!("No route for {}", uri)) +} +# async { +# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +Fallbacks only apply to routes that aren't matched by anything in the +router. If a handler is matched by a request but returns 404 the +fallback is not called. + +# Handling all requests without other routes + +Using `Router::new().fallback(...)` to accept all request regardless of path or +method, if you don't have other routes, isn't optimal: + +```rust +use axum::Router; + +async fn handler() {} + +let app = Router::new().fallback(handler); + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(app.into_make_service()) + .await + .unwrap(); +# }; +``` + +Running the handler directly is faster since it avoids the overhead of routing: + +```rust +use axum::handler::HandlerWithoutStateExt; + +async fn handler() {} + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(handler.into_make_service()) + .await + .unwrap(); +# }; +``` diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/into_make_service_with_connect_info.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/into_make_service_with_connect_info.md new file mode 100644 index 0000000000..05ee750c56 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/into_make_service_with_connect_info.md @@ -0,0 +1,80 @@ +Convert this router into a [`MakeService`], that will store `C`'s +associated `ConnectInfo` in a request extension such that [`ConnectInfo`] +can extract it. + +This enables extracting things like the client's remote address. 
+ +Extracting [`std::net::SocketAddr`] is supported out of the box: + +```rust +use axum::{ + extract::ConnectInfo, + routing::get, + Router, +}; +use std::net::SocketAddr; + +let app = Router::new().route("/", get(handler)); + +async fn handler(ConnectInfo(addr): ConnectInfo) -> String { + format!("Hello {}", addr) +} + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve( + app.into_make_service_with_connect_info::() + ) + .await + .expect("server failed"); +# }; +``` + +You can implement a custom [`Connected`] like so: + +```rust +use axum::{ + extract::connect_info::{ConnectInfo, Connected}, + routing::get, + Router, +}; +use hyper::server::conn::AddrStream; + +let app = Router::new().route("/", get(handler)); + +async fn handler( + ConnectInfo(my_connect_info): ConnectInfo, +) -> String { + format!("Hello {:?}", my_connect_info) +} + +#[derive(Clone, Debug)] +struct MyConnectInfo { + // ... +} + +impl Connected<&AddrStream> for MyConnectInfo { + fn connect_info(target: &AddrStream) -> Self { + MyConnectInfo { + // ... + } + } +} + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve( + app.into_make_service_with_connect_info::() + ) + .await + .expect("server failed"); +# }; +``` + +See the [unix domain socket example][uds] for an example of how to use +this to collect UDS connection info. + +[`MakeService`]: tower::make::MakeService +[`Connected`]: crate::extract::connect_info::Connected +[`ConnectInfo`]: crate::extract::connect_info::ConnectInfo +[uds]: https://github.com/tokio-rs/axum/blob/main/examples/unix-domain-socket/src/main.rs diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/layer.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/layer.md new file mode 100644 index 0000000000..1c029c7ff4 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/layer.md @@ -0,0 +1,67 @@ +Apply a [`tower::Layer`] to all routes in the router.
+ +This can be used to add additional processing to a request for a group +of routes. + +Note that the middleware is only applied to existing routes. So you have to +first add your routes (and / or fallback) and then call `layer` afterwards. Additional +routes added after `layer` is called will not have the middleware added. + +If you want to add middleware to a single handler you can either use +[`MethodRouter::layer`] or [`Handler::layer`]. + +# Example + +Adding the [`tower_http::trace::TraceLayer`]: + +```rust +use axum::{routing::get, Router}; +use tower_http::trace::TraceLayer; + +let app = Router::new() + .route("/foo", get(|| async {})) + .route("/bar", get(|| async {})) + .layer(TraceLayer::new_for_http()); +# let _: Router = app; +``` + +If you need to write your own middleware see ["Writing +middleware"](crate::middleware#writing-middleware) for the different options. + +If you only want middleware on some routes you can use [`Router::merge`]: + +```rust +use axum::{routing::get, Router}; +use tower_http::{trace::TraceLayer, compression::CompressionLayer}; + +let with_tracing = Router::new() + .route("/foo", get(|| async {})) + .layer(TraceLayer::new_for_http()); + +let with_compression = Router::new() + .route("/bar", get(|| async {})) + .layer(CompressionLayer::new()); + +// Merge everything into one `Router` +let app = Router::new() + .merge(with_tracing) + .merge(with_compression); +# let _: Router = app; +``` + +# Multiple middleware + +It's recommended to use [`tower::ServiceBuilder`] when applying multiple +middleware. See [`middleware`](crate::middleware) for more details. + +# Runs after routing + +Middleware added with this method will run _after_ routing and thus cannot be +used to rewrite the request URI. See ["Rewriting request URI in +middleware"](crate::middleware#rewriting-request-uri-in-middleware) for more +details and a workaround. 
+ +# Error handling + +See [`middleware`](crate::middleware) for details on how error handling impacts +middleware. diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/merge.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/merge.md new file mode 100644 index 0000000000..b88175130b --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/merge.md @@ -0,0 +1,77 @@ +Merge two routers into one. + +This is useful for breaking apps into smaller pieces and combining them +into one. + +```rust +use axum::{ + routing::get, + Router, +}; +# +# async fn users_list() {} +# async fn users_show() {} +# async fn teams_list() {} + +// define some routes separately +let user_routes = Router::new() + .route("/users", get(users_list)) + .route("/users/:id", get(users_show)); + +let team_routes = Router::new() + .route("/teams", get(teams_list)); + +// combine them into one +let app = Router::new() + .merge(user_routes) + .merge(team_routes); + +// could also do `user_routes.merge(team_routes)` + +// Our app now accepts +// - GET /users +// - GET /users/:id +// - GET /teams +# async { +# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Merging routers with state + +When combining [`Router`]s with this method, each [`Router`] must have the +same type of state. 
If your routers have different types you can use +[`Router::with_state`] to provide the state and make the types match: + +```rust +use axum::{ + Router, + routing::get, + extract::State, +}; + +#[derive(Clone)] +struct InnerState {} + +#[derive(Clone)] +struct OuterState {} + +async fn inner_handler(state: State) {} + +let inner_router = Router::new() + .route("/bar", get(inner_handler)) + .with_state(InnerState {}); + +async fn outer_handler(state: State) {} + +let app = Router::new() + .route("/", get(outer_handler)) + .merge(inner_router) + .with_state(OuterState {}); +# let _: axum::Router = app; +``` + +# Panics + +- If two routers that each have a [fallback](Router::fallback) are merged. This + is because `Router` only allows a single fallback. diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/nest.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/nest.md new file mode 100644 index 0000000000..b40d0fc951 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/nest.md @@ -0,0 +1,195 @@ +Nest a [`Router`] at some path. + +This allows you to break your application into smaller pieces and compose +them together. + +# Example + +```rust +use axum::{ + routing::{get, post}, + Router, +}; + +let user_routes = Router::new().route("/:id", get(|| async {})); + +let team_routes = Router::new().route("/", post(|| async {})); + +let api_routes = Router::new() + .nest("/users", user_routes) + .nest("/teams", team_routes); + +let app = Router::new().nest("/api", api_routes); + +// Our app now accepts +// - GET /api/users/:id +// - POST /api/teams +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# How the URI changes + +Note that nested routes will not see the original request URI but instead +have the matched prefix stripped. This is necessary for services like static +file serving to work. Use [`OriginalUri`] if you need the original request +URI. 
+ +# Captures from outer routes + +Take care when using `nest` together with dynamic routes as nesting also +captures from the outer routes: + +```rust +use axum::{ + extract::Path, + routing::get, + Router, +}; +use std::collections::HashMap; + +async fn users_get(Path(params): Path>) { + // Both `version` and `id` were captured even though `users_api` only + // explicitly captures `id`. + let version = params.get("version"); + let id = params.get("id"); +} + +let users_api = Router::new().route("/users/:id", get(users_get)); + +let app = Router::new().nest("/:version/api", users_api); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Differences from wildcard routes + +Nested routes are similar to wildcard routes. The difference is that +wildcard routes still see the whole URI whereas nested routes will have +the prefix stripped: + +```rust +use axum::{routing::get, http::Uri, Router}; + +let nested_router = Router::new() + .route("/", get(|uri: Uri| async { + // `uri` will _not_ contain `/bar` + })); + +let app = Router::new() + .route("/foo/*rest", get(|uri: Uri| async { + // `uri` will contain `/foo` + })) + .nest("/bar", nested_router); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Fallbacks + +If a nested router doesn't have its own fallback then it will inherit the +fallback from the outer router: + +```rust +use axum::{routing::get, http::StatusCode, handler::Handler, Router}; + +async fn fallback() -> (StatusCode, &'static str) { + (StatusCode::NOT_FOUND, "Not Found") +} + +let api_routes = Router::new().route("/users", get(|| async {})); + +let app = Router::new() + .nest("/api", api_routes) + .fallback(fallback); +# let _: Router = app; +``` + +Here requests like `GET /api/not-found` will go into `api_routes` but because +it doesn't have a matching route and doesn't have its own fallback it will call +the fallback 
from the outer router, i.e. the `fallback` function. + +If the nested router has its own fallback then the outer fallback will not be +inherited: + +```rust +use axum::{ + routing::get, + http::StatusCode, + handler::Handler, + Json, + Router, +}; + +async fn fallback() -> (StatusCode, &'static str) { + (StatusCode::NOT_FOUND, "Not Found") +} + +async fn api_fallback() -> (StatusCode, Json) { + ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({ "status": "Not Found" })), + ) +} + +let api_routes = Router::new() + .route("/users", get(|| async {})) + .fallback(api_fallback); + +let app = Router::new() + .nest("/api", api_routes) + .fallback(fallback); +# let _: Router = app; +``` + +Here requests like `GET /api/not-found` will go to `api_fallback`. + +# Nesting routers with state + +When combining [`Router`]s with this method, each [`Router`] must have the +same type of state. If your routers have different types you can use +[`Router::with_state`] to provide the state and make the types match: + +```rust +use axum::{ + Router, + routing::get, + extract::State, +}; + +#[derive(Clone)] +struct InnerState {} + +#[derive(Clone)] +struct OuterState {} + +async fn inner_handler(state: State) {} + +let inner_router = Router::new() + .route("/bar", get(inner_handler)) + .with_state(InnerState {}); + +async fn outer_handler(state: State) {} + +let app = Router::new() + .route("/", get(outer_handler)) + .nest("/foo", inner_router) + .with_state(OuterState {}); +# let _: axum::Router = app; +``` + +Note that the inner router will still inherit the fallback from the outer +router. + +# Panics + +- If the route overlaps with another route. See [`Router::route`] +for more details. +- If the route contains a wildcard (`*`). +- If `path` is empty. 
+ +[`OriginalUri`]: crate::extract::OriginalUri +[fallbacks]: Router::fallback diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/route.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/route.md new file mode 100644 index 0000000000..ac5ed9406b --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/route.md @@ -0,0 +1,148 @@ +Add another route to the router. + +`path` is a string of path segments separated by `/`. Each segment +can be either static, a capture, or a wildcard. + +`method_router` is the [`MethodRouter`] that should receive the request if the +path matches `path`. `method_router` will commonly be a handler wrapped in a method +router like [`get`](crate::routing::get). See [`handler`](crate::handler) for +more details on handlers. + +# Static paths + +Examples: + +- `/` +- `/foo` +- `/users/123` + +If the incoming request matches the path exactly the corresponding service will +be called. + +# Captures + +Paths can contain segments like `/:key` which matches any single segment and +will store the value captured at `key`. + +Examples: + +- `/:key` +- `/users/:id` +- `/users/:id/tweets` + +Captures can be extracted using [`Path`](crate::extract::Path). See its +documentation for more details. + +It is not possible to create segments that only match some types like numbers or +regular expression. You must handle that manually in your handlers. + +[`MatchedPath`](crate::extract::MatchedPath) can be used to extract the matched +path rather than the actual path. + +# Wildcards + +Paths can end in `/*key` which matches all segments and will store the segments +captured at `key`. + +Examples: + +- `/*key` +- `/assets/*path` +- `/:id/:repo/*tree` + +Note that `/*key` doesn't match empty segments. Thus: + +- `/*key` doesn't match `/` but does match `/a`, `/a/`, etc. +- `/x/*key` doesn't match `/x` or `/x/` but does match `/x/a`, `/x/a/`, etc. + +Wildcard captures can also be extracted using [`Path`](crate::extract::Path). 
+Note that the leading slash is not included, i.e. for the route `/foo/*rest` and +the path `/foo/bar/baz` the value of `rest` will be `bar/baz`. + +# Accepting multiple methods + +To accept multiple methods for the same route you can add all handlers at the +same time: + +```rust +use axum::{Router, routing::{get, delete}, extract::Path}; + +let app = Router::new().route( + "/", + get(get_root).post(post_root).delete(delete_root), +); + +async fn get_root() {} + +async fn post_root() {} + +async fn delete_root() {} +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +Or you can add them one by one: + +```rust +# use axum::Router; +# use axum::routing::{get, post, delete}; +# +let app = Router::new() + .route("/", get(get_root)) + .route("/", post(post_root)) + .route("/", delete(delete_root)); +# +# let _: Router = app; +# async fn get_root() {} +# async fn post_root() {} +# async fn delete_root() {} +``` + +# More examples + +```rust +use axum::{Router, routing::{get, delete}, extract::Path}; + +let app = Router::new() + .route("/", get(root)) + .route("/users", get(list_users).post(create_user)) + .route("/users/:id", get(show_user)) + .route("/api/:version/users/:id/action", delete(do_users_action)) + .route("/assets/*path", get(serve_asset)); + +async fn root() {} + +async fn list_users() {} + +async fn create_user() {} + +async fn show_user(Path(id): Path) {} + +async fn do_users_action(Path((version, id)): Path<(String, u64)>) {} + +async fn serve_asset(Path(path): Path) {} +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +# Panics + +Panics if the route overlaps with another route: + +```rust,should_panic +use axum::{routing::get, Router}; + +let app = Router::new() + .route("/", get(|| async {})) + .route("/", get(|| async {})); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); 
+# }; +``` + +The static route `/foo` and the dynamic route `/:key` are not considered to +overlap and `/foo` will take precedence. + +Also panics if `path` is empty. diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/route_layer.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/route_layer.md new file mode 100644 index 0000000000..fe5b8faa7d --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/route_layer.md @@ -0,0 +1,32 @@ +Apply a [`tower::Layer`] to the router that will only run if the request matches +a route. + +Note that the middleware is only applied to existing routes. So you have to +first add your routes (and / or fallback) and then call `layer` afterwards. Additional +routes added after `layer` is called will not have the middleware added. + +This works similarly to [`Router::layer`] except the middleware will only run if +the request matches a route. This is useful for middleware that return early +(such as authorization) which might otherwise convert a `404 Not Found` into a +`401 Unauthorized`. + +# Example + +```rust +use axum::{ + routing::get, + Router, +}; +use tower_http::validate_request::ValidateRequestHeaderLayer; + +let app = Router::new() + .route("/foo", get(|| async {})) + .route_layer(ValidateRequestHeaderLayer::bearer("password")); + +// `GET /foo` with a valid token will receive `200 OK` +// `GET /foo` with a invalid token will receive `401 Unauthorized` +// `GET /not-found` with a invalid token will receive `404 Not Found` +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/route_service.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/route_service.md new file mode 100644 index 0000000000..1be229e4c6 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/route_service.md @@ -0,0 +1,74 @@ +Add another route to the router that calls a [`Service`]. 
+ +# Example + +```rust,no_run +use axum::{ + Router, + body::Body, + routing::{any_service, get_service}, + http::{Request, StatusCode}, + error_handling::HandleErrorLayer, +}; +use tower_http::services::ServeFile; +use http::Response; +use std::{convert::Infallible, io}; +use tower::service_fn; + +let app = Router::new() + .route( + // Any request to `/` goes to a service + "/", + // Services whose response body is not `axum::body::BoxBody` + // can be wrapped in `axum::routing::any_service` (or one of the other routing filters) + // to have the response body mapped + any_service(service_fn(|_: Request| async { + let res = Response::new(Body::from("Hi from `GET /`")); + Ok::<_, Infallible>(res) + })) + ) + .route_service( + "/foo", + // This service's response body is `axum::body::BoxBody` so + // it can be routed to directly. + service_fn(|req: Request| async move { + let body = Body::from(format!("Hi from `{} /foo`", req.method())); + let body = axum::body::boxed(body); + let res = Response::new(body); + Ok::<_, Infallible>(res) + }) + ) + .route_service( + // GET `/static/Cargo.toml` goes to a service from tower-http + "/static/Cargo.toml", + ServeFile::new("Cargo.toml"), + ); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +Routing to arbitrary services in this way has complications for backpressure +([`Service::poll_ready`]). See the [Routing to services and backpressure] module +for more details. + +# Panics + +Panics for the same reasons as [`Router::route`] or if you attempt to route to a +`Router`: + +```rust,should_panic +use axum::{routing::get, Router}; + +let app = Router::new().route_service( + "/", + Router::new().route("/foo", get(|| async {})), +); +# async { +# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +# }; +``` + +Use [`Router::nest`] instead. 
+ +[Routing to services and backpressure]: middleware/index.html#routing-to-servicesmiddleware-and-backpressure diff --git a/.cargo-vendor/axum-0.6.20/src/docs/routing/with_state.md b/.cargo-vendor/axum-0.6.20/src/docs/routing/with_state.md new file mode 100644 index 0000000000..3f9c815132 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/docs/routing/with_state.md @@ -0,0 +1,245 @@ +Provide the state for the router. + +```rust +use axum::{Router, routing::get, extract::State}; + +#[derive(Clone)] +struct AppState {} + +let routes = Router::new() + .route("/", get(|State(state): State| async { + // use state + })) + .with_state(AppState {}); + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(routes.into_make_service()) + .await; +# }; +``` + +# Returning routers with states from functions + +When returning `Router`s from functions it is generally recommend not set the +state directly: + +```rust +use axum::{Router, routing::get, extract::State}; + +#[derive(Clone)] +struct AppState {} + +// Don't call `Router::with_state` here +fn routes() -> Router { + Router::new() + .route("/", get(|_: State| async {})) +} + +// Instead do it before you run the server +let routes = routes().with_state(AppState {}); + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(routes.into_make_service()) + .await; +# }; +``` + +If you do need to provide the state, and you're _not_ nesting/merging the router +into another router, then return `Router` without any type parameters: + +```rust +# use axum::{Router, routing::get, extract::State}; +# #[derive(Clone)] +# struct AppState {} +# +// Don't return `Router` +fn routes(state: AppState) -> Router { + Router::new() + .route("/", get(|_: State| async {})) + .with_state(state) +} + +let routes = routes(AppState {}); + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(routes.into_make_service()) + .await; +# }; +``` + +This is because we can only call 
`Router::into_make_service` on `Router<()>`, +not `Router`. See below for more details about why that is. + +Note that the state defaults to `()` so `Router` and `Router<()>` is the same. + +If you are nesting/merging the router it is recommended to use a generic state +type on the resulting router: + +```rust +# use axum::{Router, routing::get, extract::State}; +# #[derive(Clone)] +# struct AppState {} +# +fn routes(state: AppState) -> Router { + Router::new() + .route("/", get(|_: State| async {})) + .with_state(state) +} + +let routes = Router::new().nest("/api", routes(AppState {})); + +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(routes.into_make_service()) + .await; +# }; +``` + +# State is global within the router + +The state passed to this method will be used for all requests this router +receives. That means it is not suitable for holding state derived from a +request, such as authorization data extracted in a middleware. Use [`Extension`] +instead for such data. + +# What `S` in `Router` means + +`Router` means a router that is _missing_ a state of type `S` to be able to +handle requests. It does _not_ mean a `Router` that _has_ a state of type `S`. + +For example: + +```rust +# use axum::{Router, routing::get, extract::State}; +# #[derive(Clone)] +# struct AppState {} +# +// A router that _needs_ an `AppState` to handle requests +let router: Router = Router::new() + .route("/", get(|_: State| async {})); + +// Once we call `Router::with_state` the router isn't missing +// the state anymore, because we just provided it +// +// Therefore the router type becomes `Router<()>`, i.e a router +// that is not missing any state +let router: Router<()> = router.with_state(AppState {}); + +// Only `Router<()>` has the `into_make_service` method. +// +// You cannot call `into_make_service` on a `Router` +// because it is still missing an `AppState`. 
+# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(router.into_make_service()) + .await; +# }; +``` + +Perhaps a little counter intuitively, `Router::with_state` doesn't always return a +`Router<()>`. Instead you get to pick what the new missing state type is: + +```rust +# use axum::{Router, routing::get, extract::State}; +# #[derive(Clone)] +# struct AppState {} +# +let router: Router = Router::new() + .route("/", get(|_: State| async {})); + +// When we call `with_state` we're able to pick what the next missing state type is. +// Here we pick `String`. +let string_router: Router = router.with_state(AppState {}); + +// That allows us to add new routes that uses `String` as the state type +let string_router = string_router + .route("/needs-string", get(|_: State| async {})); + +// Provide the `String` and choose `()` as the new missing state. +let final_router: Router<()> = string_router.with_state("foo".to_owned()); + +// Since we have a `Router<()>` we can run it. +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(final_router.into_make_service()) + .await; +# }; +``` + +This why this returning `Router` after calling `with_state` doesn't +work: + +```rust,compile_fail +# use axum::{Router, routing::get, extract::State}; +# #[derive(Clone)] +# struct AppState {} +# +// This wont work because we're returning a `Router` +// i.e. 
we're saying we're still missing an `AppState` +fn routes(state: AppState) -> Router { + Router::new() + .route("/", get(|_: State| async {})) + .with_state(state) +} + +let app = routes(AppState {}); + +// We can only call `Router::into_make_service` on a `Router<()>` +// but `app` is a `Router` +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(app.into_make_service()) + .await; +# }; +``` + +Instead return `Router<()>` since we have provided all the state needed: + +```rust +# use axum::{Router, routing::get, extract::State}; +# #[derive(Clone)] +# struct AppState {} +# +// We've provided all the state necessary so return `Router<()>` +fn routes(state: AppState) -> Router<()> { + Router::new() + .route("/", get(|_: State| async {})) + .with_state(state) +} + +let app = routes(AppState {}); + +// We can now call `Router::into_make_service` +# async { +axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + .serve(app.into_make_service()) + .await; +# }; +``` + +# A note about performance + +If you need a `Router` that implements `Service` but you don't need any state (perhaps +you're making a library that uses axum internally) then it is recommended to call this +method before you start serving requests: + +```rust +use axum::{Router, routing::get}; + +let app = Router::new() + .route("/", get(|| async { /* ... */ })) + // even though we don't need any state, call `with_state(())` anyway + .with_state(()); +# let _: Router = app; +``` + +This is not required but it gives axum a chance to update some internals in the router +which may impact performance and reduce allocations. + +Note that [`Router::into_make_service`] and [`Router::into_make_service_with_connect_info`] +do this automatically. 
+ +[`Extension`]: crate::Extension diff --git a/.cargo-vendor/axum-0.6.20/src/error_handling/mod.rs b/.cargo-vendor/axum-0.6.20/src/error_handling/mod.rs new file mode 100644 index 0000000000..1b12035024 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/error_handling/mod.rs @@ -0,0 +1,262 @@ +#![doc = include_str!("../docs/error_handling.md")] + +use crate::{ + extract::FromRequestParts, + http::Request, + response::{IntoResponse, Response}, +}; +use std::{ + convert::Infallible, + fmt, + future::Future, + marker::PhantomData, + task::{Context, Poll}, +}; +use tower::ServiceExt; +use tower_layer::Layer; +use tower_service::Service; + +/// [`Layer`] that applies [`HandleError`] which is a [`Service`] adapter +/// that handles errors by converting them into responses. +/// +/// See [module docs](self) for more details on axum's error handling model. +pub struct HandleErrorLayer { + f: F, + _extractor: PhantomData T>, +} + +impl HandleErrorLayer { + /// Create a new `HandleErrorLayer`. + pub fn new(f: F) -> Self { + Self { + f, + _extractor: PhantomData, + } + } +} + +impl Clone for HandleErrorLayer +where + F: Clone, +{ + fn clone(&self) -> Self { + Self { + f: self.f.clone(), + _extractor: PhantomData, + } + } +} + +impl fmt::Debug for HandleErrorLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("HandleErrorLayer") + .field("f", &format_args!("{}", std::any::type_name::())) + .finish() + } +} + +impl Layer for HandleErrorLayer +where + F: Clone, +{ + type Service = HandleError; + + fn layer(&self, inner: S) -> Self::Service { + HandleError::new(inner, self.f.clone()) + } +} + +/// A [`Service`] adapter that handles errors by converting them into responses. +/// +/// See [module docs](self) for more details on axum's error handling model. +pub struct HandleError { + inner: S, + f: F, + _extractor: PhantomData T>, +} + +impl HandleError { + /// Create a new `HandleError`. 
+ pub fn new(inner: S, f: F) -> Self { + Self { + inner, + f, + _extractor: PhantomData, + } + } +} + +impl Clone for HandleError +where + S: Clone, + F: Clone, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + f: self.f.clone(), + _extractor: PhantomData, + } + } +} + +impl fmt::Debug for HandleError +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("HandleError") + .field("inner", &self.inner) + .field("f", &format_args!("{}", std::any::type_name::())) + .finish() + } +} + +impl Service> for HandleError +where + S: Service> + Clone + Send + 'static, + S::Response: IntoResponse + Send, + S::Error: Send, + S::Future: Send, + F: FnOnce(S::Error) -> Fut + Clone + Send + 'static, + Fut: Future + Send, + Res: IntoResponse, + B: Send + 'static, +{ + type Response = Response; + type Error = Infallible; + type Future = future::HandleErrorFuture; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + let f = self.f.clone(); + + let clone = self.inner.clone(); + let inner = std::mem::replace(&mut self.inner, clone); + + let future = Box::pin(async move { + match inner.oneshot(req).await { + Ok(res) => Ok(res.into_response()), + Err(err) => Ok(f(err).await.into_response()), + } + }); + + future::HandleErrorFuture { future } + } +} + +#[allow(unused_macros)] +macro_rules! impl_service { + ( $($ty:ident),* $(,)? 
) => { + impl Service> + for HandleError + where + S: Service> + Clone + Send + 'static, + S::Response: IntoResponse + Send, + S::Error: Send, + S::Future: Send, + F: FnOnce($($ty),*, S::Error) -> Fut + Clone + Send + 'static, + Fut: Future + Send, + Res: IntoResponse, + $( $ty: FromRequestParts<()> + Send,)* + B: Send + 'static, + { + type Response = Response; + type Error = Infallible; + + type Future = future::HandleErrorFuture; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[allow(non_snake_case)] + fn call(&mut self, req: Request) -> Self::Future { + let f = self.f.clone(); + + let clone = self.inner.clone(); + let inner = std::mem::replace(&mut self.inner, clone); + + let future = Box::pin(async move { + let (mut parts, body) = req.into_parts(); + + $( + let $ty = match $ty::from_request_parts(&mut parts, &()).await { + Ok(value) => value, + Err(rejection) => return Ok(rejection.into_response()), + }; + )* + + let req = Request::from_parts(parts, body); + + match inner.oneshot(req).await { + Ok(res) => Ok(res.into_response()), + Err(err) => Ok(f($($ty),*, err).await.into_response()), + } + }); + + future::HandleErrorFuture { future } + } + } + } +} + +impl_service!(T1); +impl_service!(T1, T2); +impl_service!(T1, T2, T3); +impl_service!(T1, T2, T3, T4); +impl_service!(T1, T2, T3, T4, T5); +impl_service!(T1, T2, T3, T4, T5, T6); +impl_service!(T1, T2, T3, T4, T5, T6, T7); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15); +impl_service!(T1, T2, T3, T4, T5, T6, 
T7, T8, T9, T10, T11, T12, T13, T14, T15, T16); + +pub mod future { + //! Future types. + + use crate::response::Response; + use pin_project_lite::pin_project; + use std::{ + convert::Infallible, + future::Future, + pin::Pin, + task::{Context, Poll}, + }; + + pin_project! { + /// Response future for [`HandleError`]. + pub struct HandleErrorFuture { + #[pin] + pub(super) future: Pin> + + Send + + 'static + >>, + } + } + + impl Future for HandleErrorFuture { + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.project().future.poll(cx) + } + } +} + +#[test] +fn traits() { + use crate::test_helpers::*; + + assert_send::>(); + assert_sync::>(); +} diff --git a/.cargo-vendor/axum-0.6.20/src/extension.rs b/.cargo-vendor/axum-0.6.20/src/extension.rs new file mode 100644 index 0000000000..d66c9466b9 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extension.rs @@ -0,0 +1,168 @@ +use crate::{extract::rejection::*, response::IntoResponseParts}; +use async_trait::async_trait; +use axum_core::{ + extract::FromRequestParts, + response::{IntoResponse, Response, ResponseParts}, +}; +use http::{request::Parts, Request}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// Extractor and response for extensions. +/// +/// # As extractor +/// +/// This is commonly used to share state across handlers. +/// +/// ```rust,no_run +/// use axum::{ +/// Router, +/// Extension, +/// routing::get, +/// }; +/// use std::sync::Arc; +/// +/// // Some shared state used throughout our application +/// struct State { +/// // ... +/// } +/// +/// async fn handler(state: Extension>) { +/// // ... +/// } +/// +/// let state = Arc::new(State { /* ... */ }); +/// +/// let app = Router::new().route("/", get(handler)) +/// // Add middleware that inserts the state into all incoming request's +/// // extensions. 
+/// .layer(Extension(state)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// If the extension is missing it will reject the request with a `500 Internal +/// Server Error` response. +/// +/// # As response +/// +/// Response extensions can be used to share state with middleware. +/// +/// ```rust +/// use axum::{ +/// Extension, +/// response::IntoResponse, +/// }; +/// +/// async fn handler() -> (Extension, &'static str) { +/// ( +/// Extension(Foo("foo")), +/// "Hello, World!" +/// ) +/// } +/// +/// #[derive(Clone)] +/// struct Foo(&'static str); +/// ``` +#[derive(Debug, Clone, Copy, Default)] +#[must_use] +pub struct Extension(pub T); + +#[async_trait] +impl FromRequestParts for Extension +where + T: Clone + Send + Sync + 'static, + S: Send + Sync, +{ + type Rejection = ExtensionRejection; + + async fn from_request_parts(req: &mut Parts, _state: &S) -> Result { + let value = req + .extensions + .get::() + .ok_or_else(|| { + MissingExtension::from_err(format!( + "Extension of type `{}` was not found. Perhaps you forgot to add it? 
See `axum::Extension`.", + std::any::type_name::() + )) + }) + .map(|x| x.clone())?; + + Ok(Extension(value)) + } +} + +axum_core::__impl_deref!(Extension); + +impl IntoResponseParts for Extension +where + T: Send + Sync + 'static, +{ + type Error = Infallible; + + fn into_response_parts(self, mut res: ResponseParts) -> Result { + res.extensions_mut().insert(self.0); + Ok(res) + } +} + +impl IntoResponse for Extension +where + T: Send + Sync + 'static, +{ + fn into_response(self) -> Response { + let mut res = ().into_response(); + res.extensions_mut().insert(self.0); + res + } +} + +impl tower_layer::Layer for Extension +where + T: Clone + Send + Sync + 'static, +{ + type Service = AddExtension; + + fn layer(&self, inner: S) -> Self::Service { + AddExtension { + inner, + value: self.0.clone(), + } + } +} + +/// Middleware for adding some shareable value to [request extensions]. +/// +/// See [Sharing state with handlers](index.html#sharing-state-with-handlers) +/// for more details. +/// +/// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html +#[derive(Clone, Copy, Debug)] +pub struct AddExtension { + pub(crate) inner: S, + pub(crate) value: T, +} + +impl Service> for AddExtension +where + S: Service>, + T: Clone + Send + Sync + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + req.extensions_mut().insert(self.value.clone()); + self.inner.call(req) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/connect_info.rs b/.cargo-vendor/axum-0.6.20/src/extract/connect_info.rs new file mode 100644 index 0000000000..f22b89815c --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/connect_info.rs @@ -0,0 +1,329 @@ +//! Extractor for getting connection information from a client. +//! +//! 
See [`Router::into_make_service_with_connect_info`] for more details. +//! +//! [`Router::into_make_service_with_connect_info`]: crate::routing::Router::into_make_service_with_connect_info + +use super::{Extension, FromRequestParts}; +use crate::middleware::AddExtension; +use async_trait::async_trait; +use http::request::Parts; +use hyper::server::conn::AddrStream; +use std::{ + convert::Infallible, + fmt, + future::ready, + marker::PhantomData, + net::SocketAddr, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// A [`MakeService`] created from a router. +/// +/// See [`Router::into_make_service_with_connect_info`] for more details. +/// +/// [`MakeService`]: tower::make::MakeService +/// [`Router::into_make_service_with_connect_info`]: crate::routing::Router::into_make_service_with_connect_info +pub struct IntoMakeServiceWithConnectInfo { + svc: S, + _connect_info: PhantomData C>, +} + +#[test] +fn traits() { + use crate::test_helpers::*; + assert_send::>(); +} + +impl IntoMakeServiceWithConnectInfo { + pub(crate) fn new(svc: S) -> Self { + Self { + svc, + _connect_info: PhantomData, + } + } +} + +impl fmt::Debug for IntoMakeServiceWithConnectInfo +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("IntoMakeServiceWithConnectInfo") + .field("svc", &self.svc) + .finish() + } +} + +impl Clone for IntoMakeServiceWithConnectInfo +where + S: Clone, +{ + fn clone(&self) -> Self { + Self { + svc: self.svc.clone(), + _connect_info: PhantomData, + } + } +} + +/// Trait that connected IO resources implement and use to produce information +/// about the connection. +/// +/// The goal for this trait is to allow users to implement custom IO types that +/// can still provide the same connection metadata. +/// +/// See [`Router::into_make_service_with_connect_info`] for more details. 
+/// +/// [`Router::into_make_service_with_connect_info`]: crate::routing::Router::into_make_service_with_connect_info +pub trait Connected: Clone + Send + Sync + 'static { + /// Create type holding information about the connection. + fn connect_info(target: T) -> Self; +} + +impl Connected<&AddrStream> for SocketAddr { + fn connect_info(target: &AddrStream) -> Self { + target.remote_addr() + } +} + +impl Service for IntoMakeServiceWithConnectInfo +where + S: Clone, + C: Connected, +{ + type Response = AddExtension>; + type Error = Infallible; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, target: T) -> Self::Future { + let connect_info = ConnectInfo(C::connect_info(target)); + let svc = Extension(connect_info).layer(self.svc.clone()); + ResponseFuture::new(ready(Ok(svc))) + } +} + +opaque_future! { + /// Response future for [`IntoMakeServiceWithConnectInfo`]. + pub type ResponseFuture = + std::future::Ready>, Infallible>>; +} + +/// Extractor for getting connection information produced by a [`Connected`]. +/// +/// Note this extractor requires you to use +/// [`Router::into_make_service_with_connect_info`] to run your app +/// otherwise it will fail at runtime. +/// +/// See [`Router::into_make_service_with_connect_info`] for more details. 
+/// +/// [`Router::into_make_service_with_connect_info`]: crate::routing::Router::into_make_service_with_connect_info +#[derive(Clone, Copy, Debug)] +pub struct ConnectInfo(pub T); + +#[async_trait] +impl FromRequestParts for ConnectInfo +where + S: Send + Sync, + T: Clone + Send + Sync + 'static, +{ + type Rejection = as FromRequestParts>::Rejection; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + match Extension::::from_request_parts(parts, state).await { + Ok(Extension(connect_info)) => Ok(connect_info), + Err(err) => match parts.extensions.get::>() { + Some(MockConnectInfo(connect_info)) => Ok(Self(connect_info.clone())), + None => Err(err), + }, + } + } +} + +axum_core::__impl_deref!(ConnectInfo); + +/// Middleware used to mock [`ConnectInfo`] during tests. +/// +/// If you're accidentally using [`MockConnectInfo`] and +/// [`Router::into_make_service_with_connect_info`] at the same time then +/// [`Router::into_make_service_with_connect_info`] takes precedence. 
+/// +/// # Example +/// +/// ``` +/// use axum::{ +/// Router, +/// extract::connect_info::{MockConnectInfo, ConnectInfo}, +/// body::Body, +/// routing::get, +/// http::{Request, StatusCode}, +/// }; +/// use std::net::SocketAddr; +/// use tower::ServiceExt; +/// +/// async fn handler(ConnectInfo(addr): ConnectInfo) {} +/// +/// // this router you can run with `app.into_make_service_with_connect_info::()` +/// fn app() -> Router { +/// Router::new().route("/", get(handler)) +/// } +/// +/// // use this router for tests +/// fn test_app() -> Router { +/// app().layer(MockConnectInfo(SocketAddr::from(([0, 0, 0, 0], 1337)))) +/// } +/// +/// // #[tokio::test] +/// async fn some_test() { +/// let app = test_app(); +/// +/// let request = Request::new(Body::empty()); +/// let response = app.oneshot(request).await.unwrap(); +/// assert_eq!(response.status(), StatusCode::OK); +/// } +/// # +/// # #[tokio::main] +/// # async fn main() { +/// # some_test().await; +/// # } +/// ``` +/// +/// [`Router::into_make_service_with_connect_info`]: crate::Router::into_make_service_with_connect_info +#[derive(Clone, Copy, Debug)] +pub struct MockConnectInfo(pub T); + +impl Layer for MockConnectInfo +where + T: Clone + Send + Sync + 'static, +{ + type Service = as Layer>::Service; + + fn layer(&self, inner: S) -> Self::Service { + Extension(self.clone()).layer(inner) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{routing::get, test_helpers::TestClient, Router, Server}; + use std::net::{SocketAddr, TcpListener}; + + #[crate::test] + async fn socket_addr() { + async fn handler(ConnectInfo(addr): ConnectInfo) -> String { + format!("{addr}") + } + + let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + + let (tx, rx) = tokio::sync::oneshot::channel(); + tokio::spawn(async move { + let app = Router::new().route("/", get(handler)); + let server = Server::from_tcp(listener) + .unwrap() + 
.serve(app.into_make_service_with_connect_info::()); + tx.send(()).unwrap(); + server.await.expect("server error"); + }); + rx.await.unwrap(); + + let client = reqwest::Client::new(); + + let res = client.get(format!("http://{addr}")).send().await.unwrap(); + let body = res.text().await.unwrap(); + assert!(body.starts_with("127.0.0.1:")); + } + + #[crate::test] + async fn custom() { + #[derive(Clone, Debug)] + struct MyConnectInfo { + value: &'static str, + } + + impl Connected<&AddrStream> for MyConnectInfo { + fn connect_info(_target: &AddrStream) -> Self { + Self { + value: "it worked!", + } + } + } + + async fn handler(ConnectInfo(addr): ConnectInfo) -> &'static str { + addr.value + } + + let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + + let (tx, rx) = tokio::sync::oneshot::channel(); + tokio::spawn(async move { + let app = Router::new().route("/", get(handler)); + let server = Server::from_tcp(listener) + .unwrap() + .serve(app.into_make_service_with_connect_info::()); + tx.send(()).unwrap(); + server.await.expect("server error"); + }); + rx.await.unwrap(); + + let client = reqwest::Client::new(); + + let res = client.get(format!("http://{addr}")).send().await.unwrap(); + let body = res.text().await.unwrap(); + assert_eq!(body, "it worked!"); + } + + #[crate::test] + async fn mock_connect_info() { + async fn handler(ConnectInfo(addr): ConnectInfo) -> String { + format!("{addr}") + } + + let app = Router::new() + .route("/", get(handler)) + .layer(MockConnectInfo(SocketAddr::from(([0, 0, 0, 0], 1337)))); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + let body = res.text().await; + assert!(body.starts_with("0.0.0.0:1337")); + } + + #[crate::test] + async fn both_mock_and_real_connect_info() { + async fn handler(ConnectInfo(addr): ConnectInfo) -> String { + format!("{addr}") + } + + let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = 
listener.local_addr().unwrap(); + + tokio::spawn(async move { + let app = Router::new() + .route("/", get(handler)) + .layer(MockConnectInfo(SocketAddr::from(([0, 0, 0, 0], 1337)))); + + let server = Server::from_tcp(listener) + .unwrap() + .serve(app.into_make_service_with_connect_info::()); + server.await.expect("server error"); + }); + + let client = reqwest::Client::new(); + + let res = client.get(format!("http://{addr}")).send().await.unwrap(); + let body = res.text().await.unwrap(); + assert!(body.starts_with("127.0.0.1:")); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/host.rs b/.cargo-vendor/axum-0.6.20/src/extract/host.rs new file mode 100644 index 0000000000..d5be6a978d --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/host.rs @@ -0,0 +1,178 @@ +use super::{ + rejection::{FailedToResolveHost, HostRejection}, + FromRequestParts, +}; +use async_trait::async_trait; +use http::{ + header::{HeaderMap, FORWARDED}, + request::Parts, +}; + +const X_FORWARDED_HOST_HEADER_KEY: &str = "X-Forwarded-Host"; + +/// Extractor that resolves the hostname of the request. +/// +/// Hostname is resolved through the following, in order: +/// - `Forwarded` header +/// - `X-Forwarded-Host` header +/// - `Host` header +/// - request target / URI +/// +/// Note that user agents can set `X-Forwarded-Host` and `Host` headers to arbitrary values so make +/// sure to validate them to avoid security issues. 
+#[derive(Debug, Clone)] +pub struct Host(pub String); + +#[async_trait] +impl FromRequestParts for Host +where + S: Send + Sync, +{ + type Rejection = HostRejection; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + if let Some(host) = parse_forwarded(&parts.headers) { + return Ok(Host(host.to_owned())); + } + + if let Some(host) = parts + .headers + .get(X_FORWARDED_HOST_HEADER_KEY) + .and_then(|host| host.to_str().ok()) + { + return Ok(Host(host.to_owned())); + } + + if let Some(host) = parts + .headers + .get(http::header::HOST) + .and_then(|host| host.to_str().ok()) + { + return Ok(Host(host.to_owned())); + } + + if let Some(host) = parts.uri.host() { + return Ok(Host(host.to_owned())); + } + + Err(HostRejection::FailedToResolveHost(FailedToResolveHost)) + } +} + +#[allow(warnings)] +fn parse_forwarded(headers: &HeaderMap) -> Option<&str> { + // if there are multiple `Forwarded` `HeaderMap::get` will return the first one + let forwarded_values = headers.get(FORWARDED)?.to_str().ok()?; + + // get the first set of values + let first_value = forwarded_values.split(',').nth(0)?; + + // find the value of the `host` field + first_value.split(';').find_map(|pair| { + let (key, value) = pair.split_once('=')?; + key.trim() + .eq_ignore_ascii_case("host") + .then(|| value.trim().trim_matches('"')) + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{routing::get, test_helpers::TestClient, Router}; + use http::header::HeaderName; + + fn test_client() -> TestClient { + async fn host_as_body(Host(host): Host) -> String { + host + } + + TestClient::new(Router::new().route("/", get(host_as_body))) + } + + #[crate::test] + async fn host_header() { + let original_host = "some-domain:123"; + let host = test_client() + .get("/") + .header(http::header::HOST, original_host) + .send() + .await + .text() + .await; + assert_eq!(host, original_host); + } + + #[crate::test] + async fn x_forwarded_host_header() { + let original_host = 
"some-domain:456"; + let host = test_client() + .get("/") + .header(X_FORWARDED_HOST_HEADER_KEY, original_host) + .send() + .await + .text() + .await; + assert_eq!(host, original_host); + } + + #[crate::test] + async fn x_forwarded_host_precedence_over_host_header() { + let x_forwarded_host_header = "some-domain:456"; + let host_header = "some-domain:123"; + let host = test_client() + .get("/") + .header(X_FORWARDED_HOST_HEADER_KEY, x_forwarded_host_header) + .header(http::header::HOST, host_header) + .send() + .await + .text() + .await; + assert_eq!(host, x_forwarded_host_header); + } + + #[crate::test] + async fn uri_host() { + let host = test_client().get("/").send().await.text().await; + assert!(host.contains("127.0.0.1")); + } + + #[test] + fn forwarded_parsing() { + // the basic case + let headers = header_map(&[(FORWARDED, "host=192.0.2.60;proto=http;by=203.0.113.43")]); + let value = parse_forwarded(&headers).unwrap(); + assert_eq!(value, "192.0.2.60"); + + // is case insensitive + let headers = header_map(&[(FORWARDED, "host=192.0.2.60;proto=http;by=203.0.113.43")]); + let value = parse_forwarded(&headers).unwrap(); + assert_eq!(value, "192.0.2.60"); + + // ipv6 + let headers = header_map(&[(FORWARDED, "host=\"[2001:db8:cafe::17]:4711\"")]); + let value = parse_forwarded(&headers).unwrap(); + assert_eq!(value, "[2001:db8:cafe::17]:4711"); + + // multiple values in one header + let headers = header_map(&[(FORWARDED, "host=192.0.2.60, host=127.0.0.1")]); + let value = parse_forwarded(&headers).unwrap(); + assert_eq!(value, "192.0.2.60"); + + // multiple header values + let headers = header_map(&[ + (FORWARDED, "host=192.0.2.60"), + (FORWARDED, "host=127.0.0.1"), + ]); + let value = parse_forwarded(&headers).unwrap(); + assert_eq!(value, "192.0.2.60"); + } + + fn header_map(values: &[(HeaderName, &str)]) -> HeaderMap { + let mut headers = HeaderMap::new(); + for (key, value) in values { + headers.append(key, value.parse().unwrap()); + } + headers + } +} diff 
--git a/.cargo-vendor/axum-0.6.20/src/extract/matched_path.rs b/.cargo-vendor/axum-0.6.20/src/extract/matched_path.rs new file mode 100644 index 0000000000..c3bd7b4589 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/matched_path.rs @@ -0,0 +1,395 @@ +use super::{rejection::*, FromRequestParts}; +use crate::routing::{RouteId, NEST_TAIL_PARAM_CAPTURE}; +use async_trait::async_trait; +use http::request::Parts; +use std::{collections::HashMap, sync::Arc}; + +/// Access the path in the router that matches the request. +/// +/// ``` +/// use axum::{ +/// Router, +/// extract::MatchedPath, +/// routing::get, +/// }; +/// +/// let app = Router::new().route( +/// "/users/:id", +/// get(|path: MatchedPath| async move { +/// let path = path.as_str(); +/// // `path` will be "/users/:id" +/// }) +/// ); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// # Accessing `MatchedPath` via extensions +/// +/// `MatchedPath` can also be accessed from middleware via request extensions. +/// +/// This is useful for example with [`Trace`](tower_http::trace::Trace) to +/// create a span that contains the matched path: +/// +/// ``` +/// use axum::{ +/// Router, +/// extract::MatchedPath, +/// http::Request, +/// routing::get, +/// }; +/// use tower_http::trace::TraceLayer; +/// +/// let app = Router::new() +/// .route("/users/:id", get(|| async { /* ... 
*/ })) +/// .layer( +/// TraceLayer::new_for_http().make_span_with(|req: &Request<_>| { +/// let path = if let Some(path) = req.extensions().get::() { +/// path.as_str() +/// } else { +/// req.uri().path() +/// }; +/// tracing::info_span!("http-request", %path) +/// }), +/// ); +/// # let _: Router = app; +/// ``` +/// +/// # Matched path in nested routers +/// +/// Because of how [nesting] works `MatchedPath` isn't accessible in middleware on nested routes: +/// +/// ``` +/// use axum::{ +/// Router, +/// RequestExt, +/// routing::get, +/// extract::{MatchedPath, rejection::MatchedPathRejection}, +/// middleware::map_request, +/// http::Request, +/// body::Body, +/// }; +/// +/// async fn access_matched_path(mut request: Request) -> Request { +/// // if `/foo/bar` is called this will be `Err(_)` since that matches +/// // a nested route +/// let matched_path: Result = +/// request.extract_parts::().await; +/// +/// request +/// } +/// +/// // `MatchedPath` is always accessible on handlers added via `Router::route` +/// async fn handler(matched_path: MatchedPath) {} +/// +/// let app = Router::new() +/// .nest( +/// "/foo", +/// Router::new().route("/bar", get(handler)), +/// ) +/// .layer(map_request(access_matched_path)); +/// # let _: Router = app; +/// ``` +/// +/// [nesting]: crate::Router::nest +#[cfg_attr(docsrs, doc(cfg(feature = "matched-path")))] +#[derive(Clone, Debug)] +pub struct MatchedPath(pub(crate) Arc); + +impl MatchedPath { + /// Returns a `str` representation of the path. + pub fn as_str(&self) -> &str { + &self.0 + } +} + +#[async_trait] +impl FromRequestParts for MatchedPath +where + S: Send + Sync, +{ + type Rejection = MatchedPathRejection; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let matched_path = parts + .extensions + .get::() + .ok_or(MatchedPathRejection::MatchedPathMissing(MatchedPathMissing))? 
+ .clone(); + + Ok(matched_path) + } +} + +#[derive(Clone, Debug)] +struct MatchedNestedPath(Arc); + +pub(crate) fn set_matched_path_for_request( + id: RouteId, + route_id_to_path: &HashMap>, + extensions: &mut http::Extensions, +) { + let matched_path = if let Some(matched_path) = route_id_to_path.get(&id) { + matched_path + } else { + #[cfg(debug_assertions)] + panic!("should always have a matched path for a route id"); + #[cfg(not(debug_assertions))] + return; + }; + + let matched_path = append_nested_matched_path(matched_path, extensions); + + if matched_path.ends_with(NEST_TAIL_PARAM_CAPTURE) { + extensions.insert(MatchedNestedPath(matched_path)); + debug_assert!(extensions.remove::().is_none()); + } else { + extensions.insert(MatchedPath(matched_path)); + extensions.remove::(); + } +} + +// a previous `MatchedPath` might exist if we're inside a nested Router +fn append_nested_matched_path(matched_path: &Arc, extensions: &http::Extensions) -> Arc { + if let Some(previous) = extensions + .get::() + .map(|matched_path| matched_path.as_str()) + .or_else(|| Some(&extensions.get::()?.0)) + { + let previous = previous + .strip_suffix(NEST_TAIL_PARAM_CAPTURE) + .unwrap_or(previous); + + let matched_path = format!("{previous}{matched_path}"); + matched_path.into() + } else { + Arc::clone(matched_path) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + body::Body, + handler::HandlerWithoutStateExt, + middleware::map_request, + routing::{any, get}, + test_helpers::*, + Router, + }; + use http::{Request, StatusCode}; + + #[crate::test] + async fn extracting_on_handler() { + let app = Router::new().route( + "/:a", + get(|path: MatchedPath| async move { path.as_str().to_owned() }), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.text().await, "/:a"); + } + + #[crate::test] + async fn extracting_on_handler_in_nested_router() { + let app = Router::new().nest( + "/:a", + Router::new().route( + 
"/:b", + get(|path: MatchedPath| async move { path.as_str().to_owned() }), + ), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.text().await, "/:a/:b"); + } + + #[crate::test] + async fn extracting_on_handler_in_deeply_nested_router() { + let app = Router::new().nest( + "/:a", + Router::new().nest( + "/:b", + Router::new().route( + "/:c", + get(|path: MatchedPath| async move { path.as_str().to_owned() }), + ), + ), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.text().await, "/:a/:b/:c"); + } + + #[crate::test] + async fn cannot_extract_nested_matched_path_in_middleware() { + async fn extract_matched_path( + matched_path: Option, + req: Request, + ) -> Request { + assert!(matched_path.is_none()); + req + } + + let app = Router::new() + .nest_service("/:a", Router::new().route("/:b", get(|| async move {}))) + .layer(map_request(extract_matched_path)); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn can_extract_nested_matched_path_in_middleware_using_nest() { + async fn extract_matched_path( + matched_path: Option, + req: Request, + ) -> Request { + assert_eq!(matched_path.unwrap().as_str(), "/:a/:b"); + req + } + + let app = Router::new() + .nest("/:a", Router::new().route("/:b", get(|| async move {}))) + .layer(map_request(extract_matched_path)); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn cannot_extract_nested_matched_path_in_middleware_via_extension() { + async fn assert_no_matched_path(req: Request) -> Request { + assert!(req.extensions().get::().is_none()); + req + } + + let app = Router::new() + .nest_service("/:a", Router::new().route("/:b", get(|| async move {}))) + 
.layer(map_request(assert_no_matched_path)); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn can_extract_nested_matched_path_in_middleware_via_extension_using_nest() { + async fn assert_matched_path(req: Request) -> Request { + assert!(req.extensions().get::().is_some()); + req + } + + let app = Router::new() + .nest("/:a", Router::new().route("/:b", get(|| async move {}))) + .layer(map_request(assert_matched_path)); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn can_extract_nested_matched_path_in_middleware_on_nested_router() { + async fn extract_matched_path(matched_path: MatchedPath, req: Request) -> Request { + assert_eq!(matched_path.as_str(), "/:a/:b"); + req + } + + let app = Router::new().nest( + "/:a", + Router::new() + .route("/:b", get(|| async move {})) + .layer(map_request(extract_matched_path)), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn can_extract_nested_matched_path_in_middleware_on_nested_router_via_extension() { + async fn extract_matched_path(req: Request) -> Request { + let matched_path = req.extensions().get::().unwrap(); + assert_eq!(matched_path.as_str(), "/:a/:b"); + req + } + + let app = Router::new().nest( + "/:a", + Router::new() + .route("/:b", get(|| async move {})) + .layer(map_request(extract_matched_path)), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn extracting_on_nested_handler() { + async fn handler(path: Option) { + assert!(path.is_none()); + } + + let app = Router::new().nest_service("/:a", handler.into_service()); + + let client = 
TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + // https://github.com/tokio-rs/axum/issues/1579 + #[crate::test] + async fn doesnt_panic_if_router_called_from_wildcard_route() { + use tower::ServiceExt; + + let app = Router::new().route( + "/*path", + any(|req: Request| { + Router::new() + .nest("/", Router::new().route("/foo", get(|| async {}))) + .oneshot(req) + }), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn cant_extract_in_fallback() { + async fn handler(path: Option, req: Request) { + assert!(path.is_none()); + assert!(req.extensions().get::().is_none()); + } + + let app = Router::new().fallback(handler); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/mod.rs b/.cargo-vendor/axum-0.6.20/src/extract/mod.rs new file mode 100644 index 0000000000..cb4ebcd92c --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/mod.rs @@ -0,0 +1,115 @@ +#![doc = include_str!("../docs/extract.md")] + +use http::header::{self, HeaderMap}; + +#[cfg(feature = "tokio")] +pub mod connect_info; +pub mod path; +pub mod rejection; + +#[cfg(feature = "ws")] +pub mod ws; + +mod host; +mod raw_form; +mod raw_query; +mod request_parts; +mod state; + +#[doc(inline)] +pub use axum_core::extract::{DefaultBodyLimit, FromRef, FromRequest, FromRequestParts}; + +#[cfg(feature = "macros")] +pub use axum_macros::{FromRef, FromRequest, FromRequestParts}; + +#[doc(inline)] +#[allow(deprecated)] +pub use self::{ + host::Host, + path::{Path, RawPathParams}, + raw_form::RawForm, + raw_query::RawQuery, + request_parts::{BodyStream, RawBody}, + state::State, +}; + +#[doc(inline)] +#[cfg(feature = "tokio")] +pub use self::connect_info::ConnectInfo; + 
+#[doc(no_inline)] +#[cfg(feature = "json")] +pub use crate::Json; + +#[doc(no_inline)] +pub use crate::Extension; + +#[cfg(feature = "form")] +#[doc(no_inline)] +pub use crate::form::Form; + +#[cfg(feature = "matched-path")] +pub(crate) mod matched_path; + +#[cfg(feature = "matched-path")] +#[doc(inline)] +pub use self::matched_path::MatchedPath; + +#[cfg(feature = "multipart")] +pub mod multipart; + +#[cfg(feature = "multipart")] +#[doc(inline)] +pub use self::multipart::Multipart; + +#[cfg(feature = "query")] +mod query; + +#[cfg(feature = "query")] +#[doc(inline)] +pub use self::query::Query; + +#[cfg(feature = "original-uri")] +#[doc(inline)] +pub use self::request_parts::OriginalUri; + +#[cfg(feature = "ws")] +#[doc(inline)] +pub use self::ws::WebSocketUpgrade; + +#[cfg(feature = "headers")] +#[doc(no_inline)] +pub use crate::TypedHeader; + +// this is duplicated in `axum-extra/src/extract/form.rs` +pub(super) fn has_content_type(headers: &HeaderMap, expected_content_type: &mime::Mime) -> bool { + let content_type = if let Some(content_type) = headers.get(header::CONTENT_TYPE) { + content_type + } else { + return false; + }; + + let content_type = if let Ok(content_type) = content_type.to_str() { + content_type + } else { + return false; + }; + + content_type.starts_with(expected_content_type.as_ref()) +} + +#[cfg(test)] +mod tests { + use crate::{routing::get, test_helpers::*, Router}; + + #[crate::test] + async fn consume_body() { + let app = Router::new().route("/", get(|body: String| async { body })); + + let client = TestClient::new(app); + let res = client.get("/").body("foo").send().await; + let body = res.text().await; + + assert_eq!(body, "foo"); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/multipart.rs b/.cargo-vendor/axum-0.6.20/src/extract/multipart.rs new file mode 100644 index 0000000000..3827734f3e --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/multipart.rs @@ -0,0 +1,375 @@ +//! 
Extractor that parses `multipart/form-data` requests commonly used with file uploads. +//! +//! See [`Multipart`] for more details. + +use super::{BodyStream, FromRequest}; +use crate::body::{Bytes, HttpBody}; +use crate::BoxError; +use async_trait::async_trait; +use axum_core::__composite_rejection as composite_rejection; +use axum_core::__define_rejection as define_rejection; +use axum_core::response::{IntoResponse, Response}; +use axum_core::RequestExt; +use futures_util::stream::Stream; +use http::header::{HeaderMap, CONTENT_TYPE}; +use http::{Request, StatusCode}; +use std::error::Error; +use std::{ + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +/// Extractor that parses `multipart/form-data` requests (commonly used with file uploads). +/// +/// ⚠️ Since extracting multipart form data from the request requires consuming the body, the +/// `Multipart` extractor must be *last* if there are multiple extractors in a handler. +/// See ["the order of extractors"][order-of-extractors] +/// +/// [order-of-extractors]: crate::extract#the-order-of-extractors +/// +/// # Example +/// +/// ```rust,no_run +/// use axum::{ +/// extract::Multipart, +/// routing::post, +/// Router, +/// }; +/// use futures_util::stream::StreamExt; +/// +/// async fn upload(mut multipart: Multipart) { +/// while let Some(mut field) = multipart.next_field().await.unwrap() { +/// let name = field.name().unwrap().to_string(); +/// let data = field.bytes().await.unwrap(); +/// +/// println!("Length of `{}` is {} bytes", name, data.len()); +/// } +/// } +/// +/// let app = Router::new().route("/upload", post(upload)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +#[cfg_attr(docsrs, doc(cfg(feature = "multipart")))] +#[derive(Debug)] +pub struct Multipart { + inner: multer::Multipart<'static>, +} + +#[async_trait] +impl FromRequest for Multipart +where + B: HttpBody + Send + 'static, + B::Data: Into, + B::Error: 
Into, + S: Send + Sync, +{ + type Rejection = MultipartRejection; + + async fn from_request(req: Request, state: &S) -> Result { + let boundary = parse_boundary(req.headers()).ok_or(InvalidBoundary)?; + let stream_result = match req.with_limited_body() { + Ok(limited) => BodyStream::from_request(limited, state).await, + Err(unlimited) => BodyStream::from_request(unlimited, state).await, + }; + let stream = stream_result.unwrap_or_else(|err| match err {}); + let multipart = multer::Multipart::new(stream, boundary); + Ok(Self { inner: multipart }) + } +} + +impl Multipart { + /// Yields the next [`Field`] if available. + pub async fn next_field(&mut self) -> Result>, MultipartError> { + let field = self + .inner + .next_field() + .await + .map_err(MultipartError::from_multer)?; + + if let Some(field) = field { + Ok(Some(Field { + inner: field, + _multipart: self, + })) + } else { + Ok(None) + } + } +} + +/// A single field in a multipart stream. +#[derive(Debug)] +pub struct Field<'a> { + inner: multer::Field<'static>, + // multer requires there to only be one live `multer::Field` at any point. This enforces that + // statically, which multer does not do, it returns an error instead. + _multipart: &'a mut Multipart, +} + +impl<'a> Stream for Field<'a> { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner) + .poll_next(cx) + .map_err(MultipartError::from_multer) + } +} + +impl<'a> Field<'a> { + /// The field name found in the + /// [`Content-Disposition`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition) + /// header. + pub fn name(&self) -> Option<&str> { + self.inner.name() + } + + /// The file name found in the + /// [`Content-Disposition`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition) + /// header. 
+ pub fn file_name(&self) -> Option<&str> { + self.inner.file_name() + } + + /// Get the [content type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type) of the field. + pub fn content_type(&self) -> Option<&str> { + self.inner.content_type().map(|m| m.as_ref()) + } + + /// Get a map of headers as [`HeaderMap`]. + pub fn headers(&self) -> &HeaderMap { + self.inner.headers() + } + + /// Get the full data of the field as [`Bytes`]. + pub async fn bytes(self) -> Result { + self.inner + .bytes() + .await + .map_err(MultipartError::from_multer) + } + + /// Get the full field data as text. + pub async fn text(self) -> Result { + self.inner.text().await.map_err(MultipartError::from_multer) + } + + /// Stream a chunk of the field data. + /// + /// When the field data has been exhausted, this will return [`None`]. + /// + /// Note this does the same thing as `Field`'s [`Stream`] implementation. + /// + /// # Example + /// + /// ``` + /// use axum::{ + /// extract::Multipart, + /// routing::post, + /// response::IntoResponse, + /// http::StatusCode, + /// Router, + /// }; + /// + /// async fn upload(mut multipart: Multipart) -> Result<(), (StatusCode, String)> { + /// while let Some(mut field) = multipart + /// .next_field() + /// .await + /// .map_err(|err| (StatusCode::BAD_REQUEST, err.to_string()))? + /// { + /// while let Some(chunk) = field + /// .chunk() + /// .await + /// .map_err(|err| (StatusCode::BAD_REQUEST, err.to_string()))? + /// { + /// println!("received {} bytes", chunk.len()); + /// } + /// } + /// + /// Ok(()) + /// } + /// + /// let app = Router::new().route("/upload", post(upload)); + /// # let _: Router = app; + /// ``` + pub async fn chunk(&mut self) -> Result, MultipartError> { + self.inner + .chunk() + .await + .map_err(MultipartError::from_multer) + } +} + +/// Errors associated with parsing `multipart/form-data` requests. 
+#[derive(Debug)] +pub struct MultipartError { + source: multer::Error, +} + +impl MultipartError { + fn from_multer(multer: multer::Error) -> Self { + Self { source: multer } + } + + /// Get the response body text used for this rejection. + pub fn body_text(&self) -> String { + self.source.to_string() + } + + /// Get the status code used for this rejection. + pub fn status(&self) -> http::StatusCode { + status_code_from_multer_error(&self.source) + } +} + +fn status_code_from_multer_error(err: &multer::Error) -> StatusCode { + match err { + multer::Error::UnknownField { .. } + | multer::Error::IncompleteFieldData { .. } + | multer::Error::IncompleteHeaders + | multer::Error::ReadHeaderFailed(..) + | multer::Error::DecodeHeaderName { .. } + | multer::Error::DecodeContentType(..) + | multer::Error::NoBoundary + | multer::Error::DecodeHeaderValue { .. } + | multer::Error::NoMultipart + | multer::Error::IncompleteStream => StatusCode::BAD_REQUEST, + multer::Error::FieldSizeExceeded { .. } | multer::Error::StreamSizeExceeded { .. 
} => { + StatusCode::PAYLOAD_TOO_LARGE + } + multer::Error::StreamReadFailed(err) => { + if let Some(err) = err.downcast_ref::() { + return status_code_from_multer_error(err); + } + + if err + .downcast_ref::() + .and_then(|err| err.source()) + .and_then(|err| err.downcast_ref::()) + .is_some() + { + return StatusCode::PAYLOAD_TOO_LARGE; + } + + StatusCode::INTERNAL_SERVER_ERROR + } + _ => StatusCode::INTERNAL_SERVER_ERROR, + } +} + +impl fmt::Display for MultipartError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Error parsing `multipart/form-data` request") + } +} + +impl std::error::Error for MultipartError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.source) + } +} + +impl IntoResponse for MultipartError { + fn into_response(self) -> Response { + axum_core::__log_rejection!( + rejection_type = Self, + body_text = self.body_text(), + status = self.status(), + ); + (self.status(), self.body_text()).into_response() + } +} + +fn parse_boundary(headers: &HeaderMap) -> Option { + let content_type = headers.get(CONTENT_TYPE)?.to_str().ok()?; + multer::parse_boundary(content_type).ok() +} + +composite_rejection! { + /// Rejection used for [`Multipart`]. + /// + /// Contains one variant for each way the [`Multipart`] extractor can fail. + pub enum MultipartRejection { + InvalidBoundary, + } +} + +define_rejection! { + #[status = BAD_REQUEST] + #[body = "Invalid `boundary` for `multipart/form-data` request"] + /// Rejection type used if the `boundary` in a `multipart/form-data` is + /// missing or invalid. 
+ pub struct InvalidBoundary; +} + +#[cfg(test)] +mod tests { + use axum_core::extract::DefaultBodyLimit; + + use super::*; + use crate::{body::Body, response::IntoResponse, routing::post, test_helpers::*, Router}; + + #[crate::test] + async fn content_type_with_encoding() { + const BYTES: &[u8] = "🦀".as_bytes(); + const FILE_NAME: &str = "index.html"; + const CONTENT_TYPE: &str = "text/html; charset=utf-8"; + + async fn handle(mut multipart: Multipart) -> impl IntoResponse { + let field = multipart.next_field().await.unwrap().unwrap(); + + assert_eq!(field.file_name().unwrap(), FILE_NAME); + assert_eq!(field.content_type().unwrap(), CONTENT_TYPE); + assert_eq!(field.bytes().await.unwrap(), BYTES); + + assert!(multipart.next_field().await.unwrap().is_none()); + } + + let app = Router::new().route("/", post(handle)); + + let client = TestClient::new(app); + + let form = reqwest::multipart::Form::new().part( + "file", + reqwest::multipart::Part::bytes(BYTES) + .file_name(FILE_NAME) + .mime_str(CONTENT_TYPE) + .unwrap(), + ); + + client.post("/").multipart(form).send().await; + } + + // No need for this to be a #[test], we just want to make sure it compiles + fn _multipart_from_request_limited() { + async fn handler(_: Multipart) {} + let _app: Router<(), http_body::Limited> = Router::new().route("/", post(handler)); + } + + #[crate::test] + async fn body_too_large() { + const BYTES: &[u8] = "🦀".as_bytes(); + + async fn handle(mut multipart: Multipart) -> Result<(), MultipartError> { + while let Some(field) = multipart.next_field().await? 
{ + field.bytes().await?; + } + Ok(()) + } + + let app = Router::new() + .route("/", post(handle)) + .layer(DefaultBodyLimit::max(BYTES.len() - 1)); + + let client = TestClient::new(app); + + let form = + reqwest::multipart::Form::new().part("file", reqwest::multipart::Part::bytes(BYTES)); + + let res = client.post("/").multipart(form).send().await; + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/path/de.rs b/.cargo-vendor/axum-0.6.20/src/extract/path/de.rs new file mode 100644 index 0000000000..bbc0c85c9b --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/path/de.rs @@ -0,0 +1,939 @@ +use super::{ErrorKind, PathDeserializationError}; +use crate::util::PercentDecodedStr; +use serde::{ + de::{self, DeserializeSeed, EnumAccess, Error, MapAccess, SeqAccess, VariantAccess, Visitor}, + forward_to_deserialize_any, Deserializer, +}; +use std::{any::type_name, sync::Arc}; + +macro_rules! unsupported_type { + ($trait_fn:ident) => { + fn $trait_fn(self, _: V) -> Result + where + V: Visitor<'de>, + { + Err(PathDeserializationError::unsupported_type(type_name::< + V::Value, + >())) + } + }; +} + +macro_rules! 
parse_single_value { + ($trait_fn:ident, $visit_fn:ident, $ty:literal) => { + fn $trait_fn(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + if self.url_params.len() != 1 { + return Err(PathDeserializationError::wrong_number_of_parameters() + .got(self.url_params.len()) + .expected(1)); + } + + let value = self.url_params[0].1.parse().map_err(|_| { + PathDeserializationError::new(ErrorKind::ParseError { + value: self.url_params[0].1.as_str().to_owned(), + expected_type: $ty, + }) + })?; + visitor.$visit_fn(value) + } + }; +} + +pub(crate) struct PathDeserializer<'de> { + url_params: &'de [(Arc, PercentDecodedStr)], +} + +impl<'de> PathDeserializer<'de> { + #[inline] + pub(crate) fn new(url_params: &'de [(Arc, PercentDecodedStr)]) -> Self { + PathDeserializer { url_params } + } +} + +impl<'de> Deserializer<'de> for PathDeserializer<'de> { + type Error = PathDeserializationError; + + unsupported_type!(deserialize_bytes); + unsupported_type!(deserialize_option); + unsupported_type!(deserialize_identifier); + unsupported_type!(deserialize_ignored_any); + + parse_single_value!(deserialize_bool, visit_bool, "bool"); + parse_single_value!(deserialize_i8, visit_i8, "i8"); + parse_single_value!(deserialize_i16, visit_i16, "i16"); + parse_single_value!(deserialize_i32, visit_i32, "i32"); + parse_single_value!(deserialize_i64, visit_i64, "i64"); + parse_single_value!(deserialize_i128, visit_i128, "i128"); + parse_single_value!(deserialize_u8, visit_u8, "u8"); + parse_single_value!(deserialize_u16, visit_u16, "u16"); + parse_single_value!(deserialize_u32, visit_u32, "u32"); + parse_single_value!(deserialize_u64, visit_u64, "u64"); + parse_single_value!(deserialize_u128, visit_u128, "u128"); + parse_single_value!(deserialize_f32, visit_f32, "f32"); + parse_single_value!(deserialize_f64, visit_f64, "f64"); + parse_single_value!(deserialize_string, visit_string, "String"); + parse_single_value!(deserialize_byte_buf, visit_string, "String"); + 
parse_single_value!(deserialize_char, visit_char, "char"); + + fn deserialize_any(self, v: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_str(v) + } + + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + if self.url_params.len() != 1 { + return Err(PathDeserializationError::wrong_number_of_parameters() + .got(self.url_params.len()) + .expected(1)); + } + visitor.visit_borrowed_str(&self.url_params[0].1) + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + fn deserialize_unit_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + fn deserialize_newtype_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + visitor.visit_newtype_struct(self) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_seq(SeqDeserializer { + params: self.url_params, + idx: 0, + }) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + if self.url_params.len() < len { + return Err(PathDeserializationError::wrong_number_of_parameters() + .got(self.url_params.len()) + .expected(len)); + } + visitor.visit_seq(SeqDeserializer { + params: self.url_params, + idx: 0, + }) + } + + fn deserialize_tuple_struct( + self, + _name: &'static str, + len: usize, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + if self.url_params.len() < len { + return Err(PathDeserializationError::wrong_number_of_parameters() + .got(self.url_params.len()) + .expected(len)); + } + visitor.visit_seq(SeqDeserializer { + params: self.url_params, + idx: 0, + }) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_map(MapDeserializer { + params: self.url_params, + value: None, + key: None, + }) + } + + fn deserialize_struct( + self, + _name: &'static 
str, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_map(visitor) + } + + fn deserialize_enum( + self, + _name: &'static str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + if self.url_params.len() != 1 { + return Err(PathDeserializationError::wrong_number_of_parameters() + .got(self.url_params.len()) + .expected(1)); + } + + visitor.visit_enum(EnumDeserializer { + value: self.url_params[0].1.clone().into_inner(), + }) + } +} + +struct MapDeserializer<'de> { + params: &'de [(Arc, PercentDecodedStr)], + key: Option, + value: Option<&'de PercentDecodedStr>, +} + +impl<'de> MapAccess<'de> for MapDeserializer<'de> { + type Error = PathDeserializationError; + + fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> + where + K: DeserializeSeed<'de>, + { + match self.params.split_first() { + Some(((key, value), tail)) => { + self.value = Some(value); + self.params = tail; + self.key = Some(KeyOrIdx::Key(key.clone())); + seed.deserialize(KeyDeserializer { + key: Arc::clone(key), + }) + .map(Some) + } + None => Ok(None), + } + } + + fn next_value_seed(&mut self, seed: V) -> Result + where + V: DeserializeSeed<'de>, + { + match self.value.take() { + Some(value) => seed.deserialize(ValueDeserializer { + key: self.key.take(), + value, + }), + None => Err(PathDeserializationError::custom("value is missing")), + } + } +} + +struct KeyDeserializer { + key: Arc, +} + +macro_rules! 
parse_key { + ($trait_fn:ident) => { + fn $trait_fn(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_str(&self.key) + } + }; +} + +impl<'de> Deserializer<'de> for KeyDeserializer { + type Error = PathDeserializationError; + + parse_key!(deserialize_identifier); + parse_key!(deserialize_str); + parse_key!(deserialize_string); + + fn deserialize_any(self, _visitor: V) -> Result + where + V: Visitor<'de>, + { + Err(PathDeserializationError::custom("Unexpected key type")) + } + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char bytes + byte_buf option unit unit_struct seq tuple + tuple_struct map newtype_struct struct enum ignored_any + } +} + +macro_rules! parse_value { + ($trait_fn:ident, $visit_fn:ident, $ty:literal) => { + fn $trait_fn(mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let v = self.value.parse().map_err(|_| { + if let Some(key) = self.key.take() { + let kind = match key { + KeyOrIdx::Key(key) => ErrorKind::ParseErrorAtKey { + key: key.to_string(), + value: self.value.as_str().to_owned(), + expected_type: $ty, + }, + KeyOrIdx::Idx { idx: index, key: _ } => ErrorKind::ParseErrorAtIndex { + index, + value: self.value.as_str().to_owned(), + expected_type: $ty, + }, + }; + PathDeserializationError::new(kind) + } else { + PathDeserializationError::new(ErrorKind::ParseError { + value: self.value.as_str().to_owned(), + expected_type: $ty, + }) + } + })?; + visitor.$visit_fn(v) + } + }; +} + +#[derive(Debug)] +struct ValueDeserializer<'de> { + key: Option, + value: &'de PercentDecodedStr, +} + +impl<'de> Deserializer<'de> for ValueDeserializer<'de> { + type Error = PathDeserializationError; + + unsupported_type!(deserialize_map); + unsupported_type!(deserialize_identifier); + + parse_value!(deserialize_bool, visit_bool, "bool"); + parse_value!(deserialize_i8, visit_i8, "i8"); + parse_value!(deserialize_i16, visit_i16, "i16"); + parse_value!(deserialize_i32, visit_i32, "i32"); + 
parse_value!(deserialize_i64, visit_i64, "i64"); + parse_value!(deserialize_i128, visit_i128, "i128"); + parse_value!(deserialize_u8, visit_u8, "u8"); + parse_value!(deserialize_u16, visit_u16, "u16"); + parse_value!(deserialize_u32, visit_u32, "u32"); + parse_value!(deserialize_u64, visit_u64, "u64"); + parse_value!(deserialize_u128, visit_u128, "u128"); + parse_value!(deserialize_f32, visit_f32, "f32"); + parse_value!(deserialize_f64, visit_f64, "f64"); + parse_value!(deserialize_string, visit_string, "String"); + parse_value!(deserialize_byte_buf, visit_string, "String"); + parse_value!(deserialize_char, visit_char, "char"); + + fn deserialize_any(self, v: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_str(v) + } + + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_borrowed_str(self.value) + } + + fn deserialize_bytes(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_borrowed_bytes(self.value.as_bytes()) + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_some(self) + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + fn deserialize_unit_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + fn deserialize_newtype_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + visitor.visit_newtype_struct(self) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + struct PairDeserializer<'de> { + key: Option, + value: Option<&'de PercentDecodedStr>, + } + + impl<'de> SeqAccess<'de> for PairDeserializer<'de> { + type Error = PathDeserializationError; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + match self.key.take() { + Some(KeyOrIdx::Idx { 
idx: _, key }) => { + return seed.deserialize(KeyDeserializer { key }).map(Some); + } + // `KeyOrIdx::Key` is only used when deserializing maps so `deserialize_seq` + // wouldn't be called for that + Some(KeyOrIdx::Key(_)) => unreachable!(), + None => {} + }; + + self.value + .take() + .map(|value| seed.deserialize(ValueDeserializer { key: None, value })) + .transpose() + } + } + + if len == 2 { + match self.key { + Some(key) => visitor.visit_seq(PairDeserializer { + key: Some(key), + value: Some(self.value), + }), + // `self.key` is only `None` when deserializing maps so `deserialize_seq` + // wouldn't be called for that + None => unreachable!(), + } + } else { + Err(PathDeserializationError::unsupported_type(type_name::< + V::Value, + >())) + } + } + + fn deserialize_seq(self, _visitor: V) -> Result + where + V: Visitor<'de>, + { + Err(PathDeserializationError::unsupported_type(type_name::< + V::Value, + >())) + } + + fn deserialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + _visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + Err(PathDeserializationError::unsupported_type(type_name::< + V::Value, + >())) + } + + fn deserialize_struct( + self, + _name: &'static str, + _fields: &'static [&'static str], + _visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + Err(PathDeserializationError::unsupported_type(type_name::< + V::Value, + >())) + } + + fn deserialize_enum( + self, + _name: &'static str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + visitor.visit_enum(EnumDeserializer { + value: self.value.clone().into_inner(), + }) + } + + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } +} + +struct EnumDeserializer { + value: Arc, +} + +impl<'de> EnumAccess<'de> for EnumDeserializer { + type Error = PathDeserializationError; + type Variant = UnitVariant; + + fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), 
Self::Error> + where + V: de::DeserializeSeed<'de>, + { + Ok(( + seed.deserialize(KeyDeserializer { key: self.value })?, + UnitVariant, + )) + } +} + +struct UnitVariant; + +impl<'de> VariantAccess<'de> for UnitVariant { + type Error = PathDeserializationError; + + fn unit_variant(self) -> Result<(), Self::Error> { + Ok(()) + } + + fn newtype_variant_seed(self, _seed: T) -> Result + where + T: DeserializeSeed<'de>, + { + Err(PathDeserializationError::unsupported_type( + "newtype enum variant", + )) + } + + fn tuple_variant(self, _len: usize, _visitor: V) -> Result + where + V: Visitor<'de>, + { + Err(PathDeserializationError::unsupported_type( + "tuple enum variant", + )) + } + + fn struct_variant( + self, + _fields: &'static [&'static str], + _visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + Err(PathDeserializationError::unsupported_type( + "struct enum variant", + )) + } +} + +struct SeqDeserializer<'de> { + params: &'de [(Arc, PercentDecodedStr)], + idx: usize, +} + +impl<'de> SeqAccess<'de> for SeqDeserializer<'de> { + type Error = PathDeserializationError; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + match self.params.split_first() { + Some(((key, value), tail)) => { + self.params = tail; + let idx = self.idx; + self.idx += 1; + Ok(Some(seed.deserialize(ValueDeserializer { + key: Some(KeyOrIdx::Idx { + idx, + key: key.clone(), + }), + value, + })?)) + } + None => Ok(None), + } + } +} + +#[derive(Debug, Clone)] +enum KeyOrIdx { + Key(Arc), + Idx { idx: usize, key: Arc }, +} + +#[cfg(test)] +mod tests { + use super::*; + use serde::Deserialize; + use std::collections::HashMap; + + #[derive(Debug, Deserialize, Eq, PartialEq)] + enum MyEnum { + A, + B, + #[serde(rename = "c")] + C, + } + + #[derive(Debug, Deserialize, Eq, PartialEq)] + struct Struct { + c: String, + b: bool, + a: i32, + } + + fn create_url_params(values: I) -> Vec<(Arc, PercentDecodedStr)> + where + I: IntoIterator, + K: 
AsRef, + V: AsRef, + { + values + .into_iter() + .map(|(k, v)| (Arc::from(k.as_ref()), PercentDecodedStr::new(v).unwrap())) + .collect() + } + + macro_rules! check_single_value { + ($ty:ty, $value_str:literal, $value:expr) => { + #[allow(clippy::bool_assert_comparison)] + { + let url_params = create_url_params(vec![("value", $value_str)]); + let deserializer = PathDeserializer::new(&url_params); + assert_eq!(<$ty>::deserialize(deserializer).unwrap(), $value); + } + }; + } + + #[test] + fn test_parse_single_value() { + check_single_value!(bool, "true", true); + check_single_value!(bool, "false", false); + check_single_value!(i8, "-123", -123); + check_single_value!(i16, "-123", -123); + check_single_value!(i32, "-123", -123); + check_single_value!(i64, "-123", -123); + check_single_value!(i128, "123", 123); + check_single_value!(u8, "123", 123); + check_single_value!(u16, "123", 123); + check_single_value!(u32, "123", 123); + check_single_value!(u64, "123", 123); + check_single_value!(u128, "123", 123); + check_single_value!(f32, "123", 123.0); + check_single_value!(f64, "123", 123.0); + check_single_value!(String, "abc", "abc"); + check_single_value!(String, "one%20two", "one two"); + check_single_value!(&str, "abc", "abc"); + check_single_value!(&str, "one%20two", "one two"); + check_single_value!(char, "a", 'a'); + + let url_params = create_url_params(vec![("a", "B")]); + assert_eq!( + MyEnum::deserialize(PathDeserializer::new(&url_params)).unwrap(), + MyEnum::B + ); + + let url_params = create_url_params(vec![("a", "1"), ("b", "2")]); + let error_kind = i32::deserialize(PathDeserializer::new(&url_params)) + .unwrap_err() + .kind; + assert!(matches!( + error_kind, + ErrorKind::WrongNumberOfParameters { + expected: 1, + got: 2 + } + )); + } + + #[test] + fn test_parse_seq() { + let url_params = create_url_params(vec![("a", "1"), ("b", "true"), ("c", "abc")]); + assert_eq!( + <(i32, bool, String)>::deserialize(PathDeserializer::new(&url_params)).unwrap(), + (1, 
true, "abc".to_owned()) + ); + + #[derive(Debug, Deserialize, Eq, PartialEq)] + struct TupleStruct(i32, bool, String); + assert_eq!( + TupleStruct::deserialize(PathDeserializer::new(&url_params)).unwrap(), + TupleStruct(1, true, "abc".to_owned()) + ); + + let url_params = create_url_params(vec![("a", "1"), ("b", "2"), ("c", "3")]); + assert_eq!( + >::deserialize(PathDeserializer::new(&url_params)).unwrap(), + vec![1, 2, 3] + ); + + let url_params = create_url_params(vec![("a", "c"), ("a", "B")]); + assert_eq!( + >::deserialize(PathDeserializer::new(&url_params)).unwrap(), + vec![MyEnum::C, MyEnum::B] + ); + } + + #[test] + fn test_parse_seq_tuple_string_string() { + let url_params = create_url_params(vec![("a", "foo"), ("b", "bar")]); + assert_eq!( + >::deserialize(PathDeserializer::new(&url_params)).unwrap(), + vec![ + ("a".to_owned(), "foo".to_owned()), + ("b".to_owned(), "bar".to_owned()) + ] + ); + } + + #[test] + fn test_parse_seq_tuple_string_parse() { + let url_params = create_url_params(vec![("a", "1"), ("b", "2")]); + assert_eq!( + >::deserialize(PathDeserializer::new(&url_params)).unwrap(), + vec![("a".to_owned(), 1), ("b".to_owned(), 2)] + ); + } + + #[test] + fn test_parse_struct() { + let url_params = create_url_params(vec![("a", "1"), ("b", "true"), ("c", "abc")]); + assert_eq!( + Struct::deserialize(PathDeserializer::new(&url_params)).unwrap(), + Struct { + c: "abc".to_owned(), + b: true, + a: 1, + } + ); + } + + #[test] + fn test_parse_struct_ignoring_additional_fields() { + let url_params = create_url_params(vec![ + ("a", "1"), + ("b", "true"), + ("c", "abc"), + ("d", "false"), + ]); + assert_eq!( + Struct::deserialize(PathDeserializer::new(&url_params)).unwrap(), + Struct { + c: "abc".to_owned(), + b: true, + a: 1, + } + ); + } + + #[test] + fn test_parse_tuple_ignoring_additional_fields() { + let url_params = create_url_params(vec![ + ("a", "abc"), + ("b", "true"), + ("c", "1"), + ("d", "false"), + ]); + assert_eq!( + <(&str, bool, 
u32)>::deserialize(PathDeserializer::new(&url_params)).unwrap(), + ("abc", true, 1) + ); + } + + #[test] + fn test_parse_map() { + let url_params = create_url_params(vec![("a", "1"), ("b", "true"), ("c", "abc")]); + assert_eq!( + >::deserialize(PathDeserializer::new(&url_params)).unwrap(), + [("a", "1"), ("b", "true"), ("c", "abc")] + .iter() + .map(|(key, value)| ((*key).to_owned(), (*value).to_owned())) + .collect() + ); + } + + macro_rules! test_parse_error { + ( + $params:expr, + $ty:ty, + $expected_error_kind:expr $(,)? + ) => { + let url_params = create_url_params($params); + let actual_error_kind = <$ty>::deserialize(PathDeserializer::new(&url_params)) + .unwrap_err() + .kind; + assert_eq!(actual_error_kind, $expected_error_kind); + }; + } + + #[test] + fn test_wrong_number_of_parameters_error() { + test_parse_error!( + vec![("a", "1")], + (u32, u32), + ErrorKind::WrongNumberOfParameters { + got: 1, + expected: 2, + } + ); + } + + #[test] + fn test_parse_error_at_key_error() { + #[derive(Debug, Deserialize)] + #[allow(dead_code)] + struct Params { + a: u32, + } + test_parse_error!( + vec![("a", "false")], + Params, + ErrorKind::ParseErrorAtKey { + key: "a".to_owned(), + value: "false".to_owned(), + expected_type: "u32", + } + ); + } + + #[test] + fn test_parse_error_at_key_error_multiple() { + #[derive(Debug, Deserialize)] + #[allow(dead_code)] + struct Params { + a: u32, + b: u32, + } + test_parse_error!( + vec![("a", "false")], + Params, + ErrorKind::ParseErrorAtKey { + key: "a".to_owned(), + value: "false".to_owned(), + expected_type: "u32", + } + ); + } + + #[test] + fn test_parse_error_at_index_error() { + test_parse_error!( + vec![("a", "false"), ("b", "true")], + (bool, u32), + ErrorKind::ParseErrorAtIndex { + index: 1, + value: "true".to_owned(), + expected_type: "u32", + } + ); + } + + #[test] + fn test_parse_error_error() { + test_parse_error!( + vec![("a", "false")], + u32, + ErrorKind::ParseError { + value: "false".to_owned(), + expected_type: 
"u32", + } + ); + } + + #[test] + fn test_unsupported_type_error_nested_data_structure() { + test_parse_error!( + vec![("a", "false")], + Vec>, + ErrorKind::UnsupportedType { + name: "alloc::vec::Vec", + } + ); + } + + #[test] + fn test_parse_seq_tuple_unsupported_key_type() { + test_parse_error!( + vec![("a", "false")], + Vec<(u32, String)>, + ErrorKind::Message("Unexpected key type".to_owned()) + ); + } + + #[test] + fn test_parse_seq_wrong_tuple_length() { + test_parse_error!( + vec![("a", "false")], + Vec<(String, String, String)>, + ErrorKind::UnsupportedType { + name: "(alloc::string::String, alloc::string::String, alloc::string::String)", + } + ); + } + + #[test] + fn test_parse_seq_seq() { + test_parse_error!( + vec![("a", "false")], + Vec>, + ErrorKind::UnsupportedType { + name: "alloc::vec::Vec", + } + ); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/path/mod.rs b/.cargo-vendor/axum-0.6.20/src/extract/path/mod.rs new file mode 100644 index 0000000000..189e476e5c --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/path/mod.rs @@ -0,0 +1,845 @@ +//! Extractor that will get captures from the URL and parse them using +//! [`serde`]. + +mod de; + +use crate::{ + extract::{rejection::*, FromRequestParts}, + routing::url_params::UrlParams, + util::PercentDecodedStr, +}; +use async_trait::async_trait; +use axum_core::response::{IntoResponse, Response}; +use http::{request::Parts, StatusCode}; +use serde::de::DeserializeOwned; +use std::{fmt, sync::Arc}; + +/// Extractor that will get captures from the URL and parse them using +/// [`serde`]. +/// +/// Any percent encoded parameters will be automatically decoded. The decoded +/// parameters must be valid UTF-8, otherwise `Path` will fail and return a `400 +/// Bad Request` response. +/// +/// # Example +/// +/// These examples assume the `serde` feature of the [`uuid`] crate is enabled. 
+/// +/// [`uuid`]: https://crates.io/crates/uuid +/// +/// ```rust,no_run +/// use axum::{ +/// extract::Path, +/// routing::get, +/// Router, +/// }; +/// use uuid::Uuid; +/// +/// async fn users_teams_show( +/// Path((user_id, team_id)): Path<(Uuid, Uuid)>, +/// ) { +/// // ... +/// } +/// +/// let app = Router::new().route("/users/:user_id/team/:team_id", get(users_teams_show)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// If the path contains only one parameter, then you can omit the tuple. +/// +/// ```rust,no_run +/// use axum::{ +/// extract::Path, +/// routing::get, +/// Router, +/// }; +/// use uuid::Uuid; +/// +/// async fn user_info(Path(user_id): Path) { +/// // ... +/// } +/// +/// let app = Router::new().route("/users/:user_id", get(user_info)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// Path segments also can be deserialized into any type that implements +/// [`serde::Deserialize`]. This includes tuples and structs: +/// +/// ```rust,no_run +/// use axum::{ +/// extract::Path, +/// routing::get, +/// Router, +/// }; +/// use serde::Deserialize; +/// use uuid::Uuid; +/// +/// // Path segment labels will be matched with struct field names +/// #[derive(Deserialize)] +/// struct Params { +/// user_id: Uuid, +/// team_id: Uuid, +/// } +/// +/// async fn users_teams_show( +/// Path(Params { user_id, team_id }): Path, +/// ) { +/// // ... +/// } +/// +/// // When using tuples the path segments will be matched by their position in the route +/// async fn users_teams_create( +/// Path((user_id, team_id)): Path<(String, String)>, +/// ) { +/// // ... 
+/// } +/// +/// let app = Router::new().route( +/// "/users/:user_id/team/:team_id", +/// get(users_teams_show).post(users_teams_create), +/// ); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// If you wish to capture all path parameters you can use `HashMap` or `Vec`: +/// +/// ```rust,no_run +/// use axum::{ +/// extract::Path, +/// routing::get, +/// Router, +/// }; +/// use std::collections::HashMap; +/// +/// async fn params_map( +/// Path(params): Path>, +/// ) { +/// // ... +/// } +/// +/// async fn params_vec( +/// Path(params): Path>, +/// ) { +/// // ... +/// } +/// +/// let app = Router::new() +/// .route("/users/:user_id/team/:team_id", get(params_map).post(params_vec)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// # Providing detailed rejection output +/// +/// If the URI cannot be deserialized into the target type the request will be rejected and an +/// error response will be returned. See [`customize-path-rejection`] for an example of how to customize that error. 
+/// +/// [`serde`]: https://crates.io/crates/serde +/// [`serde::Deserialize`]: https://docs.rs/serde/1.0.127/serde/trait.Deserialize.html +/// [`customize-path-rejection`]: https://github.com/tokio-rs/axum/blob/main/examples/customize-path-rejection/src/main.rs +#[derive(Debug)] +pub struct Path(pub T); + +axum_core::__impl_deref!(Path); + +#[async_trait] +impl FromRequestParts for Path +where + T: DeserializeOwned + Send, + S: Send + Sync, +{ + type Rejection = PathRejection; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let params = match parts.extensions.get::() { + Some(UrlParams::Params(params)) => params, + Some(UrlParams::InvalidUtf8InPathParam { key }) => { + let err = PathDeserializationError { + kind: ErrorKind::InvalidUtf8InPathParam { + key: key.to_string(), + }, + }; + let err = FailedToDeserializePathParams(err); + return Err(err.into()); + } + None => { + return Err(MissingPathParams.into()); + } + }; + + T::deserialize(de::PathDeserializer::new(params)) + .map_err(|err| { + PathRejection::FailedToDeserializePathParams(FailedToDeserializePathParams(err)) + }) + .map(Path) + } +} + +// this wrapper type is used as the deserializer error to hide the `serde::de::Error` impl which +// would otherwise be public if we used `ErrorKind` as the error directly +#[derive(Debug)] +pub(crate) struct PathDeserializationError { + pub(super) kind: ErrorKind, +} + +impl PathDeserializationError { + pub(super) fn new(kind: ErrorKind) -> Self { + Self { kind } + } + + pub(super) fn wrong_number_of_parameters() -> WrongNumberOfParameters<()> { + WrongNumberOfParameters { got: () } + } + + #[track_caller] + pub(super) fn unsupported_type(name: &'static str) -> Self { + Self::new(ErrorKind::UnsupportedType { name }) + } +} + +pub(super) struct WrongNumberOfParameters { + got: G, +} + +impl WrongNumberOfParameters { + #[allow(clippy::unused_self)] + pub(super) fn got(self, got: G2) -> WrongNumberOfParameters { + WrongNumberOfParameters 
{ got } + } +} + +impl WrongNumberOfParameters { + pub(super) fn expected(self, expected: usize) -> PathDeserializationError { + PathDeserializationError::new(ErrorKind::WrongNumberOfParameters { + got: self.got, + expected, + }) + } +} + +impl serde::de::Error for PathDeserializationError { + #[inline] + fn custom(msg: T) -> Self + where + T: fmt::Display, + { + Self { + kind: ErrorKind::Message(msg.to_string()), + } + } +} + +impl fmt::Display for PathDeserializationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.kind.fmt(f) + } +} + +impl std::error::Error for PathDeserializationError {} + +/// The kinds of errors that can happen we deserializing into a [`Path`]. +/// +/// This type is obtained through [`FailedToDeserializePathParams::kind`] or +/// [`FailedToDeserializePathParams::into_kind`] and is useful for building +/// more precise error messages. +#[derive(Debug, PartialEq, Eq)] +#[non_exhaustive] +pub enum ErrorKind { + /// The URI contained the wrong number of parameters. + WrongNumberOfParameters { + /// The number of actual parameters in the URI. + got: usize, + /// The number of expected parameters. + expected: usize, + }, + + /// Failed to parse the value at a specific key into the expected type. + /// + /// This variant is used when deserializing into types that have named fields, such as structs. + ParseErrorAtKey { + /// The key at which the value was located. + key: String, + /// The value from the URI. + value: String, + /// The expected type of the value. + expected_type: &'static str, + }, + + /// Failed to parse the value at a specific index into the expected type. + /// + /// This variant is used when deserializing into sequence types, such as tuples. + ParseErrorAtIndex { + /// The index at which the value was located. + index: usize, + /// The value from the URI. + value: String, + /// The expected type of the value. + expected_type: &'static str, + }, + + /// Failed to parse a value into the expected type. 
+ /// + /// This variant is used when deserializing into a primitive type (such as `String` and `u32`). + ParseError { + /// The value from the URI. + value: String, + /// The expected type of the value. + expected_type: &'static str, + }, + + /// A parameter contained text that, once percent decoded, wasn't valid UTF-8. + InvalidUtf8InPathParam { + /// The key at which the invalid value was located. + key: String, + }, + + /// Tried to serialize into an unsupported type such as nested maps. + /// + /// This error kind is caused by programmer errors and thus gets converted into a `500 Internal + /// Server Error` response. + UnsupportedType { + /// The name of the unsupported type. + name: &'static str, + }, + + /// Catch-all variant for errors that don't fit any other variant. + Message(String), +} + +impl fmt::Display for ErrorKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ErrorKind::Message(error) => error.fmt(f), + ErrorKind::InvalidUtf8InPathParam { key } => write!(f, "Invalid UTF-8 in `{key}`"), + ErrorKind::WrongNumberOfParameters { got, expected } => { + write!( + f, + "Wrong number of path arguments for `Path`. Expected {expected} but got {got}" + )?; + + if *expected == 1 { + write!(f, ". 
Note that multiple parameters must be extracted with a tuple `Path<(_, _)>` or a struct `Path`")?; + } + + Ok(()) + } + ErrorKind::UnsupportedType { name } => write!(f, "Unsupported type `{name}`"), + ErrorKind::ParseErrorAtKey { + key, + value, + expected_type, + } => write!( + f, + "Cannot parse `{key}` with value `{value:?}` to a `{expected_type}`" + ), + ErrorKind::ParseError { + value, + expected_type, + } => write!(f, "Cannot parse `{value:?}` to a `{expected_type}`"), + ErrorKind::ParseErrorAtIndex { + index, + value, + expected_type, + } => write!( + f, + "Cannot parse value at index {index} with value `{value:?}` to a `{expected_type}`" + ), + } + } +} + +/// Rejection type for [`Path`](super::Path) if the captured routes params couldn't be deserialized +/// into the expected type. +#[derive(Debug)] +pub struct FailedToDeserializePathParams(PathDeserializationError); + +impl FailedToDeserializePathParams { + /// Get a reference to the underlying error kind. + pub fn kind(&self) -> &ErrorKind { + &self.0.kind + } + + /// Convert this error into the underlying error kind. + pub fn into_kind(self) -> ErrorKind { + self.0.kind + } + + /// Get the response body text used for this rejection. + pub fn body_text(&self) -> String { + match self.0.kind { + ErrorKind::Message(_) + | ErrorKind::InvalidUtf8InPathParam { .. } + | ErrorKind::ParseError { .. } + | ErrorKind::ParseErrorAtIndex { .. } + | ErrorKind::ParseErrorAtKey { .. } => format!("Invalid URL: {}", self.0.kind), + ErrorKind::WrongNumberOfParameters { .. } | ErrorKind::UnsupportedType { .. } => { + self.0.kind.to_string() + } + } + } + + /// Get the status code used for this rejection. + pub fn status(&self) -> StatusCode { + match self.0.kind { + ErrorKind::Message(_) + | ErrorKind::InvalidUtf8InPathParam { .. } + | ErrorKind::ParseError { .. } + | ErrorKind::ParseErrorAtIndex { .. } + | ErrorKind::ParseErrorAtKey { .. } => StatusCode::BAD_REQUEST, + ErrorKind::WrongNumberOfParameters { .. 
} | ErrorKind::UnsupportedType { .. } => { + StatusCode::INTERNAL_SERVER_ERROR + } + } + } +} + +impl IntoResponse for FailedToDeserializePathParams { + fn into_response(self) -> Response { + axum_core::__log_rejection!( + rejection_type = Self, + body_text = self.body_text(), + status = self.status(), + ); + (self.status(), self.body_text()).into_response() + } +} + +impl fmt::Display for FailedToDeserializePathParams { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl std::error::Error for FailedToDeserializePathParams {} + +/// Extractor that will get captures from the URL without deserializing them. +/// +/// In general you should prefer to use [`Path`] as it is higher level, however `RawPathParams` is +/// suitable if just want the raw params without deserializing them and thus saving some +/// allocations. +/// +/// Any percent encoded parameters will be automatically decoded. The decoded parameters must be +/// valid UTF-8, otherwise `RawPathParams` will fail and return a `400 Bad Request` response. 
+/// +/// # Example +/// +/// ```rust,no_run +/// use axum::{ +/// extract::RawPathParams, +/// routing::get, +/// Router, +/// }; +/// +/// async fn users_teams_show(params: RawPathParams) { +/// for (key, value) in ¶ms { +/// println!("{key:?} = {value:?}"); +/// } +/// } +/// +/// let app = Router::new().route("/users/:user_id/team/:team_id", get(users_teams_show)); +/// # let _: Router = app; +/// ``` +#[derive(Debug)] +pub struct RawPathParams(Vec<(Arc, PercentDecodedStr)>); + +#[async_trait] +impl FromRequestParts for RawPathParams +where + S: Send + Sync, +{ + type Rejection = RawPathParamsRejection; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let params = match parts.extensions.get::() { + Some(UrlParams::Params(params)) => params, + Some(UrlParams::InvalidUtf8InPathParam { key }) => { + return Err(InvalidUtf8InPathParam { + key: Arc::clone(key), + } + .into()); + } + None => { + return Err(MissingPathParams.into()); + } + }; + + Ok(Self(params.clone())) + } +} + +impl RawPathParams { + /// Get an iterator over the path parameters. + pub fn iter(&self) -> RawPathParamsIter<'_> { + self.into_iter() + } +} + +impl<'a> IntoIterator for &'a RawPathParams { + type Item = (&'a str, &'a str); + type IntoIter = RawPathParamsIter<'a>; + + fn into_iter(self) -> Self::IntoIter { + RawPathParamsIter(self.0.iter()) + } +} + +/// An iterator over raw path parameters. +/// +/// Created with [`RawPathParams::iter`]. +#[derive(Debug)] +pub struct RawPathParamsIter<'a>(std::slice::Iter<'a, (Arc, PercentDecodedStr)>); + +impl<'a> Iterator for RawPathParamsIter<'a> { + type Item = (&'a str, &'a str); + + fn next(&mut self) -> Option { + let (key, value) = self.0.next()?; + Some((&**key, value.as_str())) + } +} + +/// Rejection used by [`RawPathParams`] if a parameter contained text that, once percent decoded, +/// wasn't valid UTF-8. 
+#[derive(Debug)] +pub struct InvalidUtf8InPathParam { + key: Arc, +} + +impl InvalidUtf8InPathParam { + /// Get the response body text used for this rejection. + pub fn body_text(&self) -> String { + self.to_string() + } + + /// Get the status code used for this rejection. + pub fn status(&self) -> StatusCode { + StatusCode::BAD_REQUEST + } +} + +impl fmt::Display for InvalidUtf8InPathParam { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Invalid UTF-8 in `{}`", self.key) + } +} + +impl std::error::Error for InvalidUtf8InPathParam {} + +impl IntoResponse for InvalidUtf8InPathParam { + fn into_response(self) -> Response { + (self.status(), self.body_text()).into_response() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{routing::get, test_helpers::*, Router}; + use http::StatusCode; + use serde::Deserialize; + use std::collections::HashMap; + + #[crate::test] + async fn extracting_url_params() { + let app = Router::new().route( + "/users/:id", + get(|Path(id): Path| async move { + assert_eq!(id, 42); + }) + .post(|Path(params_map): Path>| async move { + assert_eq!(params_map.get("id").unwrap(), &1337); + }), + ); + + let client = TestClient::new(app); + + let res = client.get("/users/42").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.post("/users/1337").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn extracting_url_params_multiple_times() { + let app = Router::new().route("/users/:id", get(|_: Path, _: Path| async {})); + + let client = TestClient::new(app); + + let res = client.get("/users/42").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn percent_decoding() { + let app = Router::new().route( + "/:key", + get(|Path(param): Path| async move { param }), + ); + + let client = TestClient::new(app); + + let res = client.get("/one%20two").send().await; + + assert_eq!(res.text().await, "one two"); + } + + 
#[crate::test] + async fn supports_128_bit_numbers() { + let app = Router::new() + .route( + "/i/:key", + get(|Path(param): Path| async move { param.to_string() }), + ) + .route( + "/u/:key", + get(|Path(param): Path| async move { param.to_string() }), + ); + + let client = TestClient::new(app); + + let res = client.get("/i/123").send().await; + assert_eq!(res.text().await, "123"); + + let res = client.get("/u/123").send().await; + assert_eq!(res.text().await, "123"); + } + + #[crate::test] + async fn wildcard() { + let app = Router::new() + .route( + "/foo/*rest", + get(|Path(param): Path| async move { param }), + ) + .route( + "/bar/*rest", + get(|Path(params): Path>| async move { + params.get("rest").unwrap().clone() + }), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.text().await, "bar/baz"); + + let res = client.get("/bar/baz/qux").send().await; + assert_eq!(res.text().await, "baz/qux"); + } + + #[crate::test] + async fn captures_dont_match_empty_segments() { + let app = Router::new().route("/:key", get(|| async {})); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn str_reference_deserialize() { + struct Param(String); + impl<'de> serde::Deserialize<'de> for Param { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let s = <&str as serde::Deserialize>::deserialize(deserializer)?; + Ok(Param(s.to_owned())) + } + } + + let app = Router::new().route("/:key", get(|param: Path| async move { param.0 .0 })); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.text().await, "foo"); + + // percent decoding should also work + let res = client.get("/foo%20bar").send().await; + assert_eq!(res.text().await, 
"foo bar"); + } + + #[crate::test] + async fn two_path_extractors() { + let app = Router::new().route("/:a/:b", get(|_: Path, _: Path| async {})); + + let client = TestClient::new(app); + + let res = client.get("/a/b").send().await; + assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); + assert_eq!( + res.text().await, + "Wrong number of path arguments for `Path`. Expected 1 but got 2. \ + Note that multiple parameters must be extracted with a tuple `Path<(_, _)>` or a struct `Path`", + ); + } + + #[crate::test] + async fn deserialize_into_vec_of_tuples() { + let app = Router::new().route( + "/:a/:b", + get(|Path(params): Path>| async move { + assert_eq!( + params, + vec![ + ("a".to_owned(), "foo".to_owned()), + ("b".to_owned(), "bar".to_owned()) + ] + ); + }), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn type_that_uses_deserialize_any() { + use time::Date; + + #[derive(Deserialize)] + struct Params { + a: Date, + b: Date, + c: Date, + } + + let app = Router::new() + .route( + "/single/:a", + get(|Path(a): Path| async move { format!("single: {a}") }), + ) + .route( + "/tuple/:a/:b/:c", + get(|Path((a, b, c)): Path<(Date, Date, Date)>| async move { + format!("tuple: {a} {b} {c}") + }), + ) + .route( + "/vec/:a/:b/:c", + get(|Path(vec): Path>| async move { + let [a, b, c]: [Date; 3] = vec.try_into().unwrap(); + format!("vec: {a} {b} {c}") + }), + ) + .route( + "/vec_pairs/:a/:b/:c", + get(|Path(vec): Path>| async move { + let [(_, a), (_, b), (_, c)]: [(String, Date); 3] = vec.try_into().unwrap(); + format!("vec_pairs: {a} {b} {c}") + }), + ) + .route( + "/map/:a/:b/:c", + get(|Path(mut map): Path>| async move { + let a = map.remove("a").unwrap(); + let b = map.remove("b").unwrap(); + let c = map.remove("c").unwrap(); + format!("map: {a} {b} {c}") + }), + ) + .route( + "/struct/:a/:b/:c", + get(|Path(params): Path| async move { + 
format!("struct: {} {} {}", params.a, params.b, params.c) + }), + ); + + let client = TestClient::new(app); + + let res = client.get("/single/2023-01-01").send().await; + assert_eq!(res.text().await, "single: 2023-01-01"); + + let res = client + .get("/tuple/2023-01-01/2023-01-02/2023-01-03") + .send() + .await; + assert_eq!(res.text().await, "tuple: 2023-01-01 2023-01-02 2023-01-03"); + + let res = client + .get("/vec/2023-01-01/2023-01-02/2023-01-03") + .send() + .await; + assert_eq!(res.text().await, "vec: 2023-01-01 2023-01-02 2023-01-03"); + + let res = client + .get("/vec_pairs/2023-01-01/2023-01-02/2023-01-03") + .send() + .await; + assert_eq!( + res.text().await, + "vec_pairs: 2023-01-01 2023-01-02 2023-01-03", + ); + + let res = client + .get("/map/2023-01-01/2023-01-02/2023-01-03") + .send() + .await; + assert_eq!(res.text().await, "map: 2023-01-01 2023-01-02 2023-01-03"); + + let res = client + .get("/struct/2023-01-01/2023-01-02/2023-01-03") + .send() + .await; + assert_eq!(res.text().await, "struct: 2023-01-01 2023-01-02 2023-01-03"); + } + + #[crate::test] + async fn wrong_number_of_parameters_json() { + use serde_json::Value; + + let app = Router::new() + .route("/one/:a", get(|_: Path<(Value, Value)>| async {})) + .route("/two/:a/:b", get(|_: Path| async {})); + + let client = TestClient::new(app); + + let res = client.get("/one/1").send().await; + assert!(res + .text() + .await + .starts_with("Wrong number of path arguments for `Path`. Expected 2 but got 1")); + + let res = client.get("/two/1/2").send().await; + assert!(res + .text() + .await + .starts_with("Wrong number of path arguments for `Path`. 
Expected 1 but got 2")); + } + + #[crate::test] + async fn raw_path_params() { + let app = Router::new().route( + "/:a/:b/:c", + get(|params: RawPathParams| async move { + params + .into_iter() + .map(|(key, value)| format!("{key}={value}")) + .collect::>() + .join(" ") + }), + ); + + let client = TestClient::new(app); + let res = client.get("/foo/bar/baz").send().await; + let body = res.text().await; + assert_eq!(body, "a=foo b=bar c=baz"); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/query.rs b/.cargo-vendor/axum-0.6.20/src/extract/query.rs new file mode 100644 index 0000000000..6f8cb89dc1 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/query.rs @@ -0,0 +1,195 @@ +use super::{rejection::*, FromRequestParts}; +use async_trait::async_trait; +use http::{request::Parts, Uri}; +use serde::de::DeserializeOwned; + +/// Extractor that deserializes query strings into some type. +/// +/// `T` is expected to implement [`serde::Deserialize`]. +/// +/// # Example +/// +/// ```rust,no_run +/// use axum::{ +/// extract::Query, +/// routing::get, +/// Router, +/// }; +/// use serde::Deserialize; +/// +/// #[derive(Deserialize)] +/// struct Pagination { +/// page: usize, +/// per_page: usize, +/// } +/// +/// // This will parse query strings like `?page=2&per_page=30` into `Pagination` +/// // structs. +/// async fn list_things(pagination: Query) { +/// let pagination: Pagination = pagination.0; +/// +/// // ... +/// } +/// +/// let app = Router::new().route("/list_things", get(list_things)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// If the query string cannot be parsed it will reject the request with a `400 +/// Bad Request` response. +/// +/// For handling values being empty vs missing see the [query-params-with-empty-strings][example] +/// example. 
+/// +/// [example]: https://github.com/tokio-rs/axum/blob/main/examples/query-params-with-empty-strings/src/main.rs +#[cfg_attr(docsrs, doc(cfg(feature = "query")))] +#[derive(Debug, Clone, Copy, Default)] +pub struct Query(pub T); + +#[async_trait] +impl FromRequestParts for Query +where + T: DeserializeOwned, + S: Send + Sync, +{ + type Rejection = QueryRejection; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + Self::try_from_uri(&parts.uri) + } +} + +impl Query +where + T: DeserializeOwned, +{ + /// Attempts to construct a [`Query`] from a reference to a [`Uri`]. + /// + /// # Example + /// ``` + /// use axum::extract::Query; + /// use http::Uri; + /// use serde::Deserialize; + /// + /// #[derive(Deserialize)] + /// struct ExampleParams { + /// foo: String, + /// bar: u32, + /// } + /// + /// let uri: Uri = "http://example.com/path?foo=hello&bar=42".parse().unwrap(); + /// let result: Query = Query::try_from_uri(&uri).unwrap(); + /// assert_eq!(result.foo, String::from("hello")); + /// assert_eq!(result.bar, 42); + /// ``` + pub fn try_from_uri(value: &Uri) -> Result { + let query = value.query().unwrap_or_default(); + let params = + serde_urlencoded::from_str(query).map_err(FailedToDeserializeQueryString::from_err)?; + Ok(Query(params)) + } +} + +axum_core::__impl_deref!(Query); + +#[cfg(test)] +mod tests { + use crate::{routing::get, test_helpers::TestClient, Router}; + + use super::*; + use axum_core::extract::FromRequest; + use http::{Request, StatusCode}; + use serde::Deserialize; + use std::fmt::Debug; + + async fn check(uri: impl AsRef, value: T) + where + T: DeserializeOwned + PartialEq + Debug, + { + let req = Request::builder().uri(uri.as_ref()).body(()).unwrap(); + assert_eq!(Query::::from_request(req, &()).await.unwrap().0, value); + } + + #[crate::test] + async fn test_query() { + #[derive(Debug, PartialEq, Deserialize)] + struct Pagination { + size: Option, + page: Option, + } + + check( + 
"http://example.com/test", + Pagination { + size: None, + page: None, + }, + ) + .await; + + check( + "http://example.com/test?size=10", + Pagination { + size: Some(10), + page: None, + }, + ) + .await; + + check( + "http://example.com/test?size=10&page=20", + Pagination { + size: Some(10), + page: Some(20), + }, + ) + .await; + } + + #[crate::test] + async fn correct_rejection_status_code() { + #[derive(Deserialize)] + #[allow(dead_code)] + struct Params { + n: i32, + } + + async fn handler(_: Query) {} + + let app = Router::new().route("/", get(handler)); + let client = TestClient::new(app); + + let res = client.get("/?n=hi").send().await; + assert_eq!(res.status(), StatusCode::BAD_REQUEST); + } + + #[test] + fn test_try_from_uri() { + #[derive(Deserialize)] + struct TestQueryParams { + foo: String, + bar: u32, + } + let uri: Uri = "http://example.com/path?foo=hello&bar=42".parse().unwrap(); + let result: Query = Query::try_from_uri(&uri).unwrap(); + assert_eq!(result.foo, String::from("hello")); + assert_eq!(result.bar, 42); + } + + #[test] + fn test_try_from_uri_with_invalid_query() { + #[derive(Deserialize)] + struct TestQueryParams { + _foo: String, + _bar: u32, + } + let uri: Uri = "http://example.com/path?foo=hello&bar=invalid" + .parse() + .unwrap(); + let result: Result, _> = Query::try_from_uri(&uri); + + assert!(result.is_err()); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/raw_form.rs b/.cargo-vendor/axum-0.6.20/src/extract/raw_form.rs new file mode 100644 index 0000000000..830d8b62ae --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/raw_form.rs @@ -0,0 +1,120 @@ +use async_trait::async_trait; +use axum_core::extract::FromRequest; +use bytes::{Bytes, BytesMut}; +use http::{Method, Request}; + +use super::{ + has_content_type, + rejection::{InvalidFormContentType, RawFormRejection}, +}; + +use crate::{body::HttpBody, BoxError}; + +/// Extractor that extracts raw form requests. 
+/// +/// For `GET` requests it will extract the raw query. For other methods it extracts the raw +/// `application/x-www-form-urlencoded` encoded request body. +/// +/// # Example +/// +/// ```rust,no_run +/// use axum::{ +/// extract::RawForm, +/// routing::get, +/// Router +/// }; +/// +/// async fn handler(RawForm(form): RawForm) {} +/// +/// let app = Router::new().route("/", get(handler)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +#[derive(Debug)] +pub struct RawForm(pub Bytes); + +#[async_trait] +impl FromRequest for RawForm +where + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into, + S: Send + Sync, +{ + type Rejection = RawFormRejection; + + async fn from_request(req: Request, state: &S) -> Result { + if req.method() == Method::GET { + let mut bytes = BytesMut::new(); + + if let Some(query) = req.uri().query() { + bytes.extend(query.as_bytes()); + } + + Ok(Self(bytes.freeze())) + } else { + if !has_content_type(req.headers(), &mime::APPLICATION_WWW_FORM_URLENCODED) { + return Err(InvalidFormContentType.into()); + } + + Ok(Self(Bytes::from_request(req, state).await?)) + } + } +} + +#[cfg(test)] +mod tests { + use http::{header::CONTENT_TYPE, Request}; + + use super::{InvalidFormContentType, RawForm, RawFormRejection}; + + use crate::{ + body::{Bytes, Empty, Full}, + extract::FromRequest, + }; + + async fn check_query(uri: &str, value: &[u8]) { + let req = Request::builder() + .uri(uri) + .body(Empty::::new()) + .unwrap(); + + assert_eq!(RawForm::from_request(req, &()).await.unwrap().0, value); + } + + async fn check_body(body: &'static [u8]) { + let req = Request::post("http://example.com/test") + .header(CONTENT_TYPE, mime::APPLICATION_WWW_FORM_URLENCODED.as_ref()) + .body(Full::new(Bytes::from(body))) + .unwrap(); + + assert_eq!(RawForm::from_request(req, &()).await.unwrap().0, body); + } + + #[crate::test] + async fn test_from_query() { + 
check_query("http://example.com/test", b"").await; + + check_query("http://example.com/test?page=0&size=10", b"page=0&size=10").await; + } + + #[crate::test] + async fn test_from_body() { + check_body(b"").await; + + check_body(b"username=user&password=secure%20password").await; + } + + #[crate::test] + async fn test_incorrect_content_type() { + let req = Request::post("http://example.com/test") + .body(Full::::from(Bytes::from("page=0&size=10"))) + .unwrap(); + + assert!(matches!( + RawForm::from_request(req, &()).await.unwrap_err(), + RawFormRejection::InvalidFormContentType(InvalidFormContentType) + )) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/raw_query.rs b/.cargo-vendor/axum-0.6.20/src/extract/raw_query.rs new file mode 100644 index 0000000000..98a60b0930 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/raw_query.rs @@ -0,0 +1,41 @@ +use super::FromRequestParts; +use async_trait::async_trait; +use http::request::Parts; +use std::convert::Infallible; + +/// Extractor that extracts the raw query string, without parsing it. +/// +/// # Example +/// +/// ```rust,no_run +/// use axum::{ +/// extract::RawQuery, +/// routing::get, +/// Router, +/// }; +/// use futures_util::StreamExt; +/// +/// async fn handler(RawQuery(query): RawQuery) { +/// // ... 
+/// } +/// +/// let app = Router::new().route("/users", get(handler)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +#[derive(Debug)] +pub struct RawQuery(pub Option); + +#[async_trait] +impl FromRequestParts for RawQuery +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let query = parts.uri.query().map(|query| query.to_owned()); + Ok(Self(query)) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/rejection.rs b/.cargo-vendor/axum-0.6.20/src/extract/rejection.rs new file mode 100644 index 0000000000..07c322e94d --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/rejection.rs @@ -0,0 +1,212 @@ +//! Rejection response types. + +use axum_core::__composite_rejection as composite_rejection; +use axum_core::__define_rejection as define_rejection; + +pub use crate::extract::path::{FailedToDeserializePathParams, InvalidUtf8InPathParam}; +pub use axum_core::extract::rejection::*; + +#[cfg(feature = "json")] +define_rejection! { + #[status = UNPROCESSABLE_ENTITY] + #[body = "Failed to deserialize the JSON body into the target type"] + #[cfg_attr(docsrs, doc(cfg(feature = "json")))] + /// Rejection type for [`Json`](super::Json). + /// + /// This rejection is used if the request body is syntactically valid JSON but couldn't be + /// deserialized into the target type. + pub struct JsonDataError(Error); +} + +#[cfg(feature = "json")] +define_rejection! { + #[status = BAD_REQUEST] + #[body = "Failed to parse the request body as JSON"] + #[cfg_attr(docsrs, doc(cfg(feature = "json")))] + /// Rejection type for [`Json`](super::Json). + /// + /// This rejection is used if the request body didn't contain syntactically valid JSON. + pub struct JsonSyntaxError(Error); +} + +#[cfg(feature = "json")] +define_rejection! 
{ + #[status = UNSUPPORTED_MEDIA_TYPE] + #[body = "Expected request with `Content-Type: application/json`"] + #[cfg_attr(docsrs, doc(cfg(feature = "json")))] + /// Rejection type for [`Json`](super::Json) used if the `Content-Type` + /// header is missing. + pub struct MissingJsonContentType; +} + +define_rejection! { + #[status = INTERNAL_SERVER_ERROR] + #[body = "Missing request extension"] + /// Rejection type for [`Extension`](super::Extension) if an expected + /// request extension was not found. + pub struct MissingExtension(Error); +} + +define_rejection! { + #[status = INTERNAL_SERVER_ERROR] + #[body = "No paths parameters found for matched route"] + /// Rejection type used if axum's internal representation of path parameters + /// is missing. This is commonly caused by extracting `Request<_>`. `Path` + /// must be extracted first. + pub struct MissingPathParams; +} + +define_rejection! { + #[status = UNSUPPORTED_MEDIA_TYPE] + #[body = "Form requests must have `Content-Type: application/x-www-form-urlencoded`"] + /// Rejection type for [`Form`](super::Form) or [`RawForm`](super::RawForm) + /// used if the `Content-Type` header is missing + /// or its value is not `application/x-www-form-urlencoded`. + pub struct InvalidFormContentType; +} + +define_rejection! { + #[status = BAD_REQUEST] + #[body = "No host found in request"] + /// Rejection type used if the [`Host`](super::Host) extractor is unable to + /// resolve a host. + pub struct FailedToResolveHost; +} + +define_rejection! { + #[status = BAD_REQUEST] + #[body = "Failed to deserialize form"] + /// Rejection type used if the [`Form`](super::Form) extractor is unable to + /// deserialize the form into the target type. + pub struct FailedToDeserializeForm(Error); +} + +define_rejection! { + #[status = UNPROCESSABLE_ENTITY] + #[body = "Failed to deserialize form body"] + /// Rejection type used if the [`Form`](super::Form) extractor is unable to + /// deserialize the form body into the target type. 
+ pub struct FailedToDeserializeFormBody(Error); +} + +define_rejection! { + #[status = BAD_REQUEST] + #[body = "Failed to deserialize query string"] + /// Rejection type used if the [`Query`](super::Query) extractor is unable to + /// deserialize the query string into the target type. + pub struct FailedToDeserializeQueryString(Error); +} + +composite_rejection! { + /// Rejection used for [`Query`](super::Query). + /// + /// Contains one variant for each way the [`Query`](super::Query) extractor + /// can fail. + pub enum QueryRejection { + FailedToDeserializeQueryString, + } +} + +composite_rejection! { + /// Rejection used for [`Form`](super::Form). + /// + /// Contains one variant for each way the [`Form`](super::Form) extractor + /// can fail. + pub enum FormRejection { + InvalidFormContentType, + FailedToDeserializeForm, + FailedToDeserializeFormBody, + BytesRejection, + } +} + +composite_rejection! { + /// Rejection used for [`RawForm`](super::RawForm). + /// + /// Contains one variant for each way the [`RawForm`](super::RawForm) extractor + /// can fail. + pub enum RawFormRejection { + InvalidFormContentType, + BytesRejection, + } +} + +#[cfg(feature = "json")] +composite_rejection! { + /// Rejection used for [`Json`](super::Json). + /// + /// Contains one variant for each way the [`Json`](super::Json) extractor + /// can fail. + #[cfg_attr(docsrs, doc(cfg(feature = "json")))] + pub enum JsonRejection { + JsonDataError, + JsonSyntaxError, + MissingJsonContentType, + BytesRejection, + } +} + +composite_rejection! { + /// Rejection used for [`Extension`](super::Extension). + /// + /// Contains one variant for each way the [`Extension`](super::Extension) extractor + /// can fail. + pub enum ExtensionRejection { + MissingExtension, + } +} + +composite_rejection! { + /// Rejection used for [`Path`](super::Path). + /// + /// Contains one variant for each way the [`Path`](super::Path) extractor + /// can fail. 
+ pub enum PathRejection { + FailedToDeserializePathParams, + MissingPathParams, + } +} + +composite_rejection! { + /// Rejection used for [`RawPathParams`](super::RawPathParams). + /// + /// Contains one variant for each way the [`RawPathParams`](super::RawPathParams) extractor + /// can fail. + pub enum RawPathParamsRejection { + InvalidUtf8InPathParam, + MissingPathParams, + } +} + +composite_rejection! { + /// Rejection used for [`Host`](super::Host). + /// + /// Contains one variant for each way the [`Host`](super::Host) extractor + /// can fail. + pub enum HostRejection { + FailedToResolveHost, + } +} + +#[cfg(feature = "matched-path")] +define_rejection! { + #[status = INTERNAL_SERVER_ERROR] + #[body = "No matched path found"] + /// Rejection if no matched path could be found. + /// + /// See [`MatchedPath`](super::MatchedPath) for more details. + #[cfg_attr(docsrs, doc(cfg(feature = "matched-path")))] + pub struct MatchedPathMissing; +} + +#[cfg(feature = "matched-path")] +composite_rejection! { + /// Rejection used for [`MatchedPath`](super::MatchedPath). 
+ #[cfg_attr(docsrs, doc(cfg(feature = "matched-path")))] + pub enum MatchedPathRejection { + MatchedPathMissing, + } +} + +#[cfg(feature = "headers")] +pub use crate::typed_header::{TypedHeaderRejection, TypedHeaderRejectionReason}; diff --git a/.cargo-vendor/axum-0.6.20/src/extract/request_parts.rs b/.cargo-vendor/axum-0.6.20/src/extract/request_parts.rs new file mode 100644 index 0000000000..9af618fa20 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/request_parts.rs @@ -0,0 +1,252 @@ +use super::{Extension, FromRequest, FromRequestParts}; +use crate::{ + body::{Body, Bytes, HttpBody}, + BoxError, Error, +}; +use async_trait::async_trait; +use futures_util::stream::Stream; +use http::{request::Parts, Request, Uri}; +use std::{ + convert::Infallible, + fmt, + pin::Pin, + task::{Context, Poll}, +}; +use sync_wrapper::SyncWrapper; + +/// Extractor that gets the original request URI regardless of nesting. +/// +/// This is necessary since [`Uri`](http::Uri), when used as an extractor, will +/// have the prefix stripped if used in a nested service. +/// +/// # Example +/// +/// ``` +/// use axum::{ +/// routing::get, +/// Router, +/// extract::OriginalUri, +/// http::Uri +/// }; +/// +/// let api_routes = Router::new() +/// .route( +/// "/users", +/// get(|uri: Uri, OriginalUri(original_uri): OriginalUri| async { +/// // `uri` is `/users` +/// // `original_uri` is `/api/users` +/// }), +/// ); +/// +/// let app = Router::new().nest("/api", api_routes); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// # Extracting via request extensions +/// +/// `OriginalUri` can also be accessed from middleware via request extensions. 
+/// This is useful for example with [`Trace`](tower_http::trace::Trace) to +/// create a span that contains the full path, if your service might be nested: +/// +/// ``` +/// use axum::{ +/// Router, +/// extract::OriginalUri, +/// http::Request, +/// routing::get, +/// }; +/// use tower_http::trace::TraceLayer; +/// +/// let api_routes = Router::new() +/// .route("/users/:id", get(|| async { /* ... */ })) +/// .layer( +/// TraceLayer::new_for_http().make_span_with(|req: &Request<_>| { +/// let path = if let Some(path) = req.extensions().get::() { +/// // This will include `/api` +/// path.0.path().to_owned() +/// } else { +/// // The `OriginalUri` extension will always be present if using +/// // `Router` unless another extractor or middleware has removed it +/// req.uri().path().to_owned() +/// }; +/// tracing::info_span!("http-request", %path) +/// }), +/// ); +/// +/// let app = Router::new().nest("/api", api_routes); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +#[cfg(feature = "original-uri")] +#[derive(Debug, Clone)] +pub struct OriginalUri(pub Uri); + +#[cfg(feature = "original-uri")] +#[async_trait] +impl FromRequestParts for OriginalUri +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + let uri = Extension::::from_request_parts(parts, state) + .await + .unwrap_or_else(|_| Extension(OriginalUri(parts.uri.clone()))) + .0; + Ok(uri) + } +} + +#[cfg(feature = "original-uri")] +axum_core::__impl_deref!(OriginalUri: Uri); + +/// Extractor that extracts the request body as a [`Stream`]. +/// +/// Since extracting the request body requires consuming it, the `BodyStream` extractor must be +/// *last* if there are multiple extractors in a handler. 
+/// See ["the order of extractors"][order-of-extractors] +/// +/// [order-of-extractors]: crate::extract#the-order-of-extractors +/// +/// # Example +/// +/// ```rust,no_run +/// use axum::{ +/// extract::BodyStream, +/// routing::get, +/// Router, +/// }; +/// use futures_util::StreamExt; +/// +/// async fn handler(mut stream: BodyStream) { +/// while let Some(chunk) = stream.next().await { +/// // ... +/// } +/// } +/// +/// let app = Router::new().route("/users", get(handler)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html +/// [`body::Body`]: crate::body::Body +pub struct BodyStream( + SyncWrapper + Send + 'static>>>, +); + +impl Stream for BodyStream { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(self.0.get_mut()).poll_data(cx) + } +} + +#[async_trait] +impl FromRequest for BodyStream +where + B: HttpBody + Send + 'static, + B::Data: Into, + B::Error: Into, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request(req: Request, _state: &S) -> Result { + let body = req + .into_body() + .map_data(Into::into) + .map_err(|err| Error::new(err.into())); + let stream = BodyStream(SyncWrapper::new(Box::pin(body))); + Ok(stream) + } +} + +impl fmt::Debug for BodyStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("BodyStream").finish() + } +} + +#[test] +fn body_stream_traits() { + crate::test_helpers::assert_send::(); + crate::test_helpers::assert_sync::(); +} + +/// Extractor that extracts the raw request body. +/// +/// Since extracting the raw request body requires consuming it, the `RawBody` extractor must be +/// *last* if there are multiple extractors in a handler. 
See ["the order of extractors"][order-of-extractors] +/// +/// [order-of-extractors]: crate::extract#the-order-of-extractors +/// +/// # Example +/// +/// ```rust,no_run +/// use axum::{ +/// extract::RawBody, +/// routing::get, +/// Router, +/// }; +/// use futures_util::StreamExt; +/// +/// async fn handler(RawBody(body): RawBody) { +/// // ... +/// } +/// +/// let app = Router::new().route("/users", get(handler)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// [`body::Body`]: crate::body::Body +#[derive(Debug, Default, Clone)] +pub struct RawBody(pub B); + +#[async_trait] +impl FromRequest for RawBody +where + B: Send, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request(req: Request, _state: &S) -> Result { + Ok(Self(req.into_body())) + } +} + +axum_core::__impl_deref!(RawBody); + +#[cfg(test)] +mod tests { + use crate::{extract::Extension, routing::get, test_helpers::*, Router}; + use http::{Method, StatusCode}; + + #[crate::test] + async fn extract_request_parts() { + #[derive(Clone)] + struct Ext; + + async fn handler(parts: http::request::Parts) { + assert_eq!(parts.method, Method::GET); + assert_eq!(parts.uri, "/"); + assert_eq!(parts.version, http::Version::HTTP_11); + assert_eq!(parts.headers["x-foo"], "123"); + parts.extensions.get::().unwrap(); + } + + let client = TestClient::new(Router::new().route("/", get(handler)).layer(Extension(Ext))); + + let res = client.get("/").header("x-foo", "123").send().await; + assert_eq!(res.status(), StatusCode::OK); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/state.rs b/.cargo-vendor/axum-0.6.20/src/extract/state.rs new file mode 100644 index 0000000000..e2307d391c --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/state.rs @@ -0,0 +1,378 @@ +use async_trait::async_trait; +use axum_core::extract::{FromRef, FromRequestParts}; +use http::request::Parts; +use std::{ + 
convert::Infallible, + ops::{Deref, DerefMut}, +}; + +/// Extractor for state. +/// +/// See ["Accessing state in middleware"][state-from-middleware] for how to +/// access state in middleware. +/// +/// [state-from-middleware]: crate::middleware#accessing-state-in-middleware +/// +/// # With `Router` +/// +/// ``` +/// use axum::{Router, routing::get, extract::State}; +/// +/// // the application state +/// // +/// // here you can put configuration, database connection pools, or whatever +/// // state you need +/// // +/// // see "When states need to implement `Clone`" for more details on why we need +/// // `#[derive(Clone)]` here. +/// #[derive(Clone)] +/// struct AppState {} +/// +/// let state = AppState {}; +/// +/// // create a `Router` that holds our state +/// let app = Router::new() +/// .route("/", get(handler)) +/// // provide the state so the router can access it +/// .with_state(state); +/// +/// async fn handler( +/// // access the state via the `State` extractor +/// // extracting a state of the wrong type results in a compile error +/// State(state): State, +/// ) { +/// // use `state`... +/// } +/// # let _: axum::Router = app; +/// ``` +/// +/// Note that `State` is an extractor, so be sure to put it before any body +/// extractors, see ["the order of extractors"][order-of-extractors]. +/// +/// [order-of-extractors]: crate::extract#the-order-of-extractors +/// +/// ## Combining stateful routers +/// +/// Multiple [`Router`]s can be combined with [`Router::nest`] or [`Router::merge`] +/// When combining [`Router`]s with one of these methods, the [`Router`]s must have +/// the same state type. 
Generally, this can be inferred automatically: +/// +/// ``` +/// use axum::{Router, routing::get, extract::State}; +/// +/// #[derive(Clone)] +/// struct AppState {} +/// +/// let state = AppState {}; +/// +/// // create a `Router` that will be nested within another +/// let api = Router::new() +/// .route("/posts", get(posts_handler)); +/// +/// let app = Router::new() +/// .nest("/api", api) +/// .with_state(state); +/// +/// async fn posts_handler(State(state): State) { +/// // use `state`... +/// } +/// # let _: axum::Router = app; +/// ``` +/// +/// However, if you are composing [`Router`]s that are defined in separate scopes, +/// you may need to annotate the [`State`] type explicitly: +/// +/// ``` +/// use axum::{Router, routing::get, extract::State}; +/// +/// #[derive(Clone)] +/// struct AppState {} +/// +/// fn make_app() -> Router { +/// let state = AppState {}; +/// +/// Router::new() +/// .nest("/api", make_api()) +/// .with_state(state) // the outer Router's state is inferred +/// } +/// +/// // the inner Router must specify its state type to compose with the +/// // outer router +/// fn make_api() -> Router { +/// Router::new() +/// .route("/posts", get(posts_handler)) +/// } +/// +/// async fn posts_handler(State(state): State) { +/// // use `state`... +/// } +/// # let _: axum::Router = make_app(); +/// ``` +/// +/// In short, a [`Router`]'s generic state type defaults to `()` +/// (no state) unless [`Router::with_state`] is called or the value +/// of the generic type is given explicitly. 
+/// +/// [`Router`]: crate::Router +/// [`Router::merge`]: crate::Router::merge +/// [`Router::nest`]: crate::Router::nest +/// [`Router::with_state`]: crate::Router::with_state +/// +/// # With `MethodRouter` +/// +/// ``` +/// use axum::{routing::get, extract::State}; +/// +/// #[derive(Clone)] +/// struct AppState {} +/// +/// let state = AppState {}; +/// +/// let method_router_with_state = get(handler) +/// // provide the state so the handler can access it +/// .with_state(state); +/// +/// async fn handler(State(state): State) { +/// // use `state`... +/// } +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(method_router_with_state.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// # With `Handler` +/// +/// ``` +/// use axum::{routing::get, handler::Handler, extract::State}; +/// +/// #[derive(Clone)] +/// struct AppState {} +/// +/// let state = AppState {}; +/// +/// async fn handler(State(state): State) { +/// // use `state`... +/// } +/// +/// // provide the state so the handler can access it +/// let handler_with_state = handler.with_state(state); +/// +/// # async { +/// axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) +/// .serve(handler_with_state.into_make_service()) +/// .await +/// .expect("server failed"); +/// # }; +/// ``` +/// +/// # Substates +/// +/// [`State`] only allows a single state type but you can use [`FromRef`] to extract "substates": +/// +/// ``` +/// use axum::{Router, routing::get, extract::{State, FromRef}}; +/// +/// // the application state +/// #[derive(Clone)] +/// struct AppState { +/// // that holds some api specific state +/// api_state: ApiState, +/// } +/// +/// // the api specific state +/// #[derive(Clone)] +/// struct ApiState {} +/// +/// // support converting an `AppState` in an `ApiState` +/// impl FromRef for ApiState { +/// fn from_ref(app_state: &AppState) -> ApiState { +/// app_state.api_state.clone() +/// } +/// } +/// +/// let state = AppState { +/// api_state: 
ApiState {}, +/// }; +/// +/// let app = Router::new() +/// .route("/", get(handler)) +/// .route("/api/users", get(api_users)) +/// .with_state(state); +/// +/// async fn api_users( +/// // access the api specific state +/// State(api_state): State, +/// ) { +/// } +/// +/// async fn handler( +/// // we can still access to top level state +/// State(state): State, +/// ) { +/// } +/// # let _: axum::Router = app; +/// ``` +/// +/// For convenience `FromRef` can also be derived using `#[derive(FromRef)]`. +/// +/// # For library authors +/// +/// If you're writing a library that has an extractor that needs state, this is the recommended way +/// to do it: +/// +/// ```rust +/// use axum_core::extract::{FromRequestParts, FromRef}; +/// use http::request::Parts; +/// use async_trait::async_trait; +/// use std::convert::Infallible; +/// +/// // the extractor your library provides +/// struct MyLibraryExtractor; +/// +/// #[async_trait] +/// impl FromRequestParts for MyLibraryExtractor +/// where +/// // keep `S` generic but require that it can produce a `MyLibraryState` +/// // this means users will have to implement `FromRef for MyLibraryState` +/// MyLibraryState: FromRef, +/// S: Send + Sync, +/// { +/// type Rejection = Infallible; +/// +/// async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { +/// // get a `MyLibraryState` from a reference to the state +/// let state = MyLibraryState::from_ref(state); +/// +/// // ... +/// # todo!() +/// } +/// } +/// +/// // the state your library needs +/// struct MyLibraryState { +/// // ... +/// } +/// ``` +/// +/// # When states need to implement `Clone` +/// +/// Your top level state type must implement `Clone` to be extractable with `State`: +/// +/// ``` +/// use axum::extract::State; +/// +/// // no substates, so to extract to `State` we must implement `Clone` for `AppState` +/// #[derive(Clone)] +/// struct AppState {} +/// +/// async fn handler(State(state): State) { +/// // ... 
+/// } +/// ``` +/// +/// This works because of [`impl FromRef for S where S: Clone`][`FromRef`]. +/// +/// This is also true if you're extracting substates, unless you _never_ extract the top level +/// state itself: +/// +/// ``` +/// use axum::extract::{State, FromRef}; +/// +/// // we never extract `State`, just `State`. So `AppState` doesn't need to +/// // implement `Clone` +/// struct AppState { +/// inner: InnerState, +/// } +/// +/// #[derive(Clone)] +/// struct InnerState {} +/// +/// impl FromRef for InnerState { +/// fn from_ref(app_state: &AppState) -> InnerState { +/// app_state.inner.clone() +/// } +/// } +/// +/// async fn api_users(State(inner): State) { +/// // ... +/// } +/// ``` +/// +/// In general however we recommend you implement `Clone` for all your state types to avoid +/// potential type errors. +/// +/// # Shared mutable state +/// +/// [As state is global within a `Router`][global] you can't directly get a mutable reference to +/// the state. +/// +/// The most basic solution is to use an `Arc>`. Which kind of mutex you need depends on +/// your use case. See [the tokio docs] for more details. +/// +/// Note that holding a locked `std::sync::Mutex` across `.await` points will result in `!Send` +/// futures which are incompatible with axum. If you need to hold a mutex across `.await` points, +/// consider using a `tokio::sync::Mutex` instead. +/// +/// ## Example +/// +/// ``` +/// use axum::{Router, routing::get, extract::State}; +/// use std::sync::{Arc, Mutex}; +/// +/// #[derive(Clone)] +/// struct AppState { +/// data: Arc>, +/// } +/// +/// async fn handler(State(state): State) { +/// let mut data = state.data.lock().expect("mutex was poisoned"); +/// *data = "updated foo".to_owned(); +/// +/// // ... 
+/// } +/// +/// let state = AppState { +/// data: Arc::new(Mutex::new("foo".to_owned())), +/// }; +/// +/// let app = Router::new() +/// .route("/", get(handler)) +/// .with_state(state); +/// # let _: Router = app; +/// ``` +/// +/// [global]: crate::Router::with_state +/// [the tokio docs]: https://docs.rs/tokio/1.25.0/tokio/sync/struct.Mutex.html#which-kind-of-mutex-should-you-use +#[derive(Debug, Default, Clone, Copy)] +pub struct State(pub S); + +#[async_trait] +impl FromRequestParts for State +where + InnerState: FromRef, + OuterState: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts( + _parts: &mut Parts, + state: &OuterState, + ) -> Result { + let inner_state = InnerState::from_ref(state); + Ok(Self(inner_state)) + } +} + +impl Deref for State { + type Target = S; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for State { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/extract/ws.rs b/.cargo-vendor/axum-0.6.20/src/extract/ws.rs new file mode 100644 index 0000000000..26f28609ad --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/extract/ws.rs @@ -0,0 +1,894 @@ +//! Handle WebSocket connections. +//! +//! # Example +//! +//! ``` +//! use axum::{ +//! extract::ws::{WebSocketUpgrade, WebSocket}, +//! routing::get, +//! response::{IntoResponse, Response}, +//! Router, +//! }; +//! +//! let app = Router::new().route("/ws", get(handler)); +//! +//! async fn handler(ws: WebSocketUpgrade) -> Response { +//! ws.on_upgrade(handle_socket) +//! } +//! +//! async fn handle_socket(mut socket: WebSocket) { +//! while let Some(msg) = socket.recv().await { +//! let msg = if let Ok(msg) = msg { +//! msg +//! } else { +//! // client disconnected +//! return; +//! }; +//! +//! if socket.send(msg).await.is_err() { +//! // client disconnected +//! return; +//! } +//! } +//! } +//! # async { +//! 
# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! ``` +//! +//! # Passing data and/or state to an `on_upgrade` callback +//! +//! ``` +//! use axum::{ +//! extract::{ws::{WebSocketUpgrade, WebSocket}, State}, +//! response::Response, +//! routing::get, +//! Router, +//! }; +//! +//! #[derive(Clone)] +//! struct AppState { +//! // ... +//! } +//! +//! async fn handler(ws: WebSocketUpgrade, State(state): State) -> Response { +//! ws.on_upgrade(|socket| handle_socket(socket, state)) +//! } +//! +//! async fn handle_socket(socket: WebSocket, state: AppState) { +//! // ... +//! } +//! +//! let app = Router::new() +//! .route("/ws", get(handler)) +//! .with_state(AppState { /* ... */ }); +//! # async { +//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! ``` +//! +//! # Read and write concurrently +//! +//! If you need to read and write concurrently from a [`WebSocket`] you can use +//! [`StreamExt::split`]: +//! +//! ```rust,no_run +//! use axum::{Error, extract::ws::{WebSocket, Message}}; +//! use futures_util::{sink::SinkExt, stream::{StreamExt, SplitSink, SplitStream}}; +//! +//! async fn handle_socket(mut socket: WebSocket) { +//! let (mut sender, mut receiver) = socket.split(); +//! +//! tokio::spawn(write(sender)); +//! tokio::spawn(read(receiver)); +//! } +//! +//! async fn read(receiver: SplitStream) { +//! // ... +//! } +//! +//! async fn write(sender: SplitSink) { +//! // ... +//! } +//! ``` +//! +//! 
[`StreamExt::split`]: https://docs.rs/futures/0.3.17/futures/stream/trait.StreamExt.html#method.split + +use self::rejection::*; +use super::FromRequestParts; +use crate::{ + body::{self, Bytes}, + response::Response, + Error, +}; +use async_trait::async_trait; +use futures_util::{ + sink::{Sink, SinkExt}, + stream::{Stream, StreamExt}, +}; +use http::{ + header::{self, HeaderMap, HeaderName, HeaderValue}, + request::Parts, + Method, StatusCode, +}; +use hyper::upgrade::{OnUpgrade, Upgraded}; +use sha1::{Digest, Sha1}; +use std::{ + borrow::Cow, + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tokio_tungstenite::{ + tungstenite::{ + self as ts, + protocol::{self, WebSocketConfig}, + }, + WebSocketStream, +}; + +/// Extractor for establishing WebSocket connections. +/// +/// Note: This extractor requires the request method to be `GET` so it should +/// always be used with [`get`](crate::routing::get). Requests with other methods will be +/// rejected. +/// +/// See the [module docs](self) for an example. +#[cfg_attr(docsrs, doc(cfg(feature = "ws")))] +pub struct WebSocketUpgrade { + config: WebSocketConfig, + /// The chosen protocol sent in the `Sec-WebSocket-Protocol` header of the response. + protocol: Option, + sec_websocket_key: HeaderValue, + on_upgrade: OnUpgrade, + on_failed_upgrade: F, + sec_websocket_protocol: Option, +} + +impl std::fmt::Debug for WebSocketUpgrade { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("WebSocketUpgrade") + .field("config", &self.config) + .field("protocol", &self.protocol) + .field("sec_websocket_key", &self.sec_websocket_key) + .field("sec_websocket_protocol", &self.sec_websocket_protocol) + .finish_non_exhaustive() + } +} + +impl WebSocketUpgrade { + /// Does nothing, instead use `max_write_buffer_size`. 
+ #[deprecated] + pub fn max_send_queue(self, _: usize) -> Self { + self + } + + /// The target minimum size of the write buffer to reach before writing the data + /// to the underlying stream. + /// + /// The default value is 128 KiB. + /// + /// If set to `0` each message will be eagerly written to the underlying stream. + /// It is often more optimal to allow them to buffer a little, hence the default value. + /// + /// Note: [`flush`](SinkExt::flush) will always fully write the buffer regardless. + pub fn write_buffer_size(mut self, size: usize) -> Self { + self.config.write_buffer_size = size; + self + } + + /// The max size of the write buffer in bytes. Setting this can provide backpressure + /// in the case the write buffer is filling up due to write errors. + /// + /// The default value is unlimited. + /// + /// Note: The write buffer only builds up past [`write_buffer_size`](Self::write_buffer_size) + /// when writes to the underlying stream are failing. So the **write buffer can not + /// fill up if you are not observing write errors even if not flushing**. + /// + /// Note: Should always be at least [`write_buffer_size + 1 message`](Self::write_buffer_size) + /// and probably a little more depending on error handling strategy. + pub fn max_write_buffer_size(mut self, max: usize) -> Self { + self.config.max_write_buffer_size = max; + self + } + + /// Set the maximum message size (defaults to 64 megabytes) + pub fn max_message_size(mut self, max: usize) -> Self { + self.config.max_message_size = Some(max); + self + } + + /// Set the maximum frame size (defaults to 16 megabytes) + pub fn max_frame_size(mut self, max: usize) -> Self { + self.config.max_frame_size = Some(max); + self + } + + /// Allow server to accept unmasked frames (defaults to false) + pub fn accept_unmasked_frames(mut self, accept: bool) -> Self { + self.config.accept_unmasked_frames = accept; + self + } + + /// Set the known protocols. 
+ /// + /// If the protocol name specified by `Sec-WebSocket-Protocol` header + /// to match any of them, the upgrade response will include `Sec-WebSocket-Protocol` header and + /// return the protocol name. + /// + /// The protocols should be listed in decreasing order of preference: if the client offers + /// multiple protocols that the server could support, the server will pick the first one in + /// this list. + /// + /// # Examples + /// + /// ``` + /// use axum::{ + /// extract::ws::{WebSocketUpgrade, WebSocket}, + /// routing::get, + /// response::{IntoResponse, Response}, + /// Router, + /// }; + /// + /// let app = Router::new().route("/ws", get(handler)); + /// + /// async fn handler(ws: WebSocketUpgrade) -> Response { + /// ws.protocols(["graphql-ws", "graphql-transport-ws"]) + /// .on_upgrade(|socket| async { + /// // ... + /// }) + /// } + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + pub fn protocols(mut self, protocols: I) -> Self + where + I: IntoIterator, + I::Item: Into>, + { + if let Some(req_protocols) = self + .sec_websocket_protocol + .as_ref() + .and_then(|p| p.to_str().ok()) + { + self.protocol = protocols + .into_iter() + // FIXME: This will often allocate a new `String` and so is less efficient than it + // could be. But that can't be fixed without breaking changes to the public API. + .map(Into::into) + .find(|protocol| { + req_protocols + .split(',') + .any(|req_protocol| req_protocol.trim() == protocol) + }) + .map(|protocol| match protocol { + Cow::Owned(s) => HeaderValue::from_str(&s).unwrap(), + Cow::Borrowed(s) => HeaderValue::from_static(s), + }); + } + + self + } + + /// Provide a callback to call if upgrading the connection fails. + /// + /// The connection upgrade is performed in a background task. If that fails this callback + /// will be called. + /// + /// By default any errors will be silently ignored. 
+ /// + /// # Example + /// + /// ``` + /// use axum::{ + /// extract::{WebSocketUpgrade}, + /// response::Response, + /// }; + /// + /// async fn handler(ws: WebSocketUpgrade) -> Response { + /// ws.on_failed_upgrade(|error| { + /// report_error(error); + /// }) + /// .on_upgrade(|socket| async { /* ... */ }) + /// } + /// # + /// # fn report_error(_: axum::Error) {} + /// ``` + pub fn on_failed_upgrade(self, callback: C) -> WebSocketUpgrade + where + C: OnFailedUpdgrade, + { + WebSocketUpgrade { + config: self.config, + protocol: self.protocol, + sec_websocket_key: self.sec_websocket_key, + on_upgrade: self.on_upgrade, + on_failed_upgrade: callback, + sec_websocket_protocol: self.sec_websocket_protocol, + } + } + + /// Finalize upgrading the connection and call the provided callback with + /// the stream. + #[must_use = "to setup the WebSocket connection, this response must be returned"] + pub fn on_upgrade(self, callback: C) -> Response + where + C: FnOnce(WebSocket) -> Fut + Send + 'static, + Fut: Future + Send + 'static, + F: OnFailedUpdgrade, + { + let on_upgrade = self.on_upgrade; + let config = self.config; + let on_failed_upgrade = self.on_failed_upgrade; + + let protocol = self.protocol.clone(); + + tokio::spawn(async move { + let upgraded = match on_upgrade.await { + Ok(upgraded) => upgraded, + Err(err) => { + on_failed_upgrade.call(Error::new(err)); + return; + } + }; + + let socket = + WebSocketStream::from_raw_socket(upgraded, protocol::Role::Server, Some(config)) + .await; + let socket = WebSocket { + inner: socket, + protocol, + }; + callback(socket).await; + }); + + #[allow(clippy::declare_interior_mutable_const)] + const UPGRADE: HeaderValue = HeaderValue::from_static("upgrade"); + #[allow(clippy::declare_interior_mutable_const)] + const WEBSOCKET: HeaderValue = HeaderValue::from_static("websocket"); + + let mut builder = Response::builder() + .status(StatusCode::SWITCHING_PROTOCOLS) + .header(header::CONNECTION, UPGRADE) + 
.header(header::UPGRADE, WEBSOCKET) + .header( + header::SEC_WEBSOCKET_ACCEPT, + sign(self.sec_websocket_key.as_bytes()), + ); + + if let Some(protocol) = self.protocol { + builder = builder.header(header::SEC_WEBSOCKET_PROTOCOL, protocol); + } + + builder.body(body::boxed(body::Empty::new())).unwrap() + } +} + +/// What to do when a connection upgrade fails. +/// +/// See [`WebSocketUpgrade::on_failed_upgrade`] for more details. +pub trait OnFailedUpdgrade: Send + 'static { + /// Call the callback. + fn call(self, error: Error); +} + +impl OnFailedUpdgrade for F +where + F: FnOnce(Error) + Send + 'static, +{ + fn call(self, error: Error) { + self(error) + } +} + +/// The default `OnFailedUpdgrade` used by `WebSocketUpgrade`. +/// +/// It simply ignores the error. +#[non_exhaustive] +#[derive(Debug)] +pub struct DefaultOnFailedUpdgrade; + +impl OnFailedUpdgrade for DefaultOnFailedUpdgrade { + #[inline] + fn call(self, _error: Error) {} +} + +#[async_trait] +impl FromRequestParts for WebSocketUpgrade +where + S: Send + Sync, +{ + type Rejection = WebSocketUpgradeRejection; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + if parts.method != Method::GET { + return Err(MethodNotGet.into()); + } + + if !header_contains(&parts.headers, header::CONNECTION, "upgrade") { + return Err(InvalidConnectionHeader.into()); + } + + if !header_eq(&parts.headers, header::UPGRADE, "websocket") { + return Err(InvalidUpgradeHeader.into()); + } + + if !header_eq(&parts.headers, header::SEC_WEBSOCKET_VERSION, "13") { + return Err(InvalidWebSocketVersionHeader.into()); + } + + let sec_websocket_key = parts + .headers + .get(header::SEC_WEBSOCKET_KEY) + .ok_or(WebSocketKeyHeaderMissing)? 
+ .clone(); + + let on_upgrade = parts + .extensions + .remove::() + .ok_or(ConnectionNotUpgradable)?; + + let sec_websocket_protocol = parts.headers.get(header::SEC_WEBSOCKET_PROTOCOL).cloned(); + + Ok(Self { + config: Default::default(), + protocol: None, + sec_websocket_key, + on_upgrade, + sec_websocket_protocol, + on_failed_upgrade: DefaultOnFailedUpdgrade, + }) + } +} + +fn header_eq(headers: &HeaderMap, key: HeaderName, value: &'static str) -> bool { + if let Some(header) = headers.get(&key) { + header.as_bytes().eq_ignore_ascii_case(value.as_bytes()) + } else { + false + } +} + +fn header_contains(headers: &HeaderMap, key: HeaderName, value: &'static str) -> bool { + let header = if let Some(header) = headers.get(&key) { + header + } else { + return false; + }; + + if let Ok(header) = std::str::from_utf8(header.as_bytes()) { + header.to_ascii_lowercase().contains(value) + } else { + false + } +} + +/// A stream of WebSocket messages. +/// +/// See [the module level documentation](self) for more details. +#[derive(Debug)] +pub struct WebSocket { + inner: WebSocketStream, + protocol: Option, +} + +impl WebSocket { + /// Receive another message. + /// + /// Returns `None` if the stream has closed. + pub async fn recv(&mut self) -> Option> { + self.next().await + } + + /// Send a message. + pub async fn send(&mut self, msg: Message) -> Result<(), Error> { + self.inner + .send(msg.into_tungstenite()) + .await + .map_err(Error::new) + } + + /// Gracefully close this WebSocket. + pub async fn close(mut self) -> Result<(), Error> { + self.inner.close(None).await.map_err(Error::new) + } + + /// Return the selected WebSocket subprotocol, if one has been chosen. 
+ pub fn protocol(&self) -> Option<&HeaderValue> { + self.protocol.as_ref() + } +} + +impl Stream for WebSocket { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match futures_util::ready!(self.inner.poll_next_unpin(cx)) { + Some(Ok(msg)) => { + if let Some(msg) = Message::from_tungstenite(msg) { + return Poll::Ready(Some(Ok(msg))); + } + } + Some(Err(err)) => return Poll::Ready(Some(Err(Error::new(err)))), + None => return Poll::Ready(None), + } + } + } +} + +impl Sink for WebSocket { + type Error = Error; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_ready(cx).map_err(Error::new) + } + + fn start_send(mut self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> { + Pin::new(&mut self.inner) + .start_send(item.into_tungstenite()) + .map_err(Error::new) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx).map_err(Error::new) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_close(cx).map_err(Error::new) + } +} + +/// Status code used to indicate why an endpoint is closing the WebSocket connection. +pub type CloseCode = u16; + +/// A struct representing the close command. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct CloseFrame<'t> { + /// The reason as a code. + pub code: CloseCode, + /// The reason as text string. + pub reason: Cow<'t, str>, +} + +/// A WebSocket message. 
+// +// This code comes from https://github.com/snapview/tungstenite-rs/blob/master/src/protocol/message.rs and is under following license: +// Copyright (c) 2017 Alexey Galakhov +// Copyright (c) 2016 Jason Housley +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +#[derive(Debug, Eq, PartialEq, Clone)] +pub enum Message { + /// A text WebSocket message + Text(String), + /// A binary WebSocket message + Binary(Vec), + /// A ping message with the specified payload + /// + /// The payload here must have a length less than 125 bytes. + /// + /// Ping messages will be automatically responded to by the server, so you do not have to worry + /// about dealing with them yourself. + Ping(Vec), + /// A pong message with the specified payload + /// + /// The payload here must have a length less than 125 bytes. 
+ /// + /// Pong messages will be automatically sent to the client if a ping message is received, so + /// you do not have to worry about constructing them yourself unless you want to implement a + /// [unidirectional heartbeat](https://tools.ietf.org/html/rfc6455#section-5.5.3). + Pong(Vec), + /// A close message with the optional close frame. + Close(Option>), +} + +impl Message { + fn into_tungstenite(self) -> ts::Message { + match self { + Self::Text(text) => ts::Message::Text(text), + Self::Binary(binary) => ts::Message::Binary(binary), + Self::Ping(ping) => ts::Message::Ping(ping), + Self::Pong(pong) => ts::Message::Pong(pong), + Self::Close(Some(close)) => ts::Message::Close(Some(ts::protocol::CloseFrame { + code: ts::protocol::frame::coding::CloseCode::from(close.code), + reason: close.reason, + })), + Self::Close(None) => ts::Message::Close(None), + } + } + + fn from_tungstenite(message: ts::Message) -> Option { + match message { + ts::Message::Text(text) => Some(Self::Text(text)), + ts::Message::Binary(binary) => Some(Self::Binary(binary)), + ts::Message::Ping(ping) => Some(Self::Ping(ping)), + ts::Message::Pong(pong) => Some(Self::Pong(pong)), + ts::Message::Close(Some(close)) => Some(Self::Close(Some(CloseFrame { + code: close.code.into(), + reason: close.reason, + }))), + ts::Message::Close(None) => Some(Self::Close(None)), + // we can ignore `Frame` frames as recommended by the tungstenite maintainers + // https://github.com/snapview/tungstenite-rs/issues/268 + ts::Message::Frame(_) => None, + } + } + + /// Consume the WebSocket and return it as binary data. + pub fn into_data(self) -> Vec { + match self { + Self::Text(string) => string.into_bytes(), + Self::Binary(data) | Self::Ping(data) | Self::Pong(data) => data, + Self::Close(None) => Vec::new(), + Self::Close(Some(frame)) => frame.reason.into_owned().into_bytes(), + } + } + + /// Attempt to consume the WebSocket message and convert it to a String. 
+ pub fn into_text(self) -> Result { + match self { + Self::Text(string) => Ok(string), + Self::Binary(data) | Self::Ping(data) | Self::Pong(data) => Ok(String::from_utf8(data) + .map_err(|err| err.utf8_error()) + .map_err(Error::new)?), + Self::Close(None) => Ok(String::new()), + Self::Close(Some(frame)) => Ok(frame.reason.into_owned()), + } + } + + /// Attempt to get a &str from the WebSocket message, + /// this will try to convert binary data to utf8. + pub fn to_text(&self) -> Result<&str, Error> { + match *self { + Self::Text(ref string) => Ok(string), + Self::Binary(ref data) | Self::Ping(ref data) | Self::Pong(ref data) => { + Ok(std::str::from_utf8(data).map_err(Error::new)?) + } + Self::Close(None) => Ok(""), + Self::Close(Some(ref frame)) => Ok(&frame.reason), + } + } +} + +impl From for Message { + fn from(string: String) -> Self { + Message::Text(string) + } +} + +impl<'s> From<&'s str> for Message { + fn from(string: &'s str) -> Self { + Message::Text(string.into()) + } +} + +impl<'b> From<&'b [u8]> for Message { + fn from(data: &'b [u8]) -> Self { + Message::Binary(data.into()) + } +} + +impl From> for Message { + fn from(data: Vec) -> Self { + Message::Binary(data) + } +} + +impl From for Vec { + fn from(msg: Message) -> Self { + msg.into_data() + } +} + +fn sign(key: &[u8]) -> HeaderValue { + use base64::engine::Engine as _; + + let mut sha1 = Sha1::default(); + sha1.update(key); + sha1.update(&b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"[..]); + let b64 = Bytes::from(base64::engine::general_purpose::STANDARD.encode(sha1.finalize())); + HeaderValue::from_maybe_shared(b64).expect("base64 is a valid value") +} + +pub mod rejection { + //! WebSocket specific rejections. + + use axum_core::__composite_rejection as composite_rejection; + use axum_core::__define_rejection as define_rejection; + + define_rejection! 
{ + #[status = METHOD_NOT_ALLOWED] + #[body = "Request method must be `GET`"] + /// Rejection type for [`WebSocketUpgrade`](super::WebSocketUpgrade). + pub struct MethodNotGet; + } + + define_rejection! { + #[status = BAD_REQUEST] + #[body = "Connection header did not include 'upgrade'"] + /// Rejection type for [`WebSocketUpgrade`](super::WebSocketUpgrade). + pub struct InvalidConnectionHeader; + } + + define_rejection! { + #[status = BAD_REQUEST] + #[body = "`Upgrade` header did not include 'websocket'"] + /// Rejection type for [`WebSocketUpgrade`](super::WebSocketUpgrade). + pub struct InvalidUpgradeHeader; + } + + define_rejection! { + #[status = BAD_REQUEST] + #[body = "`Sec-WebSocket-Version` header did not include '13'"] + /// Rejection type for [`WebSocketUpgrade`](super::WebSocketUpgrade). + pub struct InvalidWebSocketVersionHeader; + } + + define_rejection! { + #[status = BAD_REQUEST] + #[body = "`Sec-WebSocket-Key` header missing"] + /// Rejection type for [`WebSocketUpgrade`](super::WebSocketUpgrade). + pub struct WebSocketKeyHeaderMissing; + } + + define_rejection! { + #[status = UPGRADE_REQUIRED] + #[body = "WebSocket request couldn't be upgraded since no upgrade state was present"] + /// Rejection type for [`WebSocketUpgrade`](super::WebSocketUpgrade). + /// + /// This rejection is returned if the connection cannot be upgraded for example if the + /// request is HTTP/1.0. + /// + /// See [MDN] for more details about connection upgrades. + /// + /// [MDN]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Upgrade + pub struct ConnectionNotUpgradable; + } + + composite_rejection! { + /// Rejection used for [`WebSocketUpgrade`](super::WebSocketUpgrade). + /// + /// Contains one variant for each way the [`WebSocketUpgrade`](super::WebSocketUpgrade) + /// extractor can fail. 
+ pub enum WebSocketUpgradeRejection { + MethodNotGet, + InvalidConnectionHeader, + InvalidUpgradeHeader, + InvalidWebSocketVersionHeader, + WebSocketKeyHeaderMissing, + ConnectionNotUpgradable, + } + } +} + +pub mod close_code { + //! Constants for [`CloseCode`]s. + //! + //! [`CloseCode`]: super::CloseCode + + /// Indicates a normal closure, meaning that the purpose for which the connection was + /// established has been fulfilled. + pub const NORMAL: u16 = 1000; + + /// Indicates that an endpoint is "going away", such as a server going down or a browser having + /// navigated away from a page. + pub const AWAY: u16 = 1001; + + /// Indicates that an endpoint is terminating the connection due to a protocol error. + pub const PROTOCOL: u16 = 1002; + + /// Indicates that an endpoint is terminating the connection because it has received a type of + /// data it cannot accept (e.g., an endpoint that understands only text data MAY send this if + /// it receives a binary message). + pub const UNSUPPORTED: u16 = 1003; + + /// Indicates that no status code was included in a closing frame. + pub const STATUS: u16 = 1005; + + /// Indicates an abnormal closure. + pub const ABNORMAL: u16 = 1006; + + /// Indicates that an endpoint is terminating the connection because it has received data + /// within a message that was not consistent with the type of the message (e.g., non-UTF-8 + /// RFC3629 data within a text message). + pub const INVALID: u16 = 1007; + + /// Indicates that an endpoint is terminating the connection because it has received a message + /// that violates its policy. This is a generic status code that can be returned when there is + /// no other more suitable status code (e.g., `UNSUPPORTED` or `SIZE`) or if there is a need to + /// hide specific details about the policy. + pub const POLICY: u16 = 1008; + + /// Indicates that an endpoint is terminating the connection because it has received a message + /// that is too big for it to process. 
+ pub const SIZE: u16 = 1009; + + /// Indicates that an endpoint (client) is terminating the connection because it has expected + /// the server to negotiate one or more extension, but the server didn't return them in the + /// response message of the WebSocket handshake. The list of extensions that are needed should + /// be given as the reason for closing. Note that this status code is not used by the server, + /// because it can fail the WebSocket handshake instead. + pub const EXTENSION: u16 = 1010; + + /// Indicates that a server is terminating the connection because it encountered an unexpected + /// condition that prevented it from fulfilling the request. + pub const ERROR: u16 = 1011; + + /// Indicates that the server is restarting. + pub const RESTART: u16 = 1012; + + /// Indicates that the server is overloaded and the client should either connect to a different + /// IP (when multiple targets exist), or reconnect to the same IP when a user has performed an + /// action. + pub const AGAIN: u16 = 1013; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{body::Body, routing::get, Router}; + use http::{Request, Version}; + use tower::ServiceExt; + + #[crate::test] + async fn rejects_http_1_0_requests() { + let svc = get(|ws: Result| { + let rejection = ws.unwrap_err(); + assert!(matches!( + rejection, + WebSocketUpgradeRejection::ConnectionNotUpgradable(_) + )); + std::future::ready(()) + }); + + let req = Request::builder() + .version(Version::HTTP_10) + .method(Method::GET) + .header("upgrade", "websocket") + .header("connection", "Upgrade") + .header("sec-websocket-key", "6D69KGBOr4Re+Nj6zx9aQA==") + .header("sec-websocket-version", "13") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[allow(dead_code)] + fn default_on_failed_upgrade() { + async fn handler(ws: WebSocketUpgrade) -> Response { + ws.on_upgrade(|_| async {}) + } + let _: Router = 
Router::new().route("/", get(handler)); + } + + #[allow(dead_code)] + fn on_failed_upgrade() { + async fn handler(ws: WebSocketUpgrade) -> Response { + ws.on_failed_upgrade(|_error: Error| println!("oops!")) + .on_upgrade(|_| async {}) + } + let _: Router = Router::new().route("/", get(handler)); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/form.rs b/.cargo-vendor/axum-0.6.20/src/form.rs new file mode 100644 index 0000000000..c690d48ef3 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/form.rs @@ -0,0 +1,263 @@ +use crate::body::HttpBody; +use crate::extract::{rejection::*, FromRequest, RawForm}; +use crate::BoxError; +use async_trait::async_trait; +use axum_core::response::{IntoResponse, Response}; +use axum_core::RequestExt; +use http::header::CONTENT_TYPE; +use http::{Request, StatusCode}; +use serde::de::DeserializeOwned; +use serde::Serialize; + +/// URL encoded extractor and response. +/// +/// # As extractor +/// +/// If used as an extractor `Form` will deserialize the query parameters for `GET` and `HEAD` +/// requests and `application/x-www-form-urlencoded` encoded request bodies for other methods. It +/// supports any type that implements [`serde::Deserialize`]. +/// +/// ⚠️ Since parsing form data might require consuming the request body, the `Form` extractor must be +/// *last* if there are multiple extractors in a handler. See ["the order of +/// extractors"][order-of-extractors] +/// +/// [order-of-extractors]: crate::extract#the-order-of-extractors +/// +/// ```rust +/// use axum::Form; +/// use serde::Deserialize; +/// +/// #[derive(Deserialize)] +/// struct SignUp { +/// username: String, +/// password: String, +/// } +/// +/// async fn accept_form(Form(sign_up): Form) { +/// // ... +/// } +/// ``` +/// +/// Note that `Content-Type: multipart/form-data` requests are not supported. Use [`Multipart`] +/// instead. 
+/// +/// # As response +/// +/// ```rust +/// use axum::Form; +/// use serde::Serialize; +/// +/// #[derive(Serialize)] +/// struct Payload { +/// value: String, +/// } +/// +/// async fn handler() -> Form { +/// Form(Payload { value: "foo".to_owned() }) +/// } +/// ``` +/// +/// [`Multipart`]: crate::extract::Multipart +#[cfg_attr(docsrs, doc(cfg(feature = "form")))] +#[derive(Debug, Clone, Copy, Default)] +#[must_use] +pub struct Form(pub T); + +#[async_trait] +impl FromRequest for Form +where + T: DeserializeOwned, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into, + S: Send + Sync, +{ + type Rejection = FormRejection; + + async fn from_request(req: Request, _state: &S) -> Result { + let is_get_or_head = + req.method() == http::Method::GET || req.method() == http::Method::HEAD; + + match req.extract().await { + Ok(RawForm(bytes)) => { + let value = + serde_urlencoded::from_bytes(&bytes).map_err(|err| -> FormRejection { + if is_get_or_head { + FailedToDeserializeForm::from_err(err).into() + } else { + FailedToDeserializeFormBody::from_err(err).into() + } + })?; + Ok(Form(value)) + } + Err(RawFormRejection::BytesRejection(r)) => Err(FormRejection::BytesRejection(r)), + Err(RawFormRejection::InvalidFormContentType(r)) => { + Err(FormRejection::InvalidFormContentType(r)) + } + } + } +} + +impl IntoResponse for Form +where + T: Serialize, +{ + fn into_response(self) -> Response { + match serde_urlencoded::to_string(&self.0) { + Ok(body) => ( + [(CONTENT_TYPE, mime::APPLICATION_WWW_FORM_URLENCODED.as_ref())], + body, + ) + .into_response(), + Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(), + } + } +} + +axum_core::__impl_deref!(Form); + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + body::{Empty, Full}, + routing::{on, MethodFilter}, + test_helpers::TestClient, + Router, + }; + use bytes::Bytes; + use http::{header::CONTENT_TYPE, Method, Request}; + use mime::APPLICATION_WWW_FORM_URLENCODED; + use 
serde::{Deserialize, Serialize}; + use std::fmt::Debug; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + struct Pagination { + size: Option, + page: Option, + } + + async fn check_query(uri: impl AsRef, value: T) { + let req = Request::builder() + .uri(uri.as_ref()) + .body(Empty::::new()) + .unwrap(); + assert_eq!(Form::::from_request(req, &()).await.unwrap().0, value); + } + + async fn check_body(value: T) { + let req = Request::builder() + .uri("http://example.com/test") + .method(Method::POST) + .header(CONTENT_TYPE, APPLICATION_WWW_FORM_URLENCODED.as_ref()) + .body(Full::::new( + serde_urlencoded::to_string(&value).unwrap().into(), + )) + .unwrap(); + assert_eq!(Form::::from_request(req, &()).await.unwrap().0, value); + } + + #[crate::test] + async fn test_form_query() { + check_query( + "http://example.com/test", + Pagination { + size: None, + page: None, + }, + ) + .await; + + check_query( + "http://example.com/test?size=10", + Pagination { + size: Some(10), + page: None, + }, + ) + .await; + + check_query( + "http://example.com/test?size=10&page=20", + Pagination { + size: Some(10), + page: Some(20), + }, + ) + .await; + } + + #[crate::test] + async fn test_form_body() { + check_body(Pagination { + size: None, + page: None, + }) + .await; + + check_body(Pagination { + size: Some(10), + page: None, + }) + .await; + + check_body(Pagination { + size: Some(10), + page: Some(20), + }) + .await; + } + + #[crate::test] + async fn test_incorrect_content_type() { + let req = Request::builder() + .uri("http://example.com/test") + .method(Method::POST) + .header(CONTENT_TYPE, mime::APPLICATION_JSON.as_ref()) + .body(Full::::new( + serde_urlencoded::to_string(&Pagination { + size: Some(10), + page: None, + }) + .unwrap() + .into(), + )) + .unwrap(); + assert!(matches!( + Form::::from_request(req, &()) + .await + .unwrap_err(), + FormRejection::InvalidFormContentType(InvalidFormContentType) + )); + } + + #[tokio::test] + async fn 
deserialize_error_status_codes() { + #[allow(dead_code)] + #[derive(Deserialize)] + struct Payload { + a: i32, + } + + let app = Router::new().route( + "/", + on( + MethodFilter::GET | MethodFilter::POST, + |_: Form| async {}, + ), + ); + + let client = TestClient::new(app); + + let res = client.get("/?a=false").send().await; + assert_eq!(res.status(), StatusCode::BAD_REQUEST); + + let res = client + .post("/") + .header(CONTENT_TYPE, APPLICATION_WWW_FORM_URLENCODED.as_ref()) + .body("a=false") + .send() + .await; + assert_eq!(res.status(), StatusCode::UNPROCESSABLE_ENTITY); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/handler/future.rs b/.cargo-vendor/axum-0.6.20/src/handler/future.rs new file mode 100644 index 0000000000..59487c31b2 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/handler/future.rs @@ -0,0 +1,52 @@ +//! Handler future types. + +use crate::response::Response; +use futures_util::future::Map; +use http::Request; +use pin_project_lite::pin_project; +use std::{convert::Infallible, future::Future, pin::Pin, task::Context}; +use tower::util::Oneshot; +use tower_service::Service; + +opaque_future! { + /// The response future for [`IntoService`](super::IntoService). + pub type IntoServiceFuture = + Map< + F, + fn(Response) -> Result, + >; +} + +pin_project! { + /// The response future for [`Layered`](super::Layered). 
+ pub struct LayeredFuture + where + S: Service>, + { + #[pin] + inner: Map>, fn(Result) -> Response>, + } +} + +impl LayeredFuture +where + S: Service>, +{ + pub(super) fn new( + inner: Map>, fn(Result) -> Response>, + ) -> Self { + Self { inner } + } +} + +impl Future for LayeredFuture +where + S: Service>, +{ + type Output = Response; + + #[inline] + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll { + self.project().inner.poll(cx) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/handler/into_service_state_in_extension.rs b/.cargo-vendor/axum-0.6.20/src/handler/into_service_state_in_extension.rs new file mode 100644 index 0000000000..61f7ed4351 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/handler/into_service_state_in_extension.rs @@ -0,0 +1,84 @@ +use super::Handler; +use crate::response::Response; +use http::Request; +use std::{ + convert::Infallible, + fmt, + marker::PhantomData, + task::{Context, Poll}, +}; +use tower_service::Service; + +pub(crate) struct IntoServiceStateInExtension { + handler: H, + _marker: PhantomData (T, S, B)>, +} + +#[test] +fn traits() { + use crate::test_helpers::*; + assert_send::>(); + assert_sync::>(); +} + +impl IntoServiceStateInExtension { + pub(crate) fn new(handler: H) -> Self { + Self { + handler, + _marker: PhantomData, + } + } +} + +impl fmt::Debug for IntoServiceStateInExtension { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("IntoServiceStateInExtension") + .finish_non_exhaustive() + } +} + +impl Clone for IntoServiceStateInExtension +where + H: Clone, +{ + fn clone(&self) -> Self { + Self { + handler: self.handler.clone(), + _marker: PhantomData, + } + } +} + +impl Service> for IntoServiceStateInExtension +where + H: Handler + Clone + Send + 'static, + B: Send + 'static, + S: Send + Sync + 'static, +{ + type Response = Response; + type Error = Infallible; + type Future = super::future::IntoServiceFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut 
Context<'_>) -> Poll> { + // `IntoServiceStateInExtension` can only be constructed from async functions which are always ready, or + // from `Layered` which buffers in `::call` and is therefore + // also always ready. + Poll::Ready(Ok(())) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + use futures_util::future::FutureExt; + + let state = req + .extensions_mut() + .remove::() + .expect("state extension missing. This is a bug in axum, please file an issue"); + + let handler = self.handler.clone(); + let future = Handler::call(handler, req, state); + let future = future.map(Ok as _); + + super::future::IntoServiceFuture::new(future) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/handler/mod.rs b/.cargo-vendor/axum-0.6.20/src/handler/mod.rs new file mode 100644 index 0000000000..5087ac7c93 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/handler/mod.rs @@ -0,0 +1,441 @@ +//! Async functions that can be used to handle requests. +//! +#![doc = include_str!("../docs/handlers_intro.md")] +//! +//! Some examples of handlers: +//! +//! ```rust +//! use axum::{body::Bytes, http::StatusCode}; +//! +//! // Handler that immediately returns an empty `200 OK` response. +//! async fn unit_handler() {} +//! +//! // Handler that immediately returns an empty `200 OK` response with a plain +//! // text body. +//! async fn string_handler() -> String { +//! "Hello, World!".to_string() +//! } +//! +//! // Handler that buffers the request body and returns it. +//! // +//! // This works because `Bytes` implements `FromRequest` +//! // and therefore can be used as an extractor. +//! // +//! // `String` and `StatusCode` both implement `IntoResponse` and +//! // therefore `Result` also implements `IntoResponse` +//! async fn echo(body: Bytes) -> Result { +//! if let Ok(string) = String::from_utf8(body.to_vec()) { +//! Ok(string) +//! } else { +//! Err(StatusCode::BAD_REQUEST) +//! } +//! } +//! ``` +//! +//! 
Instead of a direct `StatusCode`, it makes sense to use intermediate error type +//! that can ultimately be converted to `Response`. This allows using `?` operator +//! in handlers. See those examples: +//! +//! * [`anyhow-error-response`][anyhow] for generic boxed errors +//! * [`error-handling-and-dependency-injection`][ehdi] for application-specific detailed errors +//! +//! [anyhow]: https://github.com/tokio-rs/axum/blob/main/examples/anyhow-error-response/src/main.rs +//! [ehdi]: https://github.com/tokio-rs/axum/blob/main/examples/error-handling-and-dependency-injection/src/main.rs +//! +#![doc = include_str!("../docs/debugging_handler_type_errors.md")] + +#[cfg(feature = "tokio")] +use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; +use crate::{ + body::Body, + extract::{FromRequest, FromRequestParts}, + response::{IntoResponse, Response}, + routing::IntoMakeService, +}; +use http::Request; +use std::{convert::Infallible, fmt, future::Future, marker::PhantomData, pin::Pin}; +use tower::ServiceExt; +use tower_layer::Layer; +use tower_service::Service; + +pub mod future; +mod service; + +pub use self::service::HandlerService; + +/// Trait for async functions that can be used to handle requests. +/// +/// You shouldn't need to depend on this trait directly. It is automatically +/// implemented to closures of the right types. +/// +/// See the [module docs](crate::handler) for more details. 
+/// +/// # Converting `Handler`s into [`Service`]s +/// +/// To convert `Handler`s into [`Service`]s you have to call either +/// [`HandlerWithoutStateExt::into_service`] or [`Handler::with_state`]: +/// +/// ``` +/// use tower::Service; +/// use axum::{ +/// extract::State, +/// body::Body, +/// http::Request, +/// handler::{HandlerWithoutStateExt, Handler}, +/// }; +/// +/// // this handler doesn't require any state +/// async fn one() {} +/// // so it can be converted to a service with `HandlerWithoutStateExt::into_service` +/// assert_service(one.into_service()); +/// +/// // this handler requires state +/// async fn two(_: State) {} +/// // so we have to provide it +/// let handler_with_state = two.with_state(String::new()); +/// // which gives us a `Service` +/// assert_service(handler_with_state); +/// +/// // helper to check that a value implements `Service` +/// fn assert_service(service: S) +/// where +/// S: Service>, +/// {} +/// ``` +#[doc = include_str!("../docs/debugging_handler_type_errors.md")] +/// +/// # Handlers that aren't functions +/// +/// The `Handler` trait is also implemented for `T: IntoResponse`. That allows easily returning +/// fixed data for routes: +/// +/// ``` +/// use axum::{ +/// Router, +/// routing::{get, post}, +/// Json, +/// http::StatusCode, +/// }; +/// use serde_json::json; +/// +/// let app = Router::new() +/// // respond with a fixed string +/// .route("/", get("Hello, World!")) +/// // or return some mock data +/// .route("/users", post(( +/// StatusCode::CREATED, +/// Json(json!({ "id": 1, "username": "alice" })), +/// ))); +/// # let _: Router = app; +/// ``` +#[cfg_attr( + nightly_error_messages, + rustc_on_unimplemented( + note = "Consider using `#[axum::debug_handler]` to improve the error message" + ) +)] +pub trait Handler: Clone + Send + Sized + 'static { + /// The type of future calling this handler returns. + type Future: Future + Send + 'static; + + /// Call the handler with the given request. 
+ fn call(self, req: Request, state: S) -> Self::Future; + + /// Apply a [`tower::Layer`] to the handler. + /// + /// All requests to the handler will be processed by the layer's + /// corresponding middleware. + /// + /// This can be used to add additional processing to a request for a single + /// handler. + /// + /// Note this differs from [`routing::Router::layer`](crate::routing::Router::layer) + /// which adds a middleware to a group of routes. + /// + /// If you're applying middleware that produces errors you have to handle the errors + /// so they're converted into responses. You can learn more about doing that + /// [here](crate::error_handling). + /// + /// # Example + /// + /// Adding the [`tower::limit::ConcurrencyLimit`] middleware to a handler + /// can be done like so: + /// + /// ```rust + /// use axum::{ + /// routing::get, + /// handler::Handler, + /// Router, + /// }; + /// use tower::limit::{ConcurrencyLimitLayer, ConcurrencyLimit}; + /// + /// async fn handler() { /* ... */ } + /// + /// let layered_handler = handler.layer(ConcurrencyLimitLayer::new(64)); + /// let app = Router::new().route("/", get(layered_handler)); + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + fn layer(self, layer: L) -> Layered + where + L: Layer> + Clone, + L::Service: Service>, + { + Layered { + layer, + handler: self, + _marker: PhantomData, + } + } + + /// Convert the handler into a [`Service`] by providing the state + fn with_state(self, state: S) -> HandlerService { + HandlerService::new(self, state) + } +} + +impl Handler<((),), S, B> for F +where + F: FnOnce() -> Fut + Clone + Send + 'static, + Fut: Future + Send, + Res: IntoResponse, + B: Send + 'static, +{ + type Future = Pin + Send>>; + + fn call(self, _req: Request, _state: S) -> Self::Future { + Box::pin(async move { self().await.into_response() }) + } +} + +macro_rules! 
impl_handler { + ( + [$($ty:ident),*], $last:ident + ) => { + #[allow(non_snake_case, unused_mut)] + impl Handler<(M, $($ty,)* $last,), S, B> for F + where + F: FnOnce($($ty,)* $last,) -> Fut + Clone + Send + 'static, + Fut: Future + Send, + B: Send + 'static, + S: Send + Sync + 'static, + Res: IntoResponse, + $( $ty: FromRequestParts + Send, )* + $last: FromRequest + Send, + { + type Future = Pin + Send>>; + + fn call(self, req: Request, state: S) -> Self::Future { + Box::pin(async move { + let (mut parts, body) = req.into_parts(); + let state = &state; + + $( + let $ty = match $ty::from_request_parts(&mut parts, state).await { + Ok(value) => value, + Err(rejection) => return rejection.into_response(), + }; + )* + + let req = Request::from_parts(parts, body); + + let $last = match $last::from_request(req, state).await { + Ok(value) => value, + Err(rejection) => return rejection.into_response(), + }; + + let res = self($($ty,)* $last,).await; + + res.into_response() + }) + } + } + }; +} + +all_the_tuples!(impl_handler); + +mod private { + // Marker type for `impl Handler for T` + #[allow(missing_debug_implementations)] + pub enum IntoResponseHandler {} +} + +impl Handler for T +where + T: IntoResponse + Clone + Send + 'static, + B: Send + 'static, +{ + type Future = std::future::Ready; + + fn call(self, _req: Request, _state: S) -> Self::Future { + std::future::ready(self.into_response()) + } +} + +/// A [`Service`] created from a [`Handler`] by applying a Tower middleware. +/// +/// Created with [`Handler::layer`]. See that method for more details. 
+pub struct Layered { + layer: L, + handler: H, + _marker: PhantomData (T, S, B, B2)>, +} + +impl fmt::Debug for Layered +where + L: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Layered") + .field("layer", &self.layer) + .finish() + } +} + +impl Clone for Layered +where + L: Clone, + H: Clone, +{ + fn clone(&self) -> Self { + Self { + layer: self.layer.clone(), + handler: self.handler.clone(), + _marker: PhantomData, + } + } +} + +impl Handler for Layered +where + L: Layer> + Clone + Send + 'static, + H: Handler, + L::Service: Service, Error = Infallible> + Clone + Send + 'static, + >>::Response: IntoResponse, + >>::Future: Send, + T: 'static, + S: 'static, + B: Send + 'static, + B2: Send + 'static, +{ + type Future = future::LayeredFuture; + + fn call(self, req: Request, state: S) -> Self::Future { + use futures_util::future::{FutureExt, Map}; + + let svc = self.handler.with_state(state); + let svc = self.layer.layer(svc); + + let future: Map< + _, + fn( + Result< + >>::Response, + >>::Error, + >, + ) -> _, + > = svc.oneshot(req).map(|result| match result { + Ok(res) => res.into_response(), + Err(err) => match err {}, + }); + + future::LayeredFuture::new(future) + } +} + +/// Extension trait for [`Handler`]s that don't have state. +/// +/// This provides convenience methods to convert the [`Handler`] into a [`Service`] or [`MakeService`]. +/// +/// [`MakeService`]: tower::make::MakeService +pub trait HandlerWithoutStateExt: Handler { + /// Convert the handler into a [`Service`] and no state. + fn into_service(self) -> HandlerService; + + /// Convert the handler into a [`MakeService`] and no state. + /// + /// See [`HandlerService::into_make_service`] for more details. + /// + /// [`MakeService`]: tower::make::MakeService + fn into_make_service(self) -> IntoMakeService>; + + /// Convert the handler into a [`MakeService`] which stores information + /// about the incoming connection and has no state. 
+ /// + /// See [`HandlerService::into_make_service_with_connect_info`] for more details. + /// + /// [`MakeService`]: tower::make::MakeService + #[cfg(feature = "tokio")] + fn into_make_service_with_connect_info( + self, + ) -> IntoMakeServiceWithConnectInfo, C>; +} + +impl HandlerWithoutStateExt for H +where + H: Handler, +{ + fn into_service(self) -> HandlerService { + self.with_state(()) + } + + fn into_make_service(self) -> IntoMakeService> { + self.into_service().into_make_service() + } + + #[cfg(feature = "tokio")] + fn into_make_service_with_connect_info( + self, + ) -> IntoMakeServiceWithConnectInfo, C> { + self.into_service().into_make_service_with_connect_info() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{body, extract::State, test_helpers::*}; + use http::StatusCode; + use std::time::Duration; + use tower_http::{ + compression::CompressionLayer, limit::RequestBodyLimitLayer, + map_request_body::MapRequestBodyLayer, map_response_body::MapResponseBodyLayer, + timeout::TimeoutLayer, + }; + + #[crate::test] + async fn handler_into_service() { + async fn handle(body: String) -> impl IntoResponse { + format!("you said: {body}") + } + + let client = TestClient::new(handle.into_service()); + + let res = client.post("/").body("hi there!").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "you said: hi there!"); + } + + #[crate::test] + async fn with_layer_that_changes_request_body_and_state() { + async fn handle(State(state): State<&'static str>) -> &'static str { + state + } + + let svc = handle + .layer(( + RequestBodyLimitLayer::new(1024), + TimeoutLayer::new(Duration::from_secs(10)), + MapResponseBodyLayer::new(body::boxed), + CompressionLayer::new(), + )) + .layer(MapRequestBodyLayer::new(body::boxed)) + .with_state("foo"); + + let client = TestClient::new(svc); + let res = client.get("/").send().await; + assert_eq!(res.text().await, "foo"); + } +} diff --git 
a/.cargo-vendor/axum-0.6.20/src/handler/service.rs b/.cargo-vendor/axum-0.6.20/src/handler/service.rs new file mode 100644 index 0000000000..52fd5de67d --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/handler/service.rs @@ -0,0 +1,176 @@ +use super::Handler; +#[cfg(feature = "tokio")] +use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; +use crate::response::Response; +use crate::routing::IntoMakeService; +use http::Request; +use std::{ + convert::Infallible, + fmt, + marker::PhantomData, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// An adapter that makes a [`Handler`] into a [`Service`]. +/// +/// Created with [`Handler::with_state`] or [`HandlerWithoutStateExt::into_service`]. +/// +/// [`HandlerWithoutStateExt::into_service`]: super::HandlerWithoutStateExt::into_service +pub struct HandlerService { + handler: H, + state: S, + _marker: PhantomData (T, B)>, +} + +impl HandlerService { + /// Get a reference to the state. + pub fn state(&self) -> &S { + &self.state + } + + /// Convert the handler into a [`MakeService`]. + /// + /// This allows you to serve a single handler if you don't need any routing: + /// + /// ```rust + /// use axum::{ + /// Server, + /// handler::Handler, + /// extract::State, + /// http::{Uri, Method}, + /// response::IntoResponse, + /// }; + /// use std::net::SocketAddr; + /// + /// #[derive(Clone)] + /// struct AppState {} + /// + /// async fn handler(State(state): State) { + /// // ... + /// } + /// + /// let app = handler.with_state(AppState {}); + /// + /// # async { + /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) + /// .serve(app.into_make_service()) + /// .await?; + /// # Ok::<_, hyper::Error>(()) + /// # }; + /// ``` + /// + /// [`MakeService`]: tower::make::MakeService + pub fn into_make_service(self) -> IntoMakeService> { + IntoMakeService::new(self) + } + + /// Convert the handler into a [`MakeService`] which stores information + /// about the incoming connection. 
+ /// + /// See [`Router::into_make_service_with_connect_info`] for more details. + /// + /// ```rust + /// use axum::{ + /// Server, + /// handler::Handler, + /// response::IntoResponse, + /// extract::{ConnectInfo, State}, + /// }; + /// use std::net::SocketAddr; + /// + /// #[derive(Clone)] + /// struct AppState {}; + /// + /// async fn handler( + /// ConnectInfo(addr): ConnectInfo, + /// State(state): State, + /// ) -> String { + /// format!("Hello {}", addr) + /// } + /// + /// let app = handler.with_state(AppState {}); + /// + /// # async { + /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) + /// .serve(app.into_make_service_with_connect_info::()) + /// .await?; + /// # Ok::<_, hyper::Error>(()) + /// # }; + /// ``` + /// + /// [`MakeService`]: tower::make::MakeService + /// [`Router::into_make_service_with_connect_info`]: crate::routing::Router::into_make_service_with_connect_info + #[cfg(feature = "tokio")] + pub fn into_make_service_with_connect_info( + self, + ) -> IntoMakeServiceWithConnectInfo, C> { + IntoMakeServiceWithConnectInfo::new(self) + } +} + +#[test] +fn traits() { + use crate::test_helpers::*; + assert_send::>(); + assert_sync::>(); +} + +impl HandlerService { + pub(super) fn new(handler: H, state: S) -> Self { + Self { + handler, + state, + _marker: PhantomData, + } + } +} + +impl fmt::Debug for HandlerService { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("IntoService").finish_non_exhaustive() + } +} + +impl Clone for HandlerService +where + H: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + handler: self.handler.clone(), + state: self.state.clone(), + _marker: PhantomData, + } + } +} + +impl Service> for HandlerService +where + H: Handler + Clone + Send + 'static, + B: Send + 'static, + S: Clone + Send + Sync, +{ + type Response = Response; + type Error = Infallible; + type Future = super::future::IntoServiceFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> 
Poll> { + // `IntoService` can only be constructed from async functions which are always ready, or + // from `Layered` which buffers in `::call` and is therefore + // also always ready. + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + use futures_util::future::FutureExt; + + let handler = self.handler.clone(); + let future = Handler::call(handler, req, self.state.clone()); + let future = future.map(Ok as _); + + super::future::IntoServiceFuture::new(future) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/json.rs b/.cargo-vendor/axum-0.6.20/src/json.rs new file mode 100644 index 0000000000..39fd7f4280 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/json.rs @@ -0,0 +1,324 @@ +use crate::{ + body::{Bytes, HttpBody}, + extract::{rejection::*, FromRequest}, + BoxError, +}; +use async_trait::async_trait; +use axum_core::response::{IntoResponse, Response}; +use bytes::{BufMut, BytesMut}; +use http::{ + header::{self, HeaderMap, HeaderValue}, + Request, StatusCode, +}; +use serde::{de::DeserializeOwned, Serialize}; + +/// JSON Extractor / Response. +/// +/// When used as an extractor, it can deserialize request bodies into some type that +/// implements [`serde::Deserialize`]. The request will be rejected (and a [`JsonRejection`] will +/// be returned) if: +/// +/// - The request doesn't have a `Content-Type: application/json` (or similar) header. +/// - The body doesn't contain syntactically valid JSON. +/// - The body contains syntactically valid JSON but it couldn't be deserialized into the target +/// type. +/// - Buffering the request body fails. +/// +/// ⚠️ Since parsing JSON requires consuming the request body, the `Json` extractor must be +/// *last* if there are multiple extractors in a handler. +/// See ["the order of extractors"][order-of-extractors] +/// +/// [order-of-extractors]: crate::extract#the-order-of-extractors +/// +/// See [`JsonRejection`] for more details. 
+/// +/// # Extractor example +/// +/// ```rust,no_run +/// use axum::{ +/// extract, +/// routing::post, +/// Router, +/// }; +/// use serde::Deserialize; +/// +/// #[derive(Deserialize)] +/// struct CreateUser { +/// email: String, +/// password: String, +/// } +/// +/// async fn create_user(extract::Json(payload): extract::Json) { +/// // payload is a `CreateUser` +/// } +/// +/// let app = Router::new().route("/users", post(create_user)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// When used as a response, it can serialize any type that implements [`serde::Serialize`] to +/// `JSON`, and will automatically set `Content-Type: application/json` header. +/// +/// # Response example +/// +/// ``` +/// use axum::{ +/// extract::Path, +/// routing::get, +/// Router, +/// Json, +/// }; +/// use serde::Serialize; +/// use uuid::Uuid; +/// +/// #[derive(Serialize)] +/// struct User { +/// id: Uuid, +/// username: String, +/// } +/// +/// async fn get_user(Path(user_id) : Path) -> Json { +/// let user = find_user(user_id).await; +/// Json(user) +/// } +/// +/// async fn find_user(user_id: Uuid) -> User { +/// // ... 
+/// # unimplemented!() +/// } +/// +/// let app = Router::new().route("/users/:id", get(get_user)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +#[derive(Debug, Clone, Copy, Default)] +#[cfg_attr(docsrs, doc(cfg(feature = "json")))] +#[must_use] +pub struct Json(pub T); + +#[async_trait] +impl FromRequest for Json +where + T: DeserializeOwned, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into, + S: Send + Sync, +{ + type Rejection = JsonRejection; + + async fn from_request(req: Request, state: &S) -> Result { + if json_content_type(req.headers()) { + let bytes = Bytes::from_request(req, state).await?; + let deserializer = &mut serde_json::Deserializer::from_slice(&bytes); + + let value = match serde_path_to_error::deserialize(deserializer) { + Ok(value) => value, + Err(err) => { + let rejection = match err.inner().classify() { + serde_json::error::Category::Data => JsonDataError::from_err(err).into(), + serde_json::error::Category::Syntax | serde_json::error::Category::Eof => { + JsonSyntaxError::from_err(err).into() + } + serde_json::error::Category::Io => { + if cfg!(debug_assertions) { + // we don't use `serde_json::from_reader` and instead always buffer + // bodies first, so we shouldn't encounter any IO errors + unreachable!() + } else { + JsonSyntaxError::from_err(err).into() + } + } + }; + return Err(rejection); + } + }; + + Ok(Json(value)) + } else { + Err(MissingJsonContentType.into()) + } + } +} + +fn json_content_type(headers: &HeaderMap) -> bool { + let content_type = if let Some(content_type) = headers.get(header::CONTENT_TYPE) { + content_type + } else { + return false; + }; + + let content_type = if let Ok(content_type) = content_type.to_str() { + content_type + } else { + return false; + }; + + let mime = if let Ok(mime) = content_type.parse::() { + mime + } else { + return false; + }; + + let is_json_content_type = mime.type_() == "application" + 
&& (mime.subtype() == "json" || mime.suffix().map_or(false, |name| name == "json")); + + is_json_content_type +} + +axum_core::__impl_deref!(Json); + +impl From for Json { + fn from(inner: T) -> Self { + Self(inner) + } +} + +impl IntoResponse for Json +where + T: Serialize, +{ + fn into_response(self) -> Response { + // Use a small initial capacity of 128 bytes like serde_json::to_vec + // https://docs.rs/serde_json/1.0.82/src/serde_json/ser.rs.html#2189 + let mut buf = BytesMut::with_capacity(128).writer(); + match serde_json::to_writer(&mut buf, &self.0) { + Ok(()) => ( + [( + header::CONTENT_TYPE, + HeaderValue::from_static(mime::APPLICATION_JSON.as_ref()), + )], + buf.into_inner().freeze(), + ) + .into_response(), + Err(err) => ( + StatusCode::INTERNAL_SERVER_ERROR, + [( + header::CONTENT_TYPE, + HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref()), + )], + err.to_string(), + ) + .into_response(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{routing::post, test_helpers::*, Router}; + use serde::Deserialize; + use serde_json::{json, Value}; + + #[crate::test] + async fn deserialize_body() { + #[derive(Debug, Deserialize)] + struct Input { + foo: String, + } + + let app = Router::new().route("/", post(|input: Json| async { input.0.foo })); + + let client = TestClient::new(app); + let res = client.post("/").json(&json!({ "foo": "bar" })).send().await; + let body = res.text().await; + + assert_eq!(body, "bar"); + } + + #[crate::test] + async fn consume_body_to_json_requires_json_content_type() { + #[derive(Debug, Deserialize)] + struct Input { + foo: String, + } + + let app = Router::new().route("/", post(|input: Json| async { input.0.foo })); + + let client = TestClient::new(app); + let res = client.post("/").body(r#"{ "foo": "bar" }"#).send().await; + + let status = res.status(); + + assert_eq!(status, StatusCode::UNSUPPORTED_MEDIA_TYPE); + } + + #[crate::test] + async fn json_content_types() { + async fn 
valid_json_content_type(content_type: &str) -> bool { + println!("testing {content_type:?}"); + + let app = Router::new().route("/", post(|Json(_): Json| async {})); + + let res = TestClient::new(app) + .post("/") + .header("content-type", content_type) + .body("{}") + .send() + .await; + + res.status() == StatusCode::OK + } + + assert!(valid_json_content_type("application/json").await); + assert!(valid_json_content_type("application/json; charset=utf-8").await); + assert!(valid_json_content_type("application/json;charset=utf-8").await); + assert!(valid_json_content_type("application/cloudevents+json").await); + assert!(!valid_json_content_type("text/json").await); + } + + #[crate::test] + async fn invalid_json_syntax() { + let app = Router::new().route("/", post(|_: Json| async {})); + + let client = TestClient::new(app); + let res = client + .post("/") + .body("{") + .header("content-type", "application/json") + .send() + .await; + + assert_eq!(res.status(), StatusCode::BAD_REQUEST); + } + + #[derive(Deserialize)] + struct Foo { + #[allow(dead_code)] + a: i32, + #[allow(dead_code)] + b: Vec, + } + + #[derive(Deserialize)] + struct Bar { + #[allow(dead_code)] + x: i32, + #[allow(dead_code)] + y: i32, + } + + #[crate::test] + async fn invalid_json_data() { + let app = Router::new().route("/", post(|_: Json| async {})); + + let client = TestClient::new(app); + let res = client + .post("/") + .body("{\"a\": 1, \"b\": [{\"x\": 2}]}") + .header("content-type", "application/json") + .send() + .await; + + assert_eq!(res.status(), StatusCode::UNPROCESSABLE_ENTITY); + let body_text = res.text().await; + assert_eq!( + body_text, + "Failed to deserialize the JSON body into the target type: b[0]: missing field `y` at line 1 column 23" + ); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/lib.rs b/.cargo-vendor/axum-0.6.20/src/lib.rs new file mode 100644 index 0000000000..da60aef5ae --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/lib.rs @@ -0,0 +1,502 @@ 
+#![cfg_attr(nightly_error_messages, feature(rustc_attrs))] +//! axum is a web application framework that focuses on ergonomics and modularity. +//! +//! # Table of contents +//! +//! - [High-level features](#high-level-features) +//! - [Compatibility](#compatibility) +//! - [Example](#example) +//! - [Routing](#routing) +//! - [Handlers](#handlers) +//! - [Extractors](#extractors) +//! - [Responses](#responses) +//! - [Error handling](#error-handling) +//! - [Middleware](#middleware) +//! - [Sharing state with handlers](#sharing-state-with-handlers) +//! - [Building integrations for axum](#building-integrations-for-axum) +//! - [Required dependencies](#required-dependencies) +//! - [Examples](#examples) +//! - [Feature flags](#feature-flags) +//! +//! # High-level features +//! +//! - Route requests to handlers with a macro-free API. +//! - Declaratively parse requests using extractors. +//! - Simple and predictable error handling model. +//! - Generate responses with minimal boilerplate. +//! - Take full advantage of the [`tower`] and [`tower-http`] ecosystem of +//! middleware, services, and utilities. +//! +//! In particular, the last point is what sets `axum` apart from other frameworks. +//! `axum` doesn't have its own middleware system but instead uses +//! [`tower::Service`]. This means `axum` gets timeouts, tracing, compression, +//! authorization, and more, for free. It also enables you to share middleware with +//! applications written using [`hyper`] or [`tonic`]. +//! +//! # Compatibility +//! +//! axum is designed to work with [tokio] and [hyper]. Runtime and +//! transport layer independence is not a goal, at least for the time being. +//! +//! # Example +//! +//! The "Hello, World!" of axum is: +//! +//! ```rust,no_run +//! use axum::{ +//! routing::get, +//! Router, +//! }; +//! +//! #[tokio::main] +//! async fn main() { +//! // build our application with a single route +//! let app = Router::new().route("/", get(|| async { "Hello, World!" 
})); +//! +//! // run it with hyper on localhost:3000 +//! axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) +//! .serve(app.into_make_service()) +//! .await +//! .unwrap(); +//! } +//! ``` +//! +//! Note using `#[tokio::main]` requires you enable tokio's `macros` and `rt-multi-thread` features +//! or just `full` to enable all features (`cargo add tokio --features macros,rt-multi-thread`). +//! +//! # Routing +//! +//! [`Router`] is used to setup which paths goes to which services: +//! +//! ```rust +//! use axum::{Router, routing::get}; +//! +//! // our router +//! let app = Router::new() +//! .route("/", get(root)) +//! .route("/foo", get(get_foo).post(post_foo)) +//! .route("/foo/bar", get(foo_bar)); +//! +//! // which calls one of these handlers +//! async fn root() {} +//! async fn get_foo() {} +//! async fn post_foo() {} +//! async fn foo_bar() {} +//! # async { +//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! ``` +//! +//! See [`Router`] for more details on routing. +//! +//! # Handlers +//! +#![doc = include_str!("docs/handlers_intro.md")] +//! +//! See [`handler`](crate::handler) for more details on handlers. +//! +//! # Extractors +//! +//! An extractor is a type that implements [`FromRequest`] or [`FromRequestParts`]. Extractors are +//! how you pick apart the incoming request to get the parts your handler needs. +//! +//! ```rust +//! use axum::extract::{Path, Query, Json}; +//! use std::collections::HashMap; +//! +//! // `Path` gives you the path parameters and deserializes them. +//! async fn path(Path(user_id): Path) {} +//! +//! // `Query` gives you the query parameters and deserializes them. +//! async fn query(Query(params): Query>) {} +//! +//! // Buffer the request body and deserialize it as JSON into a +//! // `serde_json::Value`. `Json` supports any type that implements +//! // `serde::Deserialize`. +//! async fn json(Json(payload): Json) {} +//! ``` +//! +//! 
See [`extract`](crate::extract) for more details on extractors. +//! +//! # Responses +//! +//! Anything that implements [`IntoResponse`] can be returned from handlers. +//! +//! ```rust,no_run +//! use axum::{ +//! body::Body, +//! routing::get, +//! response::Json, +//! Router, +//! }; +//! use serde_json::{Value, json}; +//! +//! // `&'static str` becomes a `200 OK` with `content-type: text/plain; charset=utf-8` +//! async fn plain_text() -> &'static str { +//! "foo" +//! } +//! +//! // `Json` gives a content-type of `application/json` and works with any type +//! // that implements `serde::Serialize` +//! async fn json() -> Json { +//! Json(json!({ "data": 42 })) +//! } +//! +//! let app = Router::new() +//! .route("/plain_text", get(plain_text)) +//! .route("/json", get(json)); +//! # async { +//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! ``` +//! +//! See [`response`](crate::response) for more details on building responses. +//! +//! # Error handling +//! +//! axum aims to have a simple and predictable error handling model. That means +//! it is simple to convert errors into responses and you are guaranteed that +//! all errors are handled. +//! +//! See [`error_handling`](crate::error_handling) for more details on axum's +//! error handling model and how to handle errors gracefully. +//! +//! # Middleware +//! +//! There are several different ways to write middleware for axum. See +//! [`middleware`](crate::middleware) for more details. +//! +//! # Sharing state with handlers +//! +//! It is common to share some state between handlers. For example, a +//! pool of database connections or clients to other services may need to +//! be shared. +//! +//! The three most common ways of doing that are: +//! - Using the [`State`] extractor +//! - Using request extensions +//! - Using closure captures +//! +//! ## Using the [`State`] extractor +//! +//! ```rust,no_run +//! use axum::{ +//! 
extract::State, +//! routing::get, +//! Router, +//! }; +//! use std::sync::Arc; +//! +//! struct AppState { +//! // ... +//! } +//! +//! let shared_state = Arc::new(AppState { /* ... */ }); +//! +//! let app = Router::new() +//! .route("/", get(handler)) +//! .with_state(shared_state); +//! +//! async fn handler( +//! State(state): State>, +//! ) { +//! // ... +//! } +//! # async { +//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! ``` +//! +//! You should prefer using [`State`] if possible since it's more type safe. The downside is that +//! it's less dynamic than request extensions. +//! +//! See [`State`] for more details about accessing state. +//! +//! ## Using request extensions +//! +//! Another way to extract state in handlers is using [`Extension`](crate::extract::Extension) as +//! layer and extractor: +//! +//! ```rust,no_run +//! use axum::{ +//! extract::Extension, +//! routing::get, +//! Router, +//! }; +//! use std::sync::Arc; +//! +//! struct AppState { +//! // ... +//! } +//! +//! let shared_state = Arc::new(AppState { /* ... */ }); +//! +//! let app = Router::new() +//! .route("/", get(handler)) +//! .layer(Extension(shared_state)); +//! +//! async fn handler( +//! Extension(state): Extension>, +//! ) { +//! // ... +//! } +//! # async { +//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! ``` +//! +//! The downside to this approach is that you'll get runtime errors +//! (specifically a `500 Internal Server Error` response) if you try and extract +//! an extension that doesn't exist, perhaps because you forgot to add the +//! middleware or because you're extracting the wrong type. +//! +//! ## Using closure captures +//! +//! State can also be passed directly to handlers using closure captures: +//! +//! ```rust,no_run +//! use axum::{ +//! Json, +//! extract::{Extension, Path}, +//! routing::{get, post}, +//! Router, +//! }; +//! 
use std::sync::Arc; +//! use serde::Deserialize; +//! +//! struct AppState { +//! // ... +//! } +//! +//! let shared_state = Arc::new(AppState { /* ... */ }); +//! +//! let app = Router::new() +//! .route( +//! "/users", +//! post({ +//! let shared_state = Arc::clone(&shared_state); +//! move |body| create_user(body, shared_state) +//! }), +//! ) +//! .route( +//! "/users/:id", +//! get({ +//! let shared_state = Arc::clone(&shared_state); +//! move |path| get_user(path, shared_state) +//! }), +//! ); +//! +//! async fn get_user(Path(user_id): Path, state: Arc) { +//! // ... +//! } +//! +//! async fn create_user(Json(payload): Json, state: Arc) { +//! // ... +//! } +//! +//! #[derive(Deserialize)] +//! struct CreateUserPayload { +//! // ... +//! } +//! # async { +//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! ``` +//! +//! The downside to this approach is that it's a little more verbose than using +//! [`State`] or extensions. +//! +//! # Building integrations for axum +//! +//! Libraries authors that want to provide [`FromRequest`], [`FromRequestParts`], or +//! [`IntoResponse`] implementations should depend on the [`axum-core`] crate, instead of `axum` if +//! possible. [`axum-core`] contains core types and traits and is less likely to receive breaking +//! changes. +//! +//! # Required dependencies +//! +//! To use axum there are a few dependencies you have to pull in as well: +//! +//! ```toml +//! [dependencies] +//! axum = "" +//! hyper = { version = "", features = ["full"] } +//! tokio = { version = "", features = ["full"] } +//! tower = "" +//! ``` +//! +//! The `"full"` feature for hyper and tokio isn't strictly necessary but it's +//! the easiest way to get started. +//! +//! Note that [`hyper::Server`] is re-exported by axum so if that's all you need +//! then you don't have to explicitly depend on hyper. +//! +//! Tower isn't strictly necessary either but helpful for testing. See the +//! 
testing example in the repo to learn more about testing axum apps. +//! +//! # Examples +//! +//! The axum repo contains [a number of examples][examples] that show how to put all the +//! pieces together. +//! +//! # Feature flags +//! +//! axum uses a set of [feature flags] to reduce the amount of compiled and +//! optional dependencies. +//! +//! The following optional features are available: +//! +//! Name | Description | Default? +//! ---|---|--- +//! `headers` | Enables extracting typed headers via [`TypedHeader`] | No +//! `http1` | Enables hyper's `http1` feature | Yes +//! `http2` | Enables hyper's `http2` feature | No +//! `json` | Enables the [`Json`] type and some similar convenience functionality | Yes +//! `macros` | Enables optional utility macros | No +//! `matched-path` | Enables capturing of every request's router path and the [`MatchedPath`] extractor | Yes +//! `multipart` | Enables parsing `multipart/form-data` requests with [`Multipart`] | No +//! `original-uri` | Enables capturing of every request's original URI and the [`OriginalUri`] extractor | Yes +//! `tokio` | Enables `tokio` as a dependency and `axum::Server`, `SSE` and `extract::connect_info` types. | Yes +//! `tower-log` | Enables `tower`'s `log` feature | Yes +//! `tracing` | Log rejections from built-in extractors | No +//! `ws` | Enables WebSockets support via [`extract::ws`] | No +//! `form` | Enables the `Form` extractor | Yes +//! `query` | Enables the `Query` extractor | Yes +//! +//! [`TypedHeader`]: crate::extract::TypedHeader +//! [`MatchedPath`]: crate::extract::MatchedPath +//! [`Multipart`]: crate::extract::Multipart +//! [`OriginalUri`]: crate::extract::OriginalUri +//! [`tower`]: https://crates.io/crates/tower +//! [`tower-http`]: https://crates.io/crates/tower-http +//! [`tokio`]: http://crates.io/crates/tokio +//! [`hyper`]: http://crates.io/crates/hyper +//! [`tonic`]: http://crates.io/crates/tonic +//! 
[feature flags]: https://doc.rust-lang.org/cargo/reference/features.html#the-features-section +//! [`IntoResponse`]: crate::response::IntoResponse +//! [`Timeout`]: tower::timeout::Timeout +//! [examples]: https://github.com/tokio-rs/axum/tree/main/examples +//! [`Router::merge`]: crate::routing::Router::merge +//! [`axum::Server`]: hyper::server::Server +//! [`Service`]: tower::Service +//! [`Service::poll_ready`]: tower::Service::poll_ready +//! [`Service`'s]: tower::Service +//! [`tower::Service`]: tower::Service +//! [tower-guides]: https://github.com/tower-rs/tower/tree/master/guides +//! [`Uuid`]: https://docs.rs/uuid/latest/uuid/ +//! [`FromRequest`]: crate::extract::FromRequest +//! [`FromRequestParts`]: crate::extract::FromRequestParts +//! [`HeaderMap`]: http::header::HeaderMap +//! [`Request`]: http::Request +//! [customize-extractor-error]: https://github.com/tokio-rs/axum/blob/main/examples/customize-extractor-error/src/main.rs +//! [axum-macros]: https://docs.rs/axum-macros +//! [`debug_handler`]: https://docs.rs/axum-macros/latest/axum_macros/attr.debug_handler.html +//! [`Handler`]: crate::handler::Handler +//! [`Infallible`]: std::convert::Infallible +//! [load shed]: tower::load_shed +//! [`axum-core`]: http://crates.io/crates/axum-core +//! 
[`State`]: crate::extract::State + +#![warn( + clippy::all, + clippy::todo, + clippy::empty_enum, + clippy::enum_glob_use, + clippy::mem_forget, + clippy::unused_self, + clippy::filter_map_next, + clippy::needless_continue, + clippy::needless_borrow, + clippy::match_wildcard_for_single_variants, + clippy::if_let_mutex, + clippy::mismatched_target_os, + clippy::await_holding_lock, + clippy::match_on_vec_items, + clippy::imprecise_flops, + clippy::suboptimal_flops, + clippy::lossy_float_literal, + clippy::rest_pat_in_fully_bound_structs, + clippy::fn_params_excessive_bools, + clippy::exit, + clippy::inefficient_to_string, + clippy::linkedlist, + clippy::macro_use_imports, + clippy::option_option, + clippy::verbose_file_reads, + clippy::unnested_or_patterns, + clippy::str_to_string, + rust_2018_idioms, + future_incompatible, + nonstandard_style, + missing_debug_implementations, + missing_docs +)] +#![deny(unreachable_pub, private_in_public)] +#![allow(elided_lifetimes_in_paths, clippy::type_complexity)] +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))] +#![cfg_attr(test, allow(clippy::float_cmp))] +#![cfg_attr(not(test), warn(clippy::print_stdout, clippy::dbg_macro))] + +#[macro_use] +pub(crate) mod macros; + +mod boxed; +mod extension; +#[cfg(feature = "form")] +mod form; +#[cfg(feature = "json")] +mod json; +mod service_ext; +#[cfg(feature = "headers")] +mod typed_header; +mod util; + +pub mod body; +pub mod error_handling; +pub mod extract; +pub mod handler; +pub mod middleware; +pub mod response; +pub mod routing; + +#[cfg(test)] +mod test_helpers; + +#[doc(no_inline)] +pub use async_trait::async_trait; +#[cfg(feature = "headers")] +#[doc(no_inline)] +pub use headers; +#[doc(no_inline)] +pub use http; +#[cfg(feature = "tokio")] +#[doc(no_inline)] +pub use hyper::Server; + +#[doc(inline)] +pub use self::extension::Extension; +#[doc(inline)] +#[cfg(feature = "json")] +pub use self::json::Json; +#[doc(inline)] +pub use 
self::routing::Router; + +#[doc(inline)] +#[cfg(feature = "headers")] +pub use self::typed_header::TypedHeader; + +#[doc(inline)] +#[cfg(feature = "form")] +pub use self::form::Form; + +#[doc(inline)] +pub use axum_core::{BoxError, Error, RequestExt, RequestPartsExt}; + +#[cfg(feature = "macros")] +pub use axum_macros::debug_handler; + +pub use self::service_ext::ServiceExt; + +#[cfg(test)] +use axum_macros::__private_axum_test as test; diff --git a/.cargo-vendor/axum-0.6.20/src/macros.rs b/.cargo-vendor/axum-0.6.20/src/macros.rs new file mode 100644 index 0000000000..180c3c05a5 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/macros.rs @@ -0,0 +1,68 @@ +//! Internal macros + +macro_rules! opaque_future { + ($(#[$m:meta])* pub type $name:ident = $actual:ty;) => { + opaque_future! { + $(#[$m])* + pub type $name<> = $actual; + } + }; + + ($(#[$m:meta])* pub type $name:ident<$($param:ident),*> = $actual:ty;) => { + pin_project_lite::pin_project! { + $(#[$m])* + pub struct $name<$($param),*> { + #[pin] future: $actual, + } + } + + impl<$($param),*> $name<$($param),*> { + pub(crate) fn new(future: $actual) -> Self { + Self { future } + } + } + + impl<$($param),*> std::fmt::Debug for $name<$($param),*> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct(stringify!($name)).finish_non_exhaustive() + } + } + + impl<$($param),*> std::future::Future for $name<$($param),*> + where + $actual: std::future::Future, + { + type Output = <$actual as std::future::Future>::Output; + + #[inline] + fn poll( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + self.project().future.poll(cx) + } + } + }; +} + +#[rustfmt::skip] +macro_rules! 
all_the_tuples { + ($name:ident) => { + $name!([], T1); + $name!([T1], T2); + $name!([T1, T2], T3); + $name!([T1, T2, T3], T4); + $name!([T1, T2, T3, T4], T5); + $name!([T1, T2, T3, T4, T5], T6); + $name!([T1, T2, T3, T4, T5, T6], T7); + $name!([T1, T2, T3, T4, T5, T6, T7], T8); + $name!([T1, T2, T3, T4, T5, T6, T7, T8], T9); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9], T10); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10], T11); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11], T12); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12], T13); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13], T14); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14], T15); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15], T16); + }; +} diff --git a/.cargo-vendor/axum-0.6.20/src/middleware/from_extractor.rs b/.cargo-vendor/axum-0.6.20/src/middleware/from_extractor.rs new file mode 100644 index 0000000000..8c9a24833f --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/middleware/from_extractor.rs @@ -0,0 +1,392 @@ +use crate::{ + extract::FromRequestParts, + response::{IntoResponse, Response}, +}; +use futures_util::{future::BoxFuture, ready}; +use http::Request; +use pin_project_lite::pin_project; +use std::{ + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Create a middleware from an extractor. +/// +/// If the extractor succeeds the value will be discarded and the inner service +/// will be called. If the extractor fails the rejection will be returned and +/// the inner service will _not_ be called. +/// +/// This can be used to perform validation of requests if the validation doesn't +/// produce any useful output, and run the extractor for several handlers +/// without repeating it in the function signature. 
+/// +/// Note that if the extractor consumes the request body, as `String` or +/// [`Bytes`] does, an empty body will be left in its place. Thus wont be +/// accessible to subsequent extractors or handlers. +/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// extract::FromRequestParts, +/// middleware::from_extractor, +/// routing::{get, post}, +/// Router, +/// http::{header, StatusCode, request::Parts}, +/// }; +/// use async_trait::async_trait; +/// +/// // An extractor that performs authorization. +/// struct RequireAuth; +/// +/// #[async_trait] +/// impl FromRequestParts for RequireAuth +/// where +/// S: Send + Sync, +/// { +/// type Rejection = StatusCode; +/// +/// async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { +/// let auth_header = parts +/// .headers +/// .get(header::AUTHORIZATION) +/// .and_then(|value| value.to_str().ok()); +/// +/// match auth_header { +/// Some(auth_header) if token_is_valid(auth_header) => { +/// Ok(Self) +/// } +/// _ => Err(StatusCode::UNAUTHORIZED), +/// } +/// } +/// } +/// +/// fn token_is_valid(token: &str) -> bool { +/// // ... +/// # false +/// } +/// +/// async fn handler() { +/// // If we get here the request has been authorized +/// } +/// +/// async fn other_handler() { +/// // If we get here the request has been authorized +/// } +/// +/// let app = Router::new() +/// .route("/", get(handler)) +/// .route("/foo", post(other_handler)) +/// // The extractor will run before all routes +/// .route_layer(from_extractor::()); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// [`Bytes`]: bytes::Bytes +pub fn from_extractor() -> FromExtractorLayer { + from_extractor_with_state(()) +} + +/// Create a middleware from an extractor with the given state. +/// +/// See [`State`](crate::extract::State) for more details about accessing state. 
+pub fn from_extractor_with_state(state: S) -> FromExtractorLayer { + FromExtractorLayer { + state, + _marker: PhantomData, + } +} + +/// [`Layer`] that applies [`FromExtractor`] that runs an extractor and +/// discards the value. +/// +/// See [`from_extractor`] for more details. +/// +/// [`Layer`]: tower::Layer +#[must_use] +pub struct FromExtractorLayer { + state: S, + _marker: PhantomData E>, +} + +impl Clone for FromExtractorLayer +where + S: Clone, +{ + fn clone(&self) -> Self { + Self { + state: self.state.clone(), + _marker: PhantomData, + } + } +} + +impl fmt::Debug for FromExtractorLayer +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FromExtractorLayer") + .field("state", &self.state) + .field("extractor", &format_args!("{}", std::any::type_name::())) + .finish() + } +} + +impl Layer for FromExtractorLayer +where + S: Clone, +{ + type Service = FromExtractor; + + fn layer(&self, inner: T) -> Self::Service { + FromExtractor { + inner, + state: self.state.clone(), + _extractor: PhantomData, + } + } +} + +/// Middleware that runs an extractor and discards the value. +/// +/// See [`from_extractor`] for more details. 
+pub struct FromExtractor { + inner: T, + state: S, + _extractor: PhantomData E>, +} + +#[test] +fn traits() { + use crate::test_helpers::*; + assert_send::>(); + assert_sync::>(); +} + +impl Clone for FromExtractor +where + T: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + state: self.state.clone(), + _extractor: PhantomData, + } + } +} + +impl fmt::Debug for FromExtractor +where + T: fmt::Debug, + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FromExtractor") + .field("inner", &self.inner) + .field("state", &self.state) + .field("extractor", &format_args!("{}", std::any::type_name::())) + .finish() + } +} + +impl Service> for FromExtractor +where + E: FromRequestParts + 'static, + B: Send + 'static, + T: Service> + Clone, + T::Response: IntoResponse, + S: Clone + Send + Sync + 'static, +{ + type Response = Response; + type Error = T::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let state = self.state.clone(); + let extract_future = Box::pin(async move { + let (mut parts, body) = req.into_parts(); + let extracted = E::from_request_parts(&mut parts, &state).await; + let req = Request::from_parts(parts, body); + (req, extracted) + }); + + ResponseFuture { + state: State::Extracting { + future: extract_future, + }, + svc: Some(self.inner.clone()), + } + } +} + +pin_project! { + /// Response future for [`FromExtractor`]. + #[allow(missing_debug_implementations)] + pub struct ResponseFuture + where + E: FromRequestParts, + T: Service>, + { + #[pin] + state: State, + svc: Option, + } +} + +pin_project! 
{ + #[project = StateProj] + enum State + where + E: FromRequestParts, + T: Service>, + { + Extracting { + future: BoxFuture<'static, (Request, Result)>, + }, + Call { #[pin] future: T::Future }, + } +} + +impl Future for ResponseFuture +where + E: FromRequestParts, + T: Service>, + T::Response: IntoResponse, +{ + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + loop { + let mut this = self.as_mut().project(); + + let new_state = match this.state.as_mut().project() { + StateProj::Extracting { future } => { + let (req, extracted) = ready!(future.as_mut().poll(cx)); + + match extracted { + Ok(_) => { + let mut svc = this.svc.take().expect("future polled after completion"); + let future = svc.call(req); + State::Call { future } + } + Err(err) => { + let res = err.into_response(); + return Poll::Ready(Ok(res)); + } + } + } + StateProj::Call { future } => { + return future + .poll(cx) + .map(|result| result.map(IntoResponse::into_response)); + } + }; + + this.state.set(new_state); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{async_trait, handler::Handler, routing::get, test_helpers::*, Router}; + use axum_core::extract::FromRef; + use http::{header, request::Parts, StatusCode}; + use tower_http::limit::RequestBodyLimitLayer; + + #[crate::test] + async fn test_from_extractor() { + #[derive(Clone)] + struct Secret(&'static str); + + struct RequireAuth; + + #[async_trait::async_trait] + impl FromRequestParts for RequireAuth + where + S: Send + Sync, + Secret: FromRef, + { + type Rejection = StatusCode; + + async fn from_request_parts( + parts: &mut Parts, + state: &S, + ) -> Result { + let Secret(secret) = Secret::from_ref(state); + if let Some(auth) = parts + .headers + .get(header::AUTHORIZATION) + .and_then(|v| v.to_str().ok()) + { + if auth == secret { + return Ok(Self); + } + } + + Err(StatusCode::UNAUTHORIZED) + } + } + + async fn handler() {} + + let state = Secret("secret"); + let app = 
Router::new().route( + "/", + get(handler.layer(from_extractor_with_state::(state))), + ); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + + let res = client + .get("/") + .header(http::header::AUTHORIZATION, "secret") + .send() + .await; + assert_eq!(res.status(), StatusCode::OK); + } + + // just needs to compile + #[allow(dead_code)] + fn works_with_request_body_limit() { + struct MyExtractor; + + #[async_trait] + impl FromRequestParts for MyExtractor + where + S: Send + Sync, + { + type Rejection = std::convert::Infallible; + + async fn from_request_parts( + _parts: &mut Parts, + _state: &S, + ) -> Result { + unimplemented!() + } + } + + let _: Router = Router::new() + .layer(from_extractor::()) + .layer(RequestBodyLimitLayer::new(1)); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/middleware/from_fn.rs b/.cargo-vendor/axum-0.6.20/src/middleware/from_fn.rs new file mode 100644 index 0000000000..f380a580ad --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/middleware/from_fn.rs @@ -0,0 +1,423 @@ +use crate::response::{IntoResponse, Response}; +use axum_core::extract::{FromRequest, FromRequestParts}; +use futures_util::future::BoxFuture; +use http::Request; +use std::{ + any::type_name, + convert::Infallible, + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; +use tower::{util::BoxCloneService, ServiceBuilder}; +use tower_layer::Layer; +use tower_service::Service; + +/// Create a middleware from an async function. +/// +/// `from_fn` requires the function given to +/// +/// 1. Be an `async fn`. +/// 2. Take one or more [extractors] as the first arguments. +/// 3. Take [`Next`](Next) as the final argument. +/// 4. Return something that implements [`IntoResponse`]. +/// +/// Note that this function doesn't support extracting [`State`]. For that, use [`from_fn_with_state`]. 
+/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// Router, +/// http::{self, Request}, +/// routing::get, +/// response::Response, +/// middleware::{self, Next}, +/// }; +/// +/// async fn my_middleware( +/// request: Request, +/// next: Next, +/// ) -> Response { +/// // do something with `request`... +/// +/// let response = next.run(request).await; +/// +/// // do something with `response`... +/// +/// response +/// } +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .layer(middleware::from_fn(my_middleware)); +/// # let app: Router = app; +/// ``` +/// +/// # Running extractors +/// +/// ```rust +/// use axum::{ +/// Router, +/// extract::TypedHeader, +/// http::StatusCode, +/// headers::authorization::{Authorization, Bearer}, +/// http::Request, +/// middleware::{self, Next}, +/// response::Response, +/// routing::get, +/// }; +/// +/// async fn auth( +/// // run the `TypedHeader` extractor +/// TypedHeader(auth): TypedHeader>, +/// // you can also add more extractors here but the last +/// // extractor must implement `FromRequest` which +/// // `Request` does +/// request: Request, +/// next: Next, +/// ) -> Result { +/// if token_is_valid(auth.token()) { +/// let response = next.run(request).await; +/// Ok(response) +/// } else { +/// Err(StatusCode::UNAUTHORIZED) +/// } +/// } +/// +/// fn token_is_valid(token: &str) -> bool { +/// // ... +/// # false +/// } +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .route_layer(middleware::from_fn(auth)); +/// # let app: Router = app; +/// ``` +/// +/// [extractors]: crate::extract::FromRequest +/// [`State`]: crate::extract::State +pub fn from_fn(f: F) -> FromFnLayer { + from_fn_with_state((), f) +} + +/// Create a middleware from an async function with the given state. +/// +/// See [`State`](crate::extract::State) for more details about accessing state. 
+/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// Router, +/// http::{Request, StatusCode}, +/// routing::get, +/// response::{IntoResponse, Response}, +/// middleware::{self, Next}, +/// extract::State, +/// }; +/// +/// #[derive(Clone)] +/// struct AppState { /* ... */ } +/// +/// async fn my_middleware( +/// State(state): State, +/// // you can add more extractors here but the last +/// // extractor must implement `FromRequest` which +/// // `Request` does +/// request: Request, +/// next: Next, +/// ) -> Response { +/// // do something with `request`... +/// +/// let response = next.run(request).await; +/// +/// // do something with `response`... +/// +/// response +/// } +/// +/// let state = AppState { /* ... */ }; +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .route_layer(middleware::from_fn_with_state(state.clone(), my_middleware)) +/// .with_state(state); +/// # let _: axum::Router = app; +/// ``` +pub fn from_fn_with_state(state: S, f: F) -> FromFnLayer { + FromFnLayer { + f, + state, + _extractor: PhantomData, + } +} + +/// A [`tower::Layer`] from an async function. +/// +/// [`tower::Layer`] is used to apply middleware to [`Router`](crate::Router)'s. +/// +/// Created with [`from_fn`]. See that function for more details. 
+#[must_use] +pub struct FromFnLayer { + f: F, + state: S, + _extractor: PhantomData T>, +} + +impl Clone for FromFnLayer +where + F: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + f: self.f.clone(), + state: self.state.clone(), + _extractor: self._extractor, + } + } +} + +impl Layer for FromFnLayer +where + F: Clone, + S: Clone, +{ + type Service = FromFn; + + fn layer(&self, inner: I) -> Self::Service { + FromFn { + f: self.f.clone(), + state: self.state.clone(), + inner, + _extractor: PhantomData, + } + } +} + +impl fmt::Debug for FromFnLayer +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FromFnLayer") + // Write out the type name, without quoting it as `&type_name::()` would + .field("f", &format_args!("{}", type_name::())) + .field("state", &self.state) + .finish() + } +} + +/// A middleware created from an async function. +/// +/// Created with [`from_fn`]. See that function for more details. +pub struct FromFn { + f: F, + inner: I, + state: S, + _extractor: PhantomData T>, +} + +impl Clone for FromFn +where + F: Clone, + I: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + f: self.f.clone(), + inner: self.inner.clone(), + state: self.state.clone(), + _extractor: self._extractor, + } + } +} + +macro_rules! 
impl_service { + ( + [$($ty:ident),*], $last:ident + ) => { + #[allow(non_snake_case, unused_mut)] + impl Service> for FromFn + where + F: FnMut($($ty,)* $last, Next) -> Fut + Clone + Send + 'static, + $( $ty: FromRequestParts + Send, )* + $last: FromRequest + Send, + Fut: Future + Send + 'static, + Out: IntoResponse + 'static, + I: Service, Error = Infallible> + + Clone + + Send + + 'static, + I::Response: IntoResponse, + I::Future: Send + 'static, + B: Send + 'static, + S: Clone + Send + Sync + 'static, + { + type Response = Response; + type Error = Infallible; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let not_ready_inner = self.inner.clone(); + let ready_inner = std::mem::replace(&mut self.inner, not_ready_inner); + + let mut f = self.f.clone(); + let state = self.state.clone(); + + let future = Box::pin(async move { + let (mut parts, body) = req.into_parts(); + + $( + let $ty = match $ty::from_request_parts(&mut parts, &state).await { + Ok(value) => value, + Err(rejection) => return rejection.into_response(), + }; + )* + + let req = Request::from_parts(parts, body); + + let $last = match $last::from_request(req, &state).await { + Ok(value) => value, + Err(rejection) => return rejection.into_response(), + }; + + let inner = ServiceBuilder::new() + .boxed_clone() + .map_response(IntoResponse::into_response) + .service(ready_inner); + let next = Next { inner }; + + f($($ty,)* $last, next).await.into_response() + }); + + ResponseFuture { + inner: future + } + } + } + }; +} + +all_the_tuples!(impl_service); + +impl fmt::Debug for FromFn +where + S: fmt::Debug, + I: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FromFnLayer") + .field("f", &format_args!("{}", type_name::())) + .field("inner", &self.inner) + .field("state", &self.state) + .finish() + } +} + +/// The remainder of a 
middleware stack, including the handler. +pub struct Next { + inner: BoxCloneService, Response, Infallible>, +} + +impl Next { + /// Execute the remaining middleware stack. + pub async fn run(mut self, req: Request) -> Response { + match self.inner.call(req).await { + Ok(res) => res, + Err(err) => match err {}, + } + } +} + +impl fmt::Debug for Next { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FromFnLayer") + .field("inner", &self.inner) + .finish() + } +} + +impl Clone for Next { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl Service> for Next { + type Response = Response; + type Error = Infallible; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.inner.call(req) + } +} + +/// Response future for [`FromFn`]. +pub struct ResponseFuture { + inner: BoxFuture<'static, Response>, +} + +impl Future for ResponseFuture { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.inner.as_mut().poll(cx).map(Ok) + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ResponseFuture").finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{body::Body, routing::get, Router}; + use http::{HeaderMap, StatusCode}; + use tower::ServiceExt; + + #[crate::test] + async fn basic() { + async fn insert_header(mut req: Request, next: Next) -> impl IntoResponse { + req.headers_mut() + .insert("x-axum-test", "ok".parse().unwrap()); + + next.run(req).await + } + + async fn handle(headers: HeaderMap) -> String { + headers["x-axum-test"].to_str().unwrap().to_owned() + } + + let app = Router::new() + .route("/", get(handle)) + .layer(from_fn(insert_header)); + + let res = app + .oneshot(Request::builder().uri("/").body(Body::empty()).unwrap()) + .await + 
.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let body = hyper::body::to_bytes(res).await.unwrap(); + assert_eq!(&body[..], b"ok"); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/middleware/map_request.rs b/.cargo-vendor/axum-0.6.20/src/middleware/map_request.rs new file mode 100644 index 0000000000..5d1801ac7c --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/middleware/map_request.rs @@ -0,0 +1,434 @@ +use crate::response::{IntoResponse, Response}; +use axum_core::extract::{FromRequest, FromRequestParts}; +use futures_util::future::BoxFuture; +use http::Request; +use std::{ + any::type_name, + convert::Infallible, + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Create a middleware from an async function that transforms a request. +/// +/// This differs from [`tower::util::MapRequest`] in that it allows you to easily run axum-specific +/// extractors. +/// +/// # Example +/// +/// ``` +/// use axum::{ +/// Router, +/// routing::get, +/// middleware::map_request, +/// http::Request, +/// }; +/// +/// async fn set_header(mut request: Request) -> Request { +/// request.headers_mut().insert("x-foo", "foo".parse().unwrap()); +/// request +/// } +/// +/// async fn handler(request: Request) { +/// // `request` will have an `x-foo` header +/// } +/// +/// let app = Router::new() +/// .route("/", get(handler)) +/// .layer(map_request(set_header)); +/// # let _: Router = app; +/// ``` +/// +/// # Rejecting the request +/// +/// The function given to `map_request` is allowed to also return a `Result` which can be used to +/// reject the request and return a response immediately, without calling the remaining +/// middleware. 
+/// +/// Specifically the valid return types are: +/// +/// - `Request` +/// - `Result, E> where E: IntoResponse` +/// +/// ``` +/// use axum::{ +/// Router, +/// http::{Request, StatusCode}, +/// routing::get, +/// middleware::map_request, +/// }; +/// +/// async fn auth(request: Request) -> Result, StatusCode> { +/// let auth_header = request.headers() +/// .get(http::header::AUTHORIZATION) +/// .and_then(|header| header.to_str().ok()); +/// +/// match auth_header { +/// Some(auth_header) if token_is_valid(auth_header) => Ok(request), +/// _ => Err(StatusCode::UNAUTHORIZED), +/// } +/// } +/// +/// fn token_is_valid(token: &str) -> bool { +/// // ... +/// # false +/// } +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .route_layer(map_request(auth)); +/// # let app: Router = app; +/// ``` +/// +/// # Running extractors +/// +/// ``` +/// use axum::{ +/// Router, +/// routing::get, +/// middleware::map_request, +/// extract::Path, +/// http::Request, +/// }; +/// use std::collections::HashMap; +/// +/// async fn log_path_params( +/// Path(path_params): Path>, +/// request: Request, +/// ) -> Request { +/// tracing::debug!(?path_params); +/// request +/// } +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .layer(map_request(log_path_params)); +/// # let _: Router = app; +/// ``` +/// +/// Note that to access state you must use either [`map_request_with_state`]. +pub fn map_request(f: F) -> MapRequestLayer { + map_request_with_state((), f) +} + +/// Create a middleware from an async function that transforms a request, with the given state. +/// +/// See [`State`](crate::extract::State) for more details about accessing state. 
+/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// Router, +/// http::{Request, StatusCode}, +/// routing::get, +/// response::IntoResponse, +/// middleware::map_request_with_state, +/// extract::State, +/// }; +/// +/// #[derive(Clone)] +/// struct AppState { /* ... */ } +/// +/// async fn my_middleware( +/// State(state): State, +/// // you can add more extractors here but the last +/// // extractor must implement `FromRequest` which +/// // `Request` does +/// request: Request, +/// ) -> Request { +/// // do something with `state` and `request`... +/// request +/// } +/// +/// let state = AppState { /* ... */ }; +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .route_layer(map_request_with_state(state.clone(), my_middleware)) +/// .with_state(state); +/// # let _: axum::Router = app; +/// ``` +pub fn map_request_with_state(state: S, f: F) -> MapRequestLayer { + MapRequestLayer { + f, + state, + _extractor: PhantomData, + } +} + +/// A [`tower::Layer`] from an async function that transforms a request. +/// +/// Created with [`map_request`]. See that function for more details. 
+#[must_use] +pub struct MapRequestLayer { + f: F, + state: S, + _extractor: PhantomData T>, +} + +impl Clone for MapRequestLayer +where + F: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + f: self.f.clone(), + state: self.state.clone(), + _extractor: self._extractor, + } + } +} + +impl Layer for MapRequestLayer +where + F: Clone, + S: Clone, +{ + type Service = MapRequest; + + fn layer(&self, inner: I) -> Self::Service { + MapRequest { + f: self.f.clone(), + state: self.state.clone(), + inner, + _extractor: PhantomData, + } + } +} + +impl fmt::Debug for MapRequestLayer +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapRequestLayer") + // Write out the type name, without quoting it as `&type_name::()` would + .field("f", &format_args!("{}", type_name::())) + .field("state", &self.state) + .finish() + } +} + +/// A middleware created from an async function that transforms a request. +/// +/// Created with [`map_request`]. See that function for more details. +pub struct MapRequest { + f: F, + inner: I, + state: S, + _extractor: PhantomData T>, +} + +impl Clone for MapRequest +where + F: Clone, + I: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + f: self.f.clone(), + inner: self.inner.clone(), + state: self.state.clone(), + _extractor: self._extractor, + } + } +} + +macro_rules! 
impl_service { + ( + [$($ty:ident),*], $last:ident + ) => { + #[allow(non_snake_case, unused_mut)] + impl Service> for MapRequest + where + F: FnMut($($ty,)* $last) -> Fut + Clone + Send + 'static, + $( $ty: FromRequestParts + Send, )* + $last: FromRequest + Send, + Fut: Future + Send + 'static, + Fut::Output: IntoMapRequestResult + Send + 'static, + I: Service, Error = Infallible> + + Clone + + Send + + 'static, + I::Response: IntoResponse, + I::Future: Send + 'static, + B: Send + 'static, + S: Clone + Send + Sync + 'static, + { + type Response = Response; + type Error = Infallible; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let not_ready_inner = self.inner.clone(); + let mut ready_inner = std::mem::replace(&mut self.inner, not_ready_inner); + + let mut f = self.f.clone(); + let state = self.state.clone(); + + let future = Box::pin(async move { + let (mut parts, body) = req.into_parts(); + + $( + let $ty = match $ty::from_request_parts(&mut parts, &state).await { + Ok(value) => value, + Err(rejection) => return rejection.into_response(), + }; + )* + + let req = Request::from_parts(parts, body); + + let $last = match $last::from_request(req, &state).await { + Ok(value) => value, + Err(rejection) => return rejection.into_response(), + }; + + match f($($ty,)* $last).await.into_map_request_result() { + Ok(req) => { + ready_inner.call(req).await.into_response() + } + Err(res) => { + res + } + } + }); + + ResponseFuture { + inner: future + } + } + } + }; +} + +all_the_tuples!(impl_service); + +impl fmt::Debug for MapRequest +where + S: fmt::Debug, + I: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapRequest") + .field("f", &format_args!("{}", type_name::())) + .field("inner", &self.inner) + .field("state", &self.state) + .finish() + } +} + +/// Response future for [`MapRequest`]. 
+pub struct ResponseFuture { + inner: BoxFuture<'static, Response>, +} + +impl Future for ResponseFuture { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.inner.as_mut().poll(cx).map(Ok) + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ResponseFuture").finish() + } +} + +mod private { + use crate::{http::Request, response::IntoResponse}; + + pub trait Sealed {} + impl Sealed for Result, E> where E: IntoResponse {} + impl Sealed for Request {} +} + +/// Trait implemented by types that can be returned from [`map_request`], +/// [`map_request_with_state`]. +/// +/// This trait is sealed such that it cannot be implemented outside this crate. +pub trait IntoMapRequestResult: private::Sealed { + /// Perform the conversion. + fn into_map_request_result(self) -> Result, Response>; +} + +impl IntoMapRequestResult for Result, E> +where + E: IntoResponse, +{ + fn into_map_request_result(self) -> Result, Response> { + self.map_err(IntoResponse::into_response) + } +} + +impl IntoMapRequestResult for Request { + fn into_map_request_result(self) -> Result, Response> { + Ok(self) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{routing::get, test_helpers::TestClient, Router}; + use http::{HeaderMap, StatusCode}; + + #[crate::test] + async fn works() { + async fn add_header(mut req: Request) -> Request { + req.headers_mut().insert("x-foo", "foo".parse().unwrap()); + req + } + + async fn handler(headers: HeaderMap) -> Response { + headers["x-foo"] + .to_str() + .unwrap() + .to_owned() + .into_response() + } + + let app = Router::new() + .route("/", get(handler)) + .layer(map_request(add_header)); + let client = TestClient::new(app); + + let res = client.get("/").send().await; + + assert_eq!(res.text().await, "foo"); + } + + #[crate::test] + async fn works_for_short_circutting() { + async fn add_header(_req: Request) -> Result, 
(StatusCode, &'static str)> { + Err((StatusCode::INTERNAL_SERVER_ERROR, "something went wrong")) + } + + async fn handler(_headers: HeaderMap) -> Response { + unreachable!() + } + + let app = Router::new() + .route("/", get(handler)) + .layer(map_request(add_header)); + let client = TestClient::new(app); + + let res = client.get("/").send().await; + + assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); + assert_eq!(res.text().await, "something went wrong"); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/middleware/map_response.rs b/.cargo-vendor/axum-0.6.20/src/middleware/map_response.rs new file mode 100644 index 0000000000..06f9825740 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/middleware/map_response.rs @@ -0,0 +1,364 @@ +use crate::response::{IntoResponse, Response}; +use axum_core::extract::FromRequestParts; +use futures_util::future::BoxFuture; +use http::Request; +use std::{ + any::type_name, + convert::Infallible, + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Create a middleware from an async function that transforms a response. +/// +/// This differs from [`tower::util::MapResponse`] in that it allows you to easily run axum-specific +/// extractors. +/// +/// # Example +/// +/// ``` +/// use axum::{ +/// Router, +/// routing::get, +/// middleware::map_response, +/// response::Response, +/// }; +/// +/// async fn set_header(mut response: Response) -> Response { +/// response.headers_mut().insert("x-foo", "foo".parse().unwrap()); +/// response +/// } +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .layer(map_response(set_header)); +/// # let _: Router = app; +/// ``` +/// +/// # Running extractors +/// +/// It is also possible to run extractors that implement [`FromRequestParts`]. These will be run +/// before calling the handler. 
+/// +/// ``` +/// use axum::{ +/// Router, +/// routing::get, +/// middleware::map_response, +/// extract::Path, +/// response::Response, +/// }; +/// use std::collections::HashMap; +/// +/// async fn log_path_params( +/// Path(path_params): Path>, +/// response: Response, +/// ) -> Response { +/// tracing::debug!(?path_params); +/// response +/// } +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .layer(map_response(log_path_params)); +/// # let _: Router = app; +/// ``` +/// +/// Note that to access state you must use either [`map_response_with_state`]. +/// +/// # Returning any `impl IntoResponse` +/// +/// It is also possible to return anything that implements [`IntoResponse`] +/// +/// ``` +/// use axum::{ +/// Router, +/// routing::get, +/// middleware::map_response, +/// response::{Response, IntoResponse}, +/// }; +/// use std::collections::HashMap; +/// +/// async fn set_header(response: Response) -> impl IntoResponse { +/// ( +/// [("x-foo", "foo")], +/// response, +/// ) +/// } +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .layer(map_response(set_header)); +/// # let _: Router = app; +/// ``` +pub fn map_response(f: F) -> MapResponseLayer { + map_response_with_state((), f) +} + +/// Create a middleware from an async function that transforms a response, with the given state. +/// +/// See [`State`](crate::extract::State) for more details about accessing state. +/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// Router, +/// http::StatusCode, +/// routing::get, +/// response::Response, +/// middleware::map_response_with_state, +/// extract::State, +/// }; +/// +/// #[derive(Clone)] +/// struct AppState { /* ... 
*/ } +/// +/// async fn my_middleware( +/// State(state): State, +/// // you can add more extractors here but they must +/// // all implement `FromRequestParts` +/// // `FromRequest` is not allowed +/// response: Response, +/// ) -> Response { +/// // do something with `state` and `response`... +/// response +/// } +/// +/// let state = AppState { /* ... */ }; +/// +/// let app = Router::new() +/// .route("/", get(|| async { /* ... */ })) +/// .route_layer(map_response_with_state(state.clone(), my_middleware)) +/// .with_state(state); +/// # let _: axum::Router = app; +/// ``` +pub fn map_response_with_state(state: S, f: F) -> MapResponseLayer { + MapResponseLayer { + f, + state, + _extractor: PhantomData, + } +} + +/// A [`tower::Layer`] from an async function that transforms a response. +/// +/// Created with [`map_response`]. See that function for more details. +#[must_use] +pub struct MapResponseLayer { + f: F, + state: S, + _extractor: PhantomData T>, +} + +impl Clone for MapResponseLayer +where + F: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + f: self.f.clone(), + state: self.state.clone(), + _extractor: self._extractor, + } + } +} + +impl Layer for MapResponseLayer +where + F: Clone, + S: Clone, +{ + type Service = MapResponse; + + fn layer(&self, inner: I) -> Self::Service { + MapResponse { + f: self.f.clone(), + state: self.state.clone(), + inner, + _extractor: PhantomData, + } + } +} + +impl fmt::Debug for MapResponseLayer +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapResponseLayer") + // Write out the type name, without quoting it as `&type_name::()` would + .field("f", &format_args!("{}", type_name::())) + .field("state", &self.state) + .finish() + } +} + +/// A middleware created from an async function that transforms a response. +/// +/// Created with [`map_response`]. See that function for more details. 
+pub struct MapResponse { + f: F, + inner: I, + state: S, + _extractor: PhantomData T>, +} + +impl Clone for MapResponse +where + F: Clone, + I: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + Self { + f: self.f.clone(), + inner: self.inner.clone(), + state: self.state.clone(), + _extractor: self._extractor, + } + } +} + +macro_rules! impl_service { + ( + $($ty:ident),* + ) => { + #[allow(non_snake_case, unused_mut)] + impl Service> for MapResponse + where + F: FnMut($($ty,)* Response) -> Fut + Clone + Send + 'static, + $( $ty: FromRequestParts + Send, )* + Fut: Future + Send + 'static, + Fut::Output: IntoResponse + Send + 'static, + I: Service, Response = Response, Error = Infallible> + + Clone + + Send + + 'static, + I::Future: Send + 'static, + B: Send + 'static, + ResBody: Send + 'static, + S: Clone + Send + Sync + 'static, + { + type Response = Response; + type Error = Infallible; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + + fn call(&mut self, req: Request) -> Self::Future { + let not_ready_inner = self.inner.clone(); + let mut ready_inner = std::mem::replace(&mut self.inner, not_ready_inner); + + let mut f = self.f.clone(); + let _state = self.state.clone(); + + let future = Box::pin(async move { + let (mut parts, body) = req.into_parts(); + + $( + let $ty = match $ty::from_request_parts(&mut parts, &_state).await { + Ok(value) => value, + Err(rejection) => return rejection.into_response(), + }; + )* + + let req = Request::from_parts(parts, body); + + match ready_inner.call(req).await { + Ok(res) => { + f($($ty,)* res).await.into_response() + } + Err(err) => match err {} + } + }); + + ResponseFuture { + inner: future + } + } + } + }; +} + +impl_service!(); +impl_service!(T1); +impl_service!(T1, T2); +impl_service!(T1, T2, T3); +impl_service!(T1, T2, T3, T4); +impl_service!(T1, T2, T3, T4, T5); +impl_service!(T1, T2, T3, T4, T5, T6); +impl_service!(T1, T2, T3, T4, T5, 
T6, T7); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15); +impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16); + +impl fmt::Debug for MapResponse +where + S: fmt::Debug, + I: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapResponse") + .field("f", &format_args!("{}", type_name::())) + .field("inner", &self.inner) + .field("state", &self.state) + .finish() + } +} + +/// Response future for [`MapResponse`]. +pub struct ResponseFuture { + inner: BoxFuture<'static, Response>, +} + +impl Future for ResponseFuture { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.inner.as_mut().poll(cx).map(Ok) + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ResponseFuture").finish() + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::{test_helpers::TestClient, Router}; + + #[crate::test] + async fn works() { + async fn add_header(mut res: Response) -> Response { + res.headers_mut().insert("x-foo", "foo".parse().unwrap()); + res + } + + let app = Router::new().layer(map_response(add_header)); + let client = TestClient::new(app); + + let res = client.get("/").send().await; + + assert_eq!(res.headers()["x-foo"], "foo"); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/middleware/mod.rs b/.cargo-vendor/axum-0.6.20/src/middleware/mod.rs new file mode 100644 index 
0000000000..22dab1433e --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/middleware/mod.rs @@ -0,0 +1,29 @@ +//! Utilities for writing middleware +//! +#![doc = include_str!("../docs/middleware.md")] + +mod from_extractor; +mod from_fn; +mod map_request; +mod map_response; + +pub use self::from_extractor::{ + from_extractor, from_extractor_with_state, FromExtractor, FromExtractorLayer, +}; +pub use self::from_fn::{from_fn, from_fn_with_state, FromFn, FromFnLayer, Next}; +pub use self::map_request::{ + map_request, map_request_with_state, IntoMapRequestResult, MapRequest, MapRequestLayer, +}; +pub use self::map_response::{ + map_response, map_response_with_state, MapResponse, MapResponseLayer, +}; +pub use crate::extension::AddExtension; + +pub mod future { + //! Future types. + + pub use super::from_extractor::ResponseFuture as FromExtractorResponseFuture; + pub use super::from_fn::ResponseFuture as FromFnResponseFuture; + pub use super::map_request::ResponseFuture as MapRequestResponseFuture; + pub use super::map_response::ResponseFuture as MapResponseResponseFuture; +} diff --git a/.cargo-vendor/axum-0.6.20/src/response/mod.rs b/.cargo-vendor/axum-0.6.20/src/response/mod.rs new file mode 100644 index 0000000000..2c149748a6 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/response/mod.rs @@ -0,0 +1,231 @@ +#![doc = include_str!("../docs/response.md")] + +use crate::body::{Bytes, Full}; +use http::{header, HeaderValue}; + +mod redirect; + +#[cfg(feature = "tokio")] +pub mod sse; + +#[doc(no_inline)] +#[cfg(feature = "json")] +pub use crate::Json; + +#[doc(no_inline)] +#[cfg(feature = "headers")] +pub use crate::TypedHeader; + +#[cfg(feature = "form")] +#[doc(no_inline)] +pub use crate::form::Form; + +#[doc(no_inline)] +pub use crate::Extension; + +#[doc(inline)] +pub use axum_core::response::{ + AppendHeaders, ErrorResponse, IntoResponse, IntoResponseParts, Response, ResponseParts, Result, +}; + +#[doc(inline)] +pub use self::redirect::Redirect; + +#[doc(inline)] 
+#[cfg(feature = "tokio")] +pub use sse::Sse; + +/// An HTML response. +/// +/// Will automatically get `Content-Type: text/html`. +#[derive(Clone, Copy, Debug)] +#[must_use] +pub struct Html(pub T); + +impl IntoResponse for Html +where + T: Into>, +{ + fn into_response(self) -> Response { + ( + [( + header::CONTENT_TYPE, + HeaderValue::from_static(mime::TEXT_HTML_UTF_8.as_ref()), + )], + self.0.into(), + ) + .into_response() + } +} + +impl From for Html { + fn from(inner: T) -> Self { + Self(inner) + } +} + +#[cfg(test)] +mod tests { + use crate::extract::Extension; + use crate::{body::Body, routing::get, Router}; + use axum_core::response::IntoResponse; + use http::HeaderMap; + use http::{StatusCode, Uri}; + + // just needs to compile + #[allow(dead_code)] + fn impl_trait_result_works() { + async fn impl_trait_ok() -> Result { + Ok(()) + } + + async fn impl_trait_err() -> Result<(), impl IntoResponse> { + Err(()) + } + + async fn impl_trait_both(uri: Uri) -> Result { + if uri.path() == "/" { + Ok(()) + } else { + Err(()) + } + } + + async fn impl_trait(uri: Uri) -> impl IntoResponse { + if uri.path() == "/" { + Ok(()) + } else { + Err(()) + } + } + + _ = Router::<(), Body>::new() + .route("/", get(impl_trait_ok)) + .route("/", get(impl_trait_err)) + .route("/", get(impl_trait_both)) + .route("/", get(impl_trait)); + } + + // just needs to compile + #[allow(dead_code)] + fn tuple_responses() { + async fn status() -> impl IntoResponse { + StatusCode::OK + } + + async fn status_headermap() -> impl IntoResponse { + (StatusCode::OK, HeaderMap::new()) + } + + async fn status_header_array() -> impl IntoResponse { + (StatusCode::OK, [("content-type", "text/plain")]) + } + + async fn status_headermap_body() -> impl IntoResponse { + (StatusCode::OK, HeaderMap::new(), String::new()) + } + + async fn status_header_array_body() -> impl IntoResponse { + ( + StatusCode::OK, + [("content-type", "text/plain")], + String::new(), + ) + } + + async fn 
status_headermap_impl_into_response() -> impl IntoResponse { + (StatusCode::OK, HeaderMap::new(), impl_into_response()) + } + + async fn status_header_array_impl_into_response() -> impl IntoResponse { + ( + StatusCode::OK, + [("content-type", "text/plain")], + impl_into_response(), + ) + } + + fn impl_into_response() -> impl IntoResponse {} + + async fn status_header_array_extension_body() -> impl IntoResponse { + ( + StatusCode::OK, + [("content-type", "text/plain")], + Extension(1), + String::new(), + ) + } + + async fn status_header_array_extension_mixed_body() -> impl IntoResponse { + ( + StatusCode::OK, + [("content-type", "text/plain")], + Extension(1), + HeaderMap::new(), + String::new(), + ) + } + + // + + async fn headermap() -> impl IntoResponse { + HeaderMap::new() + } + + async fn header_array() -> impl IntoResponse { + [("content-type", "text/plain")] + } + + async fn headermap_body() -> impl IntoResponse { + (HeaderMap::new(), String::new()) + } + + async fn header_array_body() -> impl IntoResponse { + ([("content-type", "text/plain")], String::new()) + } + + async fn headermap_impl_into_response() -> impl IntoResponse { + (HeaderMap::new(), impl_into_response()) + } + + async fn header_array_impl_into_response() -> impl IntoResponse { + ([("content-type", "text/plain")], impl_into_response()) + } + + async fn header_array_extension_body() -> impl IntoResponse { + ( + [("content-type", "text/plain")], + Extension(1), + String::new(), + ) + } + + async fn header_array_extension_mixed_body() -> impl IntoResponse { + ( + [("content-type", "text/plain")], + Extension(1), + HeaderMap::new(), + String::new(), + ) + } + + _ = Router::<(), Body>::new() + .route("/", get(status)) + .route("/", get(status_headermap)) + .route("/", get(status_header_array)) + .route("/", get(status_headermap_body)) + .route("/", get(status_header_array_body)) + .route("/", get(status_headermap_impl_into_response)) + .route("/", get(status_header_array_impl_into_response)) + 
.route("/", get(status_header_array_extension_body)) + .route("/", get(status_header_array_extension_mixed_body)) + .route("/", get(headermap)) + .route("/", get(header_array)) + .route("/", get(headermap_body)) + .route("/", get(header_array_body)) + .route("/", get(headermap_impl_into_response)) + .route("/", get(header_array_impl_into_response)) + .route("/", get(header_array_extension_body)) + .route("/", get(header_array_extension_mixed_body)); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/response/redirect.rs b/.cargo-vendor/axum-0.6.20/src/response/redirect.rs new file mode 100644 index 0000000000..4dee5b5c82 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/response/redirect.rs @@ -0,0 +1,93 @@ +use axum_core::response::{IntoResponse, Response}; +use http::{header::LOCATION, HeaderValue, StatusCode}; + +/// Response that redirects the request to another location. +/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// routing::get, +/// response::Redirect, +/// Router, +/// }; +/// +/// let app = Router::new() +/// .route("/old", get(|| async { Redirect::permanent("/new") })) +/// .route("/new", get(|| async { "Hello!" })); +/// # async { +/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +#[must_use = "needs to be returned from a handler or otherwise turned into a Response to be useful"] +#[derive(Debug, Clone)] +pub struct Redirect { + status_code: StatusCode, + location: HeaderValue, +} + +impl Redirect { + /// Create a new [`Redirect`] that uses a [`303 See Other`][mdn] status code. + /// + /// This redirect instructs the client to change the method to GET for the subsequent request + /// to the given `uri`, which is useful after successful form submission, file upload or when + /// you generally don't want the redirected-to page to observe the original request method and + /// body (if non-empty). 
If you want to preserve the request method and body, + /// [`Redirect::temporary`] should be used instead. + /// + /// # Panics + /// + /// If `uri` isn't a valid [`HeaderValue`]. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/303 + pub fn to(uri: &str) -> Self { + Self::with_status_code(StatusCode::SEE_OTHER, uri) + } + + /// Create a new [`Redirect`] that uses a [`307 Temporary Redirect`][mdn] status code. + /// + /// This has the same behavior as [`Redirect::to`], except it will preserve the original HTTP + /// method and body. + /// + /// # Panics + /// + /// If `uri` isn't a valid [`HeaderValue`]. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/307 + pub fn temporary(uri: &str) -> Self { + Self::with_status_code(StatusCode::TEMPORARY_REDIRECT, uri) + } + + /// Create a new [`Redirect`] that uses a [`308 Permanent Redirect`][mdn] status code. + /// + /// # Panics + /// + /// If `uri` isn't a valid [`HeaderValue`]. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308 + pub fn permanent(uri: &str) -> Self { + Self::with_status_code(StatusCode::PERMANENT_REDIRECT, uri) + } + + // This is intentionally not public since other kinds of redirects might not + // use the `Location` header, namely `304 Not Modified`. 
+ // + // We're open to adding more constructors upon request, if they make sense :) + fn with_status_code(status_code: StatusCode, uri: &str) -> Self { + assert!( + status_code.is_redirection(), + "not a redirection status code" + ); + + Self { + status_code, + location: HeaderValue::try_from(uri).expect("URI isn't a valid header value"), + } + } +} + +impl IntoResponse for Redirect { + fn into_response(self) -> Response { + (self.status_code, [(LOCATION, self.location)]).into_response() + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/response/sse.rs b/.cargo-vendor/axum-0.6.20/src/response/sse.rs new file mode 100644 index 0000000000..2e9e28535e --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/response/sse.rs @@ -0,0 +1,694 @@ +//! Server-Sent Events (SSE) responses. +//! +//! # Example +//! +//! ``` +//! use axum::{ +//! Router, +//! routing::get, +//! response::sse::{Event, KeepAlive, Sse}, +//! }; +//! use std::{time::Duration, convert::Infallible}; +//! use tokio_stream::StreamExt as _ ; +//! use futures_util::stream::{self, Stream}; +//! +//! let app = Router::new().route("/sse", get(sse_handler)); +//! +//! async fn sse_handler() -> Sse>> { +//! // A `Stream` that repeats an event every second +//! let stream = stream::repeat_with(|| Event::default().data("hi!")) +//! .map(Ok) +//! .throttle(Duration::from_secs(1)); +//! +//! Sse::new(stream).keep_alive(KeepAlive::default()) +//! } +//! # async { +//! # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +//! # }; +//! 
``` + +use crate::{ + body::{Bytes, HttpBody}, + BoxError, +}; +use axum_core::{ + body, + response::{IntoResponse, Response}, +}; +use bytes::{BufMut, BytesMut}; +use futures_util::{ + ready, + stream::{Stream, TryStream}, +}; +use pin_project_lite::pin_project; +use std::{ + fmt, + future::Future, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; +use sync_wrapper::SyncWrapper; +use tokio::time::Sleep; + +/// An SSE response +#[derive(Clone)] +#[must_use] +pub struct Sse { + stream: S, + keep_alive: Option, +} + +impl Sse { + /// Create a new [`Sse`] response that will respond with the given stream of + /// [`Event`]s. + /// + /// See the [module docs](self) for more details. + pub fn new(stream: S) -> Self + where + S: TryStream + Send + 'static, + S::Error: Into, + { + Sse { + stream, + keep_alive: None, + } + } + + /// Configure the interval between keep-alive messages. + /// + /// Defaults to no keep-alive messages. + pub fn keep_alive(mut self, keep_alive: KeepAlive) -> Self { + self.keep_alive = Some(keep_alive); + self + } +} + +impl fmt::Debug for Sse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Sse") + .field("stream", &format_args!("{}", std::any::type_name::())) + .field("keep_alive", &self.keep_alive) + .finish() + } +} + +impl IntoResponse for Sse +where + S: Stream> + Send + 'static, + E: Into, +{ + fn into_response(self) -> Response { + ( + [ + (http::header::CONTENT_TYPE, mime::TEXT_EVENT_STREAM.as_ref()), + (http::header::CACHE_CONTROL, "no-cache"), + ], + body::boxed(Body { + event_stream: SyncWrapper::new(self.stream), + keep_alive: self.keep_alive.map(KeepAliveStream::new), + }), + ) + .into_response() + } +} + +pin_project! 
{ + struct Body { + #[pin] + event_stream: SyncWrapper, + #[pin] + keep_alive: Option, + } +} + +impl HttpBody for Body +where + S: Stream>, +{ + type Data = Bytes; + type Error = E; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let this = self.project(); + + match this.event_stream.get_pin_mut().poll_next(cx) { + Poll::Pending => { + if let Some(keep_alive) = this.keep_alive.as_pin_mut() { + keep_alive.poll_event(cx).map(|e| Some(Ok(e))) + } else { + Poll::Pending + } + } + Poll::Ready(Some(Ok(event))) => { + if let Some(keep_alive) = this.keep_alive.as_pin_mut() { + keep_alive.reset(); + } + Poll::Ready(Some(Ok(event.finalize()))) + } + Poll::Ready(Some(Err(error))) => Poll::Ready(Some(Err(error))), + Poll::Ready(None) => Poll::Ready(None), + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) + } +} + +/// Server-sent event +#[derive(Debug, Default, Clone)] +#[must_use] +pub struct Event { + buffer: BytesMut, + flags: EventFlags, +} + +impl Event { + /// Set the event's data data field(s) (`data:`) + /// + /// Newlines in `data` will automatically be broken across `data:` fields. + /// + /// This corresponds to [`MessageEvent`'s data field]. + /// + /// Note that events with an empty data field will be ignored by the browser. + /// + /// # Panics + /// + /// - Panics if `data` contains any carriage returns, as they cannot be transmitted over SSE. + /// - Panics if `data` or `json_data` have already been called. 
+ /// + /// [`MessageEvent`'s data field]: https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent/data + pub fn data(mut self, data: T) -> Event + where + T: AsRef, + { + if self.flags.contains(EventFlags::HAS_DATA) { + panic!("Called `EventBuilder::data` multiple times"); + } + + for line in memchr_split(b'\n', data.as_ref().as_bytes()) { + self.field("data", line); + } + + self.flags.insert(EventFlags::HAS_DATA); + + self + } + + /// Set the event's data field to a value serialized as unformatted JSON (`data:`). + /// + /// This corresponds to [`MessageEvent`'s data field]. + /// + /// # Panics + /// + /// Panics if `data` or `json_data` have already been called. + /// + /// [`MessageEvent`'s data field]: https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent/data + #[cfg(feature = "json")] + pub fn json_data(mut self, data: T) -> serde_json::Result + where + T: serde::Serialize, + { + if self.flags.contains(EventFlags::HAS_DATA) { + panic!("Called `EventBuilder::json_data` multiple times"); + } + + self.buffer.extend_from_slice(b"data:"); + serde_json::to_writer((&mut self.buffer).writer(), &data)?; + self.buffer.put_u8(b'\n'); + + self.flags.insert(EventFlags::HAS_DATA); + + Ok(self) + } + + /// Set the event's comment field (`:`). + /// + /// This field will be ignored by most SSE clients. + /// + /// Unlike other functions, this function can be called multiple times to add many comments. + /// + /// # Panics + /// + /// Panics if `comment` contains any newlines or carriage returns, as they are not allowed in + /// comments. + pub fn comment(mut self, comment: T) -> Event + where + T: AsRef, + { + self.field("", comment.as_ref()); + self + } + + /// Set the event's name field (`event:`). + /// + /// This corresponds to the `type` parameter given when calling `addEventListener` on an + /// [`EventSource`]. For example, `.event("update")` should correspond to + /// `.addEventListener("update", ...)`. 
If no event type is given, browsers will fire a + /// [`message` event] instead. + /// + /// [`EventSource`]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource + /// [`message` event]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource/message_event + /// + /// # Panics + /// + /// - Panics if `event` contains any newlines or carriage returns. + /// - Panics if this function has already been called on this event. + pub fn event(mut self, event: T) -> Event + where + T: AsRef, + { + if self.flags.contains(EventFlags::HAS_EVENT) { + panic!("Called `EventBuilder::event` multiple times"); + } + self.flags.insert(EventFlags::HAS_EVENT); + + self.field("event", event.as_ref()); + + self + } + + /// Set the event's retry timeout field (`retry:`). + /// + /// This sets how long clients will wait before reconnecting if they are disconnected from the + /// SSE endpoint. Note that this is just a hint: clients are free to wait for longer if they + /// wish, such as if they implement exponential backoff. + /// + /// # Panics + /// + /// Panics if this function has already been called on this event. + pub fn retry(mut self, duration: Duration) -> Event { + if self.flags.contains(EventFlags::HAS_RETRY) { + panic!("Called `EventBuilder::retry` multiple times"); + } + self.flags.insert(EventFlags::HAS_RETRY); + + self.buffer.extend_from_slice(b"retry:"); + + let secs = duration.as_secs(); + let millis = duration.subsec_millis(); + + if secs > 0 { + // format seconds + self.buffer + .extend_from_slice(itoa::Buffer::new().format(secs).as_bytes()); + + // pad milliseconds + if millis < 10 { + self.buffer.extend_from_slice(b"00"); + } else if millis < 100 { + self.buffer.extend_from_slice(b"0"); + } + } + + // format milliseconds + self.buffer + .extend_from_slice(itoa::Buffer::new().format(millis).as_bytes()); + + self.buffer.put_u8(b'\n'); + + self + } + + /// Set the event's identifier field (`id:`). 
+ /// + /// This corresponds to [`MessageEvent`'s `lastEventId` field]. If no ID is in the event itself, + /// the browser will set that field to the last known message ID, starting with the empty + /// string. + /// + /// [`MessageEvent`'s `lastEventId` field]: https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent/lastEventId + /// + /// # Panics + /// + /// - Panics if `id` contains any newlines, carriage returns or null characters. + /// - Panics if this function has already been called on this event. + pub fn id(mut self, id: T) -> Event + where + T: AsRef, + { + if self.flags.contains(EventFlags::HAS_ID) { + panic!("Called `EventBuilder::id` multiple times"); + } + self.flags.insert(EventFlags::HAS_ID); + + let id = id.as_ref().as_bytes(); + assert_eq!( + memchr::memchr(b'\0', id), + None, + "Event ID cannot contain null characters", + ); + + self.field("id", id); + self + } + + fn field(&mut self, name: &str, value: impl AsRef<[u8]>) { + let value = value.as_ref(); + assert_eq!( + memchr::memchr2(b'\r', b'\n', value), + None, + "SSE field value cannot contain newlines or carriage returns", + ); + self.buffer.extend_from_slice(name.as_bytes()); + self.buffer.put_u8(b':'); + // Prevent values that start with spaces having that space stripped + if value.starts_with(b" ") { + self.buffer.put_u8(b' '); + } + self.buffer.extend_from_slice(value); + self.buffer.put_u8(b'\n'); + } + + fn finalize(mut self) -> Bytes { + self.buffer.put_u8(b'\n'); + self.buffer.freeze() + } +} + +bitflags::bitflags! { + #[derive(Default)] + struct EventFlags: u8 { + const HAS_DATA = 0b0001; + const HAS_EVENT = 0b0010; + const HAS_RETRY = 0b0100; + const HAS_ID = 0b1000; + } +} + +/// Configure the interval between keep-alive messages, the content +/// of each message, and the associated stream. +#[derive(Debug, Clone)] +#[must_use] +pub struct KeepAlive { + event: Bytes, + max_interval: Duration, +} + +impl KeepAlive { + /// Create a new `KeepAlive`. 
+ pub fn new() -> Self { + Self { + event: Bytes::from_static(b":\n\n"), + max_interval: Duration::from_secs(15), + } + } + + /// Customize the interval between keep-alive messages. + /// + /// Default is 15 seconds. + pub fn interval(mut self, time: Duration) -> Self { + self.max_interval = time; + self + } + + /// Customize the text of the keep-alive message. + /// + /// Default is an empty comment. + /// + /// # Panics + /// + /// Panics if `text` contains any newline or carriage returns, as they are not allowed in SSE + /// comments. + pub fn text(self, text: I) -> Self + where + I: AsRef, + { + self.event(Event::default().comment(text)) + } + + /// Customize the event of the keep-alive message. + /// + /// Default is an empty comment. + /// + /// # Panics + /// + /// Panics if `event` contains any newline or carriage returns, as they are not allowed in SSE + /// comments. + pub fn event(mut self, event: Event) -> Self { + self.event = event.finalize(); + self + } +} + +impl Default for KeepAlive { + fn default() -> Self { + Self::new() + } +} + +pin_project! 
{ + #[derive(Debug)] + struct KeepAliveStream { + keep_alive: KeepAlive, + #[pin] + alive_timer: Sleep, + } +} + +impl KeepAliveStream { + fn new(keep_alive: KeepAlive) -> Self { + Self { + alive_timer: tokio::time::sleep(keep_alive.max_interval), + keep_alive, + } + } + + fn reset(self: Pin<&mut Self>) { + let this = self.project(); + this.alive_timer + .reset(tokio::time::Instant::now() + this.keep_alive.max_interval); + } + + fn poll_event(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.as_mut().project(); + + ready!(this.alive_timer.poll(cx)); + + let event = this.keep_alive.event.clone(); + + self.reset(); + + Poll::Ready(event) + } +} + +fn memchr_split(needle: u8, haystack: &[u8]) -> MemchrSplit<'_> { + MemchrSplit { + needle, + haystack: Some(haystack), + } +} + +struct MemchrSplit<'a> { + needle: u8, + haystack: Option<&'a [u8]>, +} + +impl<'a> Iterator for MemchrSplit<'a> { + type Item = &'a [u8]; + fn next(&mut self) -> Option { + let haystack = self.haystack?; + if let Some(pos) = memchr::memchr(self.needle, haystack) { + let (front, back) = haystack.split_at(pos); + self.haystack = Some(&back[1..]); + Some(front) + } else { + self.haystack.take() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{routing::get, test_helpers::*, Router}; + use futures_util::stream; + use std::{collections::HashMap, convert::Infallible}; + use tokio_stream::StreamExt as _; + + #[test] + fn leading_space_is_not_stripped() { + let no_leading_space = Event::default().data("\tfoobar"); + assert_eq!(&*no_leading_space.finalize(), b"data:\tfoobar\n\n"); + + let leading_space = Event::default().data(" foobar"); + assert_eq!(&*leading_space.finalize(), b"data: foobar\n\n"); + } + + #[crate::test] + async fn basic() { + let app = Router::new().route( + "/", + get(|| async { + let stream = stream::iter(vec![ + Event::default().data("one").comment("this is a comment"), + Event::default() + .json_data(serde_json::json!({ "foo": "bar" })) 
+ .unwrap(), + Event::default() + .event("three") + .retry(Duration::from_secs(30)) + .id("unique-id"), + ]) + .map(Ok::<_, Infallible>); + Sse::new(stream) + }), + ); + + let client = TestClient::new(app); + let mut stream = client.get("/").send().await; + + assert_eq!(stream.headers()["content-type"], "text/event-stream"); + assert_eq!(stream.headers()["cache-control"], "no-cache"); + + let event_fields = parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("data").unwrap(), "one"); + assert_eq!(event_fields.get("comment").unwrap(), "this is a comment"); + + let event_fields = parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("data").unwrap(), "{\"foo\":\"bar\"}"); + assert!(event_fields.get("comment").is_none()); + + let event_fields = parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("event").unwrap(), "three"); + assert_eq!(event_fields.get("retry").unwrap(), "30000"); + assert_eq!(event_fields.get("id").unwrap(), "unique-id"); + assert!(event_fields.get("comment").is_none()); + + assert!(stream.chunk_text().await.is_none()); + } + + #[tokio::test(start_paused = true)] + async fn keep_alive() { + const DELAY: Duration = Duration::from_secs(5); + + let app = Router::new().route( + "/", + get(|| async { + let stream = stream::repeat_with(|| Event::default().data("msg")) + .map(Ok::<_, Infallible>) + .throttle(DELAY); + + Sse::new(stream).keep_alive( + KeepAlive::new() + .interval(Duration::from_secs(1)) + .text("keep-alive-text"), + ) + }), + ); + + let client = TestClient::new(app); + let mut stream = client.get("/").send().await; + + for _ in 0..5 { + // first message should be an event + let event_fields = parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("data").unwrap(), "msg"); + + // then 4 seconds of keep-alive messages + for _ in 0..4 { + tokio::time::sleep(Duration::from_secs(1)).await; + let event_fields = 
parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("comment").unwrap(), "keep-alive-text"); + } + } + } + + #[tokio::test(start_paused = true)] + async fn keep_alive_ends_when_the_stream_ends() { + const DELAY: Duration = Duration::from_secs(5); + + let app = Router::new().route( + "/", + get(|| async { + let stream = stream::repeat_with(|| Event::default().data("msg")) + .map(Ok::<_, Infallible>) + .throttle(DELAY) + .take(2); + + Sse::new(stream).keep_alive( + KeepAlive::new() + .interval(Duration::from_secs(1)) + .text("keep-alive-text"), + ) + }), + ); + + let client = TestClient::new(app); + let mut stream = client.get("/").send().await; + + // first message should be an event + let event_fields = parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("data").unwrap(), "msg"); + + // then 4 seconds of keep-alive messages + for _ in 0..4 { + tokio::time::sleep(Duration::from_secs(1)).await; + let event_fields = parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("comment").unwrap(), "keep-alive-text"); + } + + // then the last event + let event_fields = parse_event(&stream.chunk_text().await.unwrap()); + assert_eq!(event_fields.get("data").unwrap(), "msg"); + + // then no more events or keep-alive messages + assert!(stream.chunk_text().await.is_none()); + } + + fn parse_event(payload: &str) -> HashMap { + let mut fields = HashMap::new(); + + let mut lines = payload.lines().peekable(); + while let Some(line) = lines.next() { + if line.is_empty() { + assert!(lines.next().is_none()); + break; + } + + let (mut key, value) = line.split_once(':').unwrap(); + let value = value.trim(); + if key.is_empty() { + key = "comment"; + } + fields.insert(key.to_owned(), value.to_owned()); + } + + fields + } + + #[test] + fn memchr_spliting() { + assert_eq!( + memchr_split(2, &[]).collect::>(), + [&[]] as [&[u8]; 1] + ); + assert_eq!( + memchr_split(2, &[2]).collect::>(), + [&[], &[]] as [&[u8]; 2] 
+ ); + assert_eq!( + memchr_split(2, &[1]).collect::>(), + [&[1]] as [&[u8]; 1] + ); + assert_eq!( + memchr_split(2, &[1, 2]).collect::>(), + [&[1], &[]] as [&[u8]; 2] + ); + assert_eq!( + memchr_split(2, &[2, 1]).collect::>(), + [&[], &[1]] as [&[u8]; 2] + ); + assert_eq!( + memchr_split(2, &[1, 2, 2, 1]).collect::>(), + [&[1], &[], &[1]] as [&[u8]; 3] + ); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/future.rs b/.cargo-vendor/axum-0.6.20/src/routing/future.rs new file mode 100644 index 0000000000..5c05bd3bfd --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/future.rs @@ -0,0 +1,6 @@ +//! Future types. + +pub use super::{ + into_make_service::IntoMakeServiceFuture, + route::{InfallibleRouteFuture, RouteFuture}, +}; diff --git a/.cargo-vendor/axum-0.6.20/src/routing/into_make_service.rs b/.cargo-vendor/axum-0.6.20/src/routing/into_make_service.rs new file mode 100644 index 0000000000..fbc57c4acc --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/into_make_service.rs @@ -0,0 +1,57 @@ +use std::{ + convert::Infallible, + future::ready, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// A [`MakeService`] that produces axum router services. +/// +/// [`MakeService`]: tower::make::MakeService +#[derive(Debug, Clone)] +pub struct IntoMakeService { + svc: S, +} + +impl IntoMakeService { + pub(crate) fn new(svc: S) -> Self { + Self { svc } + } +} + +impl Service for IntoMakeService +where + S: Clone, +{ + type Response = S; + type Error = Infallible; + type Future = IntoMakeServiceFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _target: T) -> Self::Future { + IntoMakeServiceFuture::new(ready(Ok(self.svc.clone()))) + } +} + +opaque_future! { + /// Response future for [`IntoMakeService`]. 
+ pub type IntoMakeServiceFuture = + std::future::Ready>; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::body::Body; + + #[test] + fn traits() { + use crate::test_helpers::*; + + assert_send::>(); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/method_filter.rs b/.cargo-vendor/axum-0.6.20/src/routing/method_filter.rs new file mode 100644 index 0000000000..ca9b0c06e3 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/method_filter.rs @@ -0,0 +1,120 @@ +use bitflags::bitflags; +use http::Method; +use std::{ + fmt, + fmt::{Debug, Formatter}, +}; + +bitflags! { + /// A filter that matches one or more HTTP methods. + pub struct MethodFilter: u16 { + /// Match `DELETE` requests. + const DELETE = 0b000000010; + /// Match `GET` requests. + const GET = 0b000000100; + /// Match `HEAD` requests. + const HEAD = 0b000001000; + /// Match `OPTIONS` requests. + const OPTIONS = 0b000010000; + /// Match `PATCH` requests. + const PATCH = 0b000100000; + /// Match `POST` requests. + const POST = 0b001000000; + /// Match `PUT` requests. + const PUT = 0b010000000; + /// Match `TRACE` requests. + const TRACE = 0b100000000; + } +} + +/// Error type used when converting a [`Method`] to a [`MethodFilter`] fails. +#[derive(Debug)] +pub struct NoMatchingMethodFilter { + method: Method, +} + +impl NoMatchingMethodFilter { + /// Get the [`Method`] that couldn't be converted to a [`MethodFilter`]. 
+ pub fn method(&self) -> &Method { + &self.method + } +} + +impl fmt::Display for NoMatchingMethodFilter { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "no `MethodFilter` for `{}`", self.method.as_str()) + } +} + +impl std::error::Error for NoMatchingMethodFilter {} + +impl TryFrom for MethodFilter { + type Error = NoMatchingMethodFilter; + + fn try_from(m: Method) -> Result { + match m { + Method::DELETE => Ok(MethodFilter::DELETE), + Method::GET => Ok(MethodFilter::GET), + Method::HEAD => Ok(MethodFilter::HEAD), + Method::OPTIONS => Ok(MethodFilter::OPTIONS), + Method::PATCH => Ok(MethodFilter::PATCH), + Method::POST => Ok(MethodFilter::POST), + Method::PUT => Ok(MethodFilter::PUT), + Method::TRACE => Ok(MethodFilter::TRACE), + other => Err(NoMatchingMethodFilter { method: other }), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn from_http_method() { + assert_eq!( + MethodFilter::try_from(Method::DELETE).unwrap(), + MethodFilter::DELETE + ); + + assert_eq!( + MethodFilter::try_from(Method::GET).unwrap(), + MethodFilter::GET + ); + + assert_eq!( + MethodFilter::try_from(Method::HEAD).unwrap(), + MethodFilter::HEAD + ); + + assert_eq!( + MethodFilter::try_from(Method::OPTIONS).unwrap(), + MethodFilter::OPTIONS + ); + + assert_eq!( + MethodFilter::try_from(Method::PATCH).unwrap(), + MethodFilter::PATCH + ); + + assert_eq!( + MethodFilter::try_from(Method::POST).unwrap(), + MethodFilter::POST + ); + + assert_eq!( + MethodFilter::try_from(Method::PUT).unwrap(), + MethodFilter::PUT + ); + + assert_eq!( + MethodFilter::try_from(Method::TRACE).unwrap(), + MethodFilter::TRACE + ); + + assert!(MethodFilter::try_from(http::Method::CONNECT) + .unwrap_err() + .to_string() + .contains("CONNECT")); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/method_routing.rs b/.cargo-vendor/axum-0.6.20/src/routing/method_routing.rs new file mode 100644 index 0000000000..cdc7a11cd3 --- /dev/null +++ 
b/.cargo-vendor/axum-0.6.20/src/routing/method_routing.rs @@ -0,0 +1,1587 @@ +//! Route to services and handlers based on HTTP methods. + +use super::{future::InfallibleRouteFuture, IntoMakeService}; +#[cfg(feature = "tokio")] +use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; +use crate::{ + body::{Body, Bytes, HttpBody}, + boxed::BoxedIntoRoute, + error_handling::{HandleError, HandleErrorLayer}, + handler::Handler, + http::{Method, Request, StatusCode}, + response::Response, + routing::{future::RouteFuture, Fallback, MethodFilter, Route}, +}; +use axum_core::response::IntoResponse; +use bytes::BytesMut; +use std::{ + convert::Infallible, + fmt, + task::{Context, Poll}, +}; +use tower::{service_fn, util::MapResponseLayer}; +use tower_layer::Layer; +use tower_service::Service; + +macro_rules! top_level_service_fn { + ( + $name:ident, GET + ) => { + top_level_service_fn!( + /// Route `GET` requests to the given service. + /// + /// # Example + /// + /// ```rust + /// use axum::{ + /// http::Request, + /// Router, + /// routing::get_service, + /// }; + /// use http::Response; + /// use std::convert::Infallible; + /// use hyper::Body; + /// + /// let service = tower::service_fn(|request: Request| async { + /// Ok::<_, Infallible>(Response::new(Body::empty())) + /// }); + /// + /// // Requests to `GET /` will go to `service`. + /// let app = Router::new().route("/", get_service(service)); + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + /// + /// Note that `get` routes will also be called for `HEAD` requests but will have + /// the response body removed. Make sure to add explicit `HEAD` routes + /// afterwards. + $name, + GET + ); + }; + + ( + $name:ident, $method:ident + ) => { + top_level_service_fn!( + #[doc = concat!("Route `", stringify!($method) ,"` requests to the given service.")] + /// + /// See [`get_service`] for an example. 
+ $name, + $method + ); + }; + + ( + $(#[$m:meta])+ + $name:ident, $method:ident + ) => { + $(#[$m])+ + pub fn $name(svc: T) -> MethodRouter + where + T: Service> + Clone + Send + 'static, + T::Response: IntoResponse + 'static, + T::Future: Send + 'static, + B: HttpBody + Send + 'static, + S: Clone, + { + on_service(MethodFilter::$method, svc) + } + }; +} + +macro_rules! top_level_handler_fn { + ( + $name:ident, GET + ) => { + top_level_handler_fn!( + /// Route `GET` requests to the given handler. + /// + /// # Example + /// + /// ```rust + /// use axum::{ + /// routing::get, + /// Router, + /// }; + /// + /// async fn handler() {} + /// + /// // Requests to `GET /` will go to `handler`. + /// let app = Router::new().route("/", get(handler)); + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + /// + /// Note that `get` routes will also be called for `HEAD` requests but will have + /// the response body removed. Make sure to add explicit `HEAD` routes + /// afterwards. + $name, + GET + ); + }; + + ( + $name:ident, $method:ident + ) => { + top_level_handler_fn!( + #[doc = concat!("Route `", stringify!($method) ,"` requests to the given handler.")] + /// + /// See [`get`] for an example. + $name, + $method + ); + }; + + ( + $(#[$m:meta])+ + $name:ident, $method:ident + ) => { + $(#[$m])+ + pub fn $name(handler: H) -> MethodRouter + where + H: Handler, + B: HttpBody + Send + 'static, + T: 'static, + S: Clone + Send + Sync + 'static, + { + on(MethodFilter::$method, handler) + } + }; +} + +macro_rules! chained_service_fn { + ( + $name:ident, GET + ) => { + chained_service_fn!( + /// Chain an additional service that will only accept `GET` requests. 
+ /// + /// # Example + /// + /// ```rust + /// use axum::{ + /// http::Request, + /// Router, + /// routing::post_service, + /// }; + /// use http::Response; + /// use std::convert::Infallible; + /// use hyper::Body; + /// + /// let service = tower::service_fn(|request: Request| async { + /// Ok::<_, Infallible>(Response::new(Body::empty())) + /// }); + /// + /// let other_service = tower::service_fn(|request: Request| async { + /// Ok::<_, Infallible>(Response::new(Body::empty())) + /// }); + /// + /// // Requests to `POST /` will go to `service` and `GET /` will go to + /// // `other_service`. + /// let app = Router::new().route("/", post_service(service).get_service(other_service)); + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + /// + /// Note that `get` routes will also be called for `HEAD` requests but will have + /// the response body removed. Make sure to add explicit `HEAD` routes + /// afterwards. + $name, + GET + ); + }; + + ( + $name:ident, $method:ident + ) => { + chained_service_fn!( + #[doc = concat!("Chain an additional service that will only accept `", stringify!($method),"` requests.")] + /// + /// See [`MethodRouter::get_service`] for an example. + $name, + $method + ); + }; + + ( + $(#[$m:meta])+ + $name:ident, $method:ident + ) => { + $(#[$m])+ + #[track_caller] + pub fn $name(self, svc: T) -> Self + where + T: Service, Error = E> + + Clone + + Send + + 'static, + T::Response: IntoResponse + 'static, + T::Future: Send + 'static, + { + self.on_service(MethodFilter::$method, svc) + } + }; +} + +macro_rules! chained_handler_fn { + ( + $name:ident, GET + ) => { + chained_handler_fn!( + /// Chain an additional handler that will only accept `GET` requests. 
+ /// + /// # Example + /// + /// ```rust + /// use axum::{routing::post, Router}; + /// + /// async fn handler() {} + /// + /// async fn other_handler() {} + /// + /// // Requests to `POST /` will go to `handler` and `GET /` will go to + /// // `other_handler`. + /// let app = Router::new().route("/", post(handler).get(other_handler)); + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + /// + /// Note that `get` routes will also be called for `HEAD` requests but will have + /// the response body removed. Make sure to add explicit `HEAD` routes + /// afterwards. + $name, + GET + ); + }; + + ( + $name:ident, $method:ident + ) => { + chained_handler_fn!( + #[doc = concat!("Chain an additional handler that will only accept `", stringify!($method),"` requests.")] + /// + /// See [`MethodRouter::get`] for an example. + $name, + $method + ); + }; + + ( + $(#[$m:meta])+ + $name:ident, $method:ident + ) => { + $(#[$m])+ + #[track_caller] + pub fn $name(self, handler: H) -> Self + where + H: Handler, + T: 'static, + S: Send + Sync + 'static, + { + self.on(MethodFilter::$method, handler) + } + }; +} + +top_level_service_fn!(delete_service, DELETE); +top_level_service_fn!(get_service, GET); +top_level_service_fn!(head_service, HEAD); +top_level_service_fn!(options_service, OPTIONS); +top_level_service_fn!(patch_service, PATCH); +top_level_service_fn!(post_service, POST); +top_level_service_fn!(put_service, PUT); +top_level_service_fn!(trace_service, TRACE); + +/// Route requests with the given method to the service. 
+/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// http::Request, +/// routing::on, +/// Router, +/// routing::{MethodFilter, on_service}, +/// }; +/// use http::Response; +/// use std::convert::Infallible; +/// use hyper::Body; +/// +/// let service = tower::service_fn(|request: Request| async { +/// Ok::<_, Infallible>(Response::new(Body::empty())) +/// }); +/// +/// // Requests to `POST /` will go to `service`. +/// let app = Router::new().route("/", on_service(MethodFilter::POST, service)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +pub fn on_service(filter: MethodFilter, svc: T) -> MethodRouter +where + T: Service> + Clone + Send + 'static, + T::Response: IntoResponse + 'static, + T::Future: Send + 'static, + B: HttpBody + Send + 'static, + S: Clone, +{ + MethodRouter::new().on_service(filter, svc) +} + +/// Route requests to the given service regardless of its method. +/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// http::Request, +/// Router, +/// routing::any_service, +/// }; +/// use http::Response; +/// use std::convert::Infallible; +/// use hyper::Body; +/// +/// let service = tower::service_fn(|request: Request| async { +/// Ok::<_, Infallible>(Response::new(Body::empty())) +/// }); +/// +/// // All requests to `/` will go to `service`. +/// let app = Router::new().route("/", any_service(service)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// Additional methods can still be chained: +/// +/// ```rust +/// use axum::{ +/// http::Request, +/// Router, +/// routing::any_service, +/// }; +/// use http::Response; +/// use std::convert::Infallible; +/// use hyper::Body; +/// +/// let service = tower::service_fn(|request: Request| async { +/// # Ok::<_, Infallible>(Response::new(Body::empty())) +/// // ... 
+/// }); +/// +/// let other_service = tower::service_fn(|request: Request| async { +/// # Ok::<_, Infallible>(Response::new(Body::empty())) +/// // ... +/// }); +/// +/// // `POST /` goes to `other_service`. All other requests go to `service` +/// let app = Router::new().route("/", any_service(service).post_service(other_service)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +pub fn any_service(svc: T) -> MethodRouter +where + T: Service> + Clone + Send + 'static, + T::Response: IntoResponse + 'static, + T::Future: Send + 'static, + B: HttpBody + Send + 'static, + S: Clone, +{ + MethodRouter::new() + .fallback_service(svc) + .skip_allow_header() +} + +top_level_handler_fn!(delete, DELETE); +top_level_handler_fn!(get, GET); +top_level_handler_fn!(head, HEAD); +top_level_handler_fn!(options, OPTIONS); +top_level_handler_fn!(patch, PATCH); +top_level_handler_fn!(post, POST); +top_level_handler_fn!(put, PUT); +top_level_handler_fn!(trace, TRACE); + +/// Route requests with the given method to the handler. +/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// routing::on, +/// Router, +/// routing::MethodFilter, +/// }; +/// +/// async fn handler() {} +/// +/// // Requests to `POST /` will go to `handler`. +/// let app = Router::new().route("/", on(MethodFilter::POST, handler)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +pub fn on(filter: MethodFilter, handler: H) -> MethodRouter +where + H: Handler, + B: HttpBody + Send + 'static, + T: 'static, + S: Clone + Send + Sync + 'static, +{ + MethodRouter::new().on(filter, handler) +} + +/// Route requests with the given handler regardless of the method. +/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// routing::any, +/// Router, +/// }; +/// +/// async fn handler() {} +/// +/// // All requests to `/` will go to `handler`. 
+/// let app = Router::new().route("/", any(handler)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// Additional methods can still be chained: +/// +/// ```rust +/// use axum::{ +/// routing::any, +/// Router, +/// }; +/// +/// async fn handler() {} +/// +/// async fn other_handler() {} +/// +/// // `POST /` goes to `other_handler`. All other requests go to `handler` +/// let app = Router::new().route("/", any(handler).post(other_handler)); +/// # async { +/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +pub fn any(handler: H) -> MethodRouter +where + H: Handler, + B: HttpBody + Send + 'static, + T: 'static, + S: Clone + Send + Sync + 'static, +{ + MethodRouter::new().fallback(handler).skip_allow_header() +} + +/// A [`Service`] that accepts requests based on a [`MethodFilter`] and +/// allows chaining additional handlers and services. +/// +/// # When does `MethodRouter` implement [`Service`]? +/// +/// Whether or not `MethodRouter` implements [`Service`] depends on the state type it requires. +/// +/// ``` +/// use tower::Service; +/// use axum::{routing::get, extract::State, body::Body, http::Request}; +/// +/// // this `MethodRouter` doesn't require any state, i.e. 
the state is `()`, +/// let method_router = get(|| async {}); +/// // and thus it implements `Service` +/// assert_service(method_router); +/// +/// // this requires a `String` and doesn't implement `Service` +/// let method_router = get(|_: State| async {}); +/// // until you provide the `String` with `.with_state(...)` +/// let method_router_with_state = method_router.with_state(String::new()); +/// // and then it implements `Service` +/// assert_service(method_router_with_state); +/// +/// // helper to check that a value implements `Service` +/// fn assert_service(service: S) +/// where +/// S: Service>, +/// {} +/// ``` +#[must_use] +pub struct MethodRouter { + get: MethodEndpoint, + head: MethodEndpoint, + delete: MethodEndpoint, + options: MethodEndpoint, + patch: MethodEndpoint, + post: MethodEndpoint, + put: MethodEndpoint, + trace: MethodEndpoint, + fallback: Fallback, + allow_header: AllowHeader, +} + +#[derive(Clone, Debug)] +enum AllowHeader { + /// No `Allow` header value has been built-up yet. This is the default state + None, + /// Don't set an `Allow` header. This is used when `any` or `any_service` are called. + Skip, + /// The current value of the `Allow` header. 
+ Bytes(BytesMut), +} + +impl AllowHeader { + fn merge(self, other: Self) -> Self { + match (self, other) { + (AllowHeader::Skip, _) | (_, AllowHeader::Skip) => AllowHeader::Skip, + (AllowHeader::None, AllowHeader::None) => AllowHeader::None, + (AllowHeader::None, AllowHeader::Bytes(pick)) => AllowHeader::Bytes(pick), + (AllowHeader::Bytes(pick), AllowHeader::None) => AllowHeader::Bytes(pick), + (AllowHeader::Bytes(mut a), AllowHeader::Bytes(b)) => { + a.extend_from_slice(b","); + a.extend_from_slice(&b); + AllowHeader::Bytes(a) + } + } + } +} + +impl fmt::Debug for MethodRouter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MethodRouter") + .field("get", &self.get) + .field("head", &self.head) + .field("delete", &self.delete) + .field("options", &self.options) + .field("patch", &self.patch) + .field("post", &self.post) + .field("put", &self.put) + .field("trace", &self.trace) + .field("fallback", &self.fallback) + .field("allow_header", &self.allow_header) + .finish() + } +} + +impl MethodRouter +where + B: HttpBody + Send + 'static, + S: Clone, +{ + /// Chain an additional handler that will accept requests matching the given + /// `MethodFilter`. 
+ /// + /// # Example + /// + /// ```rust + /// use axum::{ + /// routing::get, + /// Router, + /// routing::MethodFilter + /// }; + /// + /// async fn handler() {} + /// + /// async fn other_handler() {} + /// + /// // Requests to `GET /` will go to `handler` and `DELETE /` will go to + /// // `other_handler` + /// let app = Router::new().route("/", get(handler).on(MethodFilter::DELETE, other_handler)); + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + #[track_caller] + pub fn on(self, filter: MethodFilter, handler: H) -> Self + where + H: Handler, + T: 'static, + S: Send + Sync + 'static, + { + self.on_endpoint( + filter, + MethodEndpoint::BoxedHandler(BoxedIntoRoute::from_handler(handler)), + ) + } + + chained_handler_fn!(delete, DELETE); + chained_handler_fn!(get, GET); + chained_handler_fn!(head, HEAD); + chained_handler_fn!(options, OPTIONS); + chained_handler_fn!(patch, PATCH); + chained_handler_fn!(post, POST); + chained_handler_fn!(put, PUT); + chained_handler_fn!(trace, TRACE); + + /// Add a fallback [`Handler`] to the router. + pub fn fallback(mut self, handler: H) -> Self + where + H: Handler, + T: 'static, + S: Send + Sync + 'static, + { + self.fallback = Fallback::BoxedHandler(BoxedIntoRoute::from_handler(handler)); + self + } +} + +impl MethodRouter<(), B, Infallible> +where + B: HttpBody + Send + 'static, +{ + /// Convert the handler into a [`MakeService`]. 
+ /// + /// This allows you to serve a single handler if you don't need any routing: + /// + /// ```rust + /// use axum::{ + /// Server, + /// handler::Handler, + /// http::{Uri, Method}, + /// response::IntoResponse, + /// routing::get, + /// }; + /// use std::net::SocketAddr; + /// + /// async fn handler(method: Method, uri: Uri, body: String) -> String { + /// format!("received `{} {}` with body `{:?}`", method, uri, body) + /// } + /// + /// let router = get(handler).post(handler); + /// + /// # async { + /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) + /// .serve(router.into_make_service()) + /// .await?; + /// # Ok::<_, hyper::Error>(()) + /// # }; + /// ``` + /// + /// [`MakeService`]: tower::make::MakeService + pub fn into_make_service(self) -> IntoMakeService { + IntoMakeService::new(self.with_state(())) + } + + /// Convert the router into a [`MakeService`] which stores information + /// about the incoming connection. + /// + /// See [`Router::into_make_service_with_connect_info`] for more details. 
+ /// + /// ```rust + /// use axum::{ + /// Server, + /// handler::Handler, + /// response::IntoResponse, + /// extract::ConnectInfo, + /// routing::get, + /// }; + /// use std::net::SocketAddr; + /// + /// async fn handler(ConnectInfo(addr): ConnectInfo) -> String { + /// format!("Hello {}", addr) + /// } + /// + /// let router = get(handler).post(handler); + /// + /// # async { + /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) + /// .serve(router.into_make_service_with_connect_info::()) + /// .await?; + /// # Ok::<_, hyper::Error>(()) + /// # }; + /// ``` + /// + /// [`MakeService`]: tower::make::MakeService + /// [`Router::into_make_service_with_connect_info`]: crate::routing::Router::into_make_service_with_connect_info + #[cfg(feature = "tokio")] + pub fn into_make_service_with_connect_info(self) -> IntoMakeServiceWithConnectInfo { + IntoMakeServiceWithConnectInfo::new(self.with_state(())) + } +} + +impl MethodRouter +where + B: HttpBody + Send + 'static, + S: Clone, +{ + /// Create a default `MethodRouter` that will respond with `405 Method Not Allowed` to all + /// requests. + pub fn new() -> Self { + let fallback = Route::new(service_fn(|_: Request| async { + Ok(StatusCode::METHOD_NOT_ALLOWED.into_response()) + })); + + Self { + get: MethodEndpoint::None, + head: MethodEndpoint::None, + delete: MethodEndpoint::None, + options: MethodEndpoint::None, + patch: MethodEndpoint::None, + post: MethodEndpoint::None, + put: MethodEndpoint::None, + trace: MethodEndpoint::None, + allow_header: AllowHeader::None, + fallback: Fallback::Default(fallback), + } + } + + /// Provide the state for the router. 
+ pub fn with_state(self, state: S) -> MethodRouter { + MethodRouter { + get: self.get.with_state(&state), + head: self.head.with_state(&state), + delete: self.delete.with_state(&state), + options: self.options.with_state(&state), + patch: self.patch.with_state(&state), + post: self.post.with_state(&state), + put: self.put.with_state(&state), + trace: self.trace.with_state(&state), + allow_header: self.allow_header, + fallback: self.fallback.with_state(state), + } + } + + /// Chain an additional service that will accept requests matching the given + /// `MethodFilter`. + /// + /// # Example + /// + /// ```rust + /// use axum::{ + /// http::Request, + /// Router, + /// routing::{MethodFilter, on_service}, + /// }; + /// use http::Response; + /// use std::convert::Infallible; + /// use hyper::Body; + /// + /// let service = tower::service_fn(|request: Request| async { + /// Ok::<_, Infallible>(Response::new(Body::empty())) + /// }); + /// + /// // Requests to `DELETE /` will go to `service` + /// let app = Router::new().route("/", on_service(MethodFilter::DELETE, service)); + /// # async { + /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); + /// # }; + /// ``` + #[track_caller] + pub fn on_service(self, filter: MethodFilter, svc: T) -> Self + where + T: Service, Error = E> + Clone + Send + 'static, + T::Response: IntoResponse + 'static, + T::Future: Send + 'static, + { + self.on_endpoint(filter, MethodEndpoint::Route(Route::new(svc))) + } + + #[track_caller] + fn on_endpoint(mut self, filter: MethodFilter, endpoint: MethodEndpoint) -> Self { + // written as a separate function to generate less IR + #[track_caller] + fn set_endpoint( + method_name: &str, + out: &mut MethodEndpoint, + endpoint: &MethodEndpoint, + endpoint_filter: MethodFilter, + filter: MethodFilter, + allow_header: &mut AllowHeader, + methods: &[&'static str], + ) where + MethodEndpoint: Clone, + S: Clone, + { + if endpoint_filter.contains(filter) { + if 
out.is_some() { + panic!( + "Overlapping method route. Cannot add two method routes that both handle \ + `{method_name}`", + ) + } + *out = endpoint.clone(); + for method in methods { + append_allow_header(allow_header, method); + } + } + } + + set_endpoint( + "GET", + &mut self.get, + &endpoint, + filter, + MethodFilter::GET, + &mut self.allow_header, + &["GET", "HEAD"], + ); + + set_endpoint( + "HEAD", + &mut self.head, + &endpoint, + filter, + MethodFilter::HEAD, + &mut self.allow_header, + &["HEAD"], + ); + + set_endpoint( + "TRACE", + &mut self.trace, + &endpoint, + filter, + MethodFilter::TRACE, + &mut self.allow_header, + &["TRACE"], + ); + + set_endpoint( + "PUT", + &mut self.put, + &endpoint, + filter, + MethodFilter::PUT, + &mut self.allow_header, + &["PUT"], + ); + + set_endpoint( + "POST", + &mut self.post, + &endpoint, + filter, + MethodFilter::POST, + &mut self.allow_header, + &["POST"], + ); + + set_endpoint( + "PATCH", + &mut self.patch, + &endpoint, + filter, + MethodFilter::PATCH, + &mut self.allow_header, + &["PATCH"], + ); + + set_endpoint( + "OPTIONS", + &mut self.options, + &endpoint, + filter, + MethodFilter::OPTIONS, + &mut self.allow_header, + &["OPTIONS"], + ); + + set_endpoint( + "DELETE", + &mut self.delete, + &endpoint, + filter, + MethodFilter::DELETE, + &mut self.allow_header, + &["DELETE"], + ); + + self + } + + chained_service_fn!(delete_service, DELETE); + chained_service_fn!(get_service, GET); + chained_service_fn!(head_service, HEAD); + chained_service_fn!(options_service, OPTIONS); + chained_service_fn!(patch_service, PATCH); + chained_service_fn!(post_service, POST); + chained_service_fn!(put_service, PUT); + chained_service_fn!(trace_service, TRACE); + + #[doc = include_str!("../docs/method_routing/fallback.md")] + pub fn fallback_service(mut self, svc: T) -> Self + where + T: Service, Error = E> + Clone + Send + 'static, + T::Response: IntoResponse + 'static, + T::Future: Send + 'static, + { + self.fallback = 
Fallback::Service(Route::new(svc)); + self + } + + #[doc = include_str!("../docs/method_routing/layer.md")] + pub fn layer(self, layer: L) -> MethodRouter + where + L: Layer> + Clone + Send + 'static, + L::Service: Service> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Error: Into + 'static, + >>::Future: Send + 'static, + E: 'static, + S: 'static, + NewReqBody: HttpBody + 'static, + NewError: 'static, + { + let layer_fn = move |route: Route| route.layer(layer.clone()); + + MethodRouter { + get: self.get.map(layer_fn.clone()), + head: self.head.map(layer_fn.clone()), + delete: self.delete.map(layer_fn.clone()), + options: self.options.map(layer_fn.clone()), + patch: self.patch.map(layer_fn.clone()), + post: self.post.map(layer_fn.clone()), + put: self.put.map(layer_fn.clone()), + trace: self.trace.map(layer_fn.clone()), + fallback: self.fallback.map(layer_fn), + allow_header: self.allow_header, + } + } + + #[doc = include_str!("../docs/method_routing/route_layer.md")] + #[track_caller] + pub fn route_layer(mut self, layer: L) -> MethodRouter + where + L: Layer> + Clone + Send + 'static, + L::Service: Service, Error = E> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Future: Send + 'static, + E: 'static, + S: 'static, + { + if self.get.is_none() + && self.head.is_none() + && self.delete.is_none() + && self.options.is_none() + && self.patch.is_none() + && self.post.is_none() + && self.put.is_none() + && self.trace.is_none() + { + panic!( + "Adding a route_layer before any routes is a no-op. \ + Add the routes you want the layer to apply to first." 
+ ); + } + + let layer_fn = move |svc| { + let svc = layer.layer(svc); + let svc = MapResponseLayer::new(IntoResponse::into_response).layer(svc); + Route::new(svc) + }; + + self.get = self.get.map(layer_fn.clone()); + self.head = self.head.map(layer_fn.clone()); + self.delete = self.delete.map(layer_fn.clone()); + self.options = self.options.map(layer_fn.clone()); + self.patch = self.patch.map(layer_fn.clone()); + self.post = self.post.map(layer_fn.clone()); + self.put = self.put.map(layer_fn.clone()); + self.trace = self.trace.map(layer_fn); + + self + } + + #[track_caller] + pub(crate) fn merge_for_path( + mut self, + path: Option<&str>, + other: MethodRouter, + ) -> Self { + // written using inner functions to generate less IR + #[track_caller] + fn merge_inner( + path: Option<&str>, + name: &str, + first: MethodEndpoint, + second: MethodEndpoint, + ) -> MethodEndpoint { + match (first, second) { + (MethodEndpoint::None, MethodEndpoint::None) => MethodEndpoint::None, + (pick, MethodEndpoint::None) | (MethodEndpoint::None, pick) => pick, + _ => { + if let Some(path) = path { + panic!( + "Overlapping method route. Handler for `{name} {path}` already exists" + ); + } else { + panic!( + "Overlapping method route. 
Cannot merge two method routes that both \ + define `{name}`" + ); + } + } + } + } + + self.get = merge_inner(path, "GET", self.get, other.get); + self.head = merge_inner(path, "HEAD", self.head, other.head); + self.delete = merge_inner(path, "DELETE", self.delete, other.delete); + self.options = merge_inner(path, "OPTIONS", self.options, other.options); + self.patch = merge_inner(path, "PATCH", self.patch, other.patch); + self.post = merge_inner(path, "POST", self.post, other.post); + self.put = merge_inner(path, "PUT", self.put, other.put); + self.trace = merge_inner(path, "TRACE", self.trace, other.trace); + + self.fallback = self + .fallback + .merge(other.fallback) + .expect("Cannot merge two `MethodRouter`s that both have a fallback"); + + self.allow_header = self.allow_header.merge(other.allow_header); + + self + } + + #[doc = include_str!("../docs/method_routing/merge.md")] + #[track_caller] + pub fn merge(self, other: MethodRouter) -> Self { + self.merge_for_path(None, other) + } + + /// Apply a [`HandleErrorLayer`]. + /// + /// This is a convenience method for doing `self.layer(HandleErrorLayer::new(f))`. + pub fn handle_error(self, f: F) -> MethodRouter + where + F: Clone + Send + Sync + 'static, + HandleError, F, T>: Service, Error = Infallible>, + , F, T> as Service>>::Future: Send, + , F, T> as Service>>::Response: IntoResponse + Send, + T: 'static, + E: 'static, + B: 'static, + S: 'static, + { + self.layer(HandleErrorLayer::new(f)) + } + + fn skip_allow_header(mut self) -> Self { + self.allow_header = AllowHeader::Skip; + self + } + + pub(crate) fn call_with_state(&mut self, req: Request, state: S) -> RouteFuture { + macro_rules! 
call { + ( + $req:expr, + $method:expr, + $method_variant:ident, + $svc:expr + ) => { + if $method == Method::$method_variant { + match $svc { + MethodEndpoint::None => {} + MethodEndpoint::Route(route) => { + return RouteFuture::from_future(route.oneshot_inner($req)) + .strip_body($method == Method::HEAD); + } + MethodEndpoint::BoxedHandler(handler) => { + let mut route = handler.clone().into_route(state); + return RouteFuture::from_future(route.oneshot_inner($req)) + .strip_body($method == Method::HEAD); + } + } + } + }; + } + + let method = req.method().clone(); + + // written with a pattern match like this to ensure we call all routes + let Self { + get, + head, + delete, + options, + patch, + post, + put, + trace, + fallback, + allow_header, + } = self; + + call!(req, method, HEAD, head); + call!(req, method, HEAD, get); + call!(req, method, GET, get); + call!(req, method, POST, post); + call!(req, method, OPTIONS, options); + call!(req, method, PATCH, patch); + call!(req, method, PUT, put); + call!(req, method, DELETE, delete); + call!(req, method, TRACE, trace); + + let future = fallback.call_with_state(req, state); + + match allow_header { + AllowHeader::None => future.allow_header(Bytes::new()), + AllowHeader::Skip => future, + AllowHeader::Bytes(allow_header) => future.allow_header(allow_header.clone().freeze()), + } + } +} + +fn append_allow_header(allow_header: &mut AllowHeader, method: &'static str) { + match allow_header { + AllowHeader::None => { + *allow_header = AllowHeader::Bytes(BytesMut::from(method)); + } + AllowHeader::Skip => {} + AllowHeader::Bytes(allow_header) => { + if let Ok(s) = std::str::from_utf8(allow_header) { + if !s.contains(method) { + allow_header.extend_from_slice(b","); + allow_header.extend_from_slice(method.as_bytes()); + } + } else { + #[cfg(debug_assertions)] + panic!("`allow_header` contained invalid uft-8. 
This should never happen") + } + } + } +} + +impl Clone for MethodRouter { + fn clone(&self) -> Self { + Self { + get: self.get.clone(), + head: self.head.clone(), + delete: self.delete.clone(), + options: self.options.clone(), + patch: self.patch.clone(), + post: self.post.clone(), + put: self.put.clone(), + trace: self.trace.clone(), + fallback: self.fallback.clone(), + allow_header: self.allow_header.clone(), + } + } +} + +impl Default for MethodRouter +where + B: HttpBody + Send + 'static, + S: Clone, +{ + fn default() -> Self { + Self::new() + } +} + +enum MethodEndpoint { + None, + Route(Route), + BoxedHandler(BoxedIntoRoute), +} + +impl MethodEndpoint +where + S: Clone, +{ + fn is_some(&self) -> bool { + matches!(self, Self::Route(_) | Self::BoxedHandler(_)) + } + + fn is_none(&self) -> bool { + matches!(self, Self::None) + } + + fn map(self, f: F) -> MethodEndpoint + where + S: 'static, + B: 'static, + E: 'static, + F: FnOnce(Route) -> Route + Clone + Send + 'static, + B2: HttpBody + 'static, + E2: 'static, + { + match self { + Self::None => MethodEndpoint::None, + Self::Route(route) => MethodEndpoint::Route(f(route)), + Self::BoxedHandler(handler) => MethodEndpoint::BoxedHandler(handler.map(f)), + } + } + + fn with_state(self, state: &S) -> MethodEndpoint { + match self { + MethodEndpoint::None => MethodEndpoint::None, + MethodEndpoint::Route(route) => MethodEndpoint::Route(route), + MethodEndpoint::BoxedHandler(handler) => { + MethodEndpoint::Route(handler.into_route(state.clone())) + } + } + } +} + +impl Clone for MethodEndpoint { + fn clone(&self) -> Self { + match self { + Self::None => Self::None, + Self::Route(inner) => Self::Route(inner.clone()), + Self::BoxedHandler(inner) => Self::BoxedHandler(inner.clone()), + } + } +} + +impl fmt::Debug for MethodEndpoint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::None => f.debug_tuple("None").finish(), + Self::Route(inner) => inner.fmt(f), + Self::BoxedHandler(_) => 
f.debug_tuple("BoxedHandler").finish(), + } + } +} + +impl Service> for MethodRouter<(), B, E> +where + B: HttpBody + Send + 'static, +{ + type Response = Response; + type Error = E; + type Future = RouteFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + self.call_with_state(req, ()) + } +} + +impl Handler<(), S, B> for MethodRouter +where + S: Clone + 'static, + B: HttpBody + Send + 'static, +{ + type Future = InfallibleRouteFuture; + + fn call(mut self, req: Request, state: S) -> Self::Future { + InfallibleRouteFuture::new(self.call_with_state(req, state)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + body::Body, error_handling::HandleErrorLayer, extract::State, + handler::HandlerWithoutStateExt, + }; + use axum_core::response::IntoResponse; + use http::{header::ALLOW, HeaderMap}; + use std::time::Duration; + use tower::{timeout::TimeoutLayer, Service, ServiceBuilder, ServiceExt}; + use tower_http::{services::fs::ServeDir, validate_request::ValidateRequestHeaderLayer}; + + #[crate::test] + async fn method_not_allowed_by_default() { + let mut svc = MethodRouter::new(); + let (status, _, body) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert!(body.is_empty()); + } + + #[crate::test] + async fn get_service_fn() { + async fn handle(_req: Request) -> Result, Infallible> { + Ok(Response::new(Body::from("ok"))) + } + + let mut svc = get_service(service_fn(handle)); + + let (status, _, body) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(body, "ok"); + } + + #[crate::test] + async fn get_handler() { + let mut svc = MethodRouter::new().get(ok); + let (status, _, body) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(body, "ok"); + } + + #[crate::test] + async fn get_accepts_head() { + let mut svc 
= MethodRouter::new().get(ok); + let (status, _, body) = call(Method::HEAD, &mut svc).await; + assert_eq!(status, StatusCode::OK); + assert!(body.is_empty()); + } + + #[crate::test] + async fn head_takes_precedence_over_get() { + let mut svc = MethodRouter::new().head(created).get(ok); + let (status, _, body) = call(Method::HEAD, &mut svc).await; + assert_eq!(status, StatusCode::CREATED); + assert!(body.is_empty()); + } + + #[crate::test] + async fn merge() { + let mut svc = get(ok).merge(post(ok)); + + let (status, _, _) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::OK); + + let (status, _, _) = call(Method::POST, &mut svc).await; + assert_eq!(status, StatusCode::OK); + } + + #[crate::test] + async fn layer() { + let mut svc = MethodRouter::new() + .get(|| async { std::future::pending::<()>().await }) + .layer(ValidateRequestHeaderLayer::bearer("password")); + + // method with route + let (status, _, _) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::UNAUTHORIZED); + + // method without route + let (status, _, _) = call(Method::DELETE, &mut svc).await; + assert_eq!(status, StatusCode::UNAUTHORIZED); + } + + #[crate::test] + async fn route_layer() { + let mut svc = MethodRouter::new() + .get(|| async { std::future::pending::<()>().await }) + .route_layer(ValidateRequestHeaderLayer::bearer("password")); + + // method with route + let (status, _, _) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::UNAUTHORIZED); + + // method without route + let (status, _, _) = call(Method::DELETE, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + } + + #[allow(dead_code)] + fn buiding_complex_router() { + let app = crate::Router::new().route( + "/", + // use the all the things 💣️ + get(ok) + .post(ok) + .route_layer(ValidateRequestHeaderLayer::bearer("password")) + .merge(delete_service(ServeDir::new("."))) + .fallback(|| async { StatusCode::NOT_FOUND }) + .put(ok) + .layer( + 
ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_| async { + StatusCode::REQUEST_TIMEOUT + })) + .layer(TimeoutLayer::new(Duration::from_secs(10))), + ), + ); + + crate::Server::bind(&"0.0.0.0:0".parse().unwrap()).serve(app.into_make_service()); + } + + #[crate::test] + async fn sets_allow_header() { + let mut svc = MethodRouter::new().put(ok).patch(ok); + let (status, headers, _) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(headers[ALLOW], "PUT,PATCH"); + } + + #[crate::test] + async fn sets_allow_header_get_head() { + let mut svc = MethodRouter::new().get(ok).head(ok); + let (status, headers, _) = call(Method::PUT, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(headers[ALLOW], "GET,HEAD"); + } + + #[crate::test] + async fn empty_allow_header_by_default() { + let mut svc = MethodRouter::new(); + let (status, headers, _) = call(Method::PATCH, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(headers[ALLOW], ""); + } + + #[crate::test] + async fn allow_header_when_merging() { + let a = put(ok).patch(ok); + let b = get(ok).head(ok); + let mut svc = a.merge(b); + + let (status, headers, _) = call(Method::DELETE, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(headers[ALLOW], "PUT,PATCH,GET,HEAD"); + } + + #[crate::test] + async fn allow_header_any() { + let mut svc = any(ok); + + let (status, headers, _) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::OK); + assert!(!headers.contains_key(ALLOW)); + } + + #[crate::test] + async fn allow_header_with_fallback() { + let mut svc = MethodRouter::new() + .get(ok) + .fallback(|| async { (StatusCode::METHOD_NOT_ALLOWED, "Method not allowed") }); + + let (status, headers, _) = call(Method::DELETE, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(headers[ALLOW], "GET,HEAD"); + } + + #[crate::test] + 
async fn allow_header_with_fallback_that_sets_allow() { + async fn fallback(method: Method) -> Response { + if method == Method::POST { + "OK".into_response() + } else { + ( + StatusCode::METHOD_NOT_ALLOWED, + [(ALLOW, "GET,POST")], + "Method not allowed", + ) + .into_response() + } + } + + let mut svc = MethodRouter::new().get(ok).fallback(fallback); + + let (status, _, _) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::OK); + + let (status, _, _) = call(Method::POST, &mut svc).await; + assert_eq!(status, StatusCode::OK); + + let (status, headers, _) = call(Method::DELETE, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(headers[ALLOW], "GET,POST"); + } + + #[crate::test] + async fn allow_header_noop_middleware() { + let mut svc = MethodRouter::new() + .get(ok) + .layer(tower::layer::util::Identity::new()); + + let (status, headers, _) = call(Method::DELETE, &mut svc).await; + assert_eq!(status, StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(headers[ALLOW], "GET,HEAD"); + } + + #[crate::test] + #[should_panic( + expected = "Overlapping method route. Cannot add two method routes that both handle `GET`" + )] + async fn handler_overlaps() { + let _: MethodRouter<()> = get(ok).get(ok); + } + + #[crate::test] + #[should_panic( + expected = "Overlapping method route. 
Cannot add two method routes that both handle `POST`" + )] + async fn service_overlaps() { + let _: MethodRouter<()> = post_service(ok.into_service()).post_service(ok.into_service()); + } + + #[crate::test] + async fn get_head_does_not_overlap() { + let _: MethodRouter<()> = get(ok).head(ok); + } + + #[crate::test] + async fn head_get_does_not_overlap() { + let _: MethodRouter<()> = head(ok).get(ok); + } + + #[crate::test] + async fn accessing_state() { + let mut svc = MethodRouter::new() + .get(|State(state): State<&'static str>| async move { state }) + .with_state("state"); + + let (status, _, text) = call(Method::GET, &mut svc).await; + + assert_eq!(status, StatusCode::OK); + assert_eq!(text, "state"); + } + + #[crate::test] + async fn fallback_accessing_state() { + let mut svc = MethodRouter::new() + .fallback(|State(state): State<&'static str>| async move { state }) + .with_state("state"); + + let (status, _, text) = call(Method::GET, &mut svc).await; + + assert_eq!(status, StatusCode::OK); + assert_eq!(text, "state"); + } + + #[crate::test] + async fn merge_accessing_state() { + let one = get(|State(state): State<&'static str>| async move { state }); + let two = post(|State(state): State<&'static str>| async move { state }); + + let mut svc = one.merge(two).with_state("state"); + + let (status, _, text) = call(Method::GET, &mut svc).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(text, "state"); + + let (status, _, _) = call(Method::POST, &mut svc).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(text, "state"); + } + + async fn call(method: Method, svc: &mut S) -> (StatusCode, HeaderMap, String) + where + S: Service, Error = Infallible>, + S::Response: IntoResponse, + { + let request = Request::builder() + .uri("/") + .method(method) + .body(Body::empty()) + .unwrap(); + let response = svc + .ready() + .await + .unwrap() + .call(request) + .await + .unwrap() + .into_response(); + let (parts, body) = response.into_parts(); + let body = 
String::from_utf8(hyper::body::to_bytes(body).await.unwrap().to_vec()).unwrap(); + (parts.status, parts.headers, body) + } + + async fn ok() -> (StatusCode, &'static str) { + (StatusCode::OK, "ok") + } + + async fn created() -> (StatusCode, &'static str) { + (StatusCode::CREATED, "created") + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/mod.rs b/.cargo-vendor/axum-0.6.20/src/routing/mod.rs new file mode 100644 index 0000000000..1760157cbc --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/mod.rs @@ -0,0 +1,541 @@ +//! Routing between [`Service`]s and handlers. + +use self::{future::RouteFuture, not_found::NotFound, path_router::PathRouter}; +#[cfg(feature = "tokio")] +use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; +use crate::{ + body::{Body, HttpBody}, + boxed::BoxedIntoRoute, + handler::Handler, + util::try_downcast, +}; +use axum_core::response::{IntoResponse, Response}; +use http::Request; +use std::{ + convert::Infallible, + fmt, + task::{Context, Poll}, +}; +use sync_wrapper::SyncWrapper; +use tower_layer::Layer; +use tower_service::Service; + +pub mod future; +pub mod method_routing; + +mod into_make_service; +mod method_filter; +mod not_found; +pub(crate) mod path_router; +mod route; +mod strip_prefix; +pub(crate) mod url_params; + +#[cfg(test)] +mod tests; + +pub use self::{into_make_service::IntoMakeService, method_filter::MethodFilter, route::Route}; + +pub use self::method_routing::{ + any, any_service, delete, delete_service, get, get_service, head, head_service, on, on_service, + options, options_service, patch, patch_service, post, post_service, put, put_service, trace, + trace_service, MethodRouter, +}; + +macro_rules! panic_on_err { + ($expr:expr) => { + match $expr { + Ok(x) => x, + Err(err) => panic!("{err}"), + } + }; +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct RouteId(u32); + +/// The router type for composing handlers and services. 
+#[must_use] +pub struct Router { + path_router: PathRouter, + fallback_router: PathRouter, + default_fallback: bool, + catch_all_fallback: Fallback, +} + +impl Clone for Router { + fn clone(&self) -> Self { + Self { + path_router: self.path_router.clone(), + fallback_router: self.fallback_router.clone(), + default_fallback: self.default_fallback, + catch_all_fallback: self.catch_all_fallback.clone(), + } + } +} + +impl Default for Router +where + B: HttpBody + Send + 'static, + S: Clone + Send + Sync + 'static, +{ + fn default() -> Self { + Self::new() + } +} + +impl fmt::Debug for Router { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Router") + .field("path_router", &self.path_router) + .field("fallback_router", &self.fallback_router) + .field("default_fallback", &self.default_fallback) + .field("catch_all_fallback", &self.catch_all_fallback) + .finish() + } +} + +pub(crate) const NEST_TAIL_PARAM: &str = "__private__axum_nest_tail_param"; +pub(crate) const NEST_TAIL_PARAM_CAPTURE: &str = "/*__private__axum_nest_tail_param"; +pub(crate) const FALLBACK_PARAM: &str = "__private__axum_fallback"; +pub(crate) const FALLBACK_PARAM_PATH: &str = "/*__private__axum_fallback"; + +impl Router +where + B: HttpBody + Send + 'static, + S: Clone + Send + Sync + 'static, +{ + /// Create a new `Router`. + /// + /// Unless you add additional routes this will respond with `404 Not Found` to + /// all requests. 
+ pub fn new() -> Self { + Self { + path_router: Default::default(), + fallback_router: PathRouter::new_fallback(), + default_fallback: true, + catch_all_fallback: Fallback::Default(Route::new(NotFound)), + } + } + + #[doc = include_str!("../docs/routing/route.md")] + #[track_caller] + pub fn route(mut self, path: &str, method_router: MethodRouter) -> Self { + panic_on_err!(self.path_router.route(path, method_router)); + self + } + + #[doc = include_str!("../docs/routing/route_service.md")] + pub fn route_service(mut self, path: &str, service: T) -> Self + where + T: Service, Error = Infallible> + Clone + Send + 'static, + T::Response: IntoResponse, + T::Future: Send + 'static, + { + let service = match try_downcast::, _>(service) { + Ok(_) => { + panic!( + "Invalid route: `Router::route_service` cannot be used with `Router`s. \ + Use `Router::nest` instead" + ); + } + Err(service) => service, + }; + + panic_on_err!(self.path_router.route_service(path, service)); + self + } + + #[doc = include_str!("../docs/routing/nest.md")] + #[track_caller] + pub fn nest(mut self, path: &str, router: Router) -> Self { + let Router { + path_router, + fallback_router, + default_fallback, + // we don't need to inherit the catch-all fallback. It is only used for CONNECT + // requests with an empty path. If we were to inherit the catch-all fallback + // it would end up matching `/{path}/*` which doesn't match empty paths. + catch_all_fallback: _, + } = router; + + panic_on_err!(self.path_router.nest(path, path_router)); + + if !default_fallback { + panic_on_err!(self.fallback_router.nest(path, fallback_router)); + } + + self + } + + /// Like [`nest`](Self::nest), but accepts an arbitrary `Service`. 
+ #[track_caller] + pub fn nest_service(mut self, path: &str, service: T) -> Self + where + T: Service, Error = Infallible> + Clone + Send + 'static, + T::Response: IntoResponse, + T::Future: Send + 'static, + { + panic_on_err!(self.path_router.nest_service(path, service)); + self + } + + #[doc = include_str!("../docs/routing/merge.md")] + #[track_caller] + pub fn merge(mut self, other: R) -> Self + where + R: Into>, + { + const PANIC_MSG: &str = + "Failed to merge fallbacks. This is a bug in axum. Please file an issue"; + + let Router { + path_router, + fallback_router: mut other_fallback, + default_fallback, + catch_all_fallback, + } = other.into(); + + panic_on_err!(self.path_router.merge(path_router)); + + match (self.default_fallback, default_fallback) { + // both have the default fallback + // use the one from other + (true, true) => { + self.fallback_router.merge(other_fallback).expect(PANIC_MSG); + } + // self has default fallback, other has a custom fallback + (true, false) => { + self.fallback_router.merge(other_fallback).expect(PANIC_MSG); + self.default_fallback = false; + } + // self has a custom fallback, other has a default + (false, true) => { + let fallback_router = std::mem::take(&mut self.fallback_router); + other_fallback.merge(fallback_router).expect(PANIC_MSG); + self.fallback_router = other_fallback; + } + // both have a custom fallback, not allowed + (false, false) => { + panic!("Cannot merge two `Router`s that both have a fallback") + } + }; + + self.catch_all_fallback = self + .catch_all_fallback + .merge(catch_all_fallback) + .unwrap_or_else(|| panic!("Cannot merge two `Router`s that both have a fallback")); + + self + } + + #[doc = include_str!("../docs/routing/layer.md")] + pub fn layer(self, layer: L) -> Router + where + L: Layer> + Clone + Send + 'static, + L::Service: Service> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Error: Into + 'static, + >>::Future: Send + 'static, + NewReqBody: HttpBody + 'static, 
+ { + Router { + path_router: self.path_router.layer(layer.clone()), + fallback_router: self.fallback_router.layer(layer.clone()), + default_fallback: self.default_fallback, + catch_all_fallback: self.catch_all_fallback.map(|route| route.layer(layer)), + } + } + + #[doc = include_str!("../docs/routing/route_layer.md")] + #[track_caller] + pub fn route_layer(self, layer: L) -> Self + where + L: Layer> + Clone + Send + 'static, + L::Service: Service> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Error: Into + 'static, + >>::Future: Send + 'static, + { + Router { + path_router: self.path_router.route_layer(layer), + fallback_router: self.fallback_router, + default_fallback: self.default_fallback, + catch_all_fallback: self.catch_all_fallback, + } + } + + #[track_caller] + #[doc = include_str!("../docs/routing/fallback.md")] + pub fn fallback(mut self, handler: H) -> Self + where + H: Handler, + T: 'static, + { + self.catch_all_fallback = + Fallback::BoxedHandler(BoxedIntoRoute::from_handler(handler.clone())); + self.fallback_endpoint(Endpoint::MethodRouter(any(handler))) + } + + /// Add a fallback [`Service`] to the router. + /// + /// See [`Router::fallback`] for more details. 
+ pub fn fallback_service(mut self, service: T) -> Self + where + T: Service, Error = Infallible> + Clone + Send + 'static, + T::Response: IntoResponse, + T::Future: Send + 'static, + { + let route = Route::new(service); + self.catch_all_fallback = Fallback::Service(route.clone()); + self.fallback_endpoint(Endpoint::Route(route)) + } + + fn fallback_endpoint(mut self, endpoint: Endpoint) -> Self { + self.fallback_router.set_fallback(endpoint); + self.default_fallback = false; + self + } + + #[doc = include_str!("../docs/routing/with_state.md")] + pub fn with_state(self, state: S) -> Router { + Router { + path_router: self.path_router.with_state(state.clone()), + fallback_router: self.fallback_router.with_state(state.clone()), + default_fallback: self.default_fallback, + catch_all_fallback: self.catch_all_fallback.with_state(state), + } + } + + pub(crate) fn call_with_state( + &mut self, + mut req: Request, + state: S, + ) -> RouteFuture { + // required for opaque routers to still inherit the fallback + // TODO(david): remove this feature in 0.7 + if !self.default_fallback { + req.extensions_mut().insert(SuperFallback(SyncWrapper::new( + self.fallback_router.clone(), + ))); + } + + match self.path_router.call_with_state(req, state) { + Ok(future) => future, + Err((mut req, state)) => { + let super_fallback = req + .extensions_mut() + .remove::>() + .map(|SuperFallback(path_router)| path_router.into_inner()); + + if let Some(mut super_fallback) = super_fallback { + match super_fallback.call_with_state(req, state) { + Ok(future) => return future, + Err((req, state)) => { + return self.catch_all_fallback.call_with_state(req, state); + } + } + } + + match self.fallback_router.call_with_state(req, state) { + Ok(future) => future, + Err((req, state)) => self.catch_all_fallback.call_with_state(req, state), + } + } + } + } +} + +impl Router<(), B> +where + B: HttpBody + Send + 'static, +{ + /// Convert this router into a [`MakeService`], that is a [`Service`] whose + /// 
response is another service. + /// + /// This is useful when running your application with hyper's + /// [`Server`](hyper::server::Server): + /// + /// ``` + /// use axum::{ + /// routing::get, + /// Router, + /// }; + /// + /// let app = Router::new().route("/", get(|| async { "Hi!" })); + /// + /// # async { + /// axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) + /// .serve(app.into_make_service()) + /// .await + /// .expect("server failed"); + /// # }; + /// ``` + /// + /// [`MakeService`]: tower::make::MakeService + pub fn into_make_service(self) -> IntoMakeService { + // call `Router::with_state` such that everything is turned into `Route` eagerly + // rather than doing that per request + IntoMakeService::new(self.with_state(())) + } + + #[doc = include_str!("../docs/routing/into_make_service_with_connect_info.md")] + #[cfg(feature = "tokio")] + pub fn into_make_service_with_connect_info(self) -> IntoMakeServiceWithConnectInfo { + // call `Router::with_state` such that everything is turned into `Route` eagerly + // rather than doing that per request + IntoMakeServiceWithConnectInfo::new(self.with_state(())) + } +} + +impl Service> for Router<(), B> +where + B: HttpBody + Send + 'static, +{ + type Response = Response; + type Error = Infallible; + type Future = RouteFuture; + + #[inline] + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + self.call_with_state(req, ()) + } +} + +enum Fallback { + Default(Route), + Service(Route), + BoxedHandler(BoxedIntoRoute), +} + +impl Fallback +where + S: Clone, +{ + fn merge(self, other: Self) -> Option { + match (self, other) { + (Self::Default(_), pick @ Self::Default(_)) => Some(pick), + (Self::Default(_), pick) | (pick, Self::Default(_)) => Some(pick), + _ => None, + } + } + + fn map(self, f: F) -> Fallback + where + S: 'static, + B: 'static, + E: 'static, + F: FnOnce(Route) -> Route + Clone + Send + 'static, + B2: 
HttpBody + 'static, + E2: 'static, + { + match self { + Self::Default(route) => Fallback::Default(f(route)), + Self::Service(route) => Fallback::Service(f(route)), + Self::BoxedHandler(handler) => Fallback::BoxedHandler(handler.map(f)), + } + } + + fn with_state(self, state: S) -> Fallback { + match self { + Fallback::Default(route) => Fallback::Default(route), + Fallback::Service(route) => Fallback::Service(route), + Fallback::BoxedHandler(handler) => Fallback::Service(handler.into_route(state)), + } + } + + fn call_with_state(&mut self, req: Request, state: S) -> RouteFuture { + match self { + Fallback::Default(route) | Fallback::Service(route) => { + RouteFuture::from_future(route.oneshot_inner(req)) + } + Fallback::BoxedHandler(handler) => { + let mut route = handler.clone().into_route(state); + RouteFuture::from_future(route.oneshot_inner(req)) + } + } + } +} + +impl Clone for Fallback { + fn clone(&self) -> Self { + match self { + Self::Default(inner) => Self::Default(inner.clone()), + Self::Service(inner) => Self::Service(inner.clone()), + Self::BoxedHandler(inner) => Self::BoxedHandler(inner.clone()), + } + } +} + +impl fmt::Debug for Fallback { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Default(inner) => f.debug_tuple("Default").field(inner).finish(), + Self::Service(inner) => f.debug_tuple("Service").field(inner).finish(), + Self::BoxedHandler(_) => f.debug_tuple("BoxedHandler").finish(), + } + } +} + +#[allow(clippy::large_enum_variant)] +enum Endpoint { + MethodRouter(MethodRouter), + Route(Route), +} + +impl Endpoint +where + B: HttpBody + Send + 'static, + S: Clone + Send + Sync + 'static, +{ + fn layer(self, layer: L) -> Endpoint + where + L: Layer> + Clone + Send + 'static, + L::Service: Service> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Error: Into + 'static, + >>::Future: Send + 'static, + NewReqBody: HttpBody + 'static, + { + match self { + 
Endpoint::MethodRouter(method_router) => { + Endpoint::MethodRouter(method_router.layer(layer)) + } + Endpoint::Route(route) => Endpoint::Route(route.layer(layer)), + } + } +} + +impl Clone for Endpoint { + fn clone(&self) -> Self { + match self { + Self::MethodRouter(inner) => Self::MethodRouter(inner.clone()), + Self::Route(inner) => Self::Route(inner.clone()), + } + } +} + +impl fmt::Debug for Endpoint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::MethodRouter(method_router) => { + f.debug_tuple("MethodRouter").field(method_router).finish() + } + Self::Route(route) => f.debug_tuple("Route").field(route).finish(), + } + } +} + +struct SuperFallback(SyncWrapper>); + +#[test] +#[allow(warnings)] +fn traits() { + use crate::test_helpers::*; + assert_send::>(); +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/not_found.rs b/.cargo-vendor/axum-0.6.20/src/routing/not_found.rs new file mode 100644 index 0000000000..dc3fec46ac --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/not_found.rs @@ -0,0 +1,34 @@ +use crate::response::Response; +use axum_core::response::IntoResponse; +use http::{Request, StatusCode}; +use std::{ + convert::Infallible, + future::ready, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// A [`Service`] that responds with `404 Not Found` to all requests. +/// +/// This is used as the bottom service in a method router. You shouldn't have to +/// use it manually. 
+#[derive(Clone, Copy, Debug)] +pub(super) struct NotFound; + +impl Service> for NotFound +where + B: Send + 'static, +{ + type Response = Response; + type Error = Infallible; + type Future = std::future::Ready>; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: Request) -> Self::Future { + ready(Ok(StatusCode::NOT_FOUND.into_response())) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/path_router.rs b/.cargo-vendor/axum-0.6.20/src/routing/path_router.rs new file mode 100644 index 0000000000..b415f4f7b2 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/path_router.rs @@ -0,0 +1,483 @@ +use crate::body::HttpBody; +use axum_core::response::IntoResponse; +use http::Request; +use matchit::MatchError; +use std::{borrow::Cow, collections::HashMap, convert::Infallible, fmt, sync::Arc}; +use tower_layer::Layer; +use tower_service::Service; + +use super::{ + future::RouteFuture, not_found::NotFound, strip_prefix::StripPrefix, url_params, Endpoint, + MethodRouter, Route, RouteId, FALLBACK_PARAM_PATH, NEST_TAIL_PARAM, +}; + +pub(super) struct PathRouter { + routes: HashMap>, + node: Arc, + prev_route_id: RouteId, +} + +impl PathRouter +where + B: HttpBody + Send + 'static, + S: Clone + Send + Sync + 'static, +{ + pub(super) fn new_fallback() -> Self { + let mut this = Self::default(); + this.set_fallback(Endpoint::Route(Route::new(NotFound))); + this + } + + pub(super) fn set_fallback(&mut self, endpoint: Endpoint) { + self.replace_endpoint("/", endpoint.clone()); + self.replace_endpoint(FALLBACK_PARAM_PATH, endpoint); + } +} + +impl PathRouter +where + B: HttpBody + Send + 'static, + S: Clone + Send + Sync + 'static, +{ + pub(super) fn route( + &mut self, + path: &str, + method_router: MethodRouter, + ) -> Result<(), Cow<'static, str>> { + fn validate_path(path: &str) -> Result<(), &'static str> { + if path.is_empty() { + return Err("Paths must start with a `/`. 
Use \"/\" for root routes"); + } else if !path.starts_with('/') { + return Err("Paths must start with a `/`"); + } + + Ok(()) + } + + validate_path(path)?; + + let id = self.next_route_id(); + + let endpoint = if let Some((route_id, Endpoint::MethodRouter(prev_method_router))) = self + .node + .path_to_route_id + .get(path) + .and_then(|route_id| self.routes.get(route_id).map(|svc| (*route_id, svc))) + { + // if we're adding a new `MethodRouter` to a route that already has one just + // merge them. This makes `.route("/", get(_)).route("/", post(_))` work + let service = Endpoint::MethodRouter( + prev_method_router + .clone() + .merge_for_path(Some(path), method_router), + ); + self.routes.insert(route_id, service); + return Ok(()); + } else { + Endpoint::MethodRouter(method_router) + }; + + self.set_node(path, id)?; + self.routes.insert(id, endpoint); + + Ok(()) + } + + pub(super) fn route_service( + &mut self, + path: &str, + service: T, + ) -> Result<(), Cow<'static, str>> + where + T: Service, Error = Infallible> + Clone + Send + 'static, + T::Response: IntoResponse, + T::Future: Send + 'static, + { + self.route_endpoint(path, Endpoint::Route(Route::new(service))) + } + + pub(super) fn route_endpoint( + &mut self, + path: &str, + endpoint: Endpoint, + ) -> Result<(), Cow<'static, str>> { + if path.is_empty() { + return Err("Paths must start with a `/`. 
Use \"/\" for root routes".into()); + } else if !path.starts_with('/') { + return Err("Paths must start with a `/`".into()); + } + + let id = self.next_route_id(); + self.set_node(path, id)?; + self.routes.insert(id, endpoint); + + Ok(()) + } + + fn set_node(&mut self, path: &str, id: RouteId) -> Result<(), String> { + let mut node = + Arc::try_unwrap(Arc::clone(&self.node)).unwrap_or_else(|node| (*node).clone()); + if let Err(err) = node.insert(path, id) { + return Err(format!("Invalid route {path:?}: {err}")); + } + self.node = Arc::new(node); + Ok(()) + } + + pub(super) fn merge( + &mut self, + other: PathRouter, + ) -> Result<(), Cow<'static, str>> { + let PathRouter { + routes, + node, + prev_route_id: _, + } = other; + + for (id, route) in routes { + let path = node + .route_id_to_path + .get(&id) + .expect("no path for route id. This is a bug in axum. Please file an issue"); + + if IS_FALLBACK && (&**path == "/" || &**path == FALLBACK_PARAM_PATH) { + // when merging two routers it doesn't matter if you do `a.merge(b)` or + // `b.merge(a)`. This must also be true for fallbacks. + // + // However all fallback routers will have routes for `/` and `/*` so when merging + // we have to ignore the top level fallbacks on one side otherwise we get + // conflicts. + // + // `Router::merge` makes sure that when merging fallbacks `other` always has the + // fallback we want to keep. It panics if both routers have a custom fallback. Thus + // it is always okay to ignore one fallback and `Router::merge` also makes sure the + // one we can ignore is that of `self`. 
+ self.replace_endpoint(path, route); + } else { + match route { + Endpoint::MethodRouter(method_router) => self.route(path, method_router)?, + Endpoint::Route(route) => self.route_service(path, route)?, + } + } + } + + Ok(()) + } + + pub(super) fn nest( + &mut self, + path: &str, + router: PathRouter, + ) -> Result<(), Cow<'static, str>> { + let prefix = validate_nest_path(path); + + let PathRouter { + routes, + node, + prev_route_id: _, + } = router; + + for (id, endpoint) in routes { + let inner_path = node + .route_id_to_path + .get(&id) + .expect("no path for route id. This is a bug in axum. Please file an issue"); + + let path = path_for_nested_route(prefix, inner_path); + + match endpoint.layer(StripPrefix::layer(prefix)) { + Endpoint::MethodRouter(method_router) => { + self.route(&path, method_router)?; + } + Endpoint::Route(route) => { + self.route_endpoint(&path, Endpoint::Route(route))?; + } + } + } + + Ok(()) + } + + pub(super) fn nest_service(&mut self, path: &str, svc: T) -> Result<(), Cow<'static, str>> + where + T: Service, Error = Infallible> + Clone + Send + 'static, + T::Response: IntoResponse, + T::Future: Send + 'static, + { + let path = validate_nest_path(path); + let prefix = path; + + let path = if path.ends_with('/') { + format!("{path}*{NEST_TAIL_PARAM}") + } else { + format!("{path}/*{NEST_TAIL_PARAM}") + }; + + let endpoint = Endpoint::Route(Route::new(StripPrefix::new(svc, prefix))); + + self.route_endpoint(&path, endpoint.clone())?; + + // `/*rest` is not matched by `/` so we need to also register a router at the + // prefix itself. 
Otherwise if you were to nest at `/foo` then `/foo` itself + // wouldn't match, which it should + self.route_endpoint(prefix, endpoint.clone())?; + if !prefix.ends_with('/') { + // same goes for `/foo/`, that should also match + self.route_endpoint(&format!("{prefix}/"), endpoint)?; + } + + Ok(()) + } + + pub(super) fn layer(self, layer: L) -> PathRouter + where + L: Layer> + Clone + Send + 'static, + L::Service: Service> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Error: Into + 'static, + >>::Future: Send + 'static, + NewReqBody: HttpBody + 'static, + { + let routes = self + .routes + .into_iter() + .map(|(id, endpoint)| { + let route = endpoint.layer(layer.clone()); + (id, route) + }) + .collect(); + + PathRouter { + routes, + node: self.node, + prev_route_id: self.prev_route_id, + } + } + + #[track_caller] + pub(super) fn route_layer(self, layer: L) -> Self + where + L: Layer> + Clone + Send + 'static, + L::Service: Service> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Error: Into + 'static, + >>::Future: Send + 'static, + { + if self.routes.is_empty() { + panic!( + "Adding a route_layer before any routes is a no-op. \ + Add the routes you want the layer to apply to first." 
+ ); + } + + let routes = self + .routes + .into_iter() + .map(|(id, endpoint)| { + let route = endpoint.layer(layer.clone()); + (id, route) + }) + .collect(); + + PathRouter { + routes, + node: self.node, + prev_route_id: self.prev_route_id, + } + } + + pub(super) fn with_state(self, state: S) -> PathRouter { + let routes = self + .routes + .into_iter() + .map(|(id, endpoint)| { + let endpoint: Endpoint = match endpoint { + Endpoint::MethodRouter(method_router) => { + Endpoint::MethodRouter(method_router.with_state(state.clone())) + } + Endpoint::Route(route) => Endpoint::Route(route), + }; + (id, endpoint) + }) + .collect(); + + PathRouter { + routes, + node: self.node, + prev_route_id: self.prev_route_id, + } + } + + pub(super) fn call_with_state( + &mut self, + mut req: Request, + state: S, + ) -> Result, (Request, S)> { + #[cfg(feature = "original-uri")] + { + use crate::extract::OriginalUri; + + if req.extensions().get::().is_none() { + let original_uri = OriginalUri(req.uri().clone()); + req.extensions_mut().insert(original_uri); + } + } + + let path = req.uri().path().to_owned(); + + match self.node.at(&path) { + Ok(match_) => { + let id = *match_.value; + + if !IS_FALLBACK { + #[cfg(feature = "matched-path")] + crate::extract::matched_path::set_matched_path_for_request( + id, + &self.node.route_id_to_path, + req.extensions_mut(), + ); + } + + url_params::insert_url_params(req.extensions_mut(), match_.params); + + let endpont = self + .routes + .get_mut(&id) + .expect("no route for id. This is a bug in axum. 
Please file an issue"); + + match endpont { + Endpoint::MethodRouter(method_router) => { + Ok(method_router.call_with_state(req, state)) + } + Endpoint::Route(route) => Ok(route.clone().call(req)), + } + } + // explicitly handle all variants in case matchit adds + // new ones we need to handle differently + Err( + MatchError::NotFound + | MatchError::ExtraTrailingSlash + | MatchError::MissingTrailingSlash, + ) => Err((req, state)), + } + } + + pub(super) fn replace_endpoint(&mut self, path: &str, endpoint: Endpoint) { + match self.node.at(path) { + Ok(match_) => { + let id = *match_.value; + self.routes.insert(id, endpoint); + } + Err(_) => self + .route_endpoint(path, endpoint) + .expect("path wasn't matched so endpoint shouldn't exist"), + } + } + + fn next_route_id(&mut self) -> RouteId { + let next_id = self + .prev_route_id + .0 + .checked_add(1) + .expect("Over `u32::MAX` routes created. If you need this, please file an issue."); + self.prev_route_id = RouteId(next_id); + self.prev_route_id + } +} + +impl Default for PathRouter { + fn default() -> Self { + Self { + routes: Default::default(), + node: Default::default(), + prev_route_id: RouteId(0), + } + } +} + +impl fmt::Debug for PathRouter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PathRouter") + .field("routes", &self.routes) + .field("node", &self.node) + .finish() + } +} + +impl Clone for PathRouter { + fn clone(&self) -> Self { + Self { + routes: self.routes.clone(), + node: self.node.clone(), + prev_route_id: self.prev_route_id, + } + } +} + +/// Wrapper around `matchit::Router` that supports merging two `Router`s. 
+#[derive(Clone, Default)] +struct Node { + inner: matchit::Router, + route_id_to_path: HashMap>, + path_to_route_id: HashMap, RouteId>, +} + +impl Node { + fn insert( + &mut self, + path: impl Into, + val: RouteId, + ) -> Result<(), matchit::InsertError> { + let path = path.into(); + + self.inner.insert(&path, val)?; + + let shared_path: Arc = path.into(); + self.route_id_to_path.insert(val, shared_path.clone()); + self.path_to_route_id.insert(shared_path, val); + + Ok(()) + } + + fn at<'n, 'p>( + &'n self, + path: &'p str, + ) -> Result, MatchError> { + self.inner.at(path) + } +} + +impl fmt::Debug for Node { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Node") + .field("paths", &self.route_id_to_path) + .finish() + } +} + +#[track_caller] +fn validate_nest_path(path: &str) -> &str { + if path.is_empty() { + // nesting at `""` and `"/"` should mean the same thing + return "/"; + } + + if path.contains('*') { + panic!("Invalid route: nested routes cannot contain wildcards (*)"); + } + + path +} + +pub(crate) fn path_for_nested_route<'a>(prefix: &'a str, path: &'a str) -> Cow<'a, str> { + debug_assert!(prefix.starts_with('/')); + debug_assert!(path.starts_with('/')); + + if prefix.ends_with('/') { + format!("{prefix}{}", path.trim_start_matches('/')).into() + } else if path == "/" { + prefix.into() + } else { + format!("{prefix}{path}").into() + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/route.rs b/.cargo-vendor/axum-0.6.20/src/routing/route.rs new file mode 100644 index 0000000000..1667db1607 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/route.rs @@ -0,0 +1,255 @@ +use crate::{ + body::{boxed, Body, Empty, HttpBody}, + response::Response, +}; +use axum_core::response::IntoResponse; +use bytes::Bytes; +use http::{ + header::{self, CONTENT_LENGTH}, + HeaderMap, HeaderValue, Request, +}; +use pin_project_lite::pin_project; +use std::{ + convert::Infallible, + fmt, + future::Future, + pin::Pin, + 
task::{Context, Poll}, +}; +use tower::{ + util::{BoxCloneService, MapResponseLayer, Oneshot}, + ServiceBuilder, ServiceExt, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// How routes are stored inside a [`Router`](super::Router). +/// +/// You normally shouldn't need to care about this type. It's used in +/// [`Router::layer`](super::Router::layer). +pub struct Route(BoxCloneService, Response, E>); + +impl Route { + pub(crate) fn new(svc: T) -> Self + where + T: Service, Error = E> + Clone + Send + 'static, + T::Response: IntoResponse + 'static, + T::Future: Send + 'static, + { + Self(BoxCloneService::new( + svc.map_response(IntoResponse::into_response), + )) + } + + pub(crate) fn oneshot_inner( + &mut self, + req: Request, + ) -> Oneshot, Response, E>, Request> { + self.0.clone().oneshot(req) + } + + pub(crate) fn layer(self, layer: L) -> Route + where + L: Layer> + Clone + Send + 'static, + L::Service: Service> + Clone + Send + 'static, + >>::Response: IntoResponse + 'static, + >>::Error: Into + 'static, + >>::Future: Send + 'static, + NewReqBody: 'static, + NewError: 'static, + { + let layer = ServiceBuilder::new() + .map_err(Into::into) + .layer(MapResponseLayer::new(IntoResponse::into_response)) + .layer(layer) + .into_inner(); + + Route::new(layer.layer(self)) + } +} + +impl Clone for Route { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl fmt::Debug for Route { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Route").finish() + } +} + +impl Service> for Route +where + B: HttpBody, +{ + type Response = Response; + type Error = E; + type Future = RouteFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + RouteFuture::from_future(self.oneshot_inner(req)) + } +} + +pin_project! { + /// Response future for [`Route`]. 
+ pub struct RouteFuture { + #[pin] + kind: RouteFutureKind, + strip_body: bool, + allow_header: Option, + } +} + +pin_project! { + #[project = RouteFutureKindProj] + enum RouteFutureKind { + Future { + #[pin] + future: Oneshot< + BoxCloneService, Response, E>, + Request, + >, + }, + Response { + response: Option, + } + } +} + +impl RouteFuture { + pub(crate) fn from_future( + future: Oneshot, Response, E>, Request>, + ) -> Self { + Self { + kind: RouteFutureKind::Future { future }, + strip_body: false, + allow_header: None, + } + } + + pub(crate) fn strip_body(mut self, strip_body: bool) -> Self { + self.strip_body = strip_body; + self + } + + pub(crate) fn allow_header(mut self, allow_header: Bytes) -> Self { + self.allow_header = Some(allow_header); + self + } +} + +impl Future for RouteFuture +where + B: HttpBody, +{ + type Output = Result; + + #[inline] + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + + let mut res = match this.kind.project() { + RouteFutureKindProj::Future { future } => match future.poll(cx) { + Poll::Ready(Ok(res)) => res, + Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Pending => return Poll::Pending, + }, + RouteFutureKindProj::Response { response } => { + response.take().expect("future polled after completion") + } + }; + + set_allow_header(res.headers_mut(), this.allow_header); + + // make sure to set content-length before removing the body + set_content_length(res.size_hint(), res.headers_mut()); + + let res = if *this.strip_body { + res.map(|_| boxed(Empty::new())) + } else { + res + }; + + Poll::Ready(Ok(res)) + } +} + +fn set_allow_header(headers: &mut HeaderMap, allow_header: &mut Option) { + match allow_header.take() { + Some(allow_header) if !headers.contains_key(header::ALLOW) => { + headers.insert( + header::ALLOW, + HeaderValue::from_maybe_shared(allow_header).expect("invalid `Allow` header"), + ); + } + _ => {} + } +} + +fn set_content_length(size_hint: 
http_body::SizeHint, headers: &mut HeaderMap) { + if headers.contains_key(CONTENT_LENGTH) { + return; + } + + if let Some(size) = size_hint.exact() { + let header_value = if size == 0 { + #[allow(clippy::declare_interior_mutable_const)] + const ZERO: HeaderValue = HeaderValue::from_static("0"); + + ZERO + } else { + let mut buffer = itoa::Buffer::new(); + HeaderValue::from_str(buffer.format(size)).unwrap() + }; + + headers.insert(CONTENT_LENGTH, header_value); + } +} + +pin_project! { + /// A [`RouteFuture`] that always yields a [`Response`]. + pub struct InfallibleRouteFuture { + #[pin] + future: RouteFuture, + } +} + +impl InfallibleRouteFuture { + pub(crate) fn new(future: RouteFuture) -> Self { + Self { future } + } +} + +impl Future for InfallibleRouteFuture +where + B: HttpBody, +{ + type Output = Response; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match futures_util::ready!(self.project().future.poll(cx)) { + Ok(response) => Poll::Ready(response), + Err(err) => match err {}, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn traits() { + use crate::test_helpers::*; + assert_send::>(); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/strip_prefix.rs b/.cargo-vendor/axum-0.6.20/src/routing/strip_prefix.rs new file mode 100644 index 0000000000..671c4de773 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/strip_prefix.rs @@ -0,0 +1,459 @@ +use http::{Request, Uri}; +use std::{ + sync::Arc, + task::{Context, Poll}, +}; +use tower::Layer; +use tower_layer::layer_fn; +use tower_service::Service; + +#[derive(Clone)] +pub(super) struct StripPrefix { + inner: S, + prefix: Arc, +} + +impl StripPrefix { + pub(super) fn new(inner: S, prefix: &str) -> Self { + Self { + inner, + prefix: prefix.into(), + } + } + + pub(super) fn layer(prefix: &str) -> impl Layer + Clone { + let prefix = Arc::from(prefix); + layer_fn(move |inner| Self { + inner, + prefix: Arc::clone(&prefix), + }) + } +} + +impl Service> for 
StripPrefix +where + S: Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + if let Some(new_uri) = strip_prefix(req.uri(), &self.prefix) { + *req.uri_mut() = new_uri; + } + self.inner.call(req) + } +} + +fn strip_prefix(uri: &Uri, prefix: &str) -> Option { + let path_and_query = uri.path_and_query()?; + + // Check whether the prefix matches the path and if so how long the matching prefix is. + // + // For example: + // + // prefix = /api + // path = /api/users + // ^^^^ this much is matched and the length is 4. Thus if we chop off the first 4 + // characters we get the remainder + // + // prefix = /api/:version + // path = /api/v0/users + // ^^^^^^^ this much is matched and the length is 7. + let mut matching_prefix_length = Some(0); + for item in zip_longest(segments(path_and_query.path()), segments(prefix)) { + // count the `/` + *matching_prefix_length.as_mut().unwrap() += 1; + + match item { + Item::Both(path_segment, prefix_segment) => { + if prefix_segment.starts_with(':') || path_segment == prefix_segment { + // the prefix segment is either a param, which matches anything, or + // it actually matches the path segment + *matching_prefix_length.as_mut().unwrap() += path_segment.len(); + } else if prefix_segment.is_empty() { + // the prefix ended in a `/` so we got a match. + // + // For example: + // + // prefix = /foo/ + // path = /foo/bar + // + // The prefix matches and the new path should be `/bar` + break; + } else { + // the prefix segment didn't match so there is no match + matching_prefix_length = None; + break; + } + } + // the path had more segments than the prefix but we got a match. 
+ // + // For example: + // + // prefix = /foo + // path = /foo/bar + Item::First(_) => { + break; + } + // the prefix had more segments than the path so there is no match + Item::Second(_) => { + matching_prefix_length = None; + break; + } + } + } + + // if the prefix matches it will always do so up until a `/`, it cannot match only + // part of a segment. Therefore this will always be at a char boundary and `split_at` wont + // panic + let after_prefix = uri.path().split_at(matching_prefix_length?).1; + + let new_path_and_query = match (after_prefix.starts_with('/'), path_and_query.query()) { + (true, None) => after_prefix.parse().unwrap(), + (true, Some(query)) => format!("{after_prefix}?{query}").parse().unwrap(), + (false, None) => format!("/{after_prefix}").parse().unwrap(), + (false, Some(query)) => format!("/{after_prefix}?{query}").parse().unwrap(), + }; + + let mut parts = uri.clone().into_parts(); + parts.path_and_query = Some(new_path_and_query); + + Some(Uri::from_parts(parts).unwrap()) +} + +fn segments(s: &str) -> impl Iterator { + assert!( + s.starts_with('/'), + "path didn't start with '/'. axum should have caught this higher up." + ); + + s.split('/') + // skip one because paths always start with `/` so `/a/b` would become ["", "a", "b"] + // otherwise + .skip(1) +} + +fn zip_longest(a: I, b: I2) -> impl Iterator> +where + I: Iterator, + I2: Iterator, +{ + let a = a.map(Some).chain(std::iter::repeat_with(|| None)); + let b = b.map(Some).chain(std::iter::repeat_with(|| None)); + a.zip(b).map_while(|(a, b)| match (a, b) { + (Some(a), Some(b)) => Some(Item::Both(a, b)), + (Some(a), None) => Some(Item::First(a)), + (None, Some(b)) => Some(Item::Second(b)), + (None, None) => None, + }) +} + +#[derive(Debug)] +enum Item { + Both(T, T), + First(T), + Second(T), +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use quickcheck::Arbitrary; + use quickcheck_macros::quickcheck; + + macro_rules! 
test { + ( + $name:ident, + uri = $uri:literal, + prefix = $prefix:literal, + expected = $expected:expr, + ) => { + #[test] + fn $name() { + let uri = $uri.parse().unwrap(); + let new_uri = strip_prefix(&uri, $prefix).map(|uri| uri.to_string()); + assert_eq!(new_uri.as_deref(), $expected); + } + }; + } + + test!(empty, uri = "/", prefix = "/", expected = Some("/"),); + + test!( + single_segment, + uri = "/a", + prefix = "/a", + expected = Some("/"), + ); + + test!( + single_segment_root_uri, + uri = "/", + prefix = "/a", + expected = None, + ); + + // the prefix is empty, so removing it should have no effect + test!( + single_segment_root_prefix, + uri = "/a", + prefix = "/", + expected = Some("/a"), + ); + + test!( + single_segment_no_match, + uri = "/a", + prefix = "/b", + expected = None, + ); + + test!( + single_segment_trailing_slash, + uri = "/a/", + prefix = "/a/", + expected = Some("/"), + ); + + test!( + single_segment_trailing_slash_2, + uri = "/a", + prefix = "/a/", + expected = None, + ); + + test!( + single_segment_trailing_slash_3, + uri = "/a/", + prefix = "/a", + expected = Some("/"), + ); + + test!( + multi_segment, + uri = "/a/b", + prefix = "/a", + expected = Some("/b"), + ); + + test!( + multi_segment_2, + uri = "/b/a", + prefix = "/a", + expected = None, + ); + + test!( + multi_segment_3, + uri = "/a", + prefix = "/a/b", + expected = None, + ); + + test!( + multi_segment_4, + uri = "/a/b", + prefix = "/b", + expected = None, + ); + + test!( + multi_segment_trailing_slash, + uri = "/a/b/", + prefix = "/a/b/", + expected = Some("/"), + ); + + test!( + multi_segment_trailing_slash_2, + uri = "/a/b", + prefix = "/a/b/", + expected = None, + ); + + test!( + multi_segment_trailing_slash_3, + uri = "/a/b/", + prefix = "/a/b", + expected = Some("/"), + ); + + test!(param_0, uri = "/", prefix = "/:param", expected = Some("/"),); + + test!( + param_1, + uri = "/a", + prefix = "/:param", + expected = Some("/"), + ); + + test!( + param_2, + uri = "/a/b", + 
prefix = "/:param", + expected = Some("/b"), + ); + + test!( + param_3, + uri = "/b/a", + prefix = "/:param", + expected = Some("/a"), + ); + + test!( + param_4, + uri = "/a/b", + prefix = "/a/:param", + expected = Some("/"), + ); + + test!(param_5, uri = "/b/a", prefix = "/a/:param", expected = None,); + + test!(param_6, uri = "/a/b", prefix = "/:param/a", expected = None,); + + test!( + param_7, + uri = "/b/a", + prefix = "/:param/a", + expected = Some("/"), + ); + + test!( + param_8, + uri = "/a/b/c", + prefix = "/a/:param/c", + expected = Some("/"), + ); + + test!( + param_9, + uri = "/c/b/a", + prefix = "/a/:param/c", + expected = None, + ); + + test!( + param_10, + uri = "/a/", + prefix = "/:param", + expected = Some("/"), + ); + + test!(param_11, uri = "/a", prefix = "/:param/", expected = None,); + + test!( + param_12, + uri = "/a/", + prefix = "/:param/", + expected = Some("/"), + ); + + test!( + param_13, + uri = "/a/a", + prefix = "/a/", + expected = Some("/a"), + ); + + #[quickcheck] + fn does_not_panic(uri_and_prefix: UriAndPrefix) -> bool { + let UriAndPrefix { uri, prefix } = uri_and_prefix; + strip_prefix(&uri, &prefix); + true + } + + #[derive(Clone, Debug)] + struct UriAndPrefix { + uri: Uri, + prefix: String, + } + + impl Arbitrary for UriAndPrefix { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let mut uri = String::new(); + let mut prefix = String::new(); + + let size = u8_between(1, 20, g); + + for _ in 0..size { + let segment = ascii_alphanumeric(g); + + uri.push('/'); + uri.push_str(&segment); + + prefix.push('/'); + + let make_matching_segment = bool::arbitrary(g); + let make_capture = bool::arbitrary(g); + + match (make_matching_segment, make_capture) { + (_, true) => { + prefix.push_str(":a"); + } + (true, false) => { + prefix.push_str(&segment); + } + (false, false) => { + prefix.push_str(&ascii_alphanumeric(g)); + } + } + } + + if bool::arbitrary(g) { + uri.push('/'); + } + + if bool::arbitrary(g) { + prefix.push('/'); + } + + 
Self { + uri: uri.parse().unwrap(), + prefix, + } + } + } + + fn ascii_alphanumeric(g: &mut quickcheck::Gen) -> String { + #[derive(Clone)] + struct AsciiAlphanumeric(String); + + impl Arbitrary for AsciiAlphanumeric { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let mut out = String::new(); + + let size = u8_between(1, 20, g) as usize; + + while out.len() < size { + let c = char::arbitrary(g); + if c.is_ascii_alphanumeric() { + out.push(c); + } + } + Self(out) + } + } + + let out = AsciiAlphanumeric::arbitrary(g).0; + assert!(!out.is_empty()); + out + } + + fn u8_between(lower: u8, upper: u8, g: &mut quickcheck::Gen) -> u8 { + loop { + let size = u8::arbitrary(g); + if size > lower && size <= upper { + break size; + } + } + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/tests/fallback.rs b/.cargo-vendor/axum-0.6.20/src/routing/tests/fallback.rs new file mode 100644 index 0000000000..869b7329cf --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/tests/fallback.rs @@ -0,0 +1,362 @@ +use tower::ServiceExt; + +use super::*; +use crate::middleware::{map_request, map_response}; + +#[crate::test] +async fn basic() { + let app = Router::new() + .route("/foo", get(|| async {})) + .fallback(|| async { "fallback" }); + + let client = TestClient::new(app); + + assert_eq!(client.get("/foo").send().await.status(), StatusCode::OK); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "fallback"); +} + +#[crate::test] +async fn nest() { + let app = Router::new() + .nest("/foo", Router::new().route("/bar", get(|| async {}))) + .fallback(|| async { "fallback" }); + + let client = TestClient::new(app); + + assert_eq!(client.get("/foo/bar").send().await.status(), StatusCode::OK); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "fallback"); +} + +#[crate::test] +async fn or() { + let one = 
Router::new().route("/one", get(|| async {})); + let two = Router::new().route("/two", get(|| async {})); + + let app = one.merge(two).fallback(|| async { "fallback" }); + + let client = TestClient::new(app); + + assert_eq!(client.get("/one").send().await.status(), StatusCode::OK); + assert_eq!(client.get("/two").send().await.status(), StatusCode::OK); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "fallback"); +} + +#[crate::test] +async fn fallback_accessing_state() { + let app = Router::new() + .fallback(|State(state): State<&'static str>| async move { state }) + .with_state("state"); + + let client = TestClient::new(app); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "state"); +} + +async fn inner_fallback() -> impl IntoResponse { + (StatusCode::NOT_FOUND, "inner") +} + +async fn outer_fallback() -> impl IntoResponse { + (StatusCode::NOT_FOUND, "outer") +} + +#[crate::test] +async fn nested_router_inherits_fallback() { + let inner = Router::new(); + let app = Router::new().nest("/foo", inner).fallback(outer_fallback); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn doesnt_inherit_fallback_if_overriden() { + let inner = Router::new().fallback(inner_fallback); + let app = Router::new().nest("/foo", inner).fallback(outer_fallback); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "inner"); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn deeply_nested_inherit_from_top() { + let app = 
Router::new() + .nest("/foo", Router::new().nest("/bar", Router::new())) + .fallback(outer_fallback); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn deeply_nested_inherit_from_middle() { + let app = Router::new().nest( + "/foo", + Router::new() + .nest("/bar", Router::new()) + .fallback(outer_fallback), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn with_middleware_on_inner_fallback() { + async fn never_called(_: Request) -> Request { + panic!("should never be called") + } + + let inner = Router::new().layer(map_request(never_called)); + let app = Router::new().nest("/foo", inner).fallback(outer_fallback); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn also_inherits_default_layered_fallback() { + async fn set_header(mut res: Response) -> Response { + res.headers_mut() + .insert("x-from-fallback", "1".parse().unwrap()); + res + } + + let inner = Router::new(); + let app = Router::new() + .nest("/foo", inner) + .fallback(outer_fallback) + .layer(map_response(set_header)); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.headers()["x-from-fallback"], "1"); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn fallback_inherited_into_nested_router_service() { + let inner = Router::new() + .route( + "/bar", + get(|State(state): State<&'static str>| async move { state }), + ) + .with_state("inner"); + + // with a different state + let app = 
Router::<()>::new() + .nest_service("/foo", inner) + .fallback(outer_fallback); + + let client = TestClient::new(app); + let res = client.get("/foo/not-found").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn fallback_inherited_into_nested_opaque_service() { + let inner = Router::new() + .route( + "/bar", + get(|State(state): State<&'static str>| async move { state }), + ) + .with_state("inner") + // even if the service is made more opaque it should still inherit the fallback + .boxed_clone(); + + // with a different state + let app = Router::<()>::new() + .nest_service("/foo", inner) + .fallback(outer_fallback); + + let client = TestClient::new(app); + let res = client.get("/foo/not-found").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn nest_fallback_on_inner() { + let app = Router::new() + .nest( + "/foo", + Router::new() + .route("/", get(|| async {})) + .fallback(|| async { (StatusCode::NOT_FOUND, "inner fallback") }), + ) + .fallback(|| async { (StatusCode::NOT_FOUND, "outer fallback") }); + + let client = TestClient::new(app); + + let res = client.get("/foo/not-found").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "inner fallback"); +} + +// https://github.com/tokio-rs/axum/issues/1931 +#[crate::test] +async fn doesnt_panic_if_used_with_nested_router() { + async fn handler() {} + + let routes_static = + Router::new().nest_service("/", crate::routing::get_service(handler.into_service())); + + let routes_all = Router::new().fallback_service(routes_static); + + let client = TestClient::new(routes_all); + + let res = client.get("/foobar").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn issue_2072() { + let nested_routes = Router::new().fallback(inner_fallback); + + let app = Router::new() + 
.nest("/nested", nested_routes) + .merge(Router::new()); + + let client = TestClient::new(app); + + let res = client.get("/nested/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "inner"); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, ""); +} + +#[crate::test] +async fn issue_2072_outer_fallback_before_merge() { + let nested_routes = Router::new().fallback(inner_fallback); + + let app = Router::new() + .nest("/nested", nested_routes) + .fallback(outer_fallback) + .merge(Router::new()); + + let client = TestClient::new(app); + + let res = client.get("/nested/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "inner"); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn issue_2072_outer_fallback_after_merge() { + let nested_routes = Router::new().fallback(inner_fallback); + + let app = Router::new() + .nest("/nested", nested_routes) + .merge(Router::new()) + .fallback(outer_fallback); + + let client = TestClient::new(app); + + let res = client.get("/nested/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "inner"); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn merge_router_with_fallback_into_nested_router_with_fallback() { + let nested_routes = Router::new().fallback(inner_fallback); + + let app = Router::new() + .nest("/nested", nested_routes) + .merge(Router::new().fallback(outer_fallback)); + + let client = TestClient::new(app); + + let res = client.get("/nested/does-not-exist").send().await; + assert_eq!(res.status(), 
StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "inner"); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn merging_nested_router_with_fallback_into_router_with_fallback() { + let nested_routes = Router::new().fallback(inner_fallback); + + let app = Router::new() + .fallback(outer_fallback) + .merge(Router::new().nest("/nested", nested_routes)); + + let client = TestClient::new(app); + + let res = client.get("/nested/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "inner"); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn merge_empty_into_router_with_fallback() { + let app = Router::new().fallback(outer_fallback).merge(Router::new()); + + let client = TestClient::new(app); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} + +#[crate::test] +async fn merge_router_with_fallback_into_empty() { + let app = Router::new().merge(Router::new().fallback(outer_fallback)); + + let client = TestClient::new(app); + + let res = client.get("/does-not-exist").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert_eq!(res.text().await, "outer"); +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/tests/get_to_head.rs b/.cargo-vendor/axum-0.6.20/src/routing/tests/get_to_head.rs new file mode 100644 index 0000000000..b46114c60e --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/tests/get_to_head.rs @@ -0,0 +1,73 @@ +use super::*; +use http::Method; +use tower::ServiceExt; + +mod for_handlers { + use super::*; + use http::HeaderMap; + + #[crate::test] + async fn get_handles_head() { + let app = Router::new().route( + "/", + 
get(|| async { + let mut headers = HeaderMap::new(); + headers.insert("x-some-header", "foobar".parse().unwrap()); + (headers, "you shouldn't see this") + }), + ); + + // don't use reqwest because it always strips bodies from HEAD responses + let res = app + .oneshot( + Request::builder() + .uri("/") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["x-some-header"], "foobar"); + + let body = hyper::body::to_bytes(res.into_body()).await.unwrap(); + assert_eq!(body.len(), 0); + } +} + +mod for_services { + use super::*; + use crate::routing::get_service; + + #[crate::test] + async fn get_handles_head() { + let app = Router::new().route( + "/", + get_service(service_fn(|_req: Request| async move { + Ok::<_, Infallible>( + ([("x-some-header", "foobar")], "you shouldn't see this").into_response(), + ) + })), + ); + + // don't use reqwest because it always strips bodies from HEAD responses + let res = app + .oneshot( + Request::builder() + .uri("/") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["x-some-header"], "foobar"); + + let body = hyper::body::to_bytes(res.into_body()).await.unwrap(); + assert_eq!(body.len(), 0); + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/tests/handle_error.rs b/.cargo-vendor/axum-0.6.20/src/routing/tests/handle_error.rs new file mode 100644 index 0000000000..3781677d6b --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/tests/handle_error.rs @@ -0,0 +1,113 @@ +use super::*; +use std::future::{pending, ready}; +use tower::{timeout::TimeoutLayer, ServiceBuilder}; + +async fn unit() {} + +async fn forever() { + pending().await +} + +fn timeout() -> TimeoutLayer { + TimeoutLayer::new(Duration::from_millis(10)) +} + +#[derive(Clone)] +struct Svc; + +impl Service for Svc { + type Response = Response; + type Error 
= hyper::Error; + type Future = Ready>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: R) -> Self::Future { + ready(Ok(Response::new(Body::empty()))) + } +} + +#[crate::test] +async fn handler() { + let app = Router::new().route( + "/", + get(forever.layer( + ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_: BoxError| async { + StatusCode::REQUEST_TIMEOUT + })) + .layer(timeout()), + )), + ); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); +} + +#[crate::test] +async fn handler_multiple_methods_first() { + let app = Router::new().route( + "/", + get(forever.layer( + ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_: BoxError| async { + StatusCode::REQUEST_TIMEOUT + })) + .layer(timeout()), + )) + .post(unit), + ); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); +} + +#[crate::test] +async fn handler_multiple_methods_middle() { + let app = Router::new().route( + "/", + delete(unit) + .get( + forever.layer( + ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_: BoxError| async { + StatusCode::REQUEST_TIMEOUT + })) + .layer(timeout()), + ), + ) + .post(unit), + ); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); +} + +#[crate::test] +async fn handler_multiple_methods_last() { + let app = Router::new().route( + "/", + delete(unit).get( + forever.layer( + ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_: BoxError| async { + StatusCode::REQUEST_TIMEOUT + })) + .layer(timeout()), + ), + ), + ); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/tests/merge.rs 
b/.cargo-vendor/axum-0.6.20/src/routing/tests/merge.rs new file mode 100644 index 0000000000..0344a87939 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/tests/merge.rs @@ -0,0 +1,399 @@ +use super::*; +use crate::{error_handling::HandleErrorLayer, extract::OriginalUri, response::IntoResponse, Json}; +use serde_json::{json, Value}; +use tower::{limit::ConcurrencyLimitLayer, timeout::TimeoutLayer}; + +#[crate::test] +async fn basic() { + let one = Router::new() + .route("/foo", get(|| async {})) + .route("/bar", get(|| async {})); + let two = Router::new().route("/baz", get(|| async {})); + let app = one.merge(two); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/baz").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/qux").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn multiple_ors_balanced_differently() { + let one = Router::new().route("/one", get(|| async { "one" })); + let two = Router::new().route("/two", get(|| async { "two" })); + let three = Router::new().route("/three", get(|| async { "three" })); + let four = Router::new().route("/four", get(|| async { "four" })); + + test( + "one", + one.clone() + .merge(two.clone()) + .merge(three.clone()) + .merge(four.clone()), + ) + .await; + + test( + "two", + one.clone() + .merge(two.clone()) + .merge(three.clone().merge(four.clone())), + ) + .await; + + test( + "three", + one.clone() + .merge(two.clone().merge(three.clone()).merge(four.clone())), + ) + .await; + + test("four", one.merge(two.merge(three.merge(four)))).await; + + async fn test(name: &str, app: Router) { + let client = TestClient::new(app); + + for n in ["one", "two", "three", "four"].iter() { + println!("running: {name} / {n}"); + let res = 
client.get(&format!("/{n}")).send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, *n); + } + } +} + +#[crate::test] +async fn nested_or() { + let bar = Router::new().route("/bar", get(|| async { "bar" })); + let baz = Router::new().route("/baz", get(|| async { "baz" })); + + let bar_or_baz = bar.merge(baz); + + let client = TestClient::new(bar_or_baz.clone()); + assert_eq!(client.get("/bar").send().await.text().await, "bar"); + assert_eq!(client.get("/baz").send().await.text().await, "baz"); + + let client = TestClient::new(Router::new().nest("/foo", bar_or_baz)); + assert_eq!(client.get("/foo/bar").send().await.text().await, "bar"); + assert_eq!(client.get("/foo/baz").send().await.text().await, "baz"); +} + +#[crate::test] +async fn or_with_route_following() { + let one = Router::new().route("/one", get(|| async { "one" })); + let two = Router::new().route("/two", get(|| async { "two" })); + let app = one.merge(two).route("/three", get(|| async { "three" })); + + let client = TestClient::new(app); + + let res = client.get("/one").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/two").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/three").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn layer() { + let one = Router::new().route("/foo", get(|| async {})); + let two = Router::new() + .route("/bar", get(|| async {})) + .layer(ConcurrencyLimitLayer::new(10)); + let app = one.merge(two); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn layer_and_handle_error() { + let one = Router::new().route("/foo", get(|| async {})); + let two = Router::new() + .route("/timeout", get(std::future::pending::<()>)) + .layer( + 
ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_| async { + StatusCode::REQUEST_TIMEOUT + })) + .layer(TimeoutLayer::new(Duration::from_millis(10))), + ); + let app = one.merge(two); + + let client = TestClient::new(app); + + let res = client.get("/timeout").send().await; + assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); +} + +#[crate::test] +async fn nesting() { + let one = Router::new().route("/foo", get(|| async {})); + let two = Router::new().nest("/bar", Router::new().route("/baz", get(|| async {}))); + let app = one.merge(two); + + let client = TestClient::new(app); + + let res = client.get("/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn boxed() { + let one = Router::new().route("/foo", get(|| async {})); + let two = Router::new().route("/bar", get(|| async {})); + let app = one.merge(two); + + let client = TestClient::new(app); + + let res = client.get("/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn many_ors() { + let app = Router::new() + .route("/r1", get(|| async {})) + .merge(Router::new().route("/r2", get(|| async {}))) + .merge(Router::new().route("/r3", get(|| async {}))) + .merge(Router::new().route("/r4", get(|| async {}))) + .merge(Router::new().route("/r5", get(|| async {}))) + .merge(Router::new().route("/r6", get(|| async {}))) + .merge(Router::new().route("/r7", get(|| async {}))); + + let client = TestClient::new(app); + + for n in 1..=7 { + let res = client.get(&format!("/r{n}")).send().await; + assert_eq!(res.status(), StatusCode::OK); + } + + let res = client.get("/r8").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn services() { + use crate::routing::get_service; + + let app = Router::new() + .route( + "/foo", + get_service(service_fn(|_: Request| async { + Ok::<_, Infallible>(Response::new(Body::empty())) + })), + ) + .merge(Router::new().route( + "/bar", + 
get_service(service_fn(|_: Request| async { + Ok::<_, Infallible>(Response::new(Body::empty())) + })), + )); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +async fn all_the_uris( + uri: Uri, + OriginalUri(original_uri): OriginalUri, + req: Request, +) -> impl IntoResponse { + Json(json!({ + "uri": uri.to_string(), + "request_uri": req.uri().to_string(), + "original_uri": original_uri.to_string(), + })) +} + +#[crate::test] +async fn nesting_and_seeing_the_right_uri() { + let one = Router::new().nest("/foo/", Router::new().route("/bar", get(all_the_uris))); + let two = Router::new().route("/foo", get(all_the_uris)); + + let client = TestClient::new(one.merge(two)); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/bar", + "request_uri": "/bar", + "original_uri": "/foo/bar", + }) + ); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/foo", + "request_uri": "/foo", + "original_uri": "/foo", + }) + ); +} + +#[crate::test] +async fn nesting_and_seeing_the_right_uri_at_more_levels_of_nesting() { + let one = Router::new().nest( + "/foo/", + Router::new().nest("/bar", Router::new().route("/baz", get(all_the_uris))), + ); + let two = Router::new().route("/foo", get(all_the_uris)); + + let client = TestClient::new(one.merge(two)); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/baz", + "request_uri": "/baz", + "original_uri": "/foo/bar/baz", + }) + ); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": 
"/foo", + "request_uri": "/foo", + "original_uri": "/foo", + }) + ); +} + +#[crate::test] +async fn nesting_and_seeing_the_right_uri_ors_with_nesting() { + let one = Router::new().nest( + "/one", + Router::new().nest("/bar", Router::new().route("/baz", get(all_the_uris))), + ); + let two = Router::new().nest("/two", Router::new().route("/qux", get(all_the_uris))); + let three = Router::new().route("/three", get(all_the_uris)); + + let client = TestClient::new(one.merge(two).merge(three)); + + let res = client.get("/one/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/baz", + "request_uri": "/baz", + "original_uri": "/one/bar/baz", + }) + ); + + let res = client.get("/two/qux").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/qux", + "request_uri": "/qux", + "original_uri": "/two/qux", + }) + ); + + let res = client.get("/three").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/three", + "request_uri": "/three", + "original_uri": "/three", + }) + ); +} + +#[crate::test] +async fn nesting_and_seeing_the_right_uri_ors_with_multi_segment_uris() { + let one = Router::new().nest( + "/one", + Router::new().nest("/foo", Router::new().route("/bar", get(all_the_uris))), + ); + let two = Router::new().route("/two/foo", get(all_the_uris)); + + let client = TestClient::new(one.merge(two)); + + let res = client.get("/one/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/bar", + "request_uri": "/bar", + "original_uri": "/one/foo/bar", + }) + ); + + let res = client.get("/two/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!( + res.json::().await, + json!({ + "uri": "/two/foo", + "request_uri": "/two/foo", + "original_uri": "/two/foo", + }) + ); +} + +#[crate::test] +async fn 
middleware_that_return_early() { + let private = Router::new() + .route("/", get(|| async {})) + .layer(ValidateRequestHeaderLayer::bearer("password")); + + let public = Router::new().route("/public", get(|| async {})); + + let client = TestClient::new(private.merge(public)); + + assert_eq!( + client.get("/").send().await.status(), + StatusCode::UNAUTHORIZED + ); + assert_eq!( + client + .get("/") + .header("authorization", "Bearer password") + .send() + .await + .status(), + StatusCode::OK + ); + assert_eq!( + client.get("/doesnt-exist").send().await.status(), + StatusCode::NOT_FOUND + ); + assert_eq!(client.get("/public").send().await.status(), StatusCode::OK); +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/tests/mod.rs b/.cargo-vendor/axum-0.6.20/src/routing/tests/mod.rs new file mode 100644 index 0000000000..f1a459d645 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/tests/mod.rs @@ -0,0 +1,1039 @@ +use crate::{ + body::{Bytes, Empty}, + error_handling::HandleErrorLayer, + extract::{self, DefaultBodyLimit, FromRef, Path, State}, + handler::{Handler, HandlerWithoutStateExt}, + response::IntoResponse, + routing::{ + delete, get, get_service, on, on_service, patch, patch_service, + path_router::path_for_nested_route, post, MethodFilter, + }, + test_helpers::{ + tracing_helpers::{capture_tracing, TracingEvent}, + *, + }, + BoxError, Extension, Json, Router, +}; +use futures_util::stream::StreamExt; +use http::{ + header::CONTENT_LENGTH, + header::{ALLOW, HOST}, + HeaderMap, Method, Request, Response, StatusCode, Uri, +}; +use hyper::Body; +use serde::Deserialize; +use serde_json::json; +use std::{ + convert::Infallible, + future::{ready, Ready}, + sync::atomic::{AtomicBool, AtomicUsize, Ordering}, + task::{Context, Poll}, + time::Duration, +}; +use tower::{ + service_fn, timeout::TimeoutLayer, util::MapResponseLayer, ServiceBuilder, ServiceExt, +}; +use tower_http::{limit::RequestBodyLimitLayer, validate_request::ValidateRequestHeaderLayer}; +use 
tower_service::Service; + +mod fallback; +mod get_to_head; +mod handle_error; +mod merge; +mod nest; + +#[crate::test] +async fn hello_world() { + async fn root(_: Request) -> &'static str { + "Hello, World!" + } + + async fn foo(_: Request) -> &'static str { + "foo" + } + + async fn users_create(_: Request) -> &'static str { + "users#create" + } + + let app = Router::new() + .route("/", get(root).post(foo)) + .route("/users", post(users_create)); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + let body = res.text().await; + assert_eq!(body, "Hello, World!"); + + let res = client.post("/").send().await; + let body = res.text().await; + assert_eq!(body, "foo"); + + let res = client.post("/users").send().await; + let body = res.text().await; + assert_eq!(body, "users#create"); +} + +#[crate::test] +async fn routing() { + let app = Router::new() + .route( + "/users", + get(|_: Request| async { "users#index" }) + .post(|_: Request| async { "users#create" }), + ) + .route("/users/:id", get(|_: Request| async { "users#show" })) + .route( + "/users/:id/action", + get(|_: Request| async { "users#action" }), + ); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/users").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "users#index"); + + let res = client.post("/users").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "users#create"); + + let res = client.get("/users/1").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "users#show"); + + let res = client.get("/users/1/action").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "users#action"); +} + +#[crate::test] +async fn router_type_doesnt_change() { + let app: Router = Router::new() + .route( + "/", + 
on(MethodFilter::GET, |_: Request| async { + "hi from GET" + }) + .on(MethodFilter::POST, |_: Request| async { + "hi from POST" + }), + ) + .layer(tower_http::compression::CompressionLayer::new()); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "hi from GET"); + + let res = client.post("/").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "hi from POST"); +} + +#[crate::test] +async fn routing_between_services() { + use std::convert::Infallible; + use tower::service_fn; + + async fn handle(_: Request) -> &'static str { + "handler" + } + + let app = Router::new() + .route( + "/one", + get_service(service_fn(|_: Request| async { + Ok::<_, Infallible>(Response::new(Body::from("one get"))) + })) + .post_service(service_fn(|_: Request| async { + Ok::<_, Infallible>(Response::new(Body::from("one post"))) + })) + .on_service( + MethodFilter::PUT, + service_fn(|_: Request| async { + Ok::<_, Infallible>(Response::new(Body::from("one put"))) + }), + ), + ) + .route("/two", on_service(MethodFilter::GET, handle.into_service())); + + let client = TestClient::new(app); + + let res = client.get("/one").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "one get"); + + let res = client.post("/one").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "one post"); + + let res = client.put("/one").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "one put"); + + let res = client.get("/two").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "handler"); +} + +#[crate::test] +async fn middleware_on_single_route() { + use tower::ServiceBuilder; + use tower_http::{compression::CompressionLayer, trace::TraceLayer}; + + async fn handle(_: Request) -> &'static str { + "Hello, World!" 
+ } + + let app = Router::new().route( + "/", + get(handle.layer( + ServiceBuilder::new() + .layer(TraceLayer::new_for_http()) + .layer(CompressionLayer::new()) + .into_inner(), + )), + ); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + let body = res.text().await; + + assert_eq!(body, "Hello, World!"); +} + +#[crate::test] +async fn service_in_bottom() { + async fn handler(_req: Request) -> Result, Infallible> { + Ok(Response::new(hyper::Body::empty())) + } + + let app = Router::new().route("/", get_service(service_fn(handler))); + + TestClient::new(app); +} + +#[crate::test] +async fn wrong_method_handler() { + let app = Router::new() + .route("/", get(|| async {}).post(|| async {})) + .route("/foo", patch(|| async {})); + + let client = TestClient::new(app); + + let res = client.patch("/").send().await; + assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(res.headers()[ALLOW], "GET,HEAD,POST"); + + let res = client.patch("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.post("/foo").send().await; + assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(res.headers()[ALLOW], "PATCH"); + + let res = client.get("/bar").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn wrong_method_service() { + #[derive(Clone)] + struct Svc; + + impl Service for Svc { + type Response = Response>; + type Error = Infallible; + type Future = Ready>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: R) -> Self::Future { + ready(Ok(Response::new(Empty::new()))) + } + } + + let app = Router::new() + .route("/", get_service(Svc).post_service(Svc)) + .route("/foo", patch_service(Svc)); + + let client = TestClient::new(app); + + let res = client.patch("/").send().await; + assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(res.headers()[ALLOW], 
"GET,HEAD,POST"); + + let res = client.patch("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.post("/foo").send().await; + assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(res.headers()[ALLOW], "PATCH"); + + let res = client.get("/bar").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn multiple_methods_for_one_handler() { + async fn root(_: Request) -> &'static str { + "Hello, World!" + } + + let app = Router::new().route("/", on(MethodFilter::GET | MethodFilter::POST, root)); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.post("/").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn wildcard_sees_whole_url() { + let app = Router::new().route("/api/*rest", get(|uri: Uri| async move { uri.to_string() })); + + let client = TestClient::new(app); + + let res = client.get("/api/foo/bar").send().await; + assert_eq!(res.text().await, "/api/foo/bar"); +} + +#[crate::test] +async fn middleware_applies_to_routes_above() { + let app = Router::new() + .route("/one", get(std::future::pending::<()>)) + .layer( + ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_: BoxError| async move { + StatusCode::REQUEST_TIMEOUT + })) + .layer(TimeoutLayer::new(Duration::new(0, 0))), + ) + .route("/two", get(|| async {})); + + let client = TestClient::new(app); + + let res = client.get("/one").send().await; + assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); + + let res = client.get("/two").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn not_found_for_extra_trailing_slash() { + let app = Router::new().route("/foo", get(|| async {})); + + let client = TestClient::new(app); + + let res = client.get("/foo/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = 
client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn not_found_for_missing_trailing_slash() { + let app = Router::new().route("/foo/", get(|| async {})); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn with_and_without_trailing_slash() { + let app = Router::new() + .route("/foo", get(|| async { "without tsr" })) + .route("/foo/", get(|| async { "with tsr" })); + + let client = TestClient::new(app); + + let res = client.get("/foo/").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "with tsr"); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "without tsr"); +} + +// for https://github.com/tokio-rs/axum/issues/420 +#[crate::test] +async fn wildcard_doesnt_match_just_trailing_slash() { + let app = Router::new().route( + "/x/*path", + get(|Path(path): Path| async move { path }), + ); + + let client = TestClient::new(app); + + let res = client.get("/x").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/x/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/x/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "foo/bar"); +} + +#[crate::test] +async fn what_matches_wildcard() { + let app = Router::new() + .route("/*key", get(|| async { "root" })) + .route("/x/*key", get(|| async { "x" })) + .fallback(|| async { "fallback" }); + + let client = TestClient::new(app); + + let get = |path| { + let f = client.get(path).send(); + async move { f.await.text().await } + }; + + assert_eq!(get("/").await, "fallback"); + assert_eq!(get("/a").await, "root"); + assert_eq!(get("/a/").await, "root"); + assert_eq!(get("/a/b").await, "root"); + assert_eq!(get("/a/b/").await, 
"root"); + + assert_eq!(get("/x").await, "root"); + assert_eq!(get("/x/").await, "root"); + assert_eq!(get("/x/a").await, "x"); + assert_eq!(get("/x/a/").await, "x"); + assert_eq!(get("/x/a/b").await, "x"); + assert_eq!(get("/x/a/b/").await, "x"); +} + +#[crate::test] +async fn static_and_dynamic_paths() { + let app = Router::new() + .route( + "/:key", + get(|Path(key): Path| async move { format!("dynamic: {key}") }), + ) + .route("/foo", get(|| async { "static" })); + + let client = TestClient::new(app); + + let res = client.get("/bar").send().await; + assert_eq!(res.text().await, "dynamic: bar"); + + let res = client.get("/foo").send().await; + assert_eq!(res.text().await, "static"); +} + +#[crate::test] +#[should_panic(expected = "Paths must start with a `/`. Use \"/\" for root routes")] +async fn empty_route() { + let app = Router::new().route("", get(|| async {})); + TestClient::new(app); +} + +#[crate::test] +async fn middleware_still_run_for_unmatched_requests() { + #[derive(Clone)] + struct CountMiddleware(S); + + static COUNT: AtomicUsize = AtomicUsize::new(0); + + impl Service for CountMiddleware + where + S: Service, + { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx) + } + + fn call(&mut self, req: R) -> Self::Future { + COUNT.fetch_add(1, Ordering::SeqCst); + self.0.call(req) + } + } + + let app = Router::new() + .route("/", get(|| async {})) + .layer(tower::layer::layer_fn(CountMiddleware)); + + let client = TestClient::new(app); + + assert_eq!(COUNT.load(Ordering::SeqCst), 0); + + client.get("/").send().await; + assert_eq!(COUNT.load(Ordering::SeqCst), 1); + + client.get("/not-found").send().await; + assert_eq!(COUNT.load(Ordering::SeqCst), 2); +} + +#[crate::test] +#[should_panic(expected = "\ + Invalid route: `Router::route_service` cannot be used with `Router`s. 
\ + Use `Router::nest` instead\ +")] +async fn routing_to_router_panics() { + TestClient::new(Router::new().route_service("/", Router::new())); +} + +#[crate::test] +async fn route_layer() { + let app = Router::new() + .route("/foo", get(|| async {})) + .route_layer(ValidateRequestHeaderLayer::bearer("password")); + + let client = TestClient::new(app); + + let res = client + .get("/foo") + .header("authorization", "Bearer password") + .send() + .await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + + let res = client.get("/not-found").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + // it would be nice if this would return `405 Method Not Allowed` + // but that requires knowing more about which method route we're calling, which we + // don't know currently since its just a generic `Service` + let res = client.post("/foo").send().await; + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); +} + +#[crate::test] +async fn different_methods_added_in_different_routes() { + let app = Router::new() + .route("/", get(|| async { "GET" })) + .route("/", post(|| async { "POST" })); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + let body = res.text().await; + assert_eq!(body, "GET"); + + let res = client.post("/").send().await; + let body = res.text().await; + assert_eq!(body, "POST"); +} + +#[crate::test] +#[should_panic(expected = "Cannot merge two `Router`s that both have a fallback")] +async fn merging_routers_with_fallbacks_panics() { + async fn fallback() {} + let one = Router::new().fallback(fallback); + let two = Router::new().fallback(fallback); + TestClient::new(one.merge(two)); +} + +#[test] +#[should_panic(expected = "Overlapping method route. 
Handler for `GET /foo/bar` already exists")] +fn routes_with_overlapping_method_routes() { + async fn handler() {} + let _: Router = Router::new() + .route("/foo/bar", get(handler)) + .route("/foo/bar", get(handler)); +} + +#[test] +#[should_panic(expected = "Overlapping method route. Handler for `GET /foo/bar` already exists")] +fn merging_with_overlapping_method_routes() { + async fn handler() {} + let app: Router = Router::new().route("/foo/bar", get(handler)); + _ = app.clone().merge(app); +} + +#[crate::test] +async fn merging_routers_with_same_paths_but_different_methods() { + let one = Router::new().route("/", get(|| async { "GET" })); + let two = Router::new().route("/", post(|| async { "POST" })); + + let client = TestClient::new(one.merge(two)); + + let res = client.get("/").send().await; + let body = res.text().await; + assert_eq!(body, "GET"); + + let res = client.post("/").send().await; + let body = res.text().await; + assert_eq!(body, "POST"); +} + +#[crate::test] +async fn head_content_length_through_hyper_server() { + let app = Router::new() + .route("/", get(|| async { "foo" })) + .route("/json", get(|| async { Json(json!({ "foo": 1 })) })); + + let client = TestClient::new(app); + + let res = client.head("/").send().await; + assert_eq!(res.headers()["content-length"], "3"); + assert!(res.text().await.is_empty()); + + let res = client.head("/json").send().await; + assert_eq!(res.headers()["content-length"], "9"); + assert!(res.text().await.is_empty()); +} + +#[crate::test] +async fn head_content_length_through_hyper_server_that_hits_fallback() { + let app = Router::new().fallback(|| async { "foo" }); + + let client = TestClient::new(app); + + let res = client.head("/").send().await; + assert_eq!(res.headers()["content-length"], "3"); +} + +#[crate::test] +async fn head_with_middleware_applied() { + use tower_http::compression::{predicate::SizeAbove, CompressionLayer}; + + let app = Router::new() + .nest( + "/", + Router::new().route("/", get(|| 
async { "Hello, World!" })), + ) + .layer(CompressionLayer::new().compress_when(SizeAbove::new(0))); + + let client = TestClient::new(app); + + // send GET request + let res = client + .get("/") + .header("accept-encoding", "gzip") + .send() + .await; + assert_eq!(res.headers()["transfer-encoding"], "chunked"); + // cannot have `transfer-encoding: chunked` and `content-length` + assert!(!res.headers().contains_key("content-length")); + + // send HEAD request + let res = client + .head("/") + .header("accept-encoding", "gzip") + .send() + .await; + // no response body so no `transfer-encoding` + assert!(!res.headers().contains_key("transfer-encoding")); + // no content-length since we cannot know it since the response + // is compressed + assert!(!res.headers().contains_key("content-length")); +} + +#[crate::test] +#[should_panic(expected = "Paths must start with a `/`")] +async fn routes_must_start_with_slash() { + let app = Router::new().route(":foo", get(|| async {})); + TestClient::new(app); +} + +#[crate::test] +async fn body_limited_by_default() { + let app = Router::new() + .route("/bytes", post(|_: Bytes| async {})) + .route("/string", post(|_: String| async {})) + .route("/json", post(|_: Json| async {})); + + let client = TestClient::new(app); + + for uri in ["/bytes", "/string", "/json"] { + println!("calling {uri}"); + + let stream = futures_util::stream::repeat("a".repeat(1000)).map(Ok::<_, hyper::Error>); + let body = Body::wrap_stream(stream); + + let res_future = client + .post(uri) + .header("content-type", "application/json") + .body(body) + .send(); + let res = tokio::time::timeout(Duration::from_secs(3), res_future) + .await + .expect("never got response"); + + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); + } +} + +#[crate::test] +async fn disabling_the_default_limit() { + let app = Router::new() + .route("/", post(|_: Bytes| async {})) + .layer(DefaultBodyLimit::disable()); + + let client = TestClient::new(app); + + // 
`DEFAULT_LIMIT` is 2mb so make a body larger than that + let body = Body::from("a".repeat(3_000_000)); + + let res = client.post("/").body(body).send().await; + + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn limited_body_with_content_length() { + const LIMIT: usize = 3; + + let app = Router::new() + .route( + "/", + post(|headers: HeaderMap, _body: Bytes| async move { + assert!(headers.get(CONTENT_LENGTH).is_some()); + }), + ) + .layer(RequestBodyLimitLayer::new(LIMIT)); + + let client = TestClient::new(app); + + let res = client.post("/").body("a".repeat(LIMIT)).send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.post("/").body("a".repeat(LIMIT * 2)).send().await; + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); +} + +#[crate::test] +async fn changing_the_default_limit() { + let new_limit = 2; + + let app = Router::new() + .route("/", post(|_: Bytes| async {})) + .layer(DefaultBodyLimit::max(new_limit)); + + let client = TestClient::new(app); + + let res = client + .post("/") + .body(Body::from("a".repeat(new_limit))) + .send() + .await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client + .post("/") + .body(Body::from("a".repeat(new_limit + 1))) + .send() + .await; + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); +} + +#[crate::test] +async fn limited_body_with_streaming_body() { + const LIMIT: usize = 3; + + let app = Router::new() + .route( + "/", + post(|headers: HeaderMap, _body: Bytes| async move { + assert!(headers.get(CONTENT_LENGTH).is_none()); + }), + ) + .layer(RequestBodyLimitLayer::new(LIMIT)); + + let client = TestClient::new(app); + + let stream = futures_util::stream::iter(vec![Ok::<_, hyper::Error>("a".repeat(LIMIT))]); + let res = client + .post("/") + .body(Body::wrap_stream(stream)) + .send() + .await; + assert_eq!(res.status(), StatusCode::OK); + + let stream = futures_util::stream::iter(vec![Ok::<_, hyper::Error>("a".repeat(LIMIT * 2))]); + let res = 
client + .post("/") + .body(Body::wrap_stream(stream)) + .send() + .await; + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); +} + +#[crate::test] +async fn extract_state() { + #[derive(Clone)] + struct AppState { + value: i32, + inner: InnerState, + } + + #[derive(Clone)] + struct InnerState { + value: i32, + } + + impl FromRef for InnerState { + fn from_ref(state: &AppState) -> Self { + state.inner.clone() + } + } + + async fn handler(State(outer): State, State(inner): State) { + assert_eq!(outer.value, 1); + assert_eq!(inner.value, 2); + } + + let state = AppState { + value: 1, + inner: InnerState { value: 2 }, + }; + + let app = Router::new().route("/", get(handler)).with_state(state); + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn explicitly_set_state() { + let app = Router::new() + .route_service( + "/", + get(|State(state): State<&'static str>| async move { state }).with_state("foo"), + ) + .with_state("..."); + + let client = TestClient::new(app); + let res = client.get("/").send().await; + assert_eq!(res.text().await, "foo"); +} + +#[crate::test] +async fn layer_response_into_response() { + fn map_response(_res: Response) -> Result, impl IntoResponse> { + let headers = [("x-foo", "bar")]; + let status = StatusCode::IM_A_TEAPOT; + Err((headers, status)) + } + + let app = Router::new() + .route("/", get(|| async {})) + .layer(MapResponseLayer::new(map_response)); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.headers()["x-foo"], "bar"); + assert_eq!(res.status(), StatusCode::IM_A_TEAPOT); +} + +#[allow(dead_code)] +fn method_router_fallback_with_state() { + async fn fallback(_: State<&'static str>) {} + + async fn not_found(_: State<&'static str>) {} + + let state = "foo"; + + let _: Router = Router::new() + .fallback(get(fallback).fallback(not_found)) + .with_state(state); +} + +#[test] +fn 
test_path_for_nested_route() { + assert_eq!(path_for_nested_route("/", "/"), "/"); + + assert_eq!(path_for_nested_route("/a", "/"), "/a"); + assert_eq!(path_for_nested_route("/", "/b"), "/b"); + assert_eq!(path_for_nested_route("/a/", "/"), "/a/"); + assert_eq!(path_for_nested_route("/", "/b/"), "/b/"); + + assert_eq!(path_for_nested_route("/a", "/b"), "/a/b"); + assert_eq!(path_for_nested_route("/a/", "/b"), "/a/b"); + assert_eq!(path_for_nested_route("/a", "/b/"), "/a/b/"); + assert_eq!(path_for_nested_route("/a/", "/b/"), "/a/b/"); +} + +#[crate::test] +async fn state_isnt_cloned_too_much() { + static SETUP_DONE: AtomicBool = AtomicBool::new(false); + static COUNT: AtomicUsize = AtomicUsize::new(0); + + struct AppState; + + impl Clone for AppState { + fn clone(&self) -> Self { + #[rustversion::since(1.65)] + #[track_caller] + fn count() { + if SETUP_DONE.load(Ordering::SeqCst) { + let bt = std::backtrace::Backtrace::force_capture(); + let bt = bt + .to_string() + .lines() + .filter(|line| line.contains("axum") || line.contains("./src")) + .collect::>() + .join("\n"); + println!("AppState::Clone:\n===============\n{}\n", bt); + COUNT.fetch_add(1, Ordering::SeqCst); + } + } + + #[rustversion::not(since(1.65))] + fn count() { + if SETUP_DONE.load(Ordering::SeqCst) { + COUNT.fetch_add(1, Ordering::SeqCst); + } + } + + count(); + + Self + } + } + + let app = Router::new() + .route("/", get(|_: State| async {})) + .with_state(AppState); + + let client = TestClient::new(app); + + // ignore clones made during setup + SETUP_DONE.store(true, Ordering::SeqCst); + + client.get("/").send().await; + + assert_eq!(COUNT.load(Ordering::SeqCst), 4); +} + +#[crate::test] +async fn logging_rejections() { + #[derive(Deserialize, Eq, PartialEq, Debug)] + #[serde(deny_unknown_fields)] + struct RejectionEvent { + message: String, + status: u16, + body: String, + rejection_type: String, + } + + let events = capture_tracing::(|| async { + let app = Router::new() + .route("/extension", 
get(|_: Extension| async {})) + .route("/string", post(|_: String| async {})); + + let client = TestClient::new(app); + + assert_eq!( + client.get("/extension").send().await.status(), + StatusCode::INTERNAL_SERVER_ERROR + ); + + assert_eq!( + client + .post("/string") + .body(Vec::from([0, 159, 146, 150])) + .send() + .await + .status(), + StatusCode::BAD_REQUEST, + ); + }) + .await; + + assert_eq!( + dbg!(events), + Vec::from([ + TracingEvent { + fields: RejectionEvent { + message: "rejecting request".to_owned(), + status: 500, + body: "Missing request extension: Extension of \ + type `core::convert::Infallible` was not found. \ + Perhaps you forgot to add it? See `axum::Extension`." + .to_owned(), + rejection_type: "axum::extract::rejection::MissingExtension".to_owned(), + }, + target: "axum::rejection".to_owned(), + level: "TRACE".to_owned(), + }, + TracingEvent { + fields: RejectionEvent { + message: "rejecting request".to_owned(), + status: 400, + body: "Request body didn't contain valid UTF-8: \ + invalid utf-8 sequence of 1 bytes from index 1" + .to_owned(), + rejection_type: "axum_core::extract::rejection::InvalidUtf8".to_owned(), + }, + target: "axum::rejection".to_owned(), + level: "TRACE".to_owned(), + }, + ]) + ) +} + +// https://github.com/tokio-rs/axum/issues/1955 +#[crate::test] +async fn connect_going_to_custom_fallback() { + let app = Router::new().fallback(|| async { (StatusCode::NOT_FOUND, "custom fallback") }); + + let req = Request::builder() + .uri("example.com:443") + .method(Method::CONNECT) + .header(HOST, "example.com:443") + .body(Body::empty()) + .unwrap(); + + let res = app.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::NOT_FOUND); + let text = String::from_utf8(hyper::body::to_bytes(res).await.unwrap().to_vec()).unwrap(); + assert_eq!(text, "custom fallback"); +} + +// https://github.com/tokio-rs/axum/issues/1955 +#[crate::test] +async fn connect_going_to_default_fallback() { + let app = Router::new(); + + 
let req = Request::builder() + .uri("example.com:443") + .method(Method::CONNECT) + .header(HOST, "example.com:443") + .body(Body::empty()) + .unwrap(); + + let res = app.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::NOT_FOUND); + let body = hyper::body::to_bytes(res).await.unwrap(); + assert!(body.is_empty()); +} + +#[crate::test] +async fn impl_handler_for_into_response() { + let app = Router::new().route("/things", post((StatusCode::CREATED, "thing created"))); + + let client = TestClient::new(app); + + let res = client.post("/things").send().await; + assert_eq!(res.status(), StatusCode::CREATED); + assert_eq!(res.text().await, "thing created"); +} diff --git a/.cargo-vendor/axum-0.6.20/src/routing/tests/nest.rs b/.cargo-vendor/axum-0.6.20/src/routing/tests/nest.rs new file mode 100644 index 0000000000..0544f8be59 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/tests/nest.rs @@ -0,0 +1,423 @@ +use super::*; +use crate::{body::boxed, extract::Extension}; +use std::collections::HashMap; +use tower_http::services::ServeDir; + +#[crate::test] +async fn nesting_apps() { + let api_routes = Router::new() + .route( + "/users", + get(|| async { "users#index" }).post(|| async { "users#create" }), + ) + .route( + "/users/:id", + get( + |params: extract::Path>| async move { + format!( + "{}: users#show ({})", + params.get("version").unwrap(), + params.get("id").unwrap() + ) + }, + ), + ) + .route( + "/games/:id", + get( + |params: extract::Path>| async move { + format!( + "{}: games#show ({})", + params.get("version").unwrap(), + params.get("id").unwrap() + ) + }, + ), + ); + + let app = Router::new() + .route("/", get(|| async { "hi" })) + .nest("/:version/api", api_routes); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "hi"); + + let res = client.get("/v0/api/users").send().await; + assert_eq!(res.status(), StatusCode::OK); + 
assert_eq!(res.text().await, "users#index"); + + let res = client.get("/v0/api/users/123").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "v0: users#show (123)"); + + let res = client.get("/v0/api/games/123").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "v0: games#show (123)"); +} + +#[crate::test] +async fn wrong_method_nest() { + let nested_app = Router::new().route("/", get(|| async {})); + let app = Router::new().nest("/", nested_app); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.post("/").send().await; + assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(res.headers()[ALLOW], "GET,HEAD"); + + let res = client.patch("/foo").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn nesting_router_at_root() { + let nested = Router::new().route("/foo", get(|uri: Uri| async move { uri.to_string() })); + let app = Router::new().nest("/", nested); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/foo"); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn nesting_router_at_empty_path() { + let nested = Router::new().route("/foo", get(|uri: Uri| async move { uri.to_string() })); + let app = Router::new().nest("", nested); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/foo"); + + let res = client.get("/foo/bar").send().await; + 
assert_eq!(res.status(), StatusCode::NOT_FOUND); +} + +#[crate::test] +async fn nesting_handler_at_root() { + let app = Router::new().nest_service("/", get(|uri: Uri| async move { uri.to_string() })); + + let client = TestClient::new(app); + + let res = client.get("/").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/"); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/foo"); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/foo/bar"); +} + +#[crate::test] +async fn nested_url_extractor() { + let app = Router::new().nest( + "/foo", + Router::new().nest( + "/bar", + Router::new() + .route("/baz", get(|uri: Uri| async move { uri.to_string() })) + .route( + "/qux", + get(|req: Request| async move { req.uri().to_string() }), + ), + ), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/baz"); + + let res = client.get("/foo/bar/qux").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/qux"); +} + +#[crate::test] +async fn nested_url_original_extractor() { + let app = Router::new().nest( + "/foo", + Router::new().nest( + "/bar", + Router::new().route( + "/baz", + get(|uri: extract::OriginalUri| async move { uri.0.to_string() }), + ), + ), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/foo/bar/baz"); +} + +#[crate::test] +async fn nested_service_sees_stripped_uri() { + let app = Router::new().nest( + "/foo", + Router::new().nest( + "/bar", + Router::new().route_service( + "/baz", + service_fn(|req: Request| async move { + let body = boxed(Body::from(req.uri().to_string())); + Ok::<_, 
Infallible>(Response::new(body)) + }), + ), + ), + ); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar/baz").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "/baz"); +} + +#[crate::test] +async fn nest_static_file_server() { + let app = Router::new().nest_service("/static", ServeDir::new(".")); + + let client = TestClient::new(app); + + let res = client.get("/static/README.md").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[crate::test] +async fn nested_multiple_routes() { + let app = Router::new() + .nest( + "/api", + Router::new() + .route("/users", get(|| async { "users" })) + .route("/teams", get(|| async { "teams" })), + ) + .route("/", get(|| async { "root" })); + + let client = TestClient::new(app); + + assert_eq!(client.get("/").send().await.text().await, "root"); + assert_eq!(client.get("/api/users").send().await.text().await, "users"); + assert_eq!(client.get("/api/teams").send().await.text().await, "teams"); +} + +#[test] +#[should_panic = "Invalid route \"/\": insertion failed due to conflict with previously registered route: /*__private__axum_nest_tail_param"] +fn nested_service_at_root_with_other_routes() { + let _: Router = Router::new() + .nest_service("/", Router::new().route("/users", get(|| async {}))) + .route("/", get(|| async {})); +} + +#[test] +fn nested_at_root_with_other_routes() { + let _: Router = Router::new() + .nest("/", Router::new().route("/users", get(|| async {}))) + .route("/", get(|| async {})); +} + +#[crate::test] +async fn multiple_top_level_nests() { + let app = Router::new() + .nest( + "/one", + Router::new().route("/route", get(|| async { "one" })), + ) + .nest( + "/two", + Router::new().route("/route", get(|| async { "two" })), + ); + + let client = TestClient::new(app); + + assert_eq!(client.get("/one/route").send().await.text().await, "one"); + assert_eq!(client.get("/two/route").send().await.text().await, "two"); +} + +#[crate::test] 
+#[should_panic(expected = "Invalid route: nested routes cannot contain wildcards (*)")] +async fn nest_cannot_contain_wildcards() { + _ = Router::<(), Body>::new().nest("/one/*rest", Router::new()); +} + +#[crate::test] +async fn outer_middleware_still_see_whole_url() { + #[derive(Clone)] + struct SetUriExtension(S); + + #[derive(Clone)] + struct Uri(http::Uri); + + impl Service> for SetUriExtension + where + S: Service>, + { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + let uri = Uri(req.uri().clone()); + req.extensions_mut().insert(uri); + self.0.call(req) + } + } + + async fn handler(Extension(Uri(middleware_uri)): Extension) -> impl IntoResponse { + middleware_uri.to_string() + } + + let app = Router::new() + .route("/", get(handler)) + .route("/foo", get(handler)) + .route("/foo/bar", get(handler)) + .nest("/one", Router::new().route("/two", get(handler))) + .fallback(handler) + .layer(tower::layer::layer_fn(SetUriExtension)); + + let client = TestClient::new(app); + + assert_eq!(client.get("/").send().await.text().await, "/"); + assert_eq!(client.get("/foo").send().await.text().await, "/foo"); + assert_eq!(client.get("/foo/bar").send().await.text().await, "/foo/bar"); + assert_eq!( + client.get("/not-found").send().await.text().await, + "/not-found" + ); + assert_eq!(client.get("/one/two").send().await.text().await, "/one/two"); +} + +#[crate::test] +async fn nest_at_capture() { + let api_routes = Router::new().route( + "/:b", + get(|Path((a, b)): Path<(String, String)>| async move { format!("a={a} b={b}") }), + ); + + let app = Router::new().nest("/:a", api_routes); + + let client = TestClient::new(app); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "a=foo b=bar"); +} + +#[crate::test] +async 
fn nest_with_and_without_trailing() { + let app = Router::new().nest_service("/foo", get(|| async {})); + + let client = TestClient::new(app); + + let res = client.get("/foo").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/foo/").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/foo/bar").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +#[tokio::test] +async fn nesting_with_root_inner_router() { + let app = Router::new() + .nest_service("/service", Router::new().route("/", get(|| async {}))) + .nest("/router", Router::new().route("/", get(|| async {}))) + .nest("/router-slash/", Router::new().route("/", get(|| async {}))); + + let client = TestClient::new(app); + + // `/service/` does match the `/service` prefix and the remaining path is technically + // empty, which is the same as `/` which matches `.route("/", _)` + let res = client.get("/service").send().await; + assert_eq!(res.status(), StatusCode::OK); + + // `/service/` does match the `/service` prefix and the remaining path is `/` + // which matches `.route("/", _)` + // + // this is perhaps a little surprising but don't think there is much we can do + let res = client.get("/service/").send().await; + assert_eq!(res.status(), StatusCode::OK); + + // at least it does work like you'd expect when using `nest` + + let res = client.get("/router").send().await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client.get("/router/").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/router-slash").send().await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/router-slash/").send().await; + assert_eq!(res.status(), StatusCode::OK); +} + +macro_rules! 
nested_route_test { + ( + $name:ident, + // the path we nest the inner router at + nest = $nested_path:literal, + // the route the inner router accepts + route = $route_path:literal, + // the route we expect to be able to call + expected = $expected_path:literal $(,)? + ) => { + #[crate::test] + async fn $name() { + let inner = Router::new().route($route_path, get(|| async {})); + let app = Router::new().nest($nested_path, inner); + let client = TestClient::new(app); + let res = client.get($expected_path).send().await; + let status = res.status(); + assert_eq!(status, StatusCode::OK, "Router"); + } + }; +} + +// test cases taken from https://github.com/tokio-rs/axum/issues/714#issuecomment-1058144460 +nested_route_test!(nest_1, nest = "", route = "/", expected = "/"); +nested_route_test!(nest_2, nest = "", route = "/a", expected = "/a"); +nested_route_test!(nest_3, nest = "", route = "/a/", expected = "/a/"); +nested_route_test!(nest_4, nest = "/", route = "/", expected = "/"); +nested_route_test!(nest_5, nest = "/", route = "/a", expected = "/a"); +nested_route_test!(nest_6, nest = "/", route = "/a/", expected = "/a/"); +nested_route_test!(nest_7, nest = "/a", route = "/", expected = "/a"); +nested_route_test!(nest_8, nest = "/a", route = "/a", expected = "/a/a"); +nested_route_test!(nest_9, nest = "/a", route = "/a/", expected = "/a/a/"); +nested_route_test!(nest_11, nest = "/a/", route = "/", expected = "/a/"); +nested_route_test!(nest_12, nest = "/a/", route = "/a", expected = "/a/a"); +nested_route_test!(nest_13, nest = "/a/", route = "/a/", expected = "/a/a/"); diff --git a/.cargo-vendor/axum-0.6.20/src/routing/url_params.rs b/.cargo-vendor/axum-0.6.20/src/routing/url_params.rs new file mode 100644 index 0000000000..6243d379c0 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/routing/url_params.rs @@ -0,0 +1,46 @@ +use crate::util::PercentDecodedStr; +use http::Extensions; +use matchit::Params; +use std::sync::Arc; + +pub(crate) enum UrlParams { + 
Params(Vec<(Arc, PercentDecodedStr)>), + InvalidUtf8InPathParam { key: Arc }, +} + +pub(super) fn insert_url_params(extensions: &mut Extensions, params: Params) { + let current_params = extensions.get_mut(); + + if let Some(UrlParams::InvalidUtf8InPathParam { .. }) = current_params { + // nothing to do here since an error was stored earlier + return; + } + + let params = params + .iter() + .filter(|(key, _)| !key.starts_with(super::NEST_TAIL_PARAM)) + .filter(|(key, _)| !key.starts_with(super::FALLBACK_PARAM)) + .map(|(k, v)| { + if let Some(decoded) = PercentDecodedStr::new(v) { + Ok((Arc::from(k), decoded)) + } else { + Err(Arc::from(k)) + } + }) + .collect::, _>>(); + + match (current_params, params) { + (Some(UrlParams::InvalidUtf8InPathParam { .. }), _) => { + unreachable!("we check for this state earlier in this method") + } + (_, Err(invalid_key)) => { + extensions.insert(UrlParams::InvalidUtf8InPathParam { key: invalid_key }); + } + (Some(UrlParams::Params(current)), Ok(params)) => { + current.extend(params); + } + (None, Ok(params)) => { + extensions.insert(UrlParams::Params(params)); + } + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/service_ext.rs b/.cargo-vendor/axum-0.6.20/src/service_ext.rs new file mode 100644 index 0000000000..e603d65f16 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/service_ext.rs @@ -0,0 +1,47 @@ +#[cfg(feature = "tokio")] +use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; +use crate::routing::IntoMakeService; +use tower_service::Service; + +/// Extension trait that adds additional methods to any [`Service`]. +pub trait ServiceExt: Service + Sized { + /// Convert this service into a [`MakeService`], that is a [`Service`] whose + /// response is another service. + /// + /// This is commonly used when applying middleware around an entire [`Router`]. See ["Rewriting + /// request URI in middleware"] for more details. 
+ /// + /// [`MakeService`]: tower::make::MakeService + /// ["Rewriting request URI in middleware"]: crate::middleware#rewriting-request-uri-in-middleware + /// [`Router`]: crate::Router + fn into_make_service(self) -> IntoMakeService; + + /// Convert this service into a [`MakeService`], that will store `C`'s + /// associated `ConnectInfo` in a request extension such that [`ConnectInfo`] + /// can extract it. + /// + /// This enables extracting things like the client's remote address. + /// This is commonly used when applying middleware around an entire [`Router`]. See ["Rewriting + /// request URI in middleware"] for more details. + /// + /// [`MakeService`]: tower::make::MakeService + /// ["Rewriting request URI in middleware"]: crate::middleware#rewriting-request-uri-in-middleware + /// [`Router`]: crate::Router + /// [`ConnectInfo`]: crate::extract::connect_info::ConnectInfo + #[cfg(feature = "tokio")] + fn into_make_service_with_connect_info(self) -> IntoMakeServiceWithConnectInfo; +} + +impl ServiceExt for S +where + S: Service + Sized, +{ + fn into_make_service(self) -> IntoMakeService { + IntoMakeService::new(self) + } + + #[cfg(feature = "tokio")] + fn into_make_service_with_connect_info(self) -> IntoMakeServiceWithConnectInfo { + IntoMakeServiceWithConnectInfo::new(self) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/test_helpers/mod.rs b/.cargo-vendor/axum-0.6.20/src/test_helpers/mod.rs new file mode 100644 index 0000000000..de4554905e --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/test_helpers/mod.rs @@ -0,0 +1,14 @@ +#![allow(clippy::disallowed_names)] + +use crate::{body::HttpBody, BoxError}; + +mod test_client; +pub(crate) use self::test_client::*; + +pub(crate) mod tracing_helpers; + +pub(crate) fn assert_send() {} +pub(crate) fn assert_sync() {} +pub(crate) fn assert_unpin() {} + +pub(crate) struct NotSendSync(*const ()); diff --git a/.cargo-vendor/axum-0.6.20/src/test_helpers/test_client.rs 
b/.cargo-vendor/axum-0.6.20/src/test_helpers/test_client.rs new file mode 100644 index 0000000000..d1d73f6c1d --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/test_helpers/test_client.rs @@ -0,0 +1,158 @@ +use super::{BoxError, HttpBody}; +use bytes::Bytes; +use http::{ + header::{HeaderName, HeaderValue}, + Request, StatusCode, +}; +use hyper::{Body, Server}; +use std::net::{SocketAddr, TcpListener}; +use tower::make::Shared; +use tower_service::Service; + +pub(crate) struct TestClient { + client: reqwest::Client, + addr: SocketAddr, +} + +impl TestClient { + pub(crate) fn new(svc: S) -> Self + where + S: Service, Response = http::Response> + Clone + Send + 'static, + ResBody: HttpBody + Send + 'static, + ResBody::Data: Send, + ResBody::Error: Into, + S::Future: Send, + S::Error: Into, + { + let listener = TcpListener::bind("127.0.0.1:0").expect("Could not bind ephemeral socket"); + let addr = listener.local_addr().unwrap(); + println!("Listening on {addr}"); + + tokio::spawn(async move { + let server = Server::from_tcp(listener).unwrap().serve(Shared::new(svc)); + server.await.expect("server error"); + }); + + let client = reqwest::Client::builder() + .redirect(reqwest::redirect::Policy::none()) + .build() + .unwrap(); + + TestClient { client, addr } + } + + pub(crate) fn get(&self, url: &str) -> RequestBuilder { + RequestBuilder { + builder: self.client.get(format!("http://{}{}", self.addr, url)), + } + } + + pub(crate) fn head(&self, url: &str) -> RequestBuilder { + RequestBuilder { + builder: self.client.head(format!("http://{}{}", self.addr, url)), + } + } + + pub(crate) fn post(&self, url: &str) -> RequestBuilder { + RequestBuilder { + builder: self.client.post(format!("http://{}{}", self.addr, url)), + } + } + + #[allow(dead_code)] + pub(crate) fn put(&self, url: &str) -> RequestBuilder { + RequestBuilder { + builder: self.client.put(format!("http://{}{}", self.addr, url)), + } + } + + #[allow(dead_code)] + pub(crate) fn patch(&self, url: &str) -> 
RequestBuilder { + RequestBuilder { + builder: self.client.patch(format!("http://{}{}", self.addr, url)), + } + } +} + +pub(crate) struct RequestBuilder { + builder: reqwest::RequestBuilder, +} + +impl RequestBuilder { + pub(crate) async fn send(self) -> TestResponse { + TestResponse { + response: self.builder.send().await.unwrap(), + } + } + + pub(crate) fn body(mut self, body: impl Into) -> Self { + self.builder = self.builder.body(body); + self + } + + pub(crate) fn json(mut self, json: &T) -> Self + where + T: serde::Serialize, + { + self.builder = self.builder.json(json); + self + } + + pub(crate) fn header(mut self, key: K, value: V) -> Self + where + HeaderName: TryFrom, + >::Error: Into, + HeaderValue: TryFrom, + >::Error: Into, + { + self.builder = self.builder.header(key, value); + self + } + + #[allow(dead_code)] + pub(crate) fn multipart(mut self, form: reqwest::multipart::Form) -> Self { + self.builder = self.builder.multipart(form); + self + } +} + +#[derive(Debug)] +pub(crate) struct TestResponse { + response: reqwest::Response, +} + +impl TestResponse { + #[allow(dead_code)] + pub(crate) async fn bytes(self) -> Bytes { + self.response.bytes().await.unwrap() + } + + pub(crate) async fn text(self) -> String { + self.response.text().await.unwrap() + } + + #[allow(dead_code)] + pub(crate) async fn json(self) -> T + where + T: serde::de::DeserializeOwned, + { + self.response.json().await.unwrap() + } + + pub(crate) fn status(&self) -> StatusCode { + self.response.status() + } + + pub(crate) fn headers(&self) -> &http::HeaderMap { + self.response.headers() + } + + pub(crate) async fn chunk(&mut self) -> Option { + self.response.chunk().await.unwrap() + } + + pub(crate) async fn chunk_text(&mut self) -> Option { + let chunk = self.chunk().await?; + Some(String::from_utf8(chunk.to_vec()).unwrap()) + } +} diff --git a/.cargo-vendor/axum-0.6.20/src/test_helpers/tracing_helpers.rs b/.cargo-vendor/axum-0.6.20/src/test_helpers/tracing_helpers.rs new file mode 
100644 index 0000000000..3d5cf18149 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/test_helpers/tracing_helpers.rs @@ -0,0 +1,108 @@ +use std::{ + future::Future, + io, + sync::{Arc, Mutex}, +}; + +use serde::{de::DeserializeOwned, Deserialize}; +use tracing_subscriber::prelude::*; +use tracing_subscriber::{filter::Targets, fmt::MakeWriter}; + +#[derive(Deserialize, Eq, PartialEq, Debug)] +#[serde(deny_unknown_fields)] +pub(crate) struct TracingEvent { + pub(crate) fields: T, + pub(crate) target: String, + pub(crate) level: String, +} + +/// Run an async closure and capture the tracing output it produces. +pub(crate) async fn capture_tracing(f: F) -> Vec> +where + F: Fn() -> Fut, + Fut: Future, + T: DeserializeOwned, +{ + let (make_writer, handle) = TestMakeWriter::new(); + + let subscriber = tracing_subscriber::registry().with( + tracing_subscriber::fmt::layer() + .with_writer(make_writer) + .with_target(true) + .without_time() + .with_ansi(false) + .json() + .flatten_event(false) + .with_filter("axum=trace".parse::().unwrap()), + ); + + let guard = tracing::subscriber::set_default(subscriber); + + f().await; + + drop(guard); + + handle + .take() + .lines() + .map(|line| serde_json::from_str(line).unwrap()) + .collect() +} + +struct TestMakeWriter { + write: Arc>>>, +} + +impl TestMakeWriter { + fn new() -> (Self, Handle) { + let write = Arc::new(Mutex::new(Some(Vec::::new()))); + + ( + Self { + write: write.clone(), + }, + Handle { write }, + ) + } +} + +impl<'a> MakeWriter<'a> for TestMakeWriter { + type Writer = Writer<'a>; + + fn make_writer(&'a self) -> Self::Writer { + Writer(self) + } +} + +struct Writer<'a>(&'a TestMakeWriter); + +impl<'a> io::Write for Writer<'a> { + fn write(&mut self, buf: &[u8]) -> io::Result { + match &mut *self.0.write.lock().unwrap() { + Some(vec) => { + let len = buf.len(); + vec.extend(buf); + Ok(len) + } + None => Err(io::Error::new( + io::ErrorKind::Other, + "inner writer has been taken", + )), + } + } + + fn flush(&mut self) 
-> io::Result<()> { + Ok(()) + } +} + +struct Handle { + write: Arc>>>, +} + +impl Handle { + fn take(self) -> String { + let vec = self.write.lock().unwrap().take().unwrap(); + String::from_utf8(vec).unwrap() + } +} diff --git a/.cargo-vendor/axum/src/typed_header.rs b/.cargo-vendor/axum-0.6.20/src/typed_header.rs similarity index 100% rename from .cargo-vendor/axum/src/typed_header.rs rename to .cargo-vendor/axum-0.6.20/src/typed_header.rs diff --git a/.cargo-vendor/axum-0.6.20/src/util.rs b/.cargo-vendor/axum-0.6.20/src/util.rs new file mode 100644 index 0000000000..f7fc6ae149 --- /dev/null +++ b/.cargo-vendor/axum-0.6.20/src/util.rs @@ -0,0 +1,61 @@ +use pin_project_lite::pin_project; +use std::{ops::Deref, sync::Arc}; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) struct PercentDecodedStr(Arc); + +impl PercentDecodedStr { + pub(crate) fn new(s: S) -> Option + where + S: AsRef, + { + percent_encoding::percent_decode(s.as_ref().as_bytes()) + .decode_utf8() + .ok() + .map(|decoded| Self(decoded.as_ref().into())) + } + + pub(crate) fn as_str(&self) -> &str { + &self.0 + } + + pub(crate) fn into_inner(self) -> Arc { + self.0 + } +} + +impl Deref for PercentDecodedStr { + type Target = str; + + #[inline] + fn deref(&self) -> &Self::Target { + self.as_str() + } +} + +pin_project! 
{ + #[project = EitherProj] + pub(crate) enum Either { + A { #[pin] inner: A }, + B { #[pin] inner: B }, + } +} + +pub(crate) fn try_downcast(k: K) -> Result +where + T: 'static, + K: Send + 'static, +{ + let mut k = Some(k); + if let Some(k) = ::downcast_mut::>(&mut k) { + Ok(k.take().unwrap()) + } else { + Err(k.unwrap()) + } +} + +#[test] +fn test_try_downcast() { + assert_eq!(try_downcast::(5_u32), Err(5_u32)); + assert_eq!(try_downcast::(5_i32), Ok(5_i32)); +} diff --git a/.cargo-vendor/axum-core-0.3.4/.cargo-checksum.json b/.cargo-vendor/axum-core-0.3.4/.cargo-checksum.json new file mode 100644 index 0000000000..8bfd791ee7 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"446d28d46bc10b208ef14dab4577ab84630b57539a3553e50c0a9f5fe065ff69","Cargo.toml":"b9a8bd35772328c050ef314ead75f79a78acd70ea9b3f535af1a8580d50d6b0a","LICENSE":"ab25eee08e7b6d209398f44e6f4a6f17c9446cd54b99fd64e041edadc96b6f9b","README.md":"7113dacbc69f31674e7051c4b504471130b84e359e36f3e1e9fa944990cc51b7","build.rs":"3bad731d51fd3a32b235ebd437bc9a2788b35bcae4779237cd26696e13840bcb","src/body.rs":"e779fac3a090a4d9156922a8255cd79fffb5ad52a96db64db64a3fe66016b77a","src/error.rs":"ac4b458b73677b8cd07c1cf71030db81963e4cabf417d80c678ea315a7b290cd","src/ext_traits/mod.rs":"d9ccd994037e44dff41865e4c66b3b110e09c380b54c2b74e3b6022c2573d719","src/ext_traits/request.rs":"265cd31f92c93294e4f39c40a1d7edd6dabfd9e6affad0908ccdbcce25ba92ae","src/ext_traits/request_parts.rs":"04be5b833550d4d9d3fbb7788077c39f248cfd787c94d778f43cb71e0bce7e2a","src/extract/default_body_limit.rs":"1f55fd5681ec2e7c548d64e9758c7562753123e1d81c98b11afc8c0a5d5d79d1","src/extract/from_ref.rs":"baf52ef04101b6b8f3c5a699b30c09ea322391c64380561a963cb3a7536616f3","src/extract/mod.rs":"8123aea39f35391aab17cd166f8ffec0ef6a89eac954edb94730f8a2f500f3a3","src/extract/rejection.rs":"64bbf092dd50af4cf4055bcfa86719290b795bbea4a0824bf5d7e786dc372ee3","src/extract/request_parts.rs":"85794b4265a
354319a46895edea2fa7940712e2aef1388354cfd1060868cac5f","src/extract/tuple.rs":"983f035fd11d85cff01370373bef723bc25fe4870eef8bb176e90d11f94923fa","src/lib.rs":"b752ccd80895e79307126b45b4d89116d4fefb2cf88aed8bdbc8bffb550a5465","src/macros.rs":"e428cdb5c2e4c74451e57628ce6d1b457e38965daf7a0b9f7053f73317a55f2f","src/response/append_headers.rs":"4fac6f55e9fb93f16e2e549d586aa4d698a208a27578ee2e708bc17513708e06","src/response/into_response.rs":"58d68bec046db7ba48d06dd13511e1f2e05ea217e4e4c8d56299e9745544ddb4","src/response/into_response_parts.rs":"f0e85f3e931acfcf3d610b4a464b38fe3bcabd96940c55436c7f82a5dfdc30f3","src/response/mod.rs":"34766742cfc0c4b7bc02c9b290dbdcd2128138f619d622d72a2a546ce122c73b"},"package":"759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"} \ No newline at end of file diff --git a/.cargo-vendor/axum-core-0.3.4/CHANGELOG.md b/.cargo-vendor/axum-core-0.3.4/CHANGELOG.md new file mode 100644 index 0000000000..f17c2250ae --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/CHANGELOG.md @@ -0,0 +1,210 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +# Unreleased + +- None. + +# 0.3.4 (11. April, 2023) + +- Changes to private APIs. + +# 0.3.3 (03. March, 2023) + +- **fixed:** Add `#[must_use]` attributes to types that do nothing unless used ([#1809]) + +[#1809]: https://github.com/tokio-rs/axum/pull/1809 + +# 0.3.2 (20. January, 2023) + +- **added:** Implement `IntoResponse` for `&'static [u8; N]` and `[u8; N]` ([#1690]) + +[#1690]: https://github.com/tokio-rs/axum/pull/1690 + +# 0.3.1 (9. January, 2023) + +- **added:** Add `body_text` and `status` methods to built-in rejections ([#1612]) + +[#1612]: https://github.com/tokio-rs/axum/pull/1612 + +# 0.3.0 (25. 
November, 2022) + +- **added:** Added new `FromRequestParts` trait. See axum's changelog for more + details ([#1272]) +- **breaking:** `FromRequest` has been reworked and `RequestParts` has been + removed. See axum's changelog for more details ([#1272]) +- **breaking:** `BodyAlreadyExtracted` has been removed ([#1272]) +- **breaking:** `AppendHeaders` now works on any `impl IntoIterator` ([#1495]) + +[#1272]: https://github.com/tokio-rs/axum/pull/1272 +[#1495]: https://github.com/tokio-rs/axum/pull/1495 + +
+0.3.0 Pre-Releases + +# 0.3.0-rc.3 (8. November, 2022) + +- **added:** Add `DefaultBodyLimit::max` for changing the default body limit ([#1397]) +- **added:** Add `Error::into_inner` for converting `Error` to `BoxError` without allocating ([#1476]) +- **breaking:** `AppendHeaders` now works on any `impl IntoIterator` ([#1495]) + +[#1397]: https://github.com/tokio-rs/axum/pull/1397 +[#1476]: https://github.com/tokio-rs/axum/pull/1476 +[#1495]: https://github.com/tokio-rs/axum/pull/1495 + +# 0.3.0-rc.2 (10. September, 2022) + +- **breaking:** Added default limit to how much data `Bytes::from_request` will + consume. Previously it would attempt to consume the entire request body + without checking its length. This meant if a malicious peer sent an large (or + infinite) request body your server might run out of memory and crash. + + The default limit is at 2 MB and can be disabled by adding the new + `DefaultBodyLimit::disable()` middleware. See its documentation for more + details. + + This also applies to `String` which used `Bytes::from_request` internally. + + ([#1346]) + +[#1346]: https://github.com/tokio-rs/axum/pull/1346 + +# 0.3.0-rc.1 (23. August, 2022) + +- **breaking:** `FromRequest` has been reworked and `RequestParts` has been + removed. See axum's changelog for more details ([#1272]) +- **added:** Added new `FromRequestParts` trait. See axum's changelog for more + details ([#1272]) +- **breaking:** `BodyAlreadyExtracted` has been removed ([#1272]) + +[#1155]: https://github.com/tokio-rs/axum/pull/1155 +[#1272]: https://github.com/tokio-rs/axum/pull/1272 + +
+ +# 0.2.8 (10. September, 2022) + +- **breaking:** Added default limit to how much data `Bytes::from_request` will + consume. Previously it would attempt to consume the entire request body + without checking its length. This meant if a malicious peer sent an large (or + infinite) request body your server might run out of memory and crash. + + The default limit is at 2 MB and can be disabled by adding the new + `DefaultBodyLimit::disable()` middleware. See its documentation for more + details. + + This also applies to `String` which used `Bytes::from_request` internally. + + ([#1346]) + +[#1346]: https://github.com/tokio-rs/axum/pull/1346 + +# 0.2.7 (10. July, 2022) + +- **fix:** Fix typos in `RequestParts` docs ([#1147]) + +[#1147]: https://github.com/tokio-rs/axum/pull/1147 + +# 0.2.6 (18. June, 2022) + +- **change:** axum-core's MSRV is now 1.56 ([#1098]) + +[#1098]: https://github.com/tokio-rs/axum/pull/1098 + +# 0.2.5 (08. June, 2022) + +- **added:** Automatically handle `http_body::LengthLimitError` in `FailedToBufferBody` and map + such errors to `413 Payload Too Large` ([#1048]) +- **fixed:** Use `impl IntoResponse` less in docs ([#1049]) + +[#1048]: https://github.com/tokio-rs/axum/pull/1048 +[#1049]: https://github.com/tokio-rs/axum/pull/1049 + +# 0.2.4 (02. May, 2022) + +- **added:** Implement `IntoResponse` and `IntoResponseParts` for `http::Extensions` ([#975]) +- **added:** Implement `IntoResponse` for `(http::response::Parts, impl IntoResponse)` ([#950]) +- **added:** Implement `IntoResponse` for `(http::response::Response<()>, impl IntoResponse)` ([#950]) +- **added:** Implement `IntoResponse for (Parts | Request<()>, $(impl IntoResponseParts)+, impl IntoResponse)` ([#980]) + +[#950]: https://github.com/tokio-rs/axum/pull/950 +[#975]: https://github.com/tokio-rs/axum/pull/975 +[#980]: https://github.com/tokio-rs/axum/pull/980 + +# 0.2.3 (25. 
April, 2022) + +- **added:** Add `response::ErrorResponse` and `response::Result` for + `IntoResponse`-based error handling ([#921]) + +[#921]: https://github.com/tokio-rs/axum/pull/921 + +# 0.2.2 (19. April, 2022) + +- **added:** Add `AppendHeaders` for appending headers to a response rather than overriding them ([#927]) + +[#927]: https://github.com/tokio-rs/axum/pull/927 + +# 0.2.1 (03. April, 2022) + +- **added:** Add `RequestParts::extract` which allows applying an extractor as a method call ([#897]) + +[#897]: https://github.com/tokio-rs/axum/pull/897 + +# 0.2.0 (31. March, 2022) + +- **added:** Add `IntoResponseParts` trait which allows defining custom response + types for adding headers or extensions to responses ([#797]) +- **breaking:** Using `HeaderMap` as an extractor will no longer remove the headers and thus + they'll still be accessible to other extractors, such as `axum::extract::Json`. Instead + `HeaderMap` will clone the headers. You should prefer to use `TypedHeader` to extract only the + headers you need ([#698]) + + This includes these breaking changes: + - `RequestParts::take_headers` has been removed. + - `RequestParts::headers` returns `&HeaderMap`. + - `RequestParts::headers_mut` returns `&mut HeaderMap`. + - `HeadersAlreadyExtracted` has been removed. + - The `HeadersAlreadyExtracted` variant has been removed from these rejections: + - `RequestAlreadyExtracted` + - `RequestPartsAlreadyExtracted` + - `>::Rejection` has been changed to `std::convert::Infallible`. +- **breaking:** `axum::http::Extensions` is no longer an extractor (ie it + doesn't implement `FromRequest`). The `axum::extract::Extension` extractor is + _not_ impacted by this and works the same. This change makes it harder to + accidentally remove all extensions which would result in confusing errors + elsewhere ([#699]) + This includes these breaking changes: + - `RequestParts::take_extensions` has been removed. + - `RequestParts::extensions` returns `&Extensions`. 
+ - `RequestParts::extensions_mut` returns `&mut Extensions`. + - `RequestAlreadyExtracted` has been removed. + - `::Rejection` is now `BodyAlreadyExtracted`. + - `::Rejection` is now `Infallible`. + - `ExtensionsAlreadyExtracted` has been removed. +- **breaking:** `RequestParts::body_mut` now returns `&mut Option` so the + body can be swapped ([#869]) + +[#698]: https://github.com/tokio-rs/axum/pull/698 +[#699]: https://github.com/tokio-rs/axum/pull/699 +[#797]: https://github.com/tokio-rs/axum/pull/797 +[#869]: https://github.com/tokio-rs/axum/pull/869 + +# 0.1.2 (22. February, 2022) + +- **added:** Implement `IntoResponse` for `bytes::BytesMut` and `bytes::Chain` ([#767]) + +[#767]: https://github.com/tokio-rs/axum/pull/767 + +# 0.1.1 (06. December, 2021) + +- **added:** `axum_core::response::Response` now exists as a shorthand for writing `Response` ([#590]) + +[#590]: https://github.com/tokio-rs/axum/pull/590 + +# 0.1.0 (02. December, 2021) + +- Initial release. diff --git a/.cargo-vendor/axum-core-0.3.4/Cargo.toml b/.cargo-vendor/axum-core-0.3.4/Cargo.toml new file mode 100644 index 0000000000..4d45d91bb9 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/Cargo.toml @@ -0,0 +1,103 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.56" +name = "axum-core" +version = "0.3.4" +description = "Core types and traits for axum" +homepage = "https://github.com/tokio-rs/axum" +readme = "README.md" +keywords = [ + "http", + "web", + "framework", +] +categories = [ + "asynchronous", + "network-programming", + "web-programming", +] +license = "MIT" +repository = "https://github.com/tokio-rs/axum" + +[package.metadata.cargo-public-api-crates] +allowed = [ + "futures_core", + "http", + "bytes", + "http_body", + "tower_layer", +] + +[dependencies.async-trait] +version = "0.1.67" + +[dependencies.bytes] +version = "1.0" + +[dependencies.futures-util] +version = "0.3" +features = ["alloc"] +default-features = false + +[dependencies.http] +version = "0.2.7" + +[dependencies.http-body] +version = "0.4.5" + +[dependencies.mime] +version = "0.3.16" + +[dependencies.tower-http] +version = "0.4" +features = ["limit"] +optional = true + +[dependencies.tower-layer] +version = "0.3" + +[dependencies.tower-service] +version = "0.3" + +[dependencies.tracing] +version = "0.1.37" +optional = true +default-features = false + +[dev-dependencies.axum] +version = "0.6.0" +features = ["headers"] + +[dev-dependencies.futures-util] +version = "0.3" +features = ["alloc"] +default-features = false + +[dev-dependencies.hyper] +version = "0.14.24" + +[dev-dependencies.tokio] +version = "1.25.0" +features = ["macros"] + +[dev-dependencies.tower-http] +version = "0.4" +features = ["limit"] + +[build-dependencies.rustversion] +version = "1.0.9" + +[features] +__private_docs = ["dep:tower-http"] +tracing = ["dep:tracing"] diff --git a/.cargo-vendor/axum-core-0.3.4/LICENSE b/.cargo-vendor/axum-core-0.3.4/LICENSE new file mode 100644 index 0000000000..538d04abb9 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/LICENSE @@ -0,0 +1,7 @@ +Copyright 2021 Axum Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/.cargo-vendor/axum-core-0.3.4/README.md b/.cargo-vendor/axum-core-0.3.4/README.md new file mode 100644 index 0000000000..bd5efbeb01 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/README.md @@ -0,0 +1,45 @@ +# axum-core + +[![Build status](https://github.com/tokio-rs/axum/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/tokio-rs/axum-core/actions/workflows/CI.yml) +[![Crates.io](https://img.shields.io/crates/v/axum-core)](https://crates.io/crates/axum-core) +[![Documentation](https://docs.rs/axum-core/badge.svg)](https://docs.rs/axum-core) + +Core types and traits for axum. + +More information about this crate can be found in the [crate documentation][docs]. + +## Safety + +This crate uses `#![forbid(unsafe_code)]` to ensure everything is implemented in 100% safe Rust. + +## Minimum supported Rust version + +axum-core's MSRV is 1.56. + +## Getting Help + +You're also welcome to ask in the [Discord channel][chat] or open an [issue] +with your question. 
+ +## Contributing + +:balloon: Thanks for your help improving the project! We are so happy to have +you! We have a [contributing guide][contributing] to help you get involved in the +`axum` project. + +## License + +This project is licensed under the [MIT license][license]. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `axum` by you, shall be licensed as MIT, without any +additional terms or conditions. + +[`axum`]: https://crates.io/crates/axum +[chat]: https://discord.gg/tokio +[contributing]: /CONTRIBUTING.md +[docs]: https://docs.rs/axum-core +[license]: /axum-core/LICENSE +[issue]: https://github.com/tokio-rs/axum/issues/new diff --git a/.cargo-vendor/axum-core-0.3.4/build.rs b/.cargo-vendor/axum-core-0.3.4/build.rs new file mode 100644 index 0000000000..b52885c626 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/build.rs @@ -0,0 +1,7 @@ +#[rustversion::nightly] +fn main() { + println!("cargo:rustc-cfg=nightly_error_messages"); +} + +#[rustversion::not(nightly)] +fn main() {} diff --git a/.cargo-vendor/axum-core-0.3.4/src/body.rs b/.cargo-vendor/axum-core-0.3.4/src/body.rs new file mode 100644 index 0000000000..9f25408936 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/body.rs @@ -0,0 +1,92 @@ +//! HTTP body utilities. + +use crate::{BoxError, Error}; +use bytes::Bytes; +use bytes::{Buf, BufMut}; +use http_body::Body; + +/// A boxed [`Body`] trait object. +/// +/// This is used in axum as the response body type for applications. It's +/// necessary to unify multiple response bodies types into one. +pub type BoxBody = http_body::combinators::UnsyncBoxBody; + +/// Convert a [`http_body::Body`] into a [`BoxBody`]. 
+pub fn boxed(body: B) -> BoxBody +where + B: http_body::Body + Send + 'static, + B::Error: Into, +{ + try_downcast(body).unwrap_or_else(|body| body.map_err(Error::new).boxed_unsync()) +} + +pub(crate) fn try_downcast(k: K) -> Result +where + T: 'static, + K: Send + 'static, +{ + let mut k = Some(k); + if let Some(k) = ::downcast_mut::>(&mut k) { + Ok(k.take().unwrap()) + } else { + Err(k.unwrap()) + } +} + +// copied from hyper under the following license: +// Copyright (c) 2014-2021 Sean McArthur + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +pub(crate) async fn to_bytes(body: T) -> Result +where + T: Body, +{ + futures_util::pin_mut!(body); + + // If there's only 1 chunk, we can just return Buf::to_bytes() + let mut first = if let Some(buf) = body.data().await { + buf? + } else { + return Ok(Bytes::new()); + }; + + let second = if let Some(buf) = body.data().await { + buf? 
+ } else { + return Ok(first.copy_to_bytes(first.remaining())); + }; + + // With more than 1 buf, we gotta flatten into a Vec first. + let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; + let mut vec = Vec::with_capacity(cap); + vec.put(first); + vec.put(second); + + while let Some(buf) = body.data().await { + vec.put(buf?); + } + + Ok(vec.into()) +} + +#[test] +fn test_try_downcast() { + assert_eq!(try_downcast::(5_u32), Err(5_u32)); + assert_eq!(try_downcast::(5_i32), Ok(5_i32)); +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/error.rs b/.cargo-vendor/axum-core-0.3.4/src/error.rs new file mode 100644 index 0000000000..8c522c72b2 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/error.rs @@ -0,0 +1,34 @@ +use crate::BoxError; +use std::{error::Error as StdError, fmt}; + +/// Errors that can happen when using axum. +#[derive(Debug)] +pub struct Error { + inner: BoxError, +} + +impl Error { + /// Create a new `Error` from a boxable error. + pub fn new(error: impl Into) -> Self { + Self { + inner: error.into(), + } + } + + /// Convert an `Error` back into the underlying boxed trait object. 
+ pub fn into_inner(self) -> BoxError { + self.inner + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + Some(&*self.inner) + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/ext_traits/mod.rs b/.cargo-vendor/axum-core-0.3.4/src/ext_traits/mod.rs new file mode 100644 index 0000000000..02595fbeac --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/ext_traits/mod.rs @@ -0,0 +1,50 @@ +pub(crate) mod request; +pub(crate) mod request_parts; + +#[cfg(test)] +mod tests { + use std::convert::Infallible; + + use crate::extract::{FromRef, FromRequestParts}; + use async_trait::async_trait; + use http::request::Parts; + + #[derive(Debug, Default, Clone, Copy)] + pub(crate) struct State(pub(crate) S); + + #[async_trait] + impl FromRequestParts for State + where + InnerState: FromRef, + OuterState: Send + Sync, + { + type Rejection = Infallible; + + async fn from_request_parts( + _parts: &mut Parts, + state: &OuterState, + ) -> Result { + let inner_state = InnerState::from_ref(state); + Ok(Self(inner_state)) + } + } + + // some extractor that requires the state, such as `SignedCookieJar` + pub(crate) struct RequiresState(pub(crate) String); + + #[async_trait] + impl FromRequestParts for RequiresState + where + S: Send + Sync, + String: FromRef, + { + type Rejection = Infallible; + + async fn from_request_parts( + _parts: &mut Parts, + state: &S, + ) -> Result { + Ok(Self(String::from_ref(state))) + } + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/ext_traits/request.rs b/.cargo-vendor/axum-core-0.3.4/src/ext_traits/request.rs new file mode 100644 index 0000000000..e49ba9216c --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/ext_traits/request.rs @@ -0,0 +1,440 @@ +use crate::extract::{DefaultBodyLimitKind, FromRequest, FromRequestParts}; +use futures_util::future::BoxFuture; +use http::Request; +use 
http_body::Limited; + +mod sealed { + pub trait Sealed {} + impl Sealed for http::Request {} +} + +/// Extension trait that adds additional methods to [`Request`]. +pub trait RequestExt: sealed::Sealed + Sized { + /// Apply an extractor to this `Request`. + /// + /// This is just a convenience for `E::from_request(req, &())`. + /// + /// Note this consumes the request. Use [`RequestExt::extract_parts`] if you're not extracting + /// the body and don't want to consume the request. + /// + /// # Example + /// + /// ``` + /// use axum::{ + /// async_trait, + /// extract::FromRequest, + /// http::{header::CONTENT_TYPE, Request, StatusCode}, + /// response::{IntoResponse, Response}, + /// Form, Json, RequestExt, + /// }; + /// + /// struct FormOrJson(T); + /// + /// #[async_trait] + /// impl FromRequest for FormOrJson + /// where + /// Json: FromRequest<(), B>, + /// Form: FromRequest<(), B>, + /// T: 'static, + /// B: Send + 'static, + /// S: Send + Sync, + /// { + /// type Rejection = Response; + /// + /// async fn from_request(req: Request, _state: &S) -> Result { + /// let content_type = req + /// .headers() + /// .get(CONTENT_TYPE) + /// .and_then(|value| value.to_str().ok()) + /// .ok_or_else(|| StatusCode::BAD_REQUEST.into_response())?; + /// + /// if content_type.starts_with("application/json") { + /// let Json(payload) = req + /// .extract::, _>() + /// .await + /// .map_err(|err| err.into_response())?; + /// + /// Ok(Self(payload)) + /// } else if content_type.starts_with("application/x-www-form-urlencoded") { + /// let Form(payload) = req + /// .extract::, _>() + /// .await + /// .map_err(|err| err.into_response())?; + /// + /// Ok(Self(payload)) + /// } else { + /// Err(StatusCode::BAD_REQUEST.into_response()) + /// } + /// } + /// } + /// ``` + fn extract(self) -> BoxFuture<'static, Result> + where + E: FromRequest<(), B, M> + 'static, + M: 'static; + + /// Apply an extractor that requires some state to this `Request`. 
+ /// + /// This is just a convenience for `E::from_request(req, state)`. + /// + /// Note this consumes the request. Use [`RequestExt::extract_parts_with_state`] if you're not + /// extracting the body and don't want to consume the request. + /// + /// # Example + /// + /// ``` + /// use axum::{ + /// async_trait, + /// extract::{FromRef, FromRequest}, + /// http::Request, + /// RequestExt, + /// }; + /// + /// struct MyExtractor { + /// requires_state: RequiresState, + /// } + /// + /// #[async_trait] + /// impl FromRequest for MyExtractor + /// where + /// String: FromRef, + /// S: Send + Sync, + /// B: Send + 'static, + /// { + /// type Rejection = std::convert::Infallible; + /// + /// async fn from_request(req: Request, state: &S) -> Result { + /// let requires_state = req.extract_with_state::(state).await?; + /// + /// Ok(Self { requires_state }) + /// } + /// } + /// + /// // some extractor that consumes the request body and requires state + /// struct RequiresState { /* ... */ } + /// + /// #[async_trait] + /// impl FromRequest for RequiresState + /// where + /// String: FromRef, + /// S: Send + Sync, + /// B: Send + 'static, + /// { + /// // ... + /// # type Rejection = std::convert::Infallible; + /// # async fn from_request(req: Request, _state: &S) -> Result { + /// # todo!() + /// # } + /// } + /// ``` + fn extract_with_state(self, state: &S) -> BoxFuture<'_, Result> + where + E: FromRequest + 'static, + S: Send + Sync; + + /// Apply a parts extractor to this `Request`. + /// + /// This is just a convenience for `E::from_request_parts(parts, state)`. 
+ /// + /// # Example + /// + /// ``` + /// use axum::{ + /// async_trait, + /// extract::FromRequest, + /// headers::{authorization::Bearer, Authorization}, + /// http::Request, + /// response::{IntoResponse, Response}, + /// Json, RequestExt, TypedHeader, + /// }; + /// + /// struct MyExtractor { + /// bearer_token: String, + /// payload: T, + /// } + /// + /// #[async_trait] + /// impl FromRequest for MyExtractor + /// where + /// B: Send + 'static, + /// S: Send + Sync, + /// Json: FromRequest<(), B>, + /// T: 'static, + /// { + /// type Rejection = Response; + /// + /// async fn from_request(mut req: Request, _state: &S) -> Result { + /// let TypedHeader(auth_header) = req + /// .extract_parts::>>() + /// .await + /// .map_err(|err| err.into_response())?; + /// + /// let Json(payload) = req + /// .extract::, _>() + /// .await + /// .map_err(|err| err.into_response())?; + /// + /// Ok(Self { + /// bearer_token: auth_header.token().to_owned(), + /// payload, + /// }) + /// } + /// } + /// ``` + fn extract_parts(&mut self) -> BoxFuture<'_, Result> + where + E: FromRequestParts<()> + 'static; + + /// Apply a parts extractor that requires some state to this `Request`. + /// + /// This is just a convenience for `E::from_request_parts(parts, state)`. 
+ /// + /// # Example + /// + /// ``` + /// use axum::{ + /// async_trait, + /// extract::{FromRef, FromRequest, FromRequestParts}, + /// http::{request::Parts, Request}, + /// response::{IntoResponse, Response}, + /// Json, RequestExt, + /// }; + /// + /// struct MyExtractor { + /// requires_state: RequiresState, + /// payload: T, + /// } + /// + /// #[async_trait] + /// impl FromRequest for MyExtractor + /// where + /// String: FromRef, + /// Json: FromRequest<(), B>, + /// T: 'static, + /// S: Send + Sync, + /// B: Send + 'static, + /// { + /// type Rejection = Response; + /// + /// async fn from_request(mut req: Request, state: &S) -> Result { + /// let requires_state = req + /// .extract_parts_with_state::(state) + /// .await + /// .map_err(|err| err.into_response())?; + /// + /// let Json(payload) = req + /// .extract::, _>() + /// .await + /// .map_err(|err| err.into_response())?; + /// + /// Ok(Self { + /// requires_state, + /// payload, + /// }) + /// } + /// } + /// + /// struct RequiresState {} + /// + /// #[async_trait] + /// impl FromRequestParts for RequiresState + /// where + /// String: FromRef, + /// S: Send + Sync, + /// { + /// // ... + /// # type Rejection = std::convert::Infallible; + /// # async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + /// # todo!() + /// # } + /// } + /// ``` + fn extract_parts_with_state<'a, E, S>( + &'a mut self, + state: &'a S, + ) -> BoxFuture<'a, Result> + where + E: FromRequestParts + 'static, + S: Send + Sync; + + /// Apply the [default body limit](crate::extract::DefaultBodyLimit). + /// + /// If it is disabled, return the request as-is in `Err`. + fn with_limited_body(self) -> Result>, Request>; + + /// Consumes the request, returning the body wrapped in [`Limited`] if a + /// [default limit](crate::extract::DefaultBodyLimit) is in place, or not wrapped if the + /// default limit is disabled. 
+ fn into_limited_body(self) -> Result, B>; +} + +impl RequestExt for Request +where + B: Send + 'static, +{ + fn extract(self) -> BoxFuture<'static, Result> + where + E: FromRequest<(), B, M> + 'static, + M: 'static, + { + self.extract_with_state(&()) + } + + fn extract_with_state(self, state: &S) -> BoxFuture<'_, Result> + where + E: FromRequest + 'static, + S: Send + Sync, + { + E::from_request(self, state) + } + + fn extract_parts(&mut self) -> BoxFuture<'_, Result> + where + E: FromRequestParts<()> + 'static, + { + self.extract_parts_with_state(&()) + } + + fn extract_parts_with_state<'a, E, S>( + &'a mut self, + state: &'a S, + ) -> BoxFuture<'a, Result> + where + E: FromRequestParts + 'static, + S: Send + Sync, + { + let mut req = Request::new(()); + *req.version_mut() = self.version(); + *req.method_mut() = self.method().clone(); + *req.uri_mut() = self.uri().clone(); + *req.headers_mut() = std::mem::take(self.headers_mut()); + *req.extensions_mut() = std::mem::take(self.extensions_mut()); + let (mut parts, _) = req.into_parts(); + + Box::pin(async move { + let result = E::from_request_parts(&mut parts, state).await; + + *self.version_mut() = parts.version; + *self.method_mut() = parts.method.clone(); + *self.uri_mut() = parts.uri.clone(); + *self.headers_mut() = std::mem::take(&mut parts.headers); + *self.extensions_mut() = std::mem::take(&mut parts.extensions); + + result + }) + } + + fn with_limited_body(self) -> Result>, Request> { + // update docs in `axum-core/src/extract/default_body_limit.rs` and + // `axum/src/docs/extract.md` if this changes + const DEFAULT_LIMIT: usize = 2_097_152; // 2 mb + + match self.extensions().get::().copied() { + Some(DefaultBodyLimitKind::Disable) => Err(self), + Some(DefaultBodyLimitKind::Limit(limit)) => { + Ok(self.map(|b| http_body::Limited::new(b, limit))) + } + None => Ok(self.map(|b| http_body::Limited::new(b, DEFAULT_LIMIT))), + } + } + + fn into_limited_body(self) -> Result, B> { + self.with_limited_body() + 
.map(Request::into_body) + .map_err(Request::into_body) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + ext_traits::tests::{RequiresState, State}, + extract::FromRef, + }; + use async_trait::async_trait; + use http::Method; + use hyper::Body; + + #[tokio::test] + async fn extract_without_state() { + let req = Request::new(()); + + let method: Method = req.extract().await.unwrap(); + + assert_eq!(method, Method::GET); + } + + #[tokio::test] + async fn extract_body_without_state() { + let req = Request::new(Body::from("foobar")); + + let body: String = req.extract().await.unwrap(); + + assert_eq!(body, "foobar"); + } + + #[tokio::test] + async fn extract_with_state() { + let req = Request::new(()); + + let state = "state".to_owned(); + + let State(extracted_state): State = req.extract_with_state(&state).await.unwrap(); + + assert_eq!(extracted_state, state); + } + + #[tokio::test] + async fn extract_parts_without_state() { + let mut req = Request::builder().header("x-foo", "foo").body(()).unwrap(); + + let method: Method = req.extract_parts().await.unwrap(); + + assert_eq!(method, Method::GET); + assert_eq!(req.headers()["x-foo"], "foo"); + } + + #[tokio::test] + async fn extract_parts_with_state() { + let mut req = Request::builder().header("x-foo", "foo").body(()).unwrap(); + + let state = "state".to_owned(); + + let State(extracted_state): State = + req.extract_parts_with_state(&state).await.unwrap(); + + assert_eq!(extracted_state, state); + assert_eq!(req.headers()["x-foo"], "foo"); + } + + // this stuff just needs to compile + #[allow(dead_code)] + struct WorksForCustomExtractor { + method: Method, + from_state: String, + body: String, + } + + #[async_trait] + impl FromRequest for WorksForCustomExtractor + where + S: Send + Sync, + B: Send + 'static, + String: FromRef + FromRequest<(), B>, + { + type Rejection = >::Rejection; + + async fn from_request(mut req: Request, state: &S) -> Result { + let RequiresState(from_state) = 
req.extract_parts_with_state(state).await.unwrap(); + let method = req.extract_parts().await.unwrap(); + let body = req.extract().await?; + + Ok(Self { + method, + from_state, + body, + }) + } + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/ext_traits/request_parts.rs b/.cargo-vendor/axum-core-0.3.4/src/ext_traits/request_parts.rs new file mode 100644 index 0000000000..07a7dbff30 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/ext_traits/request_parts.rs @@ -0,0 +1,200 @@ +use crate::extract::FromRequestParts; +use futures_util::future::BoxFuture; +use http::request::Parts; + +mod sealed { + pub trait Sealed {} + impl Sealed for http::request::Parts {} +} + +/// Extension trait that adds additional methods to [`Parts`]. +pub trait RequestPartsExt: sealed::Sealed + Sized { + /// Apply an extractor to this `Parts`. + /// + /// This is just a convenience for `E::from_request_parts(parts, &())`. + /// + /// # Example + /// + /// ``` + /// use axum::{ + /// extract::{Query, TypedHeader, FromRequestParts}, + /// response::{Response, IntoResponse}, + /// headers::UserAgent, + /// http::request::Parts, + /// RequestPartsExt, + /// async_trait, + /// }; + /// use std::collections::HashMap; + /// + /// struct MyExtractor { + /// user_agent: String, + /// query_params: HashMap, + /// } + /// + /// #[async_trait] + /// impl FromRequestParts for MyExtractor + /// where + /// S: Send + Sync, + /// { + /// type Rejection = Response; + /// + /// async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + /// let user_agent = parts + /// .extract::>() + /// .await + /// .map(|user_agent| user_agent.as_str().to_owned()) + /// .map_err(|err| err.into_response())?; + /// + /// let query_params = parts + /// .extract::>>() + /// .await + /// .map(|Query(params)| params) + /// .map_err(|err| err.into_response())?; + /// + /// Ok(MyExtractor { user_agent, query_params }) + /// } + /// } + /// ``` + fn extract(&mut self) -> BoxFuture<'_, Result> + where + E: 
FromRequestParts<()> + 'static; + + /// Apply an extractor that requires some state to this `Parts`. + /// + /// This is just a convenience for `E::from_request_parts(parts, state)`. + /// + /// # Example + /// + /// ``` + /// use axum::{ + /// extract::{FromRef, FromRequestParts}, + /// response::{Response, IntoResponse}, + /// http::request::Parts, + /// RequestPartsExt, + /// async_trait, + /// }; + /// + /// struct MyExtractor { + /// requires_state: RequiresState, + /// } + /// + /// #[async_trait] + /// impl FromRequestParts for MyExtractor + /// where + /// String: FromRef, + /// S: Send + Sync, + /// { + /// type Rejection = std::convert::Infallible; + /// + /// async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + /// let requires_state = parts + /// .extract_with_state::(state) + /// .await?; + /// + /// Ok(MyExtractor { requires_state }) + /// } + /// } + /// + /// struct RequiresState { /* ... */ } + /// + /// // some extractor that requires a `String` in the state + /// #[async_trait] + /// impl FromRequestParts for RequiresState + /// where + /// String: FromRef, + /// S: Send + Sync, + /// { + /// // ... 
+ /// # type Rejection = std::convert::Infallible; + /// # async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + /// # unimplemented!() + /// # } + /// } + /// ``` + fn extract_with_state<'a, E, S>( + &'a mut self, + state: &'a S, + ) -> BoxFuture<'a, Result> + where + E: FromRequestParts + 'static, + S: Send + Sync; +} + +impl RequestPartsExt for Parts { + fn extract(&mut self) -> BoxFuture<'_, Result> + where + E: FromRequestParts<()> + 'static, + { + self.extract_with_state(&()) + } + + fn extract_with_state<'a, E, S>( + &'a mut self, + state: &'a S, + ) -> BoxFuture<'a, Result> + where + E: FromRequestParts + 'static, + S: Send + Sync, + { + E::from_request_parts(self, state) + } +} + +#[cfg(test)] +mod tests { + use std::convert::Infallible; + + use super::*; + use crate::{ + ext_traits::tests::{RequiresState, State}, + extract::FromRef, + }; + use async_trait::async_trait; + use http::{Method, Request}; + + #[tokio::test] + async fn extract_without_state() { + let (mut parts, _) = Request::new(()).into_parts(); + + let method: Method = parts.extract().await.unwrap(); + + assert_eq!(method, Method::GET); + } + + #[tokio::test] + async fn extract_with_state() { + let (mut parts, _) = Request::new(()).into_parts(); + + let state = "state".to_owned(); + + let State(extracted_state): State = parts + .extract_with_state::, String>(&state) + .await + .unwrap(); + + assert_eq!(extracted_state, state); + } + + // this stuff just needs to compile + #[allow(dead_code)] + struct WorksForCustomExtractor { + method: Method, + from_state: String, + } + + #[async_trait] + impl FromRequestParts for WorksForCustomExtractor + where + S: Send + Sync, + String: FromRef, + { + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + let RequiresState(from_state) = parts.extract_with_state(state).await?; + let method = parts.extract().await?; + + Ok(Self { method, from_state }) + } + } +} diff --git 
a/.cargo-vendor/axum-core-0.3.4/src/extract/default_body_limit.rs b/.cargo-vendor/axum-core-0.3.4/src/extract/default_body_limit.rs new file mode 100644 index 0000000000..7b37f1edab --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/extract/default_body_limit.rs @@ -0,0 +1,201 @@ +use self::private::DefaultBodyLimitService; +use tower_layer::Layer; + +/// Layer for configuring the default request body limit. +/// +/// For security reasons, [`Bytes`] will, by default, not accept bodies larger than 2MB. This also +/// applies to extractors that uses [`Bytes`] internally such as `String`, [`Json`], and [`Form`]. +/// +/// This middleware provides ways to configure that. +/// +/// Note that if an extractor consumes the body directly with [`Body::data`], or similar, the +/// default limit is _not_ applied. +/// +/// # Difference between `DefaultBodyLimit` and [`RequestBodyLimit`] +/// +/// `DefaultBodyLimit` and [`RequestBodyLimit`] serve similar functions but in different ways. +/// +/// `DefaultBodyLimit` is local in that it only applies to [`FromRequest`] implementations that +/// explicitly apply it (or call another extractor that does). You can apply the limit with +/// [`RequestExt::with_limited_body`] or [`RequestExt::into_limited_body`] +/// +/// [`RequestBodyLimit`] is applied globally to all requests, regardless of which extractors are +/// used or how the body is consumed. 
+/// +/// `DefaultBodyLimit` is also easier to integrate into an existing setup since it doesn't change +/// the request body type: +/// +/// ``` +/// use axum::{ +/// Router, +/// routing::post, +/// body::Body, +/// extract::{DefaultBodyLimit, RawBody}, +/// http::Request, +/// }; +/// +/// let app = Router::new() +/// .route( +/// "/", +/// // even with `DefaultBodyLimit` the request body is still just `Body` +/// post(|request: Request| async {}), +/// ) +/// .layer(DefaultBodyLimit::max(1024)); +/// # let _: Router<(), _> = app; +/// ``` +/// +/// ``` +/// use axum::{Router, routing::post, body::Body, extract::RawBody, http::Request}; +/// use tower_http::limit::RequestBodyLimitLayer; +/// use http_body::Limited; +/// +/// let app = Router::new() +/// .route( +/// "/", +/// // `RequestBodyLimitLayer` changes the request body type to `Limited` +/// // extracting a different body type wont work +/// post(|request: Request>| async {}), +/// ) +/// .layer(RequestBodyLimitLayer::new(1024)); +/// # let _: Router<(), _> = app; +/// ``` +/// +/// In general using `DefaultBodyLimit` is recommended but if you need to use third party +/// extractors and want to sure a limit is also applied there then [`RequestBodyLimit`] should be +/// used. 
+/// +/// [`Body::data`]: http_body::Body::data +/// [`Bytes`]: bytes::Bytes +/// [`Json`]: https://docs.rs/axum/0.6.0/axum/struct.Json.html +/// [`Form`]: https://docs.rs/axum/0.6.0/axum/struct.Form.html +/// [`FromRequest`]: crate::extract::FromRequest +/// [`RequestBodyLimit`]: tower_http::limit::RequestBodyLimit +/// [`RequestExt::with_limited_body`]: crate::RequestExt::with_limited_body +/// [`RequestExt::into_limited_body`]: crate::RequestExt::into_limited_body +#[derive(Debug, Clone)] +#[must_use] +pub struct DefaultBodyLimit { + kind: DefaultBodyLimitKind, +} + +#[derive(Debug, Clone, Copy)] +pub(crate) enum DefaultBodyLimitKind { + Disable, + Limit(usize), +} + +impl DefaultBodyLimit { + /// Disable the default request body limit. + /// + /// This must be used to receive bodies larger than the default limit of 2MB using [`Bytes`] or + /// an extractor built on it such as `String`, [`Json`], [`Form`]. + /// + /// Note that if you're accepting data from untrusted remotes it is recommend to add your own + /// limit such as [`tower_http::limit`]. + /// + /// # Example + /// + /// ``` + /// use axum::{ + /// Router, + /// routing::get, + /// body::{Bytes, Body}, + /// extract::DefaultBodyLimit, + /// }; + /// use tower_http::limit::RequestBodyLimitLayer; + /// use http_body::Limited; + /// + /// let app: Router<(), Limited> = Router::new() + /// .route("/", get(|body: Bytes| async {})) + /// // Disable the default limit + /// .layer(DefaultBodyLimit::disable()) + /// // Set a different limit + /// .layer(RequestBodyLimitLayer::new(10 * 1000 * 1000)); + /// ``` + /// + /// [`Bytes`]: bytes::Bytes + /// [`Json`]: https://docs.rs/axum/0.6.0/axum/struct.Json.html + /// [`Form`]: https://docs.rs/axum/0.6.0/axum/struct.Form.html + pub fn disable() -> Self { + Self { + kind: DefaultBodyLimitKind::Disable, + } + } + + /// Set the default request body limit. 
+ /// + /// By default the limit of request body sizes that [`Bytes::from_request`] (and other + /// extractors built on top of it such as `String`, [`Json`], and [`Form`]) is 2MB. This method + /// can be used to change that limit. + /// + /// # Example + /// + /// ``` + /// use axum::{ + /// Router, + /// routing::get, + /// body::{Bytes, Body}, + /// extract::DefaultBodyLimit, + /// }; + /// use tower_http::limit::RequestBodyLimitLayer; + /// use http_body::Limited; + /// + /// let app: Router<(), Limited> = Router::new() + /// .route("/", get(|body: Bytes| async {})) + /// // Replace the default of 2MB with 1024 bytes. + /// .layer(DefaultBodyLimit::max(1024)); + /// ``` + /// + /// [`Bytes::from_request`]: bytes::Bytes + /// [`Json`]: https://docs.rs/axum/0.6.0/axum/struct.Json.html + /// [`Form`]: https://docs.rs/axum/0.6.0/axum/struct.Form.html + pub fn max(limit: usize) -> Self { + Self { + kind: DefaultBodyLimitKind::Limit(limit), + } + } +} + +impl Layer for DefaultBodyLimit { + type Service = DefaultBodyLimitService; + + fn layer(&self, inner: S) -> Self::Service { + DefaultBodyLimitService { + inner, + kind: self.kind, + } + } +} + +mod private { + use super::DefaultBodyLimitKind; + use http::Request; + use std::task::Context; + use tower_service::Service; + + #[derive(Debug, Clone, Copy)] + pub struct DefaultBodyLimitService { + pub(super) inner: S, + pub(super) kind: DefaultBodyLimitKind, + } + + impl Service> for DefaultBodyLimitService + where + S: Service>, + { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> std::task::Poll> { + self.inner.poll_ready(cx) + } + + #[inline] + fn call(&mut self, mut req: Request) -> Self::Future { + req.extensions_mut().insert(self.kind); + self.inner.call(req) + } + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/extract/from_ref.rs b/.cargo-vendor/axum-core-0.3.4/src/extract/from_ref.rs new file mode 100644 index 
0000000000..bdfa7dd07e --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/extract/from_ref.rs @@ -0,0 +1,25 @@ +/// Used to do reference-to-value conversions thus not consuming the input value. +/// +/// This is mainly used with [`State`] to extract "substates" from a reference to main application +/// state. +/// +/// See [`State`] for more details on how library authors should use this trait. +/// +/// This trait can be derived using `#[derive(FromRef)]`. +/// +/// [`State`]: https://docs.rs/axum/0.6/axum/extract/struct.State.html +// NOTE: This trait is defined in axum-core, even though it is mainly used with `State` which is +// defined in axum. That allows crate authors to use it when implementing extractors. +pub trait FromRef { + /// Converts to this type from a reference to the input type. + fn from_ref(input: &T) -> Self; +} + +impl FromRef for T +where + T: Clone, +{ + fn from_ref(input: &T) -> Self { + input.clone() + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/extract/mod.rs b/.cargo-vendor/axum-core-0.3.4/src/extract/mod.rs new file mode 100644 index 0000000000..1113a1ee7a --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/extract/mod.rs @@ -0,0 +1,196 @@ +//! Types and traits for extracting data from requests. +//! +//! See [`axum::extract`] for more details. +//! +//! [`axum::extract`]: https://docs.rs/axum/latest/axum/extract/index.html + +use crate::response::IntoResponse; +use async_trait::async_trait; +use http::{request::Parts, Request}; +use std::convert::Infallible; + +pub mod rejection; + +mod default_body_limit; +mod from_ref; +mod request_parts; +mod tuple; + +pub(crate) use self::default_body_limit::DefaultBodyLimitKind; +pub use self::{default_body_limit::DefaultBodyLimit, from_ref::FromRef}; + +mod private { + #[derive(Debug, Clone, Copy)] + pub enum ViaParts {} + + #[derive(Debug, Clone, Copy)] + pub enum ViaRequest {} +} + +/// Types that can be created from request parts. 
+/// +/// Extractors that implement `FromRequestParts` cannot consume the request body and can thus be +/// run in any order for handlers. +/// +/// If your extractor needs to consume the request body then you should implement [`FromRequest`] +/// and not [`FromRequestParts`]. +/// +/// See [`axum::extract`] for more general docs about extractors. +/// +/// [`axum::extract`]: https://docs.rs/axum/0.6.0/axum/extract/index.html +#[async_trait] +#[cfg_attr( + nightly_error_messages, + rustc_on_unimplemented( + note = "Function argument is not a valid axum extractor. \nSee `https://docs.rs/axum/latest/axum/extract/index.html` for details", + ) +)] +pub trait FromRequestParts: Sized { + /// If the extractor fails it'll use this "rejection" type. A rejection is + /// a kind of error that can be converted into a response. + type Rejection: IntoResponse; + + /// Perform the extraction. + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result; +} + +/// Types that can be created from requests. +/// +/// Extractors that implement `FromRequest` can consume the request body and can thus only be run +/// once for handlers. +/// +/// If your extractor doesn't need to consume the request body then you should implement +/// [`FromRequestParts`] and not [`FromRequest`]. +/// +/// See [`axum::extract`] for more general docs about extractors. +/// +/// # What is the `B` type parameter? +/// +/// `FromRequest` is generic over the request body (the `B` in +/// [`http::Request`]). This is to allow `FromRequest` to be usable with any +/// type of request body. This is necessary because some middleware change the +/// request body, for example to add timeouts. +/// +/// If you're writing your own `FromRequest` that wont be used outside your +/// application, and not using any middleware that changes the request body, you +/// can most likely use `axum::body::Body`. 
+/// +/// If you're writing a library that's intended for others to use, it's recommended +/// to keep the generic type parameter: +/// +/// ```rust +/// use axum::{ +/// async_trait, +/// extract::FromRequest, +/// http::{self, Request}, +/// }; +/// +/// struct MyExtractor; +/// +/// #[async_trait] +/// impl FromRequest for MyExtractor +/// where +/// // these bounds are required by `async_trait` +/// B: Send + 'static, +/// S: Send + Sync, +/// { +/// type Rejection = http::StatusCode; +/// +/// async fn from_request(req: Request, state: &S) -> Result { +/// // ... +/// # unimplemented!() +/// } +/// } +/// ``` +/// +/// This ensures your extractor is as flexible as possible. +/// +/// [`http::Request`]: http::Request +/// [`axum::extract`]: https://docs.rs/axum/0.6.0/axum/extract/index.html +#[async_trait] +#[cfg_attr( + nightly_error_messages, + rustc_on_unimplemented( + note = "Function argument is not a valid axum extractor. \nSee `https://docs.rs/axum/latest/axum/extract/index.html` for details", + ) +)] +pub trait FromRequest: Sized { + /// If the extractor fails it'll use this "rejection" type. A rejection is + /// a kind of error that can be converted into a response. + type Rejection: IntoResponse; + + /// Perform the extraction. 
+ async fn from_request(req: Request, state: &S) -> Result; +} + +#[async_trait] +impl FromRequest for T +where + B: Send + 'static, + S: Send + Sync, + T: FromRequestParts, +{ + type Rejection = >::Rejection; + + async fn from_request(req: Request, state: &S) -> Result { + let (mut parts, _) = req.into_parts(); + Self::from_request_parts(&mut parts, state).await + } +} + +#[async_trait] +impl FromRequestParts for Option +where + T: FromRequestParts, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts( + parts: &mut Parts, + state: &S, + ) -> Result, Self::Rejection> { + Ok(T::from_request_parts(parts, state).await.ok()) + } +} + +#[async_trait] +impl FromRequest for Option +where + T: FromRequest, + B: Send + 'static, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request(req: Request, state: &S) -> Result, Self::Rejection> { + Ok(T::from_request(req, state).await.ok()) + } +} + +#[async_trait] +impl FromRequestParts for Result +where + T: FromRequestParts, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + Ok(T::from_request_parts(parts, state).await) + } +} + +#[async_trait] +impl FromRequest for Result +where + T: FromRequest, + B: Send + 'static, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request(req: Request, state: &S) -> Result { + Ok(T::from_request(req, state).await) + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/extract/rejection.rs b/.cargo-vendor/axum-core-0.3.4/src/extract/rejection.rs new file mode 100644 index 0000000000..958f3b2170 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/extract/rejection.rs @@ -0,0 +1,72 @@ +//! Rejection response types. + +use crate::__composite_rejection as composite_rejection; +use crate::__define_rejection as define_rejection; + +use crate::BoxError; + +composite_rejection! { + /// Rejection type for extractors that buffer the request body. 
Used if the + /// request body cannot be buffered due to an error. + pub enum FailedToBufferBody { + LengthLimitError, + UnknownBodyError, + } +} + +impl FailedToBufferBody { + pub(crate) fn from_err(err: E) -> Self + where + E: Into, + { + match err.into().downcast::() { + Ok(err) => Self::LengthLimitError(LengthLimitError::from_err(err)), + Err(err) => Self::UnknownBodyError(UnknownBodyError::from_err(err)), + } + } +} + +define_rejection! { + #[status = PAYLOAD_TOO_LARGE] + #[body = "Failed to buffer the request body"] + /// Encountered some other error when buffering the body. + /// + /// This can _only_ happen when you're using [`tower_http::limit::RequestBodyLimitLayer`] or + /// otherwise wrapping request bodies in [`http_body::Limited`]. + pub struct LengthLimitError(Error); +} + +define_rejection! { + #[status = BAD_REQUEST] + #[body = "Failed to buffer the request body"] + /// Encountered an unknown error when buffering the body. + pub struct UnknownBodyError(Error); +} + +define_rejection! { + #[status = BAD_REQUEST] + #[body = "Request body didn't contain valid UTF-8"] + /// Rejection type used when buffering the request into a [`String`] if the + /// body doesn't contain valid UTF-8. + pub struct InvalidUtf8(Error); +} + +composite_rejection! { + /// Rejection used for [`Bytes`](bytes::Bytes). + /// + /// Contains one variant for each way the [`Bytes`](bytes::Bytes) extractor + /// can fail. + pub enum BytesRejection { + FailedToBufferBody, + } +} + +composite_rejection! { + /// Rejection used for [`String`]. + /// + /// Contains one variant for each way the [`String`] extractor can fail. 
+ pub enum StringRejection { + FailedToBufferBody, + InvalidUtf8, + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/extract/request_parts.rs b/.cargo-vendor/axum-core-0.3.4/src/extract/request_parts.rs new file mode 100644 index 0000000000..05d7d7277b --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/extract/request_parts.rs @@ -0,0 +1,136 @@ +use super::{rejection::*, FromRequest, FromRequestParts}; +use crate::{BoxError, RequestExt}; +use async_trait::async_trait; +use bytes::Bytes; +use http::{request::Parts, HeaderMap, Method, Request, Uri, Version}; +use std::convert::Infallible; + +#[async_trait] +impl FromRequest for Request +where + B: Send, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request(req: Request, _: &S) -> Result { + Ok(req) + } +} + +#[async_trait] +impl FromRequestParts for Method +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, _: &S) -> Result { + Ok(parts.method.clone()) + } +} + +#[async_trait] +impl FromRequestParts for Uri +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, _: &S) -> Result { + Ok(parts.uri.clone()) + } +} + +#[async_trait] +impl FromRequestParts for Version +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, _: &S) -> Result { + Ok(parts.version) + } +} + +/// Clone the headers from the request. +/// +/// Prefer using [`TypedHeader`] to extract only the headers you need. 
+/// +/// [`TypedHeader`]: https://docs.rs/axum/latest/axum/extract/struct.TypedHeader.html +#[async_trait] +impl FromRequestParts for HeaderMap +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, _: &S) -> Result { + Ok(parts.headers.clone()) + } +} + +#[async_trait] +impl FromRequest for Bytes +where + B: http_body::Body + Send + 'static, + B::Data: Send, + B::Error: Into, + S: Send + Sync, +{ + type Rejection = BytesRejection; + + async fn from_request(req: Request, _: &S) -> Result { + let bytes = match req.into_limited_body() { + Ok(limited_body) => crate::body::to_bytes(limited_body) + .await + .map_err(FailedToBufferBody::from_err)?, + Err(unlimited_body) => crate::body::to_bytes(unlimited_body) + .await + .map_err(FailedToBufferBody::from_err)?, + }; + + Ok(bytes) + } +} + +#[async_trait] +impl FromRequest for String +where + B: http_body::Body + Send + 'static, + B::Data: Send, + B::Error: Into, + S: Send + Sync, +{ + type Rejection = StringRejection; + + async fn from_request(req: Request, state: &S) -> Result { + let bytes = Bytes::from_request(req, state) + .await + .map_err(|err| match err { + BytesRejection::FailedToBufferBody(inner) => { + StringRejection::FailedToBufferBody(inner) + } + })?; + + let string = std::str::from_utf8(&bytes) + .map_err(InvalidUtf8::from_err)? 
+ .to_owned(); + + Ok(string) + } +} + +#[async_trait] +impl FromRequest for Parts +where + B: Send + 'static, + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request(req: Request, _: &S) -> Result { + Ok(req.into_parts().0) + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/extract/tuple.rs b/.cargo-vendor/axum-core-0.3.4/src/extract/tuple.rs new file mode 100644 index 0000000000..728135b2a0 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/extract/tuple.rs @@ -0,0 +1,119 @@ +use super::{FromRequest, FromRequestParts}; +use crate::response::{IntoResponse, Response}; +use async_trait::async_trait; +use http::request::{Parts, Request}; +use std::convert::Infallible; + +#[async_trait] +impl FromRequestParts for () +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(_: &mut Parts, _: &S) -> Result<(), Self::Rejection> { + Ok(()) + } +} + +macro_rules! impl_from_request { + ( + [$($ty:ident),*], $last:ident + ) => { + #[async_trait] + #[allow(non_snake_case, unused_mut, unused_variables)] + impl FromRequestParts for ($($ty,)* $last,) + where + $( $ty: FromRequestParts + Send, )* + $last: FromRequestParts + Send, + S: Send + Sync, + { + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + $( + let $ty = $ty::from_request_parts(parts, state) + .await + .map_err(|err| err.into_response())?; + )* + let $last = $last::from_request_parts(parts, state) + .await + .map_err(|err| err.into_response())?; + + Ok(($($ty,)* $last,)) + } + } + + // This impl must not be generic over M, otherwise it would conflict with the blanket + // implementation of `FromRequest` for `T: FromRequestParts`. 
+ #[async_trait] + #[allow(non_snake_case, unused_mut, unused_variables)] + impl FromRequest for ($($ty,)* $last,) + where + $( $ty: FromRequestParts + Send, )* + $last: FromRequest + Send, + B: Send + 'static, + S: Send + Sync, + { + type Rejection = Response; + + async fn from_request(req: Request, state: &S) -> Result { + let (mut parts, body) = req.into_parts(); + + $( + let $ty = $ty::from_request_parts(&mut parts, state).await.map_err(|err| err.into_response())?; + )* + + let req = Request::from_parts(parts, body); + + let $last = $last::from_request(req, state).await.map_err(|err| err.into_response())?; + + Ok(($($ty,)* $last,)) + } + } + }; +} + +all_the_tuples!(impl_from_request); + +#[cfg(test)] +mod tests { + use bytes::Bytes; + use http::Method; + + use crate::extract::{FromRequest, FromRequestParts}; + + fn assert_from_request() + where + T: FromRequest<(), http_body::Full, M>, + { + } + + fn assert_from_request_parts>() {} + + #[test] + fn unit() { + assert_from_request_parts::<()>(); + assert_from_request::<_, ()>(); + } + + #[test] + fn tuple_of_one() { + assert_from_request_parts::<(Method,)>(); + assert_from_request::<_, (Method,)>(); + assert_from_request::<_, (Bytes,)>(); + } + + #[test] + fn tuple_of_two() { + assert_from_request_parts::<((), ())>(); + assert_from_request::<_, ((), ())>(); + assert_from_request::<_, (Method, Bytes)>(); + } + + #[test] + fn nested_tuple() { + assert_from_request_parts::<(((Method,),),)>(); + assert_from_request::<_, ((((Bytes,),),),)>(); + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/lib.rs b/.cargo-vendor/axum-core-0.3.4/src/lib.rs new file mode 100644 index 0000000000..974e5e18d4 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/lib.rs @@ -0,0 +1,66 @@ +#![cfg_attr(nightly_error_messages, feature(rustc_attrs))] +//! Core types and traits for [`axum`]. +//! +//! Libraries authors that want to provide [`FromRequest`] or [`IntoResponse`] implementations +//! 
should depend on the [`axum-core`] crate, instead of `axum` if possible. +//! +//! [`FromRequest`]: crate::extract::FromRequest +//! [`IntoResponse`]: crate::response::IntoResponse +//! [`axum`]: https://crates.io/crates/axum +//! [`axum-core`]: http://crates.io/crates/axum-core + +#![warn( + clippy::all, + clippy::dbg_macro, + clippy::todo, + clippy::empty_enum, + clippy::enum_glob_use, + clippy::mem_forget, + clippy::unused_self, + clippy::filter_map_next, + clippy::needless_continue, + clippy::needless_borrow, + clippy::match_wildcard_for_single_variants, + clippy::if_let_mutex, + clippy::mismatched_target_os, + clippy::await_holding_lock, + clippy::match_on_vec_items, + clippy::imprecise_flops, + clippy::suboptimal_flops, + clippy::lossy_float_literal, + clippy::rest_pat_in_fully_bound_structs, + clippy::fn_params_excessive_bools, + clippy::exit, + clippy::inefficient_to_string, + clippy::linkedlist, + clippy::macro_use_imports, + clippy::option_option, + clippy::verbose_file_reads, + clippy::unnested_or_patterns, + clippy::str_to_string, + rust_2018_idioms, + future_incompatible, + nonstandard_style, + missing_debug_implementations, + missing_docs +)] +#![deny(unreachable_pub, private_in_public)] +#![allow(elided_lifetimes_in_paths, clippy::type_complexity)] +#![forbid(unsafe_code)] +#![cfg_attr(test, allow(clippy::float_cmp))] + +#[macro_use] +pub(crate) mod macros; + +mod error; +mod ext_traits; +pub use self::error::Error; + +pub mod body; +pub mod extract; +pub mod response; + +/// Alias for a type-erased error type. +pub type BoxError = Box; + +pub use self::ext_traits::{request::RequestExt, request_parts::RequestPartsExt}; diff --git a/.cargo-vendor/axum-core-0.3.4/src/macros.rs b/.cargo-vendor/axum-core-0.3.4/src/macros.rs new file mode 100644 index 0000000000..10365e1556 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/macros.rs @@ -0,0 +1,296 @@ +/// Private API. +#[doc(hidden)] +#[macro_export] +macro_rules! 
__log_rejection { + ( + rejection_type = $ty:ident, + body_text = $body_text:expr, + status = $status:expr, + ) => { + #[cfg(feature = "tracing")] + { + tracing::event!( + target: "axum::rejection", + tracing::Level::TRACE, + status = $status.as_u16(), + body = $body_text, + rejection_type = std::any::type_name::<$ty>(), + "rejecting request", + ); + } + }; +} + +/// Private API. +#[doc(hidden)] +#[macro_export] +macro_rules! __define_rejection { + ( + #[status = $status:ident] + #[body = $body:expr] + $(#[$m:meta])* + pub struct $name:ident; + ) => { + $(#[$m])* + #[derive(Debug)] + #[non_exhaustive] + pub struct $name; + + impl $crate::response::IntoResponse for $name { + fn into_response(self) -> $crate::response::Response { + $crate::__log_rejection!( + rejection_type = $name, + body_text = $body, + status = http::StatusCode::$status, + ); + (self.status(), $body).into_response() + } + } + + impl $name { + /// Get the response body text used for this rejection. + pub fn body_text(&self) -> String { + $body.into() + } + + /// Get the status code used for this rejection. 
+ pub fn status(&self) -> http::StatusCode { + http::StatusCode::$status + } + } + + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", $body) + } + } + + impl std::error::Error for $name {} + + impl Default for $name { + fn default() -> Self { + Self + } + } + }; + + ( + #[status = $status:ident] + #[body = $body:expr] + $(#[$m:meta])* + pub struct $name:ident (Error); + ) => { + $(#[$m])* + #[derive(Debug)] + pub struct $name(pub(crate) $crate::Error); + + impl $name { + pub(crate) fn from_err(err: E) -> Self + where + E: Into<$crate::BoxError>, + { + Self($crate::Error::new(err)) + } + } + + impl $crate::response::IntoResponse for $name { + fn into_response(self) -> $crate::response::Response { + $crate::__log_rejection!( + rejection_type = $name, + body_text = self.body_text(), + status = http::StatusCode::$status, + ); + (self.status(), self.body_text()).into_response() + } + } + + impl $name { + /// Get the response body text used for this rejection. + pub fn body_text(&self) -> String { + format!(concat!($body, ": {}"), self.0).into() + } + + /// Get the status code used for this rejection. + pub fn status(&self) -> http::StatusCode { + http::StatusCode::$status + } + } + + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", $body) + } + } + + impl std::error::Error for $name { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.0) + } + } + }; +} + +/// Private API. +#[doc(hidden)] +#[macro_export] +macro_rules! __composite_rejection { + ( + $(#[$m:meta])* + pub enum $name:ident { + $($variant:ident),+ + $(,)? 
+ } + ) => { + $(#[$m])* + #[derive(Debug)] + #[non_exhaustive] + pub enum $name { + $( + #[allow(missing_docs)] + $variant($variant) + ),+ + } + + impl $crate::response::IntoResponse for $name { + fn into_response(self) -> $crate::response::Response { + match self { + $( + Self::$variant(inner) => inner.into_response(), + )+ + } + } + } + + impl $name { + /// Get the response body text used for this rejection. + pub fn body_text(&self) -> String { + match self { + $( + Self::$variant(inner) => inner.body_text(), + )+ + } + } + + /// Get the status code used for this rejection. + pub fn status(&self) -> http::StatusCode { + match self { + $( + Self::$variant(inner) => inner.status(), + )+ + } + } + } + + $( + impl From<$variant> for $name { + fn from(inner: $variant) -> Self { + Self::$variant(inner) + } + } + )+ + + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + $( + Self::$variant(inner) => write!(f, "{}", inner), + )+ + } + } + } + + impl std::error::Error for $name { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + $( + Self::$variant(inner) => Some(inner), + )+ + } + } + } + }; +} + +#[rustfmt::skip] +macro_rules! 
all_the_tuples { + ($name:ident) => { + $name!([], T1); + $name!([T1], T2); + $name!([T1, T2], T3); + $name!([T1, T2, T3], T4); + $name!([T1, T2, T3, T4], T5); + $name!([T1, T2, T3, T4, T5], T6); + $name!([T1, T2, T3, T4, T5, T6], T7); + $name!([T1, T2, T3, T4, T5, T6, T7], T8); + $name!([T1, T2, T3, T4, T5, T6, T7, T8], T9); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9], T10); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10], T11); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11], T12); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12], T13); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13], T14); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14], T15); + $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15], T16); + }; +} + +macro_rules! all_the_tuples_no_last_special_case { + ($name:ident) => { + $name!(T1); + $name!(T1, T2); + $name!(T1, T2, T3); + $name!(T1, T2, T3, T4); + $name!(T1, T2, T3, T4, T5); + $name!(T1, T2, T3, T4, T5, T6); + $name!(T1, T2, T3, T4, T5, T6, T7); + $name!(T1, T2, T3, T4, T5, T6, T7, T8); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15); + $name!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16); + }; +} + +/// Private API. +#[doc(hidden)] +#[macro_export] +macro_rules! 
__impl_deref { + ($ident:ident) => { + impl std::ops::Deref for $ident { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl std::ops::DerefMut for $ident { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + }; + + ($ident:ident: $ty:ty) => { + impl std::ops::Deref for $ident { + type Target = $ty; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl std::ops::DerefMut for $ident { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + }; +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/response/append_headers.rs b/.cargo-vendor/axum-core-0.3.4/src/response/append_headers.rs new file mode 100644 index 0000000000..e4ac4812f9 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/response/append_headers.rs @@ -0,0 +1,68 @@ +use super::{IntoResponse, IntoResponseParts, Response, ResponseParts, TryIntoHeaderError}; +use http::header::{HeaderName, HeaderValue}; +use std::fmt; + +/// Append headers to a response. +/// +/// Returning something like `[("content-type", "foo=bar")]` from a handler will override any +/// existing `content-type` headers. If instead you want to append headers, use `AppendHeaders`: +/// +/// ```rust +/// use axum::{ +/// response::{AppendHeaders, IntoResponse}, +/// http::header::SET_COOKIE, +/// }; +/// +/// async fn handler() -> impl IntoResponse { +/// // something that sets the `set-cookie` header +/// let set_some_cookies = /* ... 
*/ +/// # axum::http::HeaderMap::new(); +/// +/// ( +/// set_some_cookies, +/// // append two `set-cookie` headers to the response +/// // without overriding the ones added by `set_some_cookies` +/// AppendHeaders([ +/// (SET_COOKIE, "foo=bar"), +/// (SET_COOKIE, "baz=qux"), +/// ]) +/// ) +/// } +/// ``` +#[derive(Debug)] +#[must_use] +pub struct AppendHeaders(pub I); + +impl IntoResponse for AppendHeaders +where + I: IntoIterator, + K: TryInto, + K::Error: fmt::Display, + V: TryInto, + V::Error: fmt::Display, +{ + fn into_response(self) -> Response { + (self, ()).into_response() + } +} + +impl IntoResponseParts for AppendHeaders +where + I: IntoIterator, + K: TryInto, + K::Error: fmt::Display, + V: TryInto, + V::Error: fmt::Display, +{ + type Error = TryIntoHeaderError; + + fn into_response_parts(self, mut res: ResponseParts) -> Result { + for (key, value) in self.0 { + let key = key.try_into().map_err(TryIntoHeaderError::key)?; + let value = value.try_into().map_err(TryIntoHeaderError::value)?; + res.headers_mut().append(key, value); + } + + Ok(res) + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/response/into_response.rs b/.cargo-vendor/axum-core-0.3.4/src/response/into_response.rs new file mode 100644 index 0000000000..f19974cfb7 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/response/into_response.rs @@ -0,0 +1,531 @@ +use super::{IntoResponseParts, Response, ResponseParts}; +use crate::{body, BoxError}; +use bytes::{buf::Chain, Buf, Bytes, BytesMut}; +use http::{ + header::{self, HeaderMap, HeaderName, HeaderValue}, + Extensions, StatusCode, +}; +use http_body::{ + combinators::{MapData, MapErr}, + Empty, Full, SizeHint, +}; +use std::{ + borrow::Cow, + convert::Infallible, + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +/// Trait for generating responses. +/// +/// Types that implement `IntoResponse` can be returned from handlers. 
+/// +/// # Implementing `IntoResponse` +/// +/// You generally shouldn't have to implement `IntoResponse` manually, as axum +/// provides implementations for many common types. +/// +/// However it might be necessary if you have a custom error type that you want +/// to return from handlers: +/// +/// ```rust +/// use axum::{ +/// Router, +/// body::{self, Bytes}, +/// routing::get, +/// http::StatusCode, +/// response::{IntoResponse, Response}, +/// }; +/// +/// enum MyError { +/// SomethingWentWrong, +/// SomethingElseWentWrong, +/// } +/// +/// impl IntoResponse for MyError { +/// fn into_response(self) -> Response { +/// let body = match self { +/// MyError::SomethingWentWrong => "something went wrong", +/// MyError::SomethingElseWentWrong => "something else went wrong", +/// }; +/// +/// // its often easiest to implement `IntoResponse` by calling other implementations +/// (StatusCode::INTERNAL_SERVER_ERROR, body).into_response() +/// } +/// } +/// +/// // `Result` can now be returned from handlers +/// let app = Router::new().route("/", get(handler)); +/// +/// async fn handler() -> Result<(), MyError> { +/// Err(MyError::SomethingWentWrong) +/// } +/// # async { +/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// Or if you have a custom body type you'll also need to implement +/// `IntoResponse` for it: +/// +/// ```rust +/// use axum::{ +/// body, +/// routing::get, +/// response::{IntoResponse, Response}, +/// Router, +/// }; +/// use http_body::Body; +/// use http::HeaderMap; +/// use bytes::Bytes; +/// use std::{ +/// convert::Infallible, +/// task::{Poll, Context}, +/// pin::Pin, +/// }; +/// +/// struct MyBody; +/// +/// // First implement `Body` for `MyBody`. This could for example use +/// // some custom streaming protocol. 
+/// impl Body for MyBody { +/// type Data = Bytes; +/// type Error = Infallible; +/// +/// fn poll_data( +/// self: Pin<&mut Self>, +/// cx: &mut Context<'_> +/// ) -> Poll>> { +/// # unimplemented!() +/// // ... +/// } +/// +/// fn poll_trailers( +/// self: Pin<&mut Self>, +/// cx: &mut Context<'_> +/// ) -> Poll, Self::Error>> { +/// # unimplemented!() +/// // ... +/// } +/// } +/// +/// // Now we can implement `IntoResponse` directly for `MyBody` +/// impl IntoResponse for MyBody { +/// fn into_response(self) -> Response { +/// Response::new(body::boxed(self)) +/// } +/// } +/// +/// // `MyBody` can now be returned from handlers. +/// let app = Router::new().route("/", get(|| async { MyBody })); +/// # async { +/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); +/// # }; +/// ``` +pub trait IntoResponse { + /// Create a response. + fn into_response(self) -> Response; +} + +impl IntoResponse for StatusCode { + fn into_response(self) -> Response { + let mut res = ().into_response(); + *res.status_mut() = self; + res + } +} + +impl IntoResponse for () { + fn into_response(self) -> Response { + Empty::new().into_response() + } +} + +impl IntoResponse for Infallible { + fn into_response(self) -> Response { + match self {} + } +} + +impl IntoResponse for Result +where + T: IntoResponse, + E: IntoResponse, +{ + fn into_response(self) -> Response { + match self { + Ok(value) => value.into_response(), + Err(err) => err.into_response(), + } + } +} + +impl IntoResponse for Response +where + B: http_body::Body + Send + 'static, + B::Error: Into, +{ + fn into_response(self) -> Response { + self.map(body::boxed) + } +} + +impl IntoResponse for http::response::Parts { + fn into_response(self) -> Response { + Response::from_parts(self, body::boxed(Empty::new())) + } +} + +impl IntoResponse for Full { + fn into_response(self) -> Response { + Response::new(body::boxed(self)) + } +} + +impl IntoResponse for Empty { + fn 
into_response(self) -> Response { + Response::new(body::boxed(self)) + } +} + +impl IntoResponse for http_body::combinators::BoxBody +where + E: Into + 'static, +{ + fn into_response(self) -> Response { + Response::new(body::boxed(self)) + } +} + +impl IntoResponse for http_body::combinators::UnsyncBoxBody +where + E: Into + 'static, +{ + fn into_response(self) -> Response { + Response::new(body::boxed(self)) + } +} + +impl IntoResponse for MapData +where + B: http_body::Body + Send + 'static, + F: FnMut(B::Data) -> Bytes + Send + 'static, + B::Error: Into, +{ + fn into_response(self) -> Response { + Response::new(body::boxed(self)) + } +} + +impl IntoResponse for MapErr +where + B: http_body::Body + Send + 'static, + F: FnMut(B::Error) -> E + Send + 'static, + E: Into, +{ + fn into_response(self) -> Response { + Response::new(body::boxed(self)) + } +} + +impl IntoResponse for &'static str { + fn into_response(self) -> Response { + Cow::Borrowed(self).into_response() + } +} + +impl IntoResponse for String { + fn into_response(self) -> Response { + Cow::<'static, str>::Owned(self).into_response() + } +} + +impl IntoResponse for Cow<'static, str> { + fn into_response(self) -> Response { + let mut res = Full::from(self).into_response(); + res.headers_mut().insert( + header::CONTENT_TYPE, + HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref()), + ); + res + } +} + +impl IntoResponse for Bytes { + fn into_response(self) -> Response { + let mut res = Full::from(self).into_response(); + res.headers_mut().insert( + header::CONTENT_TYPE, + HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()), + ); + res + } +} + +impl IntoResponse for BytesMut { + fn into_response(self) -> Response { + self.freeze().into_response() + } +} + +impl IntoResponse for Chain +where + T: Buf + Unpin + Send + 'static, + U: Buf + Unpin + Send + 'static, +{ + fn into_response(self) -> Response { + let (first, second) = self.into_inner(); + let mut res = 
Response::new(body::boxed(BytesChainBody { + first: Some(first), + second: Some(second), + })); + res.headers_mut().insert( + header::CONTENT_TYPE, + HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()), + ); + res + } +} + +struct BytesChainBody { + first: Option, + second: Option, +} + +impl http_body::Body for BytesChainBody +where + T: Buf + Unpin, + U: Buf + Unpin, +{ + type Data = Bytes; + type Error = Infallible; + + fn poll_data( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if let Some(mut buf) = self.first.take() { + let bytes = buf.copy_to_bytes(buf.remaining()); + return Poll::Ready(Some(Ok(bytes))); + } + + if let Some(mut buf) = self.second.take() { + let bytes = buf.copy_to_bytes(buf.remaining()); + return Poll::Ready(Some(Ok(bytes))); + } + + Poll::Ready(None) + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) + } + + fn is_end_stream(&self) -> bool { + self.first.is_none() && self.second.is_none() + } + + fn size_hint(&self) -> SizeHint { + match (self.first.as_ref(), self.second.as_ref()) { + (Some(first), Some(second)) => { + let total_size = first.remaining() + second.remaining(); + SizeHint::with_exact(total_size as u64) + } + (Some(buf), None) => SizeHint::with_exact(buf.remaining() as u64), + (None, Some(buf)) => SizeHint::with_exact(buf.remaining() as u64), + (None, None) => SizeHint::with_exact(0), + } + } +} + +impl IntoResponse for &'static [u8] { + fn into_response(self) -> Response { + Cow::Borrowed(self).into_response() + } +} + +impl IntoResponse for &'static [u8; N] { + fn into_response(self) -> Response { + self.as_slice().into_response() + } +} + +impl IntoResponse for [u8; N] { + fn into_response(self) -> Response { + self.to_vec().into_response() + } +} + +impl IntoResponse for Vec { + fn into_response(self) -> Response { + Cow::<'static, [u8]>::Owned(self).into_response() + } +} + +impl IntoResponse for Cow<'static, 
[u8]> { + fn into_response(self) -> Response { + let mut res = Full::from(self).into_response(); + res.headers_mut().insert( + header::CONTENT_TYPE, + HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()), + ); + res + } +} + +impl IntoResponse for (StatusCode, R) +where + R: IntoResponse, +{ + fn into_response(self) -> Response { + let mut res = self.1.into_response(); + *res.status_mut() = self.0; + res + } +} + +impl IntoResponse for HeaderMap { + fn into_response(self) -> Response { + let mut res = ().into_response(); + *res.headers_mut() = self; + res + } +} + +impl IntoResponse for Extensions { + fn into_response(self) -> Response { + let mut res = ().into_response(); + *res.extensions_mut() = self; + res + } +} + +impl IntoResponse for [(K, V); N] +where + K: TryInto, + K::Error: fmt::Display, + V: TryInto, + V::Error: fmt::Display, +{ + fn into_response(self) -> Response { + (self, ()).into_response() + } +} + +impl IntoResponse for (http::response::Parts, R) +where + R: IntoResponse, +{ + fn into_response(self) -> Response { + let (parts, res) = self; + (parts.status, parts.headers, parts.extensions, res).into_response() + } +} + +impl IntoResponse for (http::response::Response<()>, R) +where + R: IntoResponse, +{ + fn into_response(self) -> Response { + let (template, res) = self; + let (parts, ()) = template.into_parts(); + (parts, res).into_response() + } +} + +macro_rules! impl_into_response { + ( $($ty:ident),* $(,)? 
) => { + #[allow(non_snake_case)] + impl IntoResponse for ($($ty),*, R) + where + $( $ty: IntoResponseParts, )* + R: IntoResponse, + { + fn into_response(self) -> Response { + let ($($ty),*, res) = self; + + let res = res.into_response(); + let parts = ResponseParts { res }; + + $( + let parts = match $ty.into_response_parts(parts) { + Ok(parts) => parts, + Err(err) => { + return err.into_response(); + } + }; + )* + + parts.res + } + } + + #[allow(non_snake_case)] + impl IntoResponse for (StatusCode, $($ty),*, R) + where + $( $ty: IntoResponseParts, )* + R: IntoResponse, + { + fn into_response(self) -> Response { + let (status, $($ty),*, res) = self; + + let res = res.into_response(); + let parts = ResponseParts { res }; + + $( + let parts = match $ty.into_response_parts(parts) { + Ok(parts) => parts, + Err(err) => { + return err.into_response(); + } + }; + )* + + (status, parts.res).into_response() + } + } + + #[allow(non_snake_case)] + impl IntoResponse for (http::response::Parts, $($ty),*, R) + where + $( $ty: IntoResponseParts, )* + R: IntoResponse, + { + fn into_response(self) -> Response { + let (outer_parts, $($ty),*, res) = self; + + let res = res.into_response(); + let parts = ResponseParts { res }; + $( + let parts = match $ty.into_response_parts(parts) { + Ok(parts) => parts, + Err(err) => { + return err.into_response(); + } + }; + )* + + (outer_parts, parts.res).into_response() + } + } + + #[allow(non_snake_case)] + impl IntoResponse for (http::response::Response<()>, $($ty),*, R) + where + $( $ty: IntoResponseParts, )* + R: IntoResponse, + { + fn into_response(self) -> Response { + let (template, $($ty),*, res) = self; + let (parts, ()) = template.into_parts(); + (parts, $($ty),*, res).into_response() + } + } + } +} + +all_the_tuples_no_last_special_case!(impl_into_response); diff --git a/.cargo-vendor/axum-core-0.3.4/src/response/into_response_parts.rs b/.cargo-vendor/axum-core-0.3.4/src/response/into_response_parts.rs new file mode 100644 index 
0000000000..60f0b805e7 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/response/into_response_parts.rs @@ -0,0 +1,260 @@ +use super::{IntoResponse, Response}; +use http::{ + header::{HeaderMap, HeaderName, HeaderValue}, + Extensions, StatusCode, +}; +use std::{convert::Infallible, fmt}; + +/// Trait for adding headers and extensions to a response. +/// +/// # Example +/// +/// ```rust +/// use axum::{ +/// response::{ResponseParts, IntoResponse, IntoResponseParts, Response}, +/// http::{StatusCode, header::{HeaderName, HeaderValue}}, +/// }; +/// +/// // Hypothetical helper type for setting a single header +/// struct SetHeader<'a>(&'a str, &'a str); +/// +/// impl<'a> IntoResponseParts for SetHeader<'a> { +/// type Error = (StatusCode, String); +/// +/// fn into_response_parts(self, mut res: ResponseParts) -> Result { +/// match (self.0.parse::(), self.1.parse::()) { +/// (Ok(name), Ok(value)) => { +/// res.headers_mut().insert(name, value); +/// }, +/// (Err(_), _) => { +/// return Err(( +/// StatusCode::INTERNAL_SERVER_ERROR, +/// format!("Invalid header name {}", self.0), +/// )); +/// }, +/// (_, Err(_)) => { +/// return Err(( +/// StatusCode::INTERNAL_SERVER_ERROR, +/// format!("Invalid header value {}", self.1), +/// )); +/// }, +/// } +/// +/// Ok(res) +/// } +/// } +/// +/// // Its also recommended to implement `IntoResponse` so `SetHeader` can be used on its own as +/// // the response +/// impl<'a> IntoResponse for SetHeader<'a> { +/// fn into_response(self) -> Response { +/// // This gives an empty response with the header +/// (self, ()).into_response() +/// } +/// } +/// +/// // We can now return `SetHeader` in responses +/// // +/// // Note that returning `impl IntoResponse` might be easier if the response has many parts to +/// // it. The return type is written out here for clarity. 
+/// async fn handler() -> (SetHeader<'static>, SetHeader<'static>, &'static str) { +/// ( +/// SetHeader("server", "axum"), +/// SetHeader("x-foo", "custom"), +/// "body", +/// ) +/// } +/// +/// // Or on its own as the whole response +/// async fn other_handler() -> SetHeader<'static> { +/// SetHeader("x-foo", "custom") +/// } +/// ``` +pub trait IntoResponseParts { + /// The type returned in the event of an error. + /// + /// This can be used to fallibly convert types into headers or extensions. + type Error: IntoResponse; + + /// Set parts of the response + fn into_response_parts(self, res: ResponseParts) -> Result; +} + +impl IntoResponseParts for Option +where + T: IntoResponseParts, +{ + type Error = T::Error; + + fn into_response_parts(self, res: ResponseParts) -> Result { + if let Some(inner) = self { + inner.into_response_parts(res) + } else { + Ok(res) + } + } +} + +/// Parts of a response. +/// +/// Used with [`IntoResponseParts`]. +#[derive(Debug)] +pub struct ResponseParts { + pub(crate) res: Response, +} + +impl ResponseParts { + /// Gets a reference to the response headers. + pub fn headers(&self) -> &HeaderMap { + self.res.headers() + } + + /// Gets a mutable reference to the response headers. + pub fn headers_mut(&mut self) -> &mut HeaderMap { + self.res.headers_mut() + } + + /// Gets a reference to the response extensions. + pub fn extensions(&self) -> &Extensions { + self.res.extensions() + } + + /// Gets a mutable reference to the response extensions. 
+ pub fn extensions_mut(&mut self) -> &mut Extensions { + self.res.extensions_mut() + } +} + +impl IntoResponseParts for HeaderMap { + type Error = Infallible; + + fn into_response_parts(self, mut res: ResponseParts) -> Result { + res.headers_mut().extend(self); + Ok(res) + } +} + +impl IntoResponseParts for [(K, V); N] +where + K: TryInto, + K::Error: fmt::Display, + V: TryInto, + V::Error: fmt::Display, +{ + type Error = TryIntoHeaderError; + + fn into_response_parts(self, mut res: ResponseParts) -> Result { + for (key, value) in self { + let key = key.try_into().map_err(TryIntoHeaderError::key)?; + let value = value.try_into().map_err(TryIntoHeaderError::value)?; + res.headers_mut().insert(key, value); + } + + Ok(res) + } +} + +/// Error returned if converting a value to a header fails. +#[derive(Debug)] +pub struct TryIntoHeaderError { + kind: TryIntoHeaderErrorKind, +} + +impl TryIntoHeaderError { + pub(super) fn key(err: K) -> Self { + Self { + kind: TryIntoHeaderErrorKind::Key(err), + } + } + + pub(super) fn value(err: V) -> Self { + Self { + kind: TryIntoHeaderErrorKind::Value(err), + } + } +} + +#[derive(Debug)] +enum TryIntoHeaderErrorKind { + Key(K), + Value(V), +} + +impl IntoResponse for TryIntoHeaderError +where + K: fmt::Display, + V: fmt::Display, +{ + fn into_response(self) -> Response { + match self.kind { + TryIntoHeaderErrorKind::Key(inner) => { + (StatusCode::INTERNAL_SERVER_ERROR, inner.to_string()).into_response() + } + TryIntoHeaderErrorKind::Value(inner) => { + (StatusCode::INTERNAL_SERVER_ERROR, inner.to_string()).into_response() + } + } + } +} + +impl fmt::Display for TryIntoHeaderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.kind { + TryIntoHeaderErrorKind::Key(_) => write!(f, "failed to convert key to a header name"), + TryIntoHeaderErrorKind::Value(_) => { + write!(f, "failed to convert value to a header value") + } + } + } +} + +impl std::error::Error for TryIntoHeaderError +where + K: 
std::error::Error + 'static, + V: std::error::Error + 'static, +{ + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match &self.kind { + TryIntoHeaderErrorKind::Key(inner) => Some(inner), + TryIntoHeaderErrorKind::Value(inner) => Some(inner), + } + } +} + +macro_rules! impl_into_response_parts { + ( $($ty:ident),* $(,)? ) => { + #[allow(non_snake_case)] + impl<$($ty,)*> IntoResponseParts for ($($ty,)*) + where + $( $ty: IntoResponseParts, )* + { + type Error = Response; + + fn into_response_parts(self, res: ResponseParts) -> Result { + let ($($ty,)*) = self; + + $( + let res = match $ty.into_response_parts(res) { + Ok(res) => res, + Err(err) => { + return Err(err.into_response()); + } + }; + )* + + Ok(res) + } + } + } +} + +all_the_tuples_no_last_special_case!(impl_into_response_parts); + +impl IntoResponseParts for Extensions { + type Error = Infallible; + + fn into_response_parts(self, mut res: ResponseParts) -> Result { + res.extensions_mut().extend(self); + Ok(res) + } +} diff --git a/.cargo-vendor/axum-core-0.3.4/src/response/mod.rs b/.cargo-vendor/axum-core-0.3.4/src/response/mod.rs new file mode 100644 index 0000000000..d66dfec510 --- /dev/null +++ b/.cargo-vendor/axum-core-0.3.4/src/response/mod.rs @@ -0,0 +1,129 @@ +//! Types and traits for generating responses. +//! +//! See [`axum::response`] for more details. +//! +//! [`axum::response`]: https://docs.rs/axum/latest/axum/response/index.html + +use crate::body::BoxBody; + +mod append_headers; +mod into_response; +mod into_response_parts; + +pub use self::{ + append_headers::AppendHeaders, + into_response::IntoResponse, + into_response_parts::{IntoResponseParts, ResponseParts, TryIntoHeaderError}, +}; + +/// Type alias for [`http::Response`] whose body type defaults to [`BoxBody`], the most common body +/// type used with axum. +pub type Response = http::Response; + +/// An [`IntoResponse`]-based result type that uses [`ErrorResponse`] as the error type. 
+/// +/// All types which implement [`IntoResponse`] can be converted to an [`ErrorResponse`]. This makes +/// it useful as a general purpose error type for functions which combine multiple distinct error +/// types that all implement [`IntoResponse`]. +/// +/// # Example +/// +/// ``` +/// use axum::{ +/// response::{IntoResponse, Response}, +/// http::StatusCode, +/// }; +/// +/// // two fallible functions with different error types +/// fn try_something() -> Result<(), ErrorA> { +/// // ... +/// # unimplemented!() +/// } +/// +/// fn try_something_else() -> Result<(), ErrorB> { +/// // ... +/// # unimplemented!() +/// } +/// +/// // each error type implements `IntoResponse` +/// struct ErrorA; +/// +/// impl IntoResponse for ErrorA { +/// fn into_response(self) -> Response { +/// // ... +/// # unimplemented!() +/// } +/// } +/// +/// enum ErrorB { +/// SomethingWentWrong, +/// } +/// +/// impl IntoResponse for ErrorB { +/// fn into_response(self) -> Response { +/// // ... +/// # unimplemented!() +/// } +/// } +/// +/// // we can combine them using `axum::response::Result` and still use `?` +/// async fn handler() -> axum::response::Result<&'static str> { +/// // the errors are automatically converted to `ErrorResponse` +/// try_something()?; +/// try_something_else()?; +/// +/// Ok("it worked!") +/// } +/// ``` +/// +/// # As a replacement for `std::result::Result` +/// +/// Since `axum::response::Result` has a default error type you only have to specify the `Ok` type: +/// +/// ``` +/// use axum::{ +/// response::{IntoResponse, Response, Result}, +/// http::StatusCode, +/// }; +/// +/// // `Result` automatically uses `ErrorResponse` as the error type. +/// async fn handler() -> Result<&'static str> { +/// try_something()?; +/// +/// Ok("it worked!") +/// } +/// +/// // You can still specify the error even if you've imported `axum::response::Result` +/// fn try_something() -> Result<(), StatusCode> { +/// // ... 
+/// # unimplemented!() +/// } +/// ``` +pub type Result = std::result::Result; + +impl IntoResponse for Result +where + T: IntoResponse, +{ + fn into_response(self) -> Response { + match self { + Ok(ok) => ok.into_response(), + Err(err) => err.0, + } + } +} + +/// An [`IntoResponse`]-based error type +/// +/// See [`Result`] for more details. +#[derive(Debug)] +pub struct ErrorResponse(Response); + +impl From for ErrorResponse +where + T: IntoResponse, +{ + fn from(value: T) -> Self { + Self(value.into_response()) + } +} diff --git a/.cargo-vendor/axum-core/.cargo-checksum.json b/.cargo-vendor/axum-core/.cargo-checksum.json index 8bfd791ee7..eab0d70a2b 100644 --- a/.cargo-vendor/axum-core/.cargo-checksum.json +++ b/.cargo-vendor/axum-core/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"446d28d46bc10b208ef14dab4577ab84630b57539a3553e50c0a9f5fe065ff69","Cargo.toml":"b9a8bd35772328c050ef314ead75f79a78acd70ea9b3f535af1a8580d50d6b0a","LICENSE":"ab25eee08e7b6d209398f44e6f4a6f17c9446cd54b99fd64e041edadc96b6f9b","README.md":"7113dacbc69f31674e7051c4b504471130b84e359e36f3e1e9fa944990cc51b7","build.rs":"3bad731d51fd3a32b235ebd437bc9a2788b35bcae4779237cd26696e13840bcb","src/body.rs":"e779fac3a090a4d9156922a8255cd79fffb5ad52a96db64db64a3fe66016b77a","src/error.rs":"ac4b458b73677b8cd07c1cf71030db81963e4cabf417d80c678ea315a7b290cd","src/ext_traits/mod.rs":"d9ccd994037e44dff41865e4c66b3b110e09c380b54c2b74e3b6022c2573d719","src/ext_traits/request.rs":"265cd31f92c93294e4f39c40a1d7edd6dabfd9e6affad0908ccdbcce25ba92ae","src/ext_traits/request_parts.rs":"04be5b833550d4d9d3fbb7788077c39f248cfd787c94d778f43cb71e0bce7e2a","src/extract/default_body_limit.rs":"1f55fd5681ec2e7c548d64e9758c7562753123e1d81c98b11afc8c0a5d5d79d1","src/extract/from_ref.rs":"baf52ef04101b6b8f3c5a699b30c09ea322391c64380561a963cb3a7536616f3","src/extract/mod.rs":"8123aea39f35391aab17cd166f8ffec0ef6a89eac954edb94730f8a2f500f3a3","src/extract/rejection.rs":"64bbf092dd50af4cf4055bcfa86719290b795bbea4a0824b
f5d7e786dc372ee3","src/extract/request_parts.rs":"85794b4265a354319a46895edea2fa7940712e2aef1388354cfd1060868cac5f","src/extract/tuple.rs":"983f035fd11d85cff01370373bef723bc25fe4870eef8bb176e90d11f94923fa","src/lib.rs":"b752ccd80895e79307126b45b4d89116d4fefb2cf88aed8bdbc8bffb550a5465","src/macros.rs":"e428cdb5c2e4c74451e57628ce6d1b457e38965daf7a0b9f7053f73317a55f2f","src/response/append_headers.rs":"4fac6f55e9fb93f16e2e549d586aa4d698a208a27578ee2e708bc17513708e06","src/response/into_response.rs":"58d68bec046db7ba48d06dd13511e1f2e05ea217e4e4c8d56299e9745544ddb4","src/response/into_response_parts.rs":"f0e85f3e931acfcf3d610b4a464b38fe3bcabd96940c55436c7f82a5dfdc30f3","src/response/mod.rs":"34766742cfc0c4b7bc02c9b290dbdcd2128138f619d622d72a2a546ce122c73b"},"package":"759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"} \ No newline at end of file +{"files":{"CHANGELOG.md":"b89908d8df22fb4c21ac8b498838599371661b1cd899318a22f6f152d0b99790","Cargo.toml":"e631390fb82a6fe25bdca464d594a23fd5232bb3880149a12ae748fd6035be26","LICENSE":"ab25eee08e7b6d209398f44e6f4a6f17c9446cd54b99fd64e041edadc96b6f9b","README.md":"e49ab3160761c89f9e67ace6b9689b74945313975a98d3f0ec3b81971c9c5c7f","build.rs":"3bad731d51fd3a32b235ebd437bc9a2788b35bcae4779237cd26696e13840bcb","src/body.rs":"ecb48d340535bad20c7cca4c7114e8689ac43173480a208c8216da1236b36b9e","src/error.rs":"ac4b458b73677b8cd07c1cf71030db81963e4cabf417d80c678ea315a7b290cd","src/ext_traits/mod.rs":"d9ccd994037e44dff41865e4c66b3b110e09c380b54c2b74e3b6022c2573d719","src/ext_traits/request.rs":"15fa03f0eccab57c10276a35123468f4c59515f35c1a16699e33e90128293eb9","src/ext_traits/request_parts.rs":"7640477fd876e4a8d5f823a20f710e42556b7d268dfae1d532306ac113e72dec","src/extract/default_body_limit.rs":"3c5374f09af134ba455bed7b6580b26d3dfe15bb70b8900c52718ddf8b1448d5","src/extract/from_ref.rs":"4333d38c18da68a2e0134630ec3854877918a68a545da3853b882028f415a3d1","src/extract/mod.rs":"bf3cb3510f3fc21201ea1975d111efb5267a969f3a631a70a6f84b98
bb4163e4","src/extract/rejection.rs":"1a91b652bb736e2de84701046e9454219879d07496dc1b572ef091d349c99ae9","src/extract/request_parts.rs":"1d5ce590779070b68f57eb518d988504e1036ff68ab468bda50004a9ba20575f","src/extract/tuple.rs":"ae6a836e80d3a6d35d55bcf25d6c515ad6b55c9fbdae6931e898235328ca3810","src/lib.rs":"97834d133382d48c83ade32d157527dbff300c211eb67c1be270e6f2f7c0ee68","src/macros.rs":"41b86d1333aaac32c6d97fb41fb61719a33ca263a4b630aa5c46b336ad505edd","src/response/append_headers.rs":"4fac6f55e9fb93f16e2e549d586aa4d698a208a27578ee2e708bc17513708e06","src/response/into_response.rs":"94cf93864ea4640e2207bc41b1a0b024837998e01767e4a762e511ffd174d296","src/response/into_response_parts.rs":"1f4165f54447bf77651fd5901e8c29670f77874e5e110476aa259057fdbd3e45","src/response/mod.rs":"d4f89d09d3f4f02fe0c2a80d104511f58f99a7baa1eeb24207cd50b56c2526c8"},"package":"a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3"} \ No newline at end of file diff --git a/.cargo-vendor/axum-core/CHANGELOG.md b/.cargo-vendor/axum-core/CHANGELOG.md index f17c2250ae..ef0d1a6793 100644 --- a/.cargo-vendor/axum-core/CHANGELOG.md +++ b/.cargo-vendor/axum-core/CHANGELOG.md @@ -9,6 +9,41 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - None. +# 0.4.3 (13. January, 2024) + +- **added:** Implement `IntoResponseParts` for `()` ([#2471]) + +[#2471]: https://github.com/tokio-rs/axum/pull/2471 + +# 0.4.2 (29. December, 2023) + +- **added:** `Body` implements `From<()>` now ([#2411]) + +[#2411]: https://github.com/tokio-rs/axum/pull/2411 + +# 0.4.1 (03. December, 2023) + +- Fix from_stream doc link to `Stream` in docs ([#2391]) + +[#2391]: https://github.com/tokio-rs/axum/pull/2391 + +# 0.4.0 (27. 
November, 2023) + +- **added:** Implement `IntoResponse` for `(R,) where R: IntoResponse` ([#2143]) +- **fixed:** Fix broken docs links ([#2164]) +- **fixed:** Clearly document applying `DefaultBodyLimit` to individual routes ([#2157]) +- **breaking:** The following types/traits are no longer generic over the request body + (i.e. the `B` type param has been removed) ([#1751] and [#1789]): + - `FromRequestParts` + - `FromRequest` + - `RequestExt` +- **breaking:** axum no longer re-exports `hyper::Body` as that type is removed + in hyper 1.0. Instead axum has its own body type at `axum_core::body::Body` ([#1751]) + +[#2143]: https://github.com/tokio-rs/axum/pull/2143 +[#2164]: https://github.com/tokio-rs/axum/pull/2164 +[#2157]: https://github.com/tokio-rs/axum/pull/2157 + # 0.3.4 (11. April, 2023) - Changes to private APIs. diff --git a/.cargo-vendor/axum-core/Cargo.toml b/.cargo-vendor/axum-core/Cargo.toml index 4d45d91bb9..aa1fcd2fdd 100644 --- a/.cargo-vendor/axum-core/Cargo.toml +++ b/.cargo-vendor/axum-core/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" rust-version = "1.56" name = "axum-core" -version = "0.3.4" +version = "0.4.3" description = "Core types and traits for axum" homepage = "https://github.com/tokio-rs/axum" readme = "README.md" @@ -33,10 +33,17 @@ repository = "https://github.com/tokio-rs/axum" [package.metadata.cargo-public-api-crates] allowed = [ "futures_core", - "http", + "tower_layer", "bytes", + "http", "http_body", - "tower_layer", +] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", ] [dependencies.async-trait] @@ -51,16 +58,25 @@ features = ["alloc"] default-features = false [dependencies.http] -version = "0.2.7" +version = "1.0.0" [dependencies.http-body] -version = "0.4.5" +version = "1.0.0" + +[dependencies.http-body-util] +version = "0.1.0" [dependencies.mime] version = "0.3.16" +[dependencies.pin-project-lite] +version = "0.2.7" + +[dependencies.sync_wrapper] +version = 
"0.1.1" + [dependencies.tower-http] -version = "0.4" +version = "0.5.0" features = ["limit"] optional = true @@ -76,8 +92,7 @@ optional = true default-features = false [dev-dependencies.axum] -version = "0.6.0" -features = ["headers"] +version = "0.7.2" [dev-dependencies.futures-util] version = "0.3" @@ -85,14 +100,14 @@ features = ["alloc"] default-features = false [dev-dependencies.hyper] -version = "0.14.24" +version = "1.0.0" [dev-dependencies.tokio] version = "1.25.0" features = ["macros"] [dev-dependencies.tower-http] -version = "0.4" +version = "0.5.0" features = ["limit"] [build-dependencies.rustversion] diff --git a/.cargo-vendor/axum-core/README.md b/.cargo-vendor/axum-core/README.md index bd5efbeb01..01ff4e5105 100644 --- a/.cargo-vendor/axum-core/README.md +++ b/.cargo-vendor/axum-core/README.md @@ -23,7 +23,7 @@ with your question. ## Contributing -:balloon: Thanks for your help improving the project! We are so happy to have +🎈 Thanks for your help improving the project! We are so happy to have you! We have a [contributing guide][contributing] to help you get involved in the `axum` project. diff --git a/.cargo-vendor/axum-core/src/body.rs b/.cargo-vendor/axum-core/src/body.rs index 9f25408936..3c3f6a10d8 100644 --- a/.cargo-vendor/axum-core/src/body.rs +++ b/.cargo-vendor/axum-core/src/body.rs @@ -2,17 +2,18 @@ use crate::{BoxError, Error}; use bytes::Bytes; -use bytes::{Buf, BufMut}; -use http_body::Body; +use futures_util::stream::Stream; +use futures_util::TryStream; +use http_body::{Body as _, Frame}; +use http_body_util::BodyExt; +use pin_project_lite::pin_project; +use std::pin::Pin; +use std::task::{Context, Poll}; +use sync_wrapper::SyncWrapper; -/// A boxed [`Body`] trait object. -/// -/// This is used in axum as the response body type for applications. It's -/// necessary to unify multiple response bodies types into one. 
-pub type BoxBody = http_body::combinators::UnsyncBoxBody; +type BoxBody = http_body_util::combinators::UnsyncBoxBody; -/// Convert a [`http_body::Body`] into a [`BoxBody`]. -pub fn boxed(body: B) -> BoxBody +fn boxed(body: B) -> BoxBody where B: http_body::Body + Send + 'static, B::Error: Into, @@ -33,56 +34,180 @@ where } } -// copied from hyper under the following license: -// Copyright (c) 2014-2021 Sean McArthur - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. -pub(crate) async fn to_bytes(body: T) -> Result -where - T: Body, -{ - futures_util::pin_mut!(body); +/// The body type used in axum requests and responses. +#[derive(Debug)] +pub struct Body(BoxBody); - // If there's only 1 chunk, we can just return Buf::to_bytes() - let mut first = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(Bytes::new()); - }; +impl Body { + /// Create a new `Body` that wraps another [`http_body::Body`]. 
+ pub fn new(body: B) -> Self + where + B: http_body::Body + Send + 'static, + B::Error: Into, + { + try_downcast(body).unwrap_or_else(|body| Self(boxed(body))) + } - let second = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(first.copy_to_bytes(first.remaining())); + /// Create an empty body. + pub fn empty() -> Self { + Self::new(http_body_util::Empty::new()) + } + + /// Create a new `Body` from a [`Stream`]. + /// + /// [`Stream`]: https://docs.rs/futures-core/latest/futures_core/stream/trait.Stream.html + pub fn from_stream(stream: S) -> Self + where + S: TryStream + Send + 'static, + S::Ok: Into, + S::Error: Into, + { + Self::new(StreamBody { + stream: SyncWrapper::new(stream), + }) + } + + /// Convert the body into a [`Stream`] of data frames. + /// + /// Non-data frames (such as trailers) will be discarded. Use [`http_body_util::BodyStream`] if + /// you need a [`Stream`] of all frame types. + /// + /// [`http_body_util::BodyStream`]: https://docs.rs/http-body-util/latest/http_body_util/struct.BodyStream.html + pub fn into_data_stream(self) -> BodyDataStream { + BodyDataStream { inner: self } + } +} + +impl Default for Body { + fn default() -> Self { + Self::empty() + } +} + +impl From<()> for Body { + fn from(_: ()) -> Self { + Self::empty() + } +} + +macro_rules! body_from_impl { + ($ty:ty) => { + impl From<$ty> for Body { + fn from(buf: $ty) -> Self { + Self::new(http_body_util::Full::from(buf)) + } + } }; +} + +body_from_impl!(&'static [u8]); +body_from_impl!(std::borrow::Cow<'static, [u8]>); +body_from_impl!(Vec); + +body_from_impl!(&'static str); +body_from_impl!(std::borrow::Cow<'static, str>); +body_from_impl!(String); - // With more than 1 buf, we gotta flatten into a Vec first. 
- let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; - let mut vec = Vec::with_capacity(cap); - vec.put(first); - vec.put(second); +body_from_impl!(Bytes); + +impl http_body::Body for Body { + type Data = Bytes; + type Error = Error; + + #[inline] + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Pin::new(&mut self.0).poll_frame(cx) + } - while let Some(buf) = body.data().await { - vec.put(buf?); + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + self.0.size_hint() } - Ok(vec.into()) + #[inline] + fn is_end_stream(&self) -> bool { + self.0.is_end_stream() + } +} + +/// A stream of data frames. +/// +/// Created with [`Body::into_data_stream`]. +#[derive(Debug)] +pub struct BodyDataStream { + inner: Body, +} + +impl Stream for BodyDataStream { + type Item = Result; + + #[inline] + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match futures_util::ready!(Pin::new(&mut self.inner).poll_frame(cx)?) { + Some(frame) => match frame.into_data() { + Ok(data) => return Poll::Ready(Some(Ok(data))), + Err(_frame) => {} + }, + None => return Poll::Ready(None), + } + } + } +} + +impl http_body::Body for BodyDataStream { + type Data = Bytes; + type Error = Error; + + #[inline] + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Pin::new(&mut self.inner).poll_frame(cx) + } + + #[inline] + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} + +pin_project! 
{ + struct StreamBody { + #[pin] + stream: SyncWrapper, + } +} + +impl http_body::Body for StreamBody +where + S: TryStream, + S::Ok: Into, + S::Error: Into, +{ + type Data = Bytes; + type Error = Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let stream = self.project().stream.get_pin_mut(); + match futures_util::ready!(stream.try_poll_next(cx)) { + Some(Ok(chunk)) => Poll::Ready(Some(Ok(Frame::data(chunk.into())))), + Some(Err(err)) => Poll::Ready(Some(Err(Error::new(err)))), + None => Poll::Ready(None), + } + } } #[test] diff --git a/.cargo-vendor/axum-core/src/ext_traits/request.rs b/.cargo-vendor/axum-core/src/ext_traits/request.rs index e49ba9216c..5b7aee783a 100644 --- a/.cargo-vendor/axum-core/src/ext_traits/request.rs +++ b/.cargo-vendor/axum-core/src/ext_traits/request.rs @@ -1,15 +1,14 @@ -use crate::extract::{DefaultBodyLimitKind, FromRequest, FromRequestParts}; +use crate::body::Body; +use crate::extract::{DefaultBodyLimitKind, FromRequest, FromRequestParts, Request}; use futures_util::future::BoxFuture; -use http::Request; -use http_body::Limited; mod sealed { - pub trait Sealed {} - impl Sealed for http::Request {} + pub trait Sealed {} + impl Sealed for http::Request {} } /// Extension trait that adds additional methods to [`Request`]. -pub trait RequestExt: sealed::Sealed + Sized { +pub trait RequestExt: sealed::Sealed + Sized { /// Apply an extractor to this `Request`. /// /// This is just a convenience for `E::from_request(req, &())`. 
@@ -22,8 +21,9 @@ pub trait RequestExt: sealed::Sealed + Sized { /// ``` /// use axum::{ /// async_trait, - /// extract::FromRequest, - /// http::{header::CONTENT_TYPE, Request, StatusCode}, + /// extract::{Request, FromRequest}, + /// body::Body, + /// http::{header::CONTENT_TYPE, StatusCode}, /// response::{IntoResponse, Response}, /// Form, Json, RequestExt, /// }; @@ -31,17 +31,16 @@ pub trait RequestExt: sealed::Sealed + Sized { /// struct FormOrJson(T); /// /// #[async_trait] - /// impl FromRequest for FormOrJson + /// impl FromRequest for FormOrJson /// where - /// Json: FromRequest<(), B>, - /// Form: FromRequest<(), B>, + /// Json: FromRequest<()>, + /// Form: FromRequest<()>, /// T: 'static, - /// B: Send + 'static, /// S: Send + Sync, /// { /// type Rejection = Response; /// - /// async fn from_request(req: Request, _state: &S) -> Result { + /// async fn from_request(req: Request, _state: &S) -> Result { /// let content_type = req /// .headers() /// .get(CONTENT_TYPE) @@ -70,7 +69,7 @@ pub trait RequestExt: sealed::Sealed + Sized { /// ``` fn extract(self) -> BoxFuture<'static, Result> where - E: FromRequest<(), B, M> + 'static, + E: FromRequest<(), M> + 'static, M: 'static; /// Apply an extractor that requires some state to this `Request`. 
@@ -85,8 +84,8 @@ pub trait RequestExt: sealed::Sealed + Sized { /// ``` /// use axum::{ /// async_trait, - /// extract::{FromRef, FromRequest}, - /// http::Request, + /// body::Body, + /// extract::{Request, FromRef, FromRequest}, /// RequestExt, /// }; /// @@ -95,15 +94,14 @@ pub trait RequestExt: sealed::Sealed + Sized { /// } /// /// #[async_trait] - /// impl FromRequest for MyExtractor + /// impl FromRequest for MyExtractor /// where /// String: FromRef, /// S: Send + Sync, - /// B: Send + 'static, /// { /// type Rejection = std::convert::Infallible; /// - /// async fn from_request(req: Request, state: &S) -> Result { + /// async fn from_request(req: Request, state: &S) -> Result { /// let requires_state = req.extract_with_state::(state).await?; /// /// Ok(Self { requires_state }) @@ -114,22 +112,21 @@ pub trait RequestExt: sealed::Sealed + Sized { /// struct RequiresState { /* ... */ } /// /// #[async_trait] - /// impl FromRequest for RequiresState + /// impl FromRequest for RequiresState /// where /// String: FromRef, /// S: Send + Sync, - /// B: Send + 'static, /// { /// // ... /// # type Rejection = std::convert::Infallible; - /// # async fn from_request(req: Request, _state: &S) -> Result { + /// # async fn from_request(req: Request, _state: &S) -> Result { /// # todo!() /// # } /// } /// ``` fn extract_with_state(self, state: &S) -> BoxFuture<'_, Result> where - E: FromRequest + 'static, + E: FromRequest + 'static, S: Send + Sync; /// Apply a parts extractor to this `Request`. 
@@ -141,32 +138,36 @@ pub trait RequestExt: sealed::Sealed + Sized { /// ``` /// use axum::{ /// async_trait, - /// extract::FromRequest, - /// headers::{authorization::Bearer, Authorization}, - /// http::Request, + /// extract::{Path, Request, FromRequest}, /// response::{IntoResponse, Response}, - /// Json, RequestExt, TypedHeader, + /// body::Body, + /// Json, RequestExt, + /// }; + /// use axum_extra::{ + /// TypedHeader, + /// headers::{authorization::Bearer, Authorization}, /// }; + /// use std::collections::HashMap; /// /// struct MyExtractor { - /// bearer_token: String, + /// path_params: HashMap, /// payload: T, /// } /// /// #[async_trait] - /// impl FromRequest for MyExtractor + /// impl FromRequest for MyExtractor /// where - /// B: Send + 'static, /// S: Send + Sync, - /// Json: FromRequest<(), B>, + /// Json: FromRequest<()>, /// T: 'static, /// { /// type Rejection = Response; /// - /// async fn from_request(mut req: Request, _state: &S) -> Result { - /// let TypedHeader(auth_header) = req - /// .extract_parts::>>() + /// async fn from_request(mut req: Request, _state: &S) -> Result { + /// let path_params = req + /// .extract_parts::>() /// .await + /// .map(|Path(path_params)| path_params) /// .map_err(|err| err.into_response())?; /// /// let Json(payload) = req @@ -174,10 +175,7 @@ pub trait RequestExt: sealed::Sealed + Sized { /// .await /// .map_err(|err| err.into_response())?; /// - /// Ok(Self { - /// bearer_token: auth_header.token().to_owned(), - /// payload, - /// }) + /// Ok(Self { path_params, payload }) /// } /// } /// ``` @@ -194,9 +192,10 @@ pub trait RequestExt: sealed::Sealed + Sized { /// ``` /// use axum::{ /// async_trait, - /// extract::{FromRef, FromRequest, FromRequestParts}, - /// http::{request::Parts, Request}, + /// extract::{Request, FromRef, FromRequest, FromRequestParts}, + /// http::request::Parts, /// response::{IntoResponse, Response}, + /// body::Body, /// Json, RequestExt, /// }; /// @@ -206,17 +205,16 @@ pub trait 
RequestExt: sealed::Sealed + Sized { /// } /// /// #[async_trait] - /// impl FromRequest for MyExtractor + /// impl FromRequest for MyExtractor /// where /// String: FromRef, - /// Json: FromRequest<(), B>, + /// Json: FromRequest<()>, /// T: 'static, /// S: Send + Sync, - /// B: Send + 'static, /// { /// type Rejection = Response; /// - /// async fn from_request(mut req: Request, state: &S) -> Result { + /// async fn from_request(mut req: Request, state: &S) -> Result { /// let requires_state = req /// .extract_parts_with_state::(state) /// .await @@ -259,22 +257,19 @@ pub trait RequestExt: sealed::Sealed + Sized { /// Apply the [default body limit](crate::extract::DefaultBodyLimit). /// - /// If it is disabled, return the request as-is in `Err`. - fn with_limited_body(self) -> Result>, Request>; + /// If it is disabled, the request is returned as-is. + fn with_limited_body(self) -> Request; - /// Consumes the request, returning the body wrapped in [`Limited`] if a + /// Consumes the request, returning the body wrapped in [`http_body_util::Limited`] if a /// [default limit](crate::extract::DefaultBodyLimit) is in place, or not wrapped if the /// default limit is disabled. 
- fn into_limited_body(self) -> Result, B>; + fn into_limited_body(self) -> Body; } -impl RequestExt for Request -where - B: Send + 'static, -{ +impl RequestExt for Request { fn extract(self) -> BoxFuture<'static, Result> where - E: FromRequest<(), B, M> + 'static, + E: FromRequest<(), M> + 'static, M: 'static, { self.extract_with_state(&()) @@ -282,7 +277,7 @@ where fn extract_with_state(self, state: &S) -> BoxFuture<'_, Result> where - E: FromRequest + 'static, + E: FromRequest + 'static, S: Send + Sync, { E::from_request(self, state) @@ -309,7 +304,7 @@ where *req.uri_mut() = self.uri().clone(); *req.headers_mut() = std::mem::take(self.headers_mut()); *req.extensions_mut() = std::mem::take(self.extensions_mut()); - let (mut parts, _) = req.into_parts(); + let (mut parts, ()) = req.into_parts(); Box::pin(async move { let result = E::from_request_parts(&mut parts, state).await; @@ -324,24 +319,22 @@ where }) } - fn with_limited_body(self) -> Result>, Request> { + fn with_limited_body(self) -> Request { // update docs in `axum-core/src/extract/default_body_limit.rs` and // `axum/src/docs/extract.md` if this changes const DEFAULT_LIMIT: usize = 2_097_152; // 2 mb match self.extensions().get::().copied() { - Some(DefaultBodyLimitKind::Disable) => Err(self), + Some(DefaultBodyLimitKind::Disable) => self, Some(DefaultBodyLimitKind::Limit(limit)) => { - Ok(self.map(|b| http_body::Limited::new(b, limit))) + self.map(|b| Body::new(http_body_util::Limited::new(b, limit))) } - None => Ok(self.map(|b| http_body::Limited::new(b, DEFAULT_LIMIT))), + None => self.map(|b| Body::new(http_body_util::Limited::new(b, DEFAULT_LIMIT))), } } - fn into_limited_body(self) -> Result, B> { - self.with_limited_body() - .map(Request::into_body) - .map_err(Request::into_body) + fn into_limited_body(self) -> Body { + self.with_limited_body().into_body() } } @@ -354,11 +347,10 @@ mod tests { }; use async_trait::async_trait; use http::Method; - use hyper::Body; #[tokio::test] async fn 
extract_without_state() { - let req = Request::new(()); + let req = Request::new(Body::empty()); let method: Method = req.extract().await.unwrap(); @@ -376,7 +368,7 @@ mod tests { #[tokio::test] async fn extract_with_state() { - let req = Request::new(()); + let req = Request::new(Body::empty()); let state = "state".to_owned(); @@ -387,7 +379,10 @@ mod tests { #[tokio::test] async fn extract_parts_without_state() { - let mut req = Request::builder().header("x-foo", "foo").body(()).unwrap(); + let mut req = Request::builder() + .header("x-foo", "foo") + .body(Body::empty()) + .unwrap(); let method: Method = req.extract_parts().await.unwrap(); @@ -397,7 +392,10 @@ mod tests { #[tokio::test] async fn extract_parts_with_state() { - let mut req = Request::builder().header("x-foo", "foo").body(()).unwrap(); + let mut req = Request::builder() + .header("x-foo", "foo") + .body(Body::empty()) + .unwrap(); let state = "state".to_owned(); @@ -417,15 +415,14 @@ mod tests { } #[async_trait] - impl FromRequest for WorksForCustomExtractor + impl FromRequest for WorksForCustomExtractor where S: Send + Sync, - B: Send + 'static, - String: FromRef + FromRequest<(), B>, + String: FromRef + FromRequest<()>, { - type Rejection = >::Rejection; + type Rejection = >::Rejection; - async fn from_request(mut req: Request, state: &S) -> Result { + async fn from_request(mut req: Request, state: &S) -> Result { let RequiresState(from_state) = req.extract_parts_with_state(state).await.unwrap(); let method = req.extract_parts().await.unwrap(); let body = req.extract().await?; diff --git a/.cargo-vendor/axum-core/src/ext_traits/request_parts.rs b/.cargo-vendor/axum-core/src/ext_traits/request_parts.rs index 07a7dbff30..e7063f4d8b 100644 --- a/.cargo-vendor/axum-core/src/ext_traits/request_parts.rs +++ b/.cargo-vendor/axum-core/src/ext_traits/request_parts.rs @@ -17,9 +17,8 @@ pub trait RequestPartsExt: sealed::Sealed + Sized { /// /// ``` /// use axum::{ - /// extract::{Query, TypedHeader, 
FromRequestParts}, + /// extract::{Query, Path, FromRequestParts}, /// response::{Response, IntoResponse}, - /// headers::UserAgent, /// http::request::Parts, /// RequestPartsExt, /// async_trait, @@ -27,7 +26,7 @@ pub trait RequestPartsExt: sealed::Sealed + Sized { /// use std::collections::HashMap; /// /// struct MyExtractor { - /// user_agent: String, + /// path_params: HashMap, /// query_params: HashMap, /// } /// @@ -39,10 +38,10 @@ pub trait RequestPartsExt: sealed::Sealed + Sized { /// type Rejection = Response; /// /// async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - /// let user_agent = parts - /// .extract::>() + /// let path_params = parts + /// .extract::>>() /// .await - /// .map(|user_agent| user_agent.as_str().to_owned()) + /// .map(|Path(path_params)| path_params) /// .map_err(|err| err.into_response())?; /// /// let query_params = parts @@ -51,7 +50,7 @@ pub trait RequestPartsExt: sealed::Sealed + Sized { /// .map(|Query(params)| params) /// .map_err(|err| err.into_response())?; /// - /// Ok(MyExtractor { user_agent, query_params }) + /// Ok(MyExtractor { path_params, query_params }) /// } /// } /// ``` diff --git a/.cargo-vendor/axum-core/src/extract/default_body_limit.rs b/.cargo-vendor/axum-core/src/extract/default_body_limit.rs index 7b37f1edab..2ec82febc6 100644 --- a/.cargo-vendor/axum-core/src/extract/default_body_limit.rs +++ b/.cargo-vendor/axum-core/src/extract/default_body_limit.rs @@ -8,7 +8,7 @@ use tower_layer::Layer; /// /// This middleware provides ways to configure that. /// -/// Note that if an extractor consumes the body directly with [`Body::data`], or similar, the +/// Note that if an extractor consumes the body directly with [`Body::poll_frame`], or similar, the /// default limit is _not_ applied. 
/// /// # Difference between `DefaultBodyLimit` and [`RequestBodyLimit`] @@ -22,52 +22,52 @@ use tower_layer::Layer; /// [`RequestBodyLimit`] is applied globally to all requests, regardless of which extractors are /// used or how the body is consumed. /// -/// `DefaultBodyLimit` is also easier to integrate into an existing setup since it doesn't change -/// the request body type: +/// # Example /// /// ``` /// use axum::{ /// Router, /// routing::post, /// body::Body, -/// extract::{DefaultBodyLimit, RawBody}, -/// http::Request, +/// extract::{Request, DefaultBodyLimit}, /// }; /// /// let app = Router::new() -/// .route( -/// "/", -/// // even with `DefaultBodyLimit` the request body is still just `Body` -/// post(|request: Request| async {}), -/// ) +/// .route("/", post(|request: Request| async {})) +/// // change the default limit /// .layer(DefaultBodyLimit::max(1024)); -/// # let _: Router<(), _> = app; +/// # let _: Router = app; /// ``` /// +/// In general using `DefaultBodyLimit` is recommended but if you need to use third party +/// extractors and want to make sure a limit is also applied there then [`RequestBodyLimit`] should +/// be used. 
+/// +/// # Different limits for different routes +/// +/// `DefaultBodyLimit` can also be selectively applied to have different limits for different +/// routes: +/// /// ``` -/// use axum::{Router, routing::post, body::Body, extract::RawBody, http::Request}; -/// use tower_http::limit::RequestBodyLimitLayer; -/// use http_body::Limited; +/// use axum::{ +/// Router, +/// routing::post, +/// body::Body, +/// extract::{Request, DefaultBodyLimit}, +/// }; /// /// let app = Router::new() -/// .route( -/// "/", -/// // `RequestBodyLimitLayer` changes the request body type to `Limited` -/// // extracting a different body type wont work -/// post(|request: Request>| async {}), -/// ) -/// .layer(RequestBodyLimitLayer::new(1024)); -/// # let _: Router<(), _> = app; +/// // this route has a different limit +/// .route("/", post(|request: Request| async {}).layer(DefaultBodyLimit::max(1024))) +/// // this route still has the default limit +/// .route("/foo", post(|request: Request| async {})); +/// # let _: Router = app; /// ``` /// -/// In general using `DefaultBodyLimit` is recommended but if you need to use third party -/// extractors and want to sure a limit is also applied there then [`RequestBodyLimit`] should be -/// used. 
-/// -/// [`Body::data`]: http_body::Body::data +/// [`Body::poll_frame`]: http_body::Body::poll_frame /// [`Bytes`]: bytes::Bytes -/// [`Json`]: https://docs.rs/axum/0.6.0/axum/struct.Json.html -/// [`Form`]: https://docs.rs/axum/0.6.0/axum/struct.Form.html +/// [`Json`]: https://docs.rs/axum/0.7/axum/struct.Json.html +/// [`Form`]: https://docs.rs/axum/0.7/axum/struct.Form.html /// [`FromRequest`]: crate::extract::FromRequest /// [`RequestBodyLimit`]: tower_http::limit::RequestBodyLimit /// [`RequestExt::with_limited_body`]: crate::RequestExt::with_limited_body @@ -103,9 +103,9 @@ impl DefaultBodyLimit { /// extract::DefaultBodyLimit, /// }; /// use tower_http::limit::RequestBodyLimitLayer; - /// use http_body::Limited; + /// use http_body_util::Limited; /// - /// let app: Router<(), Limited> = Router::new() + /// let app: Router<()> = Router::new() /// .route("/", get(|body: Bytes| async {})) /// // Disable the default limit /// .layer(DefaultBodyLimit::disable()) @@ -114,8 +114,8 @@ impl DefaultBodyLimit { /// ``` /// /// [`Bytes`]: bytes::Bytes - /// [`Json`]: https://docs.rs/axum/0.6.0/axum/struct.Json.html - /// [`Form`]: https://docs.rs/axum/0.6.0/axum/struct.Form.html + /// [`Json`]: https://docs.rs/axum/0.7/axum/struct.Json.html + /// [`Form`]: https://docs.rs/axum/0.7/axum/struct.Form.html pub fn disable() -> Self { Self { kind: DefaultBodyLimitKind::Disable, @@ -138,17 +138,17 @@ impl DefaultBodyLimit { /// extract::DefaultBodyLimit, /// }; /// use tower_http::limit::RequestBodyLimitLayer; - /// use http_body::Limited; + /// use http_body_util::Limited; /// - /// let app: Router<(), Limited> = Router::new() + /// let app: Router<()> = Router::new() /// .route("/", get(|body: Bytes| async {})) /// // Replace the default of 2MB with 1024 bytes. 
/// .layer(DefaultBodyLimit::max(1024)); /// ``` /// /// [`Bytes::from_request`]: bytes::Bytes - /// [`Json`]: https://docs.rs/axum/0.6.0/axum/struct.Json.html - /// [`Form`]: https://docs.rs/axum/0.6.0/axum/struct.Form.html + /// [`Json`]: https://docs.rs/axum/0.7/axum/struct.Json.html + /// [`Form`]: https://docs.rs/axum/0.7/axum/struct.Form.html pub fn max(limit: usize) -> Self { Self { kind: DefaultBodyLimitKind::Limit(limit), diff --git a/.cargo-vendor/axum-core/src/extract/from_ref.rs b/.cargo-vendor/axum-core/src/extract/from_ref.rs index bdfa7dd07e..c224303ca4 100644 --- a/.cargo-vendor/axum-core/src/extract/from_ref.rs +++ b/.cargo-vendor/axum-core/src/extract/from_ref.rs @@ -7,7 +7,7 @@ /// /// This trait can be derived using `#[derive(FromRef)]`. /// -/// [`State`]: https://docs.rs/axum/0.6/axum/extract/struct.State.html +/// [`State`]: https://docs.rs/axum/0.7/axum/extract/struct.State.html // NOTE: This trait is defined in axum-core, even though it is mainly used with `State` which is // defined in axum. That allows crate authors to use it when implementing extractors. pub trait FromRef { diff --git a/.cargo-vendor/axum-core/src/extract/mod.rs b/.cargo-vendor/axum-core/src/extract/mod.rs index 1113a1ee7a..c8e2d2196f 100644 --- a/.cargo-vendor/axum-core/src/extract/mod.rs +++ b/.cargo-vendor/axum-core/src/extract/mod.rs @@ -2,11 +2,11 @@ //! //! See [`axum::extract`] for more details. //! -//! [`axum::extract`]: https://docs.rs/axum/latest/axum/extract/index.html +//! 
[`axum::extract`]: https://docs.rs/axum/0.7/axum/extract/index.html -use crate::response::IntoResponse; +use crate::{body::Body, response::IntoResponse}; use async_trait::async_trait; -use http::{request::Parts, Request}; +use http::request::Parts; use std::convert::Infallible; pub mod rejection; @@ -19,6 +19,10 @@ mod tuple; pub(crate) use self::default_body_limit::DefaultBodyLimitKind; pub use self::{default_body_limit::DefaultBodyLimit, from_ref::FromRef}; +/// Type alias for [`http::Request`] whose body type defaults to [`Body`], the most common body +/// type used with axum. +pub type Request = http::Request; + mod private { #[derive(Debug, Clone, Copy)] pub enum ViaParts {} @@ -37,12 +41,12 @@ mod private { /// /// See [`axum::extract`] for more general docs about extractors. /// -/// [`axum::extract`]: https://docs.rs/axum/0.6.0/axum/extract/index.html +/// [`axum::extract`]: https://docs.rs/axum/0.7/axum/extract/index.html #[async_trait] #[cfg_attr( nightly_error_messages, - rustc_on_unimplemented( - note = "Function argument is not a valid axum extractor. \nSee `https://docs.rs/axum/latest/axum/extract/index.html` for details", + diagnostic::on_unimplemented( + note = "Function argument is not a valid axum extractor. \nSee `https://docs.rs/axum/0.7/axum/extract/index.html` for details", ) )] pub trait FromRequestParts: Sized { @@ -64,75 +68,32 @@ pub trait FromRequestParts: Sized { /// /// See [`axum::extract`] for more general docs about extractors. /// -/// # What is the `B` type parameter? -/// -/// `FromRequest` is generic over the request body (the `B` in -/// [`http::Request`]). This is to allow `FromRequest` to be usable with any -/// type of request body. This is necessary because some middleware change the -/// request body, for example to add timeouts. 
-/// -/// If you're writing your own `FromRequest` that wont be used outside your -/// application, and not using any middleware that changes the request body, you -/// can most likely use `axum::body::Body`. -/// -/// If you're writing a library that's intended for others to use, it's recommended -/// to keep the generic type parameter: -/// -/// ```rust -/// use axum::{ -/// async_trait, -/// extract::FromRequest, -/// http::{self, Request}, -/// }; -/// -/// struct MyExtractor; -/// -/// #[async_trait] -/// impl FromRequest for MyExtractor -/// where -/// // these bounds are required by `async_trait` -/// B: Send + 'static, -/// S: Send + Sync, -/// { -/// type Rejection = http::StatusCode; -/// -/// async fn from_request(req: Request, state: &S) -> Result { -/// // ... -/// # unimplemented!() -/// } -/// } -/// ``` -/// -/// This ensures your extractor is as flexible as possible. -/// -/// [`http::Request`]: http::Request -/// [`axum::extract`]: https://docs.rs/axum/0.6.0/axum/extract/index.html +/// [`axum::extract`]: https://docs.rs/axum/0.7/axum/extract/index.html #[async_trait] #[cfg_attr( nightly_error_messages, - rustc_on_unimplemented( - note = "Function argument is not a valid axum extractor. \nSee `https://docs.rs/axum/latest/axum/extract/index.html` for details", + diagnostic::on_unimplemented( + note = "Function argument is not a valid axum extractor. \nSee `https://docs.rs/axum/0.7/axum/extract/index.html` for details", ) )] -pub trait FromRequest: Sized { +pub trait FromRequest: Sized { /// If the extractor fails it'll use this "rejection" type. A rejection is /// a kind of error that can be converted into a response. type Rejection: IntoResponse; /// Perform the extraction. 
- async fn from_request(req: Request, state: &S) -> Result; + async fn from_request(req: Request, state: &S) -> Result; } #[async_trait] -impl FromRequest for T +impl FromRequest for T where - B: Send + 'static, S: Send + Sync, T: FromRequestParts, { type Rejection = >::Rejection; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { let (mut parts, _) = req.into_parts(); Self::from_request_parts(&mut parts, state).await } @@ -155,15 +116,14 @@ where } #[async_trait] -impl FromRequest for Option +impl FromRequest for Option where - T: FromRequest, - B: Send + 'static, + T: FromRequest, S: Send + Sync, { type Rejection = Infallible; - async fn from_request(req: Request, state: &S) -> Result, Self::Rejection> { + async fn from_request(req: Request, state: &S) -> Result, Self::Rejection> { Ok(T::from_request(req, state).await.ok()) } } @@ -182,15 +142,14 @@ where } #[async_trait] -impl FromRequest for Result +impl FromRequest for Result where - T: FromRequest, - B: Send + 'static, + T: FromRequest, S: Send + Sync, { type Rejection = Infallible; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { Ok(T::from_request(req, state).await) } } diff --git a/.cargo-vendor/axum-core/src/extract/rejection.rs b/.cargo-vendor/axum-core/src/extract/rejection.rs index 958f3b2170..34b8115bd4 100644 --- a/.cargo-vendor/axum-core/src/extract/rejection.rs +++ b/.cargo-vendor/axum-core/src/extract/rejection.rs @@ -3,7 +3,7 @@ use crate::__composite_rejection as composite_rejection; use crate::__define_rejection as define_rejection; -use crate::BoxError; +use crate::{BoxError, Error}; composite_rejection! { /// Rejection type for extractors that buffer the request body. 
Used if the @@ -19,7 +19,18 @@ impl FailedToBufferBody { where E: Into, { - match err.into().downcast::() { + // two layers of boxes here because `with_limited_body` + // wraps the `http_body_util::Limited` in a `axum_core::Body` + // which also wraps the error type + let box_error = match err.into().downcast::() { + Ok(err) => err.into_inner(), + Err(err) => err, + }; + let box_error = match box_error.downcast::() { + Ok(err) => err.into_inner(), + Err(err) => err, + }; + match box_error.downcast::() { Ok(err) => Self::LengthLimitError(LengthLimitError::from_err(err)), Err(err) => Self::UnknownBodyError(UnknownBodyError::from_err(err)), } @@ -32,7 +43,7 @@ define_rejection! { /// Encountered some other error when buffering the body. /// /// This can _only_ happen when you're using [`tower_http::limit::RequestBodyLimitLayer`] or - /// otherwise wrapping request bodies in [`http_body::Limited`]. + /// otherwise wrapping request bodies in [`http_body_util::Limited`]. pub struct LengthLimitError(Error); } diff --git a/.cargo-vendor/axum-core/src/extract/request_parts.rs b/.cargo-vendor/axum-core/src/extract/request_parts.rs index 05d7d7277b..73f54db793 100644 --- a/.cargo-vendor/axum-core/src/extract/request_parts.rs +++ b/.cargo-vendor/axum-core/src/extract/request_parts.rs @@ -1,19 +1,19 @@ -use super::{rejection::*, FromRequest, FromRequestParts}; -use crate::{BoxError, RequestExt}; +use super::{rejection::*, FromRequest, FromRequestParts, Request}; +use crate::{body::Body, RequestExt}; use async_trait::async_trait; use bytes::Bytes; -use http::{request::Parts, HeaderMap, Method, Request, Uri, Version}; +use http::{request::Parts, Extensions, HeaderMap, Method, Uri, Version}; +use http_body_util::BodyExt; use std::convert::Infallible; #[async_trait] -impl FromRequest for Request +impl FromRequest for Request where - B: Send, S: Send + Sync, { type Rejection = Infallible; - async fn from_request(req: Request, _: &S) -> Result { + async fn from_request(req: Request, 
_: &S) -> Result { Ok(req) } } @@ -58,7 +58,7 @@ where /// /// Prefer using [`TypedHeader`] to extract only the headers you need. /// -/// [`TypedHeader`]: https://docs.rs/axum/latest/axum/extract/struct.TypedHeader.html +/// [`TypedHeader`]: https://docs.rs/axum/0.7/axum/extract/struct.TypedHeader.html #[async_trait] impl FromRequestParts for HeaderMap where @@ -72,40 +72,32 @@ where } #[async_trait] -impl FromRequest for Bytes +impl FromRequest for Bytes where - B: http_body::Body + Send + 'static, - B::Data: Send, - B::Error: Into, S: Send + Sync, { type Rejection = BytesRejection; - async fn from_request(req: Request, _: &S) -> Result { - let bytes = match req.into_limited_body() { - Ok(limited_body) => crate::body::to_bytes(limited_body) - .await - .map_err(FailedToBufferBody::from_err)?, - Err(unlimited_body) => crate::body::to_bytes(unlimited_body) - .await - .map_err(FailedToBufferBody::from_err)?, - }; + async fn from_request(req: Request, _: &S) -> Result { + let bytes = req + .into_limited_body() + .collect() + .await + .map_err(FailedToBufferBody::from_err)? 
+ .to_bytes(); Ok(bytes) } } #[async_trait] -impl FromRequest for String +impl FromRequest for String where - B: http_body::Body + Send + 'static, - B::Data: Send, - B::Error: Into, S: Send + Sync, { type Rejection = StringRejection; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { let bytes = Bytes::from_request(req, state) .await .map_err(|err| match err { @@ -123,14 +115,37 @@ where } #[async_trait] -impl FromRequest for Parts +impl FromRequestParts for Parts +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + Ok(parts.clone()) + } +} + +#[async_trait] +impl FromRequestParts for Extensions +where + S: Send + Sync, +{ + type Rejection = Infallible; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + Ok(parts.extensions.clone()) + } +} + +#[async_trait] +impl FromRequest for Body where - B: Send + 'static, S: Send + Sync, { type Rejection = Infallible; - async fn from_request(req: Request, _: &S) -> Result { - Ok(req.into_parts().0) + async fn from_request(req: Request, _: &S) -> Result { + Ok(req.into_body()) } } diff --git a/.cargo-vendor/axum-core/src/extract/tuple.rs b/.cargo-vendor/axum-core/src/extract/tuple.rs index 728135b2a0..021b9616df 100644 --- a/.cargo-vendor/axum-core/src/extract/tuple.rs +++ b/.cargo-vendor/axum-core/src/extract/tuple.rs @@ -1,7 +1,7 @@ -use super::{FromRequest, FromRequestParts}; +use super::{FromRequest, FromRequestParts, Request}; use crate::response::{IntoResponse, Response}; use async_trait::async_trait; -use http::request::{Parts, Request}; +use http::request::Parts; use std::convert::Infallible; #[async_trait] @@ -45,19 +45,18 @@ macro_rules! impl_from_request { } // This impl must not be generic over M, otherwise it would conflict with the blanket - // implementation of `FromRequest` for `T: FromRequestParts`. 
+ // implementation of `FromRequest` for `T: FromRequestParts`. #[async_trait] #[allow(non_snake_case, unused_mut, unused_variables)] - impl FromRequest for ($($ty,)* $last,) + impl FromRequest for ($($ty,)* $last,) where $( $ty: FromRequestParts + Send, )* - $last: FromRequest + Send, - B: Send + 'static, + $last: FromRequest + Send, S: Send + Sync, { type Rejection = Response; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { let (mut parts, body) = req.into_parts(); $( @@ -85,7 +84,7 @@ mod tests { fn assert_from_request() where - T: FromRequest<(), http_body::Full, M>, + T: FromRequest<(), M>, { } diff --git a/.cargo-vendor/axum-core/src/lib.rs b/.cargo-vendor/axum-core/src/lib.rs index 974e5e18d4..a4dd6cd969 100644 --- a/.cargo-vendor/axum-core/src/lib.rs +++ b/.cargo-vendor/axum-core/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(nightly_error_messages, feature(rustc_attrs))] +#![cfg_attr(nightly_error_messages, feature(diagnostic_namespace))] //! Core types and traits for [`axum`]. //! //! Libraries authors that want to provide [`FromRequest`] or [`IntoResponse`] implementations @@ -44,10 +44,11 @@ missing_debug_implementations, missing_docs )] -#![deny(unreachable_pub, private_in_public)] +#![deny(unreachable_pub)] #![allow(elided_lifetimes_in_paths, clippy::type_complexity)] #![forbid(unsafe_code)] #![cfg_attr(test, allow(clippy::float_cmp))] +#![cfg_attr(not(test), warn(clippy::print_stdout, clippy::dbg_macro))] #[macro_use] pub(crate) mod macros; diff --git a/.cargo-vendor/axum-core/src/macros.rs b/.cargo-vendor/axum-core/src/macros.rs index 10365e1556..3fa61576be 100644 --- a/.cargo-vendor/axum-core/src/macros.rs +++ b/.cargo-vendor/axum-core/src/macros.rs @@ -193,7 +193,7 @@ macro_rules! 
__composite_rejection { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { $( - Self::$variant(inner) => write!(f, "{}", inner), + Self::$variant(inner) => write!(f, "{inner}"), )+ } } @@ -203,7 +203,7 @@ macro_rules! __composite_rejection { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { $( - Self::$variant(inner) => Some(inner), + Self::$variant(inner) => inner.source(), )+ } } @@ -294,3 +294,43 @@ macro_rules! __impl_deref { } }; } + +#[cfg(test)] +mod composite_rejection_tests { + use self::defs::*; + use crate::Error; + use std::error::Error as _; + + #[allow(dead_code, unreachable_pub)] + mod defs { + use crate::{__composite_rejection, __define_rejection}; + + __define_rejection! { + #[status = BAD_REQUEST] + #[body = "error message 1"] + pub struct Inner1; + } + __define_rejection! { + #[status = BAD_REQUEST] + #[body = "error message 2"] + pub struct Inner2(Error); + } + __composite_rejection! { + pub enum Outer { Inner1, Inner2 } + } + } + + /// The implementation of `.source()` on `Outer` should defer straight to the implementation + /// on its inner type instead of returning the inner type itself, because the `Display` + /// implementation on `Outer` already forwards to the inner type and so it would result in two + /// errors in the chain `Display`ing the same thing. 
+ #[test] + fn source_gives_inner_source() { + let rejection = Outer::Inner1(Inner1); + assert!(rejection.source().is_none()); + + let msg = "hello world"; + let rejection = Outer::Inner2(Inner2(Error::new(msg))); + assert_eq!(rejection.source().unwrap().to_string(), msg); + } +} diff --git a/.cargo-vendor/axum-core/src/response/into_response.rs b/.cargo-vendor/axum-core/src/response/into_response.rs index f19974cfb7..15608b14ca 100644 --- a/.cargo-vendor/axum-core/src/response/into_response.rs +++ b/.cargo-vendor/axum-core/src/response/into_response.rs @@ -1,14 +1,11 @@ use super::{IntoResponseParts, Response, ResponseParts}; -use crate::{body, BoxError}; +use crate::{body::Body, BoxError}; use bytes::{buf::Chain, Buf, Bytes, BytesMut}; use http::{ header::{self, HeaderMap, HeaderName, HeaderValue}, Extensions, StatusCode, }; -use http_body::{ - combinators::{MapData, MapErr}, - Empty, Full, SizeHint, -}; +use http_body::{Frame, SizeHint}; use std::{ borrow::Cow, convert::Infallible, @@ -61,9 +58,7 @@ use std::{ /// async fn handler() -> Result<(), MyError> { /// Err(MyError::SomethingWentWrong) /// } -/// # async { -/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// Or if you have a custom body type you'll also need to implement @@ -74,11 +69,12 @@ use std::{ /// body, /// routing::get, /// response::{IntoResponse, Response}, +/// body::Body, /// Router, /// }; -/// use http_body::Body; /// use http::HeaderMap; /// use bytes::Bytes; +/// use http_body::Frame; /// use std::{ /// convert::Infallible, /// task::{Poll, Context}, @@ -89,22 +85,14 @@ use std::{ /// /// // First implement `Body` for `MyBody`. This could for example use /// // some custom streaming protocol. 
-/// impl Body for MyBody { +/// impl http_body::Body for MyBody { /// type Data = Bytes; /// type Error = Infallible; /// -/// fn poll_data( +/// fn poll_frame( /// self: Pin<&mut Self>, -/// cx: &mut Context<'_> -/// ) -> Poll>> { -/// # unimplemented!() -/// // ... -/// } -/// -/// fn poll_trailers( -/// self: Pin<&mut Self>, -/// cx: &mut Context<'_> -/// ) -> Poll, Self::Error>> { +/// cx: &mut Context<'_>, +/// ) -> Poll, Self::Error>>> { /// # unimplemented!() /// // ... /// } @@ -113,15 +101,13 @@ use std::{ /// // Now we can implement `IntoResponse` directly for `MyBody` /// impl IntoResponse for MyBody { /// fn into_response(self) -> Response { -/// Response::new(body::boxed(self)) +/// Response::new(Body::new(self)) /// } /// } /// /// // `MyBody` can now be returned from handlers. /// let app = Router::new().route("/", get(|| async { MyBody })); -/// # async { -/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` pub trait IntoResponse { /// Create a response. 
@@ -138,7 +124,7 @@ impl IntoResponse for StatusCode { impl IntoResponse for () { fn into_response(self) -> Response { - Empty::new().into_response() + Body::empty().into_response() } } @@ -167,65 +153,19 @@ where B::Error: Into, { fn into_response(self) -> Response { - self.map(body::boxed) + self.map(Body::new) } } impl IntoResponse for http::response::Parts { fn into_response(self) -> Response { - Response::from_parts(self, body::boxed(Empty::new())) - } -} - -impl IntoResponse for Full { - fn into_response(self) -> Response { - Response::new(body::boxed(self)) - } -} - -impl IntoResponse for Empty { - fn into_response(self) -> Response { - Response::new(body::boxed(self)) - } -} - -impl IntoResponse for http_body::combinators::BoxBody -where - E: Into + 'static, -{ - fn into_response(self) -> Response { - Response::new(body::boxed(self)) - } -} - -impl IntoResponse for http_body::combinators::UnsyncBoxBody -where - E: Into + 'static, -{ - fn into_response(self) -> Response { - Response::new(body::boxed(self)) - } -} - -impl IntoResponse for MapData -where - B: http_body::Body + Send + 'static, - F: FnMut(B::Data) -> Bytes + Send + 'static, - B::Error: Into, -{ - fn into_response(self) -> Response { - Response::new(body::boxed(self)) + Response::from_parts(self, Body::empty()) } } -impl IntoResponse for MapErr -where - B: http_body::Body + Send + 'static, - F: FnMut(B::Error) -> E + Send + 'static, - E: Into, -{ +impl IntoResponse for Body { fn into_response(self) -> Response { - Response::new(body::boxed(self)) + Response::new(self) } } @@ -241,9 +181,15 @@ impl IntoResponse for String { } } +impl IntoResponse for Box { + fn into_response(self) -> Response { + String::from(self).into_response() + } +} + impl IntoResponse for Cow<'static, str> { fn into_response(self) -> Response { - let mut res = Full::from(self).into_response(); + let mut res = Body::from(self).into_response(); res.headers_mut().insert( header::CONTENT_TYPE, 
HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref()), @@ -254,7 +200,7 @@ impl IntoResponse for Cow<'static, str> { impl IntoResponse for Bytes { fn into_response(self) -> Response { - let mut res = Full::from(self).into_response(); + let mut res = Body::from(self).into_response(); res.headers_mut().insert( header::CONTENT_TYPE, HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()), @@ -276,7 +222,7 @@ where { fn into_response(self) -> Response { let (first, second) = self.into_inner(); - let mut res = Response::new(body::boxed(BytesChainBody { + let mut res = Response::new(Body::new(BytesChainBody { first: Some(first), second: Some(second), })); @@ -301,30 +247,23 @@ where type Data = Bytes; type Error = Infallible; - fn poll_data( + fn poll_frame( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { if let Some(mut buf) = self.first.take() { let bytes = buf.copy_to_bytes(buf.remaining()); - return Poll::Ready(Some(Ok(bytes))); + return Poll::Ready(Some(Ok(Frame::data(bytes)))); } if let Some(mut buf) = self.second.take() { let bytes = buf.copy_to_bytes(buf.remaining()); - return Poll::Ready(Some(Ok(bytes))); + return Poll::Ready(Some(Ok(Frame::data(bytes)))); } Poll::Ready(None) } - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(None)) - } - fn is_end_stream(&self) -> bool { self.first.is_none() && self.second.is_none() } @@ -366,9 +305,15 @@ impl IntoResponse for Vec { } } +impl IntoResponse for Box<[u8]> { + fn into_response(self) -> Response { + Vec::from(self).into_response() + } +} + impl IntoResponse for Cow<'static, [u8]> { fn into_response(self) -> Response { - let mut res = Full::from(self).into_response(); + let mut res = Body::from(self).into_response(); res.headers_mut().insert( header::CONTENT_TYPE, HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()), @@ -437,6 +382,16 @@ where } } +impl IntoResponse for (R,) 
+where + R: IntoResponse, +{ + fn into_response(self) -> Response { + let (res,) = self; + res.into_response() + } +} + macro_rules! impl_into_response { ( $($ty:ident),* $(,)? ) => { #[allow(non_snake_case)] diff --git a/.cargo-vendor/axum-core/src/response/into_response_parts.rs b/.cargo-vendor/axum-core/src/response/into_response_parts.rs index 60f0b805e7..4be4fc55c3 100644 --- a/.cargo-vendor/axum-core/src/response/into_response_parts.rs +++ b/.cargo-vendor/axum-core/src/response/into_response_parts.rs @@ -258,3 +258,11 @@ impl IntoResponseParts for Extensions { Ok(res) } } + +impl IntoResponseParts for () { + type Error = Infallible; + + fn into_response_parts(self, res: ResponseParts) -> Result { + Ok(res) + } +} diff --git a/.cargo-vendor/axum-core/src/response/mod.rs b/.cargo-vendor/axum-core/src/response/mod.rs index d66dfec510..6b66c60e71 100644 --- a/.cargo-vendor/axum-core/src/response/mod.rs +++ b/.cargo-vendor/axum-core/src/response/mod.rs @@ -2,9 +2,9 @@ //! //! See [`axum::response`] for more details. //! -//! [`axum::response`]: https://docs.rs/axum/latest/axum/response/index.html +//! [`axum::response`]: https://docs.rs/axum/0.7/axum/response/index.html -use crate::body::BoxBody; +use crate::body::Body; mod append_headers; mod into_response; @@ -16,9 +16,9 @@ pub use self::{ into_response_parts::{IntoResponseParts, ResponseParts, TryIntoHeaderError}, }; -/// Type alias for [`http::Response`] whose body type defaults to [`BoxBody`], the most common body +/// Type alias for [`http::Response`] whose body type defaults to [`Body`], the most common body /// type used with axum. -pub type Response = http::Response; +pub type Response = http::Response; /// An [`IntoResponse`]-based result type that uses [`ErrorResponse`] as the error type. 
/// diff --git a/.cargo-vendor/axum/.cargo-checksum.json b/.cargo-vendor/axum/.cargo-checksum.json index 6664b81fa6..c75747ad1d 100644 --- a/.cargo-vendor/axum/.cargo-checksum.json +++ b/.cargo-vendor/axum/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"80dbfc358bb9cea214f573bebb357878969c7d02e469be5c86b9432495860850","Cargo.toml":"a64509897a25122183449245546d644616ada3f0b3ecc667c43a9fb23ae1df4f","LICENSE":"c14b6ed9d732322af9bae1551f6ca373b3893d7ce6e9d46429fc378478d00dfb","README.md":"30bdfac17f1b03d7de708981458d1ceae159ba89e4927b92c27277d7efb34d18","benches/benches.rs":"7075f086f3150ab1ab61f20c7bd1859ce067b4bec51f2c9a19db880ffff448f9","build.rs":"3bad731d51fd3a32b235ebd437bc9a2788b35bcae4779237cd26696e13840bcb","src/body/mod.rs":"6ff16a4400248a601d68ee976bfdde784027db67ba0e9917432ea8184dd0db03","src/body/stream_body.rs":"7e6bccdcb868692e39597973fbf8589758e146349a1bbebdfe816608a999d294","src/boxed.rs":"6a24163de5a9a386c5a1626493922ea0f9bd36550829d2716733b5ef81828193","src/docs/debugging_handler_type_errors.md":"37c05a2aac5ae2cb9824cda699657c9d0876b7dfa5ef2a5aef5ed224943ab051","src/docs/error_handling.md":"9380e158554d390fa272e64d79c20c000b7125b3b6b618833620573d12140757","src/docs/extract.md":"1fa27266b4709b353abfe3968016472292735b524dd5286d4a94296aa88299e0","src/docs/handlers_intro.md":"44be7d8c2087380d3e512b6724cba41c9637dd7177c9b545d45cda244b6ec861","src/docs/method_routing/fallback.md":"c13a5fe4998bf86917fd1e2caed8090ebedd1e3b8e4d32ae6cc1907384e7ce08","src/docs/method_routing/layer.md":"b8c95e2777123f2aa99cbd7e0f11127f612ca0dab56b4959965f01b2e2433873","src/docs/method_routing/merge.md":"2e39d29f0a829609033213aaf670d67193f11cc1baf7d76c5d7ae9daf7b0020d","src/docs/method_routing/route_layer.md":"35a47515bd7405ceb9cd44cf789dc3a2b4fcb239dda89aa568d2de5c2b2a794a","src/docs/middleware.md":"8fe5565535722c833bc7b41b104e8370494f471ae6d6e139454c28af32d6669f","src/docs/response.md":"9846185ad98b58e4dce723cdd473a3793e7568b31f04790e5e2a17167f811c18","src/docs/routi
ng/fallback.md":"545232927a5070a593492808ee888a4a145a67b7a22cce56848fed02abf0c321","src/docs/routing/into_make_service_with_connect_info.md":"6fd508c749946433d879def8d4d38865c086e601b038690ee95a044d2cb26c2b","src/docs/routing/layer.md":"574b295f64d4571df834ca0140c385755b7f67b334e887fc92f96801df9956c6","src/docs/routing/merge.md":"ad49169d947274bf02fd60bb075040ee2ead934cfc1bc748f0533ef9876ff58a","src/docs/routing/nest.md":"81ebb477536150dba95db8315471ab31af8bec1814e6b5c7e99c82826a92f1ff","src/docs/routing/route.md":"d4389881c31f721ad23ede7287c0ef2c8f2aa7ad505044fac6ce63700f52d018","src/docs/routing/route_layer.md":"4d3d2ed962f218d61d87db22339d26f94453a0501433e483794893d58991f1fa","src/docs/routing/route_service.md":"dcd44e5453475a775cca8ffdb1253c85bbd6a7785e32acdfbd1190285b4366c6","src/docs/routing/with_state.md":"6568e11a4388bba3a45221d1c6a707ebc9d8ac6e3a765b7df5381c76bce563d6","src/error_handling/mod.rs":"cada6274f59087113e40856d9e1ff7ff323ca2b2eaafe37f95eb12ac3b0d081a","src/extension.rs":"719cae16da05818ed770c6f8339db8beda05f5d00f53a72ec08a1a92f977978f","src/extract/connect_info.rs":"fd8fc767544fe0370eefd04fd0b36e2777596ed1bc108e66763cbb70a9fba3f2","src/extract/host.rs":"442742ba452085b6dd54f46f50543f9d8f6971c056851b8956477dc3dc0240cc","src/extract/matched_path.rs":"2620141b0c40eedd18956326f6b7f9c202a6e0e103f6b3aeb0da9a230f6fc105","src/extract/mod.rs":"17472a21a6d00ce6d7896fa0e294ea5e7e346f777948607dcdc312b55ee0d044","src/extract/multipart.rs":"64f10470b701df9b87b3b29c50a03b3c1c773045ba23c9892fe92c95c77231f2","src/extract/path/de.rs":"18687adc341b7d692d354429701847e40e3d3ae251f7e44517e65dede9aacc4e","src/extract/path/mod.rs":"f5ca11744b5bca18d29eb0bba1da3cc7d5d9133ee9c29b8a3d1186242219bf6b","src/extract/query.rs":"89e3371946f784576d3ebfc3317c5114c31859eaf945f52e0c7b8a8b11ffb1cb","src/extract/raw_form.rs":"d2bad0683f9312d79f62b84ae0cb33ff869a2b4beb2a7d99fb8472cf56755e0c","src/extract/raw_query.rs":"a09431e77038b90152d0ea7fdbdfdc0ff890f3b1f2317707136ba6b0d3e8409d","
src/extract/rejection.rs":"5d92312fc8ccee8b3b9eb2408d6b00979ec43a990e5c047a928063ae557e79ba","src/extract/request_parts.rs":"ba5de7430e93f20056f5f25d72d971f30eeee8a94808530abc70f8e7317660de","src/extract/state.rs":"6abacbc54b559cf4a6c08ca965aa0bc192c1cdee49ebb65405fb7e73cdc410de","src/extract/ws.rs":"eca2d282443413661155fd30ac75389e50ceaabf95bc5edc9ad3b15a1bc7171d","src/form.rs":"9ac7535d25d54923d2749e699c7a3b3ba5399625b39373233de35e1b8b9e51bd","src/handler/future.rs":"c9dbf8e313c87437fd83fadd09ae4585eb1b573aabefa420a8eec0afda6560de","src/handler/into_service_state_in_extension.rs":"ea5fd70b1b56b3287416e92814f9f02fbca11dbc4969c355c89ade61cebad6a0","src/handler/mod.rs":"0d30a48a4d5f12d1ca79b14b4b3ee0c767682d489657913856b8fa83200af46c","src/handler/service.rs":"941977b1c66fba5670a8a24b0f755d881988b7cd05bb285001d89cad83ae5e3b","src/json.rs":"5df4e4f3e6de94b0619f7c4c12257275035f51b9d0b8129a7d53ab3d2cd6c901","src/lib.rs":"32b7910b38d6571043deb6dc24b01dea61bdb365f34de7201faafcb7315b5311","src/macros.rs":"6567e7ecaeef04071ced390360f314eb3d8b4d01aae603611039e799009db59e","src/middleware/from_extractor.rs":"ffe078c788ea8142d4b0cc992400008cee0958409db1410fcae0c68fac648a6a","src/middleware/from_fn.rs":"32084741acf5fe99aae5f114d3694b6ff618b1062157fbfd3cb83bf459aca57b","src/middleware/map_request.rs":"6ae78a54bc6090f441ba99eb0e8972b1bbb75ecafa8920c145477e376856e573","src/middleware/map_response.rs":"775d8d932aaa48c6f378ef7282c2c659232ba08a3ae0160fb8582a44a9817094","src/middleware/mod.rs":"5b7fccd72e7dc87e321516f9656995f701efc9eacffc322861d931c4de21629e","src/response/mod.rs":"c6537b8aa8f4b2da441409370934b108551cbe844081b7229cf6b828dff30497","src/response/redirect.rs":"b94d9118e86de3224ad8d198f374a768cb49b3e9ed4cf5b069fbd059bb4dce01","src/response/sse.rs":"3e1892a5dceb9a08ebdda24c029d2458bc80ddbf72cc8113ed613d14c9e08908","src/routing/future.rs":"c0610a9b104f64f02deec5fdf79e148c09e72e6f325b62afc174f42b36f295c5","src/routing/into_make_service.rs":"eb259a904f471078cf4942573a99ab11a3
f9836a85c7d0daf5a92bcb8d69fda6","src/routing/method_filter.rs":"f4eef4a2f3b06b711ffeeee826c3e817e41fc6bf1b3a2bd38117e5d9c7a584fb","src/routing/method_routing.rs":"564b9095988f1de20473132b03b153af04e12f4c44a6998509a2954b26cf293e","src/routing/mod.rs":"c1e8181edd462c8d8911cb249104eccce47cafb71e2772bf74577e0da73eb7b5","src/routing/not_found.rs":"14145bf876006a603a1819d28454a7e8c2d45fdbc23046ea041b4e62cda4d2a4","src/routing/path_router.rs":"64c50cebebf570b7747a330c28c66ac489393451d76a9d616010f174cbec89f3","src/routing/route.rs":"7f8e8e61ac95cf2c9d7bd3b714e37ef71056f2b7162f5ef60026d2cae29d01a5","src/routing/strip_prefix.rs":"437a82ee5bfa3112058032cbf511b4abd11c3fc2ebc38d92f4c54e903ad3a906","src/routing/tests/fallback.rs":"644fa72a277a09a4309fbc1abda7d3d74d48ff60dd33e22a1c23f900020653b8","src/routing/tests/get_to_head.rs":"85659b88e83ecf829efc5293bedc9243eaffdb0dc568b3f79a125e12f0c68b21","src/routing/tests/handle_error.rs":"b56b085f9ee666a3c3ca669971e9c0250f786433e046261e05b3253869103690","src/routing/tests/merge.rs":"76b09c9c3554676624b7ef98b23bc9d0a74494a5b0fb7bdc21d1a13e3a0b7870","src/routing/tests/mod.rs":"dce57b4a04f63b78ad989fb640a37604cfeb6b1f38eee87db07bd94efd2b3892","src/routing/tests/nest.rs":"b84f3e708f2c8c6eef930c40d5bc6655eb652cef6b9b7651d7274e8b3084171c","src/routing/url_params.rs":"c9f1d2ce2044bd61df4a4db704990e6b0b10c9f809e516cf63eb116d2b0cb73b","src/service_ext.rs":"cacb02bae694edd667a176a9ffd5621434997bf1395ee635a407d9d98787265b","src/test_helpers/mod.rs":"288076f58a86276a31e8652f41f6960c446acfbe74169ab3cc05705e7c6edc3e","src/test_helpers/test_client.rs":"0ed24f8a249e728a9d28c93eb767c0bfe0cab9dca2b4252a879c92db2455cbe6","src/test_helpers/tracing_helpers.rs":"d664f080c7827137e82236c9dc94b87f0eb9f1fe0a8dc373ae5f82202a94eff2","src/typed_header.rs":"441ea68bdee820f5f126fed2da9eb9fb7bbfe14bd65395b52ab85dadc72a9486","src/util.rs":"bcc9ffb205e84d5ef265d2703a84e91cfb73fa0e9846d078d38ea78fd04b7131"},"package":"3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b
336dec4d6bf"} \ No newline at end of file +{"files":{"CHANGELOG.md":"1753ae6259044ad3edeb21338d2355cf88e13506ea3406e1b620682f7b1997fd","Cargo.toml":"d4abd2a47a2c45bcea69d1d10aba1888153a3821c10f1c37b43a445c5e3a0bc5","LICENSE":"c14b6ed9d732322af9bae1551f6ca373b3893d7ce6e9d46429fc378478d00dfb","README.md":"b77d5b9d01257fad204dfad1148d30f7ed067f1e6da66a67e2fe98e210550fe1","benches/benches.rs":"8da4398b82fdb2ed6ff33dded1ca96963c914db5f69b533dc6f46c95723c9344","build.rs":"3bad731d51fd3a32b235ebd437bc9a2788b35bcae4779237cd26696e13840bcb","clippy.toml":"34224a9031387ef976118509a33c9b69911f35acad72a5a4761c992617f5cb42","src/body/mod.rs":"83a6632a770e9f3c0ddcdafc26dce4729a1c305f12eef0006c9200f38e774a5a","src/boxed.rs":"694301996740d46ac91f6d253a4bb7b312c7bdcf9641b97ee3843e5d8b6d8ccf","src/docs/debugging_handler_type_errors.md":"a934859b6464575afbf20c07bfd838d5c7c4baad51aeccee29a9b18f44e7cecb","src/docs/error_handling.md":"b4478fd1f0d9df9debc47e747e0b8b871361835f3e1319ab131df3b0d5091aab","src/docs/extract.md":"4b862733af09e9641fd9683fd21977878496cf1d1cf205c01d511b7354afe4cc","src/docs/handlers_intro.md":"44be7d8c2087380d3e512b6724cba41c9637dd7177c9b545d45cda244b6ec861","src/docs/method_routing/fallback.md":"a50eb78881b5dfa0f74536db1caf45ba7013103de4f897142c3fa00faff2f9d2","src/docs/method_routing/layer.md":"a54cbe8b067f675d95eb2b2b482022fe098aefda9d8dd60ad48c01e8517487e8","src/docs/method_routing/merge.md":"b031f00fe984fc705a0d83ec0000d6665e812168ad87bc8ac0ccfe283179486e","src/docs/method_routing/route_layer.md":"21e33b3eb401b1a8a66b53154b562760406c678549a7b2d10c87ae5060592fb0","src/docs/middleware.md":"6f15a1119baee7aad6de9b354dc44bbfe79b57568d1d38732e50d0820ea6e2d0","src/docs/response.md":"d3ff40109548f2c75aa9ba880ab2b5ee3561868b77395a0590937b8fa3bcef4a","src/docs/routing/fallback.md":"35b04f9c4f804d09a6703c5d337ea28999a3b3327bd588c85373e8a472e7113c","src/docs/routing/into_make_service_with_connect_info.md":"147ae56fb4d49ec484c2c89f45c77171b99142721160cfed4e628685be4bd825","
src/docs/routing/layer.md":"574b295f64d4571df834ca0140c385755b7f67b334e887fc92f96801df9956c6","src/docs/routing/merge.md":"3f5d700be04f475f5ed7a8e3846e8131ab952a7ef2db94d3742df7c871871dd9","src/docs/routing/nest.md":"daa9a61e1a2c331bb494b817c756bc9913b500ff91dbb3f24f4ecbd80becff0e","src/docs/routing/route.md":"e1109b34fcfd6a01dd5a55caae3b7b04d71eaf06788bde70b21732ee74385404","src/docs/routing/route_layer.md":"0149e06e76b8f3604edca490c2dde9057a991385b65e0986655ec9537cfc419c","src/docs/routing/route_service.md":"7ae83b922b2ecc29108702c2d4b64199a8b63fbfd4cfd7cfc268b1b124cd34f3","src/docs/routing/with_state.md":"b8fe915e8dd92c90dc47969989e7d26a1215f0739395b4d8959b62f4d7abc019","src/error_handling/mod.rs":"cada6274f59087113e40856d9e1ff7ff323ca2b2eaafe37f95eb12ac3b0d081a","src/extension.rs":"c8ca308a93db4cf1effdc0960ef9496ed08b4c52dd1b522b0814882b64cd0b4f","src/extract/connect_info.rs":"90ee723636a6cd2c90e28210eeb598ce86ba851311581b90fa575271d350e7e3","src/extract/host.rs":"267f828ef165beb96cb51d56d83a29a69771bdc7885f47ee4ccaa4b91f07dc5c","src/extract/matched_path.rs":"769823be5371e33efed1319652760f2f2f1b765b0eb63be5fac0e87fd28a121b","src/extract/mod.rs":"823011dfd523dab18f36e86172325b0c31ec6fa2a997e564151dc646e98580fb","src/extract/multipart.rs":"4552b9725acc8927e61833d5daaa8477c8e2fb1475145c0aec4949640645f962","src/extract/nested_path.rs":"97a5672d1ac90fd269cd7f006ea573de6b1dc16998bc9a848f559e97e552b73c","src/extract/path/de.rs":"18687adc341b7d692d354429701847e40e3d3ae251f7e44517e65dede9aacc4e","src/extract/path/mod.rs":"e829588fd8b4752567e663498969a0a8a0106516e3e5814ca0957221a1f68570","src/extract/query.rs":"0105574038d232da7135d1d5a58fd319d0699aedd13757021b5b89a41da478de","src/extract/raw_form.rs":"b89020858b4262a656fda8772ed057cdb69b3a383bb2e6e461d1db5e33394450","src/extract/raw_query.rs":"bbc12c55f93e4c69c1df7cfbb2a3302d5183ed02d395f2a603d8396545d7895a","src/extract/rejection.rs":"4bdb98fc8f32defa41aeb5f82aec6267c8def9ce295865f0bc98288899f3b247","src/extract/request
_parts.rs":"3f34dc5dc6da0def4a7548874834b6ddbed8ca1f9ed9bd1fed6c3c5962443b2a","src/extract/state.rs":"a7da4d18c6419d058ff4a027098697dea88875c4c612e7373b4b699272aafba5","src/extract/ws.rs":"47456c06a73c546886eac89454c5500a77d2051ef4da4d463f86d6d58be712fc","src/form.rs":"742b884e27e30cd28a1f182efa243246c46b283b64039fe9ee7870247e6ce762","src/handler/future.rs":"eaff9732ce0607e5631860642bfe109d183246e2e4bfdf47ed0917c21417fef2","src/handler/into_service_state_in_extension.rs":"ea5fd70b1b56b3287416e92814f9f02fbca11dbc4969c355c89ade61cebad6a0","src/handler/mod.rs":"3e0984196e74fdb565174ce6cb230c33da941895a9a4d4c55862f9f6fabdde6c","src/handler/service.rs":"0e976d10dc14c1e2bbd8df57a219ff40314808f844db797b0e3b0eb76ca687d4","src/json.rs":"01a6344f1248ae4582be11f5690c9b8c6539b1ebf2a24e9f82dfbe30117b2632","src/lib.rs":"2e003ce78fbcebbb438867ae56ea54e3a4e9d2f42b8ec500d7840227d51d22c4","src/macros.rs":"c7f5d711fb52086f08f9d76e2af1a3bc6f850b10253a64e23d429a074b303bfc","src/middleware/from_extractor.rs":"81f0f25e447f8247984a8de3cac6593c9a8c0974142278fce8312b92ae414ce4","src/middleware/from_fn.rs":"a15fe2cd670721cb99e7760ab1519e5c95f2f45d0df5cefa272a433e4fbf37fc","src/middleware/map_request.rs":"44c757870ec95817d1c9af637ff8a535008b86a7308b923bd77848f92e441493","src/middleware/map_response.rs":"d84b24d43d7bc12ee3f2411c8952ffb16fbd0bdd5f4c4d3300ef89929c42d339","src/middleware/mod.rs":"5b7fccd72e7dc87e321516f9656995f701efc9eacffc322861d931c4de21629e","src/response/mod.rs":"9313361d6ef0b2c9eeb14002b4fcd3c9747178573f6f25c506cff51eed9864a2","src/response/redirect.rs":"66dd26e2575c6e2e13881b4b8da8cdfeb84693da4fdf6f464b88aebd0f20d0aa","src/response/sse.rs":"3e4b11f66f49739e71bafc80845395b69bcfc29680e5ac438c243cb6c6900fa4","src/routing/future.rs":"c0610a9b104f64f02deec5fdf79e148c09e72e6f325b62afc174f42b36f295c5","src/routing/into_make_service.rs":"be40a59846270094b76d6a29cf36afe5f2ec23d4e8779c34eb2a7eea9c327b00","src/routing/method_filter.rs":"0c2a37fe289f7ab15759fb244b8dc41d819fc5ce8940392d6
59995aa89452549","src/routing/method_routing.rs":"070c88adf6493b262f17f35b2a62e0304c79148f53ca7f3801edf821189a4f30","src/routing/mod.rs":"c6e90769daffc8756993c7ee0e69b548fa25bae519b921402ba25d3267e0ba87","src/routing/not_found.rs":"14145bf876006a603a1819d28454a7e8c2d45fdbc23046ea041b4e62cda4d2a4","src/routing/path_router.rs":"94b537153affbfc0db167ca31e211841737c3a3e87e162670b03a03b3859573b","src/routing/route.rs":"26d1e1e13fea301c405f808423e6cafbd314ef6888c2a291426433dc546fc69c","src/routing/strip_prefix.rs":"9e752d9532078826400d71771a7d9ae52fd806959e7a328be3e87d380a9a2f11","src/routing/tests/fallback.rs":"c0a7e052d2eca3fe4196ceaf40993d0496313d69d7ea116ee93034acb1661eda","src/routing/tests/get_to_head.rs":"239896501902ad4b549ab5b88287095ce623b3e092353b00a2d39f2d8e23d0ad","src/routing/tests/handle_error.rs":"2f3533938f2a6cfd0c6eb7a0bac11729df4ed44f829e01b7eb9623f081914a9b","src/routing/tests/merge.rs":"a80924b9a0d9c2a92b8913b01baea5b751f380442a2ecbbb4d9e100d7033de5c","src/routing/tests/mod.rs":"73bd69545fa92d2acfc203e25340c6c3ca2ed35261af3a47eeed7f468e60f8b4","src/routing/tests/nest.rs":"a3dacc6016c856fcbfc90df3a2fce33331034500353339db3a35dbf406cdce5b","src/routing/url_params.rs":"3977ee0b5f7000b36c777104058d6d51db23c6bd6d51a4c574e921c2c1a48369","src/serve.rs":"56dfbcbf7829d97af69d865424a533e973e0af61b8eb2670604282ff3237c6e4","src/service_ext.rs":"e212b899108791b586d442e9c0a294ef908cfa97652b68c8770c782d1efd07cb","src/test_helpers/mod.rs":"c028c67c9bc2065c9c96caadec34ab8e522c34ce36e8f307292796a57f53c9c1","src/test_helpers/test_client.rs":"4d83ae9ba0b55fdda81351e23128933807f1fa4568ed81d6722d62d38028e25d","src/test_helpers/tracing_helpers.rs":"46cdffc651b25913e91a7ef4ededae65128ed93e1d8692e3933bdb1ffddd6ace","src/util.rs":"5aabb324ab4971c5ab5631ad40dc7f1fa6f13bffc307e833019a5c32f08b4ca8"},"package":"3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf"} \ No newline at end of file diff --git a/.cargo-vendor/axum/CHANGELOG.md 
b/.cargo-vendor/axum/CHANGELOG.md index 7bb8bd590b..caf41764b5 100644 --- a/.cargo-vendor/axum/CHANGELOG.md +++ b/.cargo-vendor/axum/CHANGELOG.md @@ -9,6 +9,153 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - None. +# 0.7.5 (24. March, 2024) + +- **fixed:** Fixed layers being cloned when calling `axum::serve` directly with + a `Router` or `MethodRouter` ([#2586]) +- **fixed:** `h2` is no longer pulled as a dependency unless the `http2` feature + is enabled ([#2605]) + +[#2586]: https://github.com/tokio-rs/axum/pull/2586 +[#2605]: https://github.com/tokio-rs/axum/pull/2605 + +# 0.7.4 (13. January, 2024) + +- **fixed:** Fix performance regression present since axum 0.7.0 ([#2483]) +- **fixed:** Improve `debug_handler` on tuple response types ([#2201]) +- **added:** Add `must_use` attribute to `Serve` and `WithGracefulShutdown` ([#2484]) +- **added:** Re-export `axum_core::body::BodyDataStream` from axum + +[#2201]: https://github.com/tokio-rs/axum/pull/2201 +[#2483]: https://github.com/tokio-rs/axum/pull/2483 +[#2201]: https://github.com/tokio-rs/axum/pull/2201 +[#2484]: https://github.com/tokio-rs/axum/pull/2484 + +# 0.7.3 (29. December, 2023) + +- **added:** `Body` implements `From<()>` now ([#2411]) +- **change:** Update version of multer used internally for multipart ([#2433]) +- **change:** Update tokio-tungstenite to 0.21 ([#2435]) +- **added:** Enable `tracing` feature by default ([#2460]) +- **added:** Support graceful shutdown on `serve` ([#2398]) +- **added:** `RouterIntoService` implements `Clone` ([#2456]) + +[#2411]: https://github.com/tokio-rs/axum/pull/2411 +[#2433]: https://github.com/tokio-rs/axum/pull/2433 +[#2435]: https://github.com/tokio-rs/axum/pull/2435 +[#2460]: https://github.com/tokio-rs/axum/pull/2460 +[#2398]: https://github.com/tokio-rs/axum/pull/2398 +[#2456]: https://github.com/tokio-rs/axum/pull/2456 + +# 0.7.2 (03. 
December, 2023) + +- **added:** Add `axum::body::to_bytes` ([#2373]) +- **fixed:** Gracefully handle accept errors in `serve` ([#2400]) + +[#2373]: https://github.com/tokio-rs/axum/pull/2373 +[#2400]: https://github.com/tokio-rs/axum/pull/2400 + +# 0.7.1 (27. November, 2023) + +- **fix**: Fix readme. + +# 0.7.0 (27. November, 2023) + +- **breaking:** Update public dependencies. axum now requires + - [hyper](https://crates.io/crates/hyper) 1.0 + - [http](https://crates.io/crates/http) 1.0 + - [http-body](https://crates.io/crates/http-body) 1.0 +- **breaking:** axum now requires [tower-http](https://crates.io/crates/tower-http) 0.5 +- **breaking:** Remove deprecated `WebSocketUpgrade::max_send_queue` +- **breaking:** The following types/traits are no longer generic over the request body + (i.e. the `B` type param has been removed) ([#1751] and [#1789]): + - `FromRequestParts` + - `FromRequest` + - `HandlerService` + - `HandlerWithoutStateExt` + - `Handler` + - `LayeredFuture` + - `Layered` + - `MethodRouter` + - `Next` + - `RequestExt` + - `RouteFuture` + - `Route` + - `Router` +- **breaking:** axum no longer re-exports `hyper::Body` as that type is removed + in hyper 1.0. Instead axum has its own body type at `axum::body::Body` ([#1751]) +- **breaking:** `extract::BodyStream` has been removed as `body::Body` + implements `Stream` and `FromRequest` directly ([#1751]) +- **breaking:** Change `sse::Event::json_data` to use `axum_core::Error` as its error type ([#1762]) +- **breaking:** Rename `DefaultOnFailedUpdgrade` to `DefaultOnFailedUpgrade` ([#1664]) +- **breaking:** Rename `OnFailedUpdgrade` to `OnFailedUpgrade` ([#1664]) +- **breaking:** `TypedHeader` has been moved to `axum-extra` as `axum_extra::TypedHeader` and requires enabling the `typed-header` feature on `axum-extra`. The `headers` feature has been removed from axum; what it provided under `axum::headers` is now found in `axum_extra::headers` by default. 
([#1850]) +- **breaking:** Removed re-exports of `Empty` and `Full`. Use + `axum::body::Body::empty` and `axum::body::Body::from` respectively ([#1789]) +- **breaking:** The response returned by `IntoResponse::into_response` must use + `axum::body::Body` as the body type. `axum::response::Response` does this + ([#1789]) +- **breaking:** Removed the `BoxBody` type alias and its `box_body` + constructor. Use `axum::body::Body::new` instead ([#1789]) +- **breaking:** Remove `RawBody` extractor. `axum::body::Body` implements `FromRequest` directly ([#1789]) +- **breaking:** The following types from `http-body` no longer implement `IntoResponse`: + - `Full`, use `Body::from` instead + - `Empty`, use `Body::empty` instead + - `BoxBody`, use `Body::new` instead + - `UnsyncBoxBody`, use `Body::new` instead + - `MapData`, use `Body::new` instead + - `MapErr`, use `Body::new` instead +- **added:** Add `axum::extract::Request` type alias where the body is `axum::body::Body` ([#1789]) +- **added:** Add `Router::as_service` and `Router::into_service` to workaround + type inference issues when calling `ServiceExt` methods on a `Router` ([#1835]) +- **breaking:** Removed `axum::Server` as it was removed in hyper 1.0. Instead + use `axum::serve(listener, service)` or hyper/hyper-util for more configuration options ([#1868]) +- **breaking:** Only inherit fallbacks for routers nested with `Router::nest`. + Routers nested with `Router::nest_service` will no longer inherit fallbacks ([#1956]) +- **fixed:** Don't remove the `Sec-WebSocket-Key` header in `WebSocketUpgrade` ([#1972]) +- **added:** Add `axum::extract::Query::try_from_uri` ([#2058]) +- **added:** Implement `IntoResponse` for `Box` and `Box<[u8]>` ([#2035]) +- **breaking:** Simplify `MethodFilter`. 
It no longer uses bitflags ([#2073]) +- **fixed:** Fix bugs around merging routers with nested fallbacks ([#2096]) +- **fixed:** Fix `.source()` of composite rejections ([#2030]) +- **fixed:** Allow unreachable code in `#[debug_handler]` ([#2014]) +- **change:** axum's MSRV is now 1.66 ([#1882]) +- **added:** Implement `IntoResponse` for `(R,) where R: IntoResponse` ([#2143]) +- **changed:** For SSE, add space between field and value for compatibility ([#2149]) +- **added:** Add `NestedPath` extractor ([#1924]) +- **added:** Add `handle_error` function to existing `ServiceExt` trait ([#2235]) +- **breaking:** `impl IntoResponse(Parts) for Extension` now requires + `T: Clone`, as that is required by the http crate ([#1882]) +- **added:** Add `axum::Json::from_bytes` ([#2244]) +- **added:** Implement `FromRequestParts` for `http::request::Parts` ([#2328]) +- **added:** Implement `FromRequestParts` for `http::Extensions` ([#2328]) +- **fixed:** Clearly document applying `DefaultBodyLimit` to individual routes ([#2157]) + +[#1664]: https://github.com/tokio-rs/axum/pull/1664 +[#1751]: https://github.com/tokio-rs/axum/pull/1751 +[#1762]: https://github.com/tokio-rs/axum/pull/1762 +[#1789]: https://github.com/tokio-rs/axum/pull/1789 +[#1835]: https://github.com/tokio-rs/axum/pull/1835 +[#1850]: https://github.com/tokio-rs/axum/pull/1850 +[#1868]: https://github.com/tokio-rs/axum/pull/1868 +[#1882]: https://github.com/tokio-rs/axum/pull/1882 +[#1924]: https://github.com/tokio-rs/axum/pull/1924 +[#1956]: https://github.com/tokio-rs/axum/pull/1956 +[#1972]: https://github.com/tokio-rs/axum/pull/1972 +[#2014]: https://github.com/tokio-rs/axum/pull/2014 +[#2021]: https://github.com/tokio-rs/axum/pull/2021 +[#2030]: https://github.com/tokio-rs/axum/pull/2030 +[#2058]: https://github.com/tokio-rs/axum/pull/2058 +[#2073]: https://github.com/tokio-rs/axum/pull/2073 +[#2096]: 
https://github.com/tokio-rs/axum/pull/2096 +[#2140]: https://github.com/tokio-rs/axum/pull/2140 +[#2143]: https://github.com/tokio-rs/axum/pull/2143 +[#2149]: https://github.com/tokio-rs/axum/pull/2149 +[#2157]: https://github.com/tokio-rs/axum/pull/2157 +[#2235]: https://github.com/tokio-rs/axum/pull/2235 +[#2244]: https://github.com/tokio-rs/axum/pull/2244 +[#2328]: https://github.com/tokio-rs/axum/pull/2328 + # 0.6.20 (03. August, 2023) - **added:** `WebSocketUpgrade::write_buffer_size` and `WebSocketUpgrade::max_write_buffer_size` diff --git a/.cargo-vendor/axum/Cargo.toml b/.cargo-vendor/axum/Cargo.toml index e6bc8c5004..2bce75c4e1 100644 --- a/.cargo-vendor/axum/Cargo.toml +++ b/.cargo-vendor/axum/Cargo.toml @@ -11,9 +11,9 @@ [package] edition = "2021" -rust-version = "1.63" +rust-version = "1.66" name = "axum" -version = "0.6.20" +version = "0.7.5" description = "Web framework that focuses on ergonomics and modularity" homepage = "https://github.com/tokio-rs/axum" readme = "README.md" @@ -32,22 +32,19 @@ repository = "https://github.com/tokio-rs/axum" [package.metadata.cargo-public-api-crates] allowed = [ - "async_trait", "axum_core", "axum_macros", - "bytes", "futures_core", "futures_sink", "futures_util", - "headers", - "headers_core", + "tower_layer", + "tower_service", + "async_trait", + "bytes", "http", "http_body", - "hyper", "serde", - "serde_json", - "tower_layer", - "tower_service", + "tokio", ] [package.metadata.docs.rs] @@ -74,19 +71,16 @@ harness = false version = "0.1.67" [dependencies.axum-core] -version = "0.3.4" +version = "0.4.3" [dependencies.axum-macros] -version = "0.3.8" +version = "0.4.1" optional = true [dependencies.base64] version = "0.21.0" optional = true -[dependencies.bitflags] -version = "1.0" - [dependencies.bytes] version = "1.0" @@ -95,19 +89,26 @@ version = "0.3" features = ["alloc"] default-features = false -[dependencies.headers] -version = "0.3.7" -optional = true - 
[dependencies.http] -version = "0.2.9" +version = "1.0.0" [dependencies.http-body] -version = "0.4.4" +version = "1.0.0" + +[dependencies.http-body-util] +version = "0.1.0" [dependencies.hyper] -version = "0.14.24" -features = ["stream"] +version = "1.1.0" +optional = true + +[dependencies.hyper-util] +version = "0.1.3" +features = [ + "tokio", + "server", +] +optional = true [dependencies.itoa] version = "1.0.5" @@ -122,7 +123,7 @@ version = "2.4.1" version = "0.3.16" [dependencies.multer] -version = "2.0.0" +version = "3.0.0" optional = true [dependencies.percent-encoding] @@ -152,7 +153,7 @@ version = "0.10" optional = true [dependencies.sync_wrapper] -version = "0.1.1" +version = "1.0.0" [dependencies.tokio] version = "1.25.0" @@ -161,7 +162,7 @@ optional = true package = "tokio" [dependencies.tokio-tungstenite] -version = "0.20" +version = "0.21" optional = true [dependencies.tower] @@ -170,7 +171,7 @@ features = ["util"] default-features = false [dependencies.tower-http] -version = "0.4" +version = "0.5.0" features = [ "add-extension", "auth", @@ -217,7 +218,7 @@ default-features = false version = "1.0" [dev-dependencies.axum-macros] -version = "0.3.8" +version = "0.4.1" features = ["__private"] [dev-dependencies.quickcheck] @@ -263,6 +264,9 @@ package = "tokio" [dev-dependencies.tokio-stream] version = "0.1" +[dev-dependencies.tokio-tungstenite] +version = "0.21" + [dev-dependencies.tower] version = "0.4.10" features = [ @@ -276,7 +280,7 @@ features = [ package = "tower" [dev-dependencies.tower-http] -version = "0.4" +version = "0.5.0" features = [ "add-extension", "auth", @@ -338,10 +342,19 @@ default = [ "query", "tokio", "tower-log", + "tracing", ] form = ["dep:serde_urlencoded"] -http1 = ["hyper/http1"] -http2 = ["hyper/http2"] +http1 = [ + "dep:hyper", + "hyper?/http1", + "hyper-util?/http1", +] +http2 = [ + "dep:hyper", + "hyper?/http2", + "hyper-util?/http2", +] json = [ "dep:serde_json", "dep:serde_path_to_error", @@ -352,11 +365,12 @@ multipart = 
["dep:multer"] original-uri = [] query = ["dep:serde_urlencoded"] tokio = [ + "dep:hyper-util", "dep:tokio", - "hyper/server", - "hyper/tcp", - "hyper/runtime", + "tokio/net", + "tokio/rt", "tower/make", + "tokio/macros", ] tower-log = ["tower/log"] tracing = [ @@ -364,6 +378,7 @@ tracing = [ "axum-core/tracing", ] ws = [ + "dep:hyper", "tokio", "dep:tokio-tungstenite", "dep:sha1", diff --git a/.cargo-vendor/axum/README.md b/.cargo-vendor/axum/README.md index 32e7ebf232..dc7f1a95dd 100644 --- a/.cargo-vendor/axum/README.md +++ b/.cargo-vendor/axum/README.md @@ -29,11 +29,9 @@ applications written using [`hyper`] or [`tonic`]. use axum::{ routing::{get, post}, http::StatusCode, - response::IntoResponse, Json, Router, }; use serde::{Deserialize, Serialize}; -use std::net::SocketAddr; #[tokio::main] async fn main() { @@ -47,14 +45,9 @@ async fn main() { // `POST /users` goes to `create_user` .route("/users", post(create_user)); - // run our app with hyper - // `axum::Server` is a re-export of `hyper::Server` - let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); - tracing::debug!("listening on {}", addr); - axum::Server::bind(&addr) - .serve(app.into_make_service()) - .await - .unwrap(); + // run our app with hyper, listening globally on port 3000 + let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); + axum::serve(listener, app).await.unwrap(); } // basic handler that responds with a static string @@ -111,7 +104,7 @@ This crate uses `#![forbid(unsafe_code)]` to ensure everything is implemented in ## Minimum supported Rust version -axum's MSRV is 1.63. +axum's MSRV is 1.66. 
## Examples diff --git a/.cargo-vendor/axum/benches/benches.rs b/.cargo-vendor/axum/benches/benches.rs index c3b9c19e34..bb1c303dd1 100644 --- a/.cargo-vendor/axum/benches/benches.rs +++ b/.cargo-vendor/axum/benches/benches.rs @@ -1,11 +1,11 @@ use axum::{ extract::State, routing::{get, post}, - Extension, Json, Router, Server, + Extension, Json, Router, }; -use hyper::server::conn::AddrIncoming; use serde::{Deserialize, Serialize}; use std::{ + future::IntoFuture, io::BufRead, process::{Command, Stdio}, }; @@ -162,13 +162,8 @@ impl BenchmarkBuilder { let addr = listener.local_addr().unwrap(); std::thread::spawn(move || { - rt.block_on(async move { - let incoming = AddrIncoming::from_listener(listener).unwrap(); - Server::builder(incoming) - .serve(app.into_make_service()) - .await - .unwrap(); - }); + rt.block_on(axum::serve(listener, app).into_future()) + .unwrap(); }); let mut cmd = Command::new("rewrk"); @@ -203,7 +198,7 @@ impl BenchmarkBuilder { eprintln!("Running {:?} benchmark", self.name); - // indent output from `rewrk` so its easier to read when running multiple benchmarks + // indent output from `rewrk` so it's easier to read when running multiple benchmarks let mut child = cmd.spawn().unwrap(); let stdout = child.stdout.take().unwrap(); let stdout = std::io::BufReader::new(stdout); diff --git a/.cargo-vendor/axum/clippy.toml b/.cargo-vendor/axum/clippy.toml new file mode 100644 index 0000000000..291e8cd5f4 --- /dev/null +++ b/.cargo-vendor/axum/clippy.toml @@ -0,0 +1,3 @@ +disallowed-types = [ + { path = "std::sync::Mutex", reason = "Use our internal AxumMutex instead" }, +] diff --git a/.cargo-vendor/axum/src/body/mod.rs b/.cargo-vendor/axum/src/body/mod.rs index 4eceec0ced..d32a89956d 100644 --- a/.cargo-vendor/axum/src/body/mod.rs +++ b/.cargo-vendor/axum/src/body/mod.rs @@ -1,17 +1,54 @@ //! HTTP body utilities. 
-mod stream_body; - -pub use self::stream_body::StreamBody; - -#[doc(no_inline)] -pub use http_body::{Body as HttpBody, Empty, Full}; - #[doc(no_inline)] -pub use hyper::body::Body; +pub use http_body::Body as HttpBody; #[doc(no_inline)] pub use bytes::Bytes; #[doc(inline)] -pub use axum_core::body::{boxed, BoxBody}; +pub use axum_core::body::{Body, BodyDataStream}; + +use http_body_util::{BodyExt, Limited}; + +/// Converts [`Body`] into [`Bytes`] and limits the maximum size of the body. +/// +/// # Example +/// +/// ```rust +/// use axum::body::{to_bytes, Body}; +/// +/// # async fn foo() -> Result<(), axum_core::Error> { +/// let body = Body::from(vec![1, 2, 3]); +/// // Use `usize::MAX` if you don't care about the maximum size. +/// let bytes = to_bytes(body, usize::MAX).await?; +/// assert_eq!(&bytes[..], &[1, 2, 3]); +/// # Ok(()) +/// # } +/// ``` +/// +/// You can detect if the limit was hit by checking the source of the error: +/// +/// ```rust +/// use axum::body::{to_bytes, Body}; +/// use http_body_util::LengthLimitError; +/// +/// # #[tokio::main] +/// # async fn main() { +/// let body = Body::from(vec![1, 2, 3]); +/// match to_bytes(body, 1).await { +/// Ok(_bytes) => panic!("should have hit the limit"), +/// Err(err) => { +/// let source = std::error::Error::source(&err).unwrap(); +/// assert!(source.is::()); +/// } +/// } +/// # } +/// ``` +pub async fn to_bytes(body: Body, limit: usize) -> Result { + Limited::new(body, limit) + .collect() + .await + .map(|col| col.to_bytes()) + .map_err(axum_core::Error::new) +} diff --git a/.cargo-vendor/axum/src/boxed.rs b/.cargo-vendor/axum/src/boxed.rs index f8191f2e26..f541a9fa30 100644 --- a/.cargo-vendor/axum/src/boxed.rs +++ b/.cargo-vendor/axum/src/boxed.rs @@ -1,105 +1,97 @@ use std::{convert::Infallible, fmt}; -use http::Request; +use crate::extract::Request; +use crate::util::AxumMutex; use tower::Service; use crate::{ - body::HttpBody, handler::Handler, routing::{future::RouteFuture, Route}, Router, }; 
-pub(crate) struct BoxedIntoRoute(Box>); +pub(crate) struct BoxedIntoRoute(AxumMutex>>); -impl BoxedIntoRoute +impl BoxedIntoRoute where S: Clone + Send + Sync + 'static, - B: Send + 'static, { pub(crate) fn from_handler(handler: H) -> Self where - H: Handler, + H: Handler, T: 'static, - B: HttpBody, { - Self(Box::new(MakeErasedHandler { + Self(AxumMutex::new(Box::new(MakeErasedHandler { handler, into_route: |handler, state| Route::new(Handler::with_state(handler, state)), - })) + }))) } } -impl BoxedIntoRoute { - pub(crate) fn map(self, f: F) -> BoxedIntoRoute +impl BoxedIntoRoute { + pub(crate) fn map(self, f: F) -> BoxedIntoRoute where S: 'static, - B: 'static, E: 'static, - F: FnOnce(Route) -> Route + Clone + Send + 'static, - B2: HttpBody + 'static, + F: FnOnce(Route) -> Route + Clone + Send + 'static, E2: 'static, { - BoxedIntoRoute(Box::new(Map { - inner: self.0, + BoxedIntoRoute(AxumMutex::new(Box::new(Map { + inner: self.0.into_inner().unwrap(), layer: Box::new(f), - })) + }))) } - pub(crate) fn into_route(self, state: S) -> Route { - self.0.into_route(state) + pub(crate) fn into_route(self, state: S) -> Route { + self.0.into_inner().unwrap().into_route(state) } } -impl Clone for BoxedIntoRoute { +impl Clone for BoxedIntoRoute { fn clone(&self) -> Self { - Self(self.0.clone_box()) + Self(AxumMutex::new(self.0.lock().unwrap().clone_box())) } } -impl fmt::Debug for BoxedIntoRoute { +impl fmt::Debug for BoxedIntoRoute { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("BoxedIntoRoute").finish() } } -pub(crate) trait ErasedIntoRoute: Send { - fn clone_box(&self) -> Box>; +pub(crate) trait ErasedIntoRoute: Send { + fn clone_box(&self) -> Box>; - fn into_route(self: Box, state: S) -> Route; + fn into_route(self: Box, state: S) -> Route; - fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture; + #[allow(dead_code)] + fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture; } -pub(crate) struct 
MakeErasedHandler { +pub(crate) struct MakeErasedHandler { pub(crate) handler: H, - pub(crate) into_route: fn(H, S) -> Route, + pub(crate) into_route: fn(H, S) -> Route, } -impl ErasedIntoRoute for MakeErasedHandler +impl ErasedIntoRoute for MakeErasedHandler where H: Clone + Send + 'static, S: 'static, - B: HttpBody + 'static, { - fn clone_box(&self) -> Box> { + fn clone_box(&self) -> Box> { Box::new(self.clone()) } - fn into_route(self: Box, state: S) -> Route { + fn into_route(self: Box, state: S) -> Route { (self.into_route)(self.handler, state) } - fn call_with_state( - self: Box, - request: Request, - state: S, - ) -> RouteFuture { + fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture { self.into_route(state).call(request) } } -impl Clone for MakeErasedHandler +impl Clone for MakeErasedHandler where H: Clone, { @@ -111,34 +103,29 @@ where } } -pub(crate) struct MakeErasedRouter { - pub(crate) router: Router, - pub(crate) into_route: fn(Router, S) -> Route, +pub(crate) struct MakeErasedRouter { + pub(crate) router: Router, + pub(crate) into_route: fn(Router, S) -> Route, } -impl ErasedIntoRoute for MakeErasedRouter +impl ErasedIntoRoute for MakeErasedRouter where S: Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, { - fn clone_box(&self) -> Box> { + fn clone_box(&self) -> Box> { Box::new(self.clone()) } - fn into_route(self: Box, state: S) -> Route { + fn into_route(self: Box, state: S) -> Route { (self.into_route)(self.router, state) } - fn call_with_state( - mut self: Box, - request: Request, - state: S, - ) -> RouteFuture { + fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture { self.router.call_with_state(request, state) } } -impl Clone for MakeErasedRouter +impl Clone for MakeErasedRouter where S: Clone, { @@ -150,44 +137,42 @@ where } } -pub(crate) struct Map { - pub(crate) inner: Box>, - pub(crate) layer: Box>, +pub(crate) struct Map { + pub(crate) inner: Box>, + pub(crate) layer: Box>, } -impl 
ErasedIntoRoute for Map +impl ErasedIntoRoute for Map where S: 'static, - B: 'static, E: 'static, - B2: HttpBody + 'static, E2: 'static, { - fn clone_box(&self) -> Box> { + fn clone_box(&self) -> Box> { Box::new(Self { inner: self.inner.clone_box(), layer: self.layer.clone_box(), }) } - fn into_route(self: Box, state: S) -> Route { + fn into_route(self: Box, state: S) -> Route { (self.layer)(self.inner.into_route(state)) } - fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture { + fn call_with_state(self: Box, request: Request, state: S) -> RouteFuture { (self.layer)(self.inner.into_route(state)).call(request) } } -pub(crate) trait LayerFn: FnOnce(Route) -> Route + Send { - fn clone_box(&self) -> Box>; +pub(crate) trait LayerFn: FnOnce(Route) -> Route + Send { + fn clone_box(&self) -> Box>; } -impl LayerFn for F +impl LayerFn for F where - F: FnOnce(Route) -> Route + Clone + Send + 'static, + F: FnOnce(Route) -> Route + Clone + Send + 'static, { - fn clone_box(&self) -> Box> { + fn clone_box(&self) -> Box> { Box::new(self.clone()) } } diff --git a/.cargo-vendor/axum/src/docs/debugging_handler_type_errors.md b/.cargo-vendor/axum/src/docs/debugging_handler_type_errors.md index d9a5b45d14..731a6a9537 100644 --- a/.cargo-vendor/axum/src/docs/debugging_handler_type_errors.md +++ b/.cargo-vendor/axum/src/docs/debugging_handler_type_errors.md @@ -4,7 +4,9 @@ For a function to be used as a handler it must implement the [`Handler`] trait. axum provides blanket implementations for functions that: - Are `async fn`s. -- Take no more than 16 arguments that all implement [`FromRequest`]. +- Take no more than 16 arguments that all implement `Send`. + - All except the last argument implement [`FromRequestParts`]. + - The last argument implements [`FromRequest`]. - Returns something that implements [`IntoResponse`]. - If a closure is used it must implement `Clone + Send` and be `'static`. 
diff --git a/.cargo-vendor/axum/src/docs/error_handling.md b/.cargo-vendor/axum/src/docs/error_handling.md index d230a24f53..6993b29ad0 100644 --- a/.cargo-vendor/axum/src/docs/error_handling.md +++ b/.cargo-vendor/axum/src/docs/error_handling.md @@ -43,10 +43,10 @@ that can ultimately be converted to `Response`. This allows using `?` operator in handlers. See those examples: * [`anyhow-error-response`][anyhow] for generic boxed errors -* [`error-handling-and-dependency-injection`][ehdi] for application-specific detailed errors +* [`error-handling`][error-handling] for application-specific detailed errors [anyhow]: https://github.com/tokio-rs/axum/blob/main/examples/anyhow-error-response/src/main.rs -[ehdi]: https://github.com/tokio-rs/axum/blob/main/examples/error-handling-and-dependency-injection/src/main.rs +[error-handling]: https://github.com/tokio-rs/axum/blob/main/examples/error-handling/src/main.rs This also applies to extractors. If an extractor doesn't match the request the request will be rejected and a response will be returned without calling your @@ -92,12 +92,10 @@ let app = Router::new().route_service( async fn handle_anyhow_error(err: anyhow::Error) -> (StatusCode, String) { ( StatusCode::INTERNAL_SERVER_ERROR, - format!("Something went wrong: {}", err), + format!("Something went wrong: {err}"), ) } -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Applying fallible middleware @@ -135,13 +133,11 @@ async fn handle_timeout_error(err: BoxError) -> (StatusCode, String) { } else { ( StatusCode::INTERNAL_SERVER_ERROR, - format!("Unhandled internal error: {}", err), + format!("Unhandled internal error: {err}"), ) } } -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Running extractors for error handling @@ -178,12 +174,10 @@ async fn handle_timeout_error( ) -> 
(StatusCode, String) { ( StatusCode::INTERNAL_SERVER_ERROR, - format!("`{} {}` failed with {}", method, uri, err), + format!("`{method} {uri}` failed with {err}"), ) } -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` [`tower::Service`]: `tower::Service` diff --git a/.cargo-vendor/axum/src/docs/extract.md b/.cargo-vendor/axum/src/docs/extract.md index 1e78d5719b..4965428959 100644 --- a/.cargo-vendor/axum/src/docs/extract.md +++ b/.cargo-vendor/axum/src/docs/extract.md @@ -12,8 +12,6 @@ Types and traits for extracting data from requests. - [Defining custom extractors](#defining-custom-extractors) - [Accessing other extractors in `FromRequest` or `FromRequestParts` implementations](#accessing-other-extractors-in-fromrequest-or-fromrequestparts-implementations) - [Request body limits](#request-body-limits) -- [Request body extractors](#request-body-extractors) -- [Running extractors from middleware](#running-extractors-from-middleware) - [Wrapping extractors](#wrapping-extractors) - [Logging rejections](#logging-rejections) @@ -47,9 +45,7 @@ async fn create_user(Json(payload): Json) { } let app = Router::new().route("/users", post(create_user)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Common extractors @@ -58,10 +54,9 @@ Some commonly used extractors are: ```rust,no_run use axum::{ - extract::{Json, TypedHeader, Path, Extension, Query}, + extract::{Request, Json, Path, Extension, Query}, routing::post, - headers::UserAgent, - http::{Request, header::HeaderMap}, + http::header::HeaderMap, body::{Bytes, Body}, Router, }; @@ -78,10 +73,6 @@ async fn query(Query(params): Query>) {} // `HeaderMap` gives you all the headers async fn headers(headers: HeaderMap) {} -// `TypedHeader` can be used to extract a single header -// note this requires you've enabled axum's `headers` feature -async fn 
user_agent(TypedHeader(user_agent): TypedHeader) {} - // `String` consumes the request body and ensures it is valid utf-8 async fn string(body: String) {} @@ -92,7 +83,7 @@ async fn bytes(body: Bytes) {} async fn json(Json(payload): Json) {} // `Request` gives you the whole request for maximum control -async fn request(request: Request) {} +async fn request(request: Request) {} // `Extension` extracts data from "request extensions" // This is commonly used to share state with handlers @@ -104,16 +95,12 @@ struct State { /* ... */ } let app = Router::new() .route("/path/:user_id", post(path)) .route("/query", post(query)) - .route("/user_agent", post(user_agent)) - .route("/headers", post(headers)) .route("/string", post(string)) .route("/bytes", post(bytes)) .route("/json", post(json)) .route("/request", post(request)) .route("/extension", post(extension)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Applying multiple extractors @@ -151,9 +138,7 @@ async fn get_user_things( // ... } -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # The order of extractors @@ -188,7 +173,7 @@ async fn handler( // ... 
} # -# let _: axum::routing::MethodRouter = axum::routing::get(handler); +# let _: axum::routing::MethodRouter = axum::routing::get(handler); ``` We get a compile error if `String` isn't the last extractor: @@ -253,9 +238,7 @@ async fn create_user(payload: Option>) { } let app = Router::new().route("/users", post(create_user)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` Wrapping extractors in `Result` makes them optional and gives you the reason @@ -295,9 +278,7 @@ async fn create_user(payload: Result, JsonRejection>) { } let app = Router::new().route("/users", post(create_user)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Customizing extractor responses @@ -309,7 +290,7 @@ options: 1. Use `Result` as your extractor like shown in ["Optional extractors"](#optional-extractors). This works well if you're only using the extractor in a single handler. -2. Create your own extractor that in its [`FromRequest`] implemention calls +2. Create your own extractor that in its [`FromRequest`] implementation calls one of axum's built in extractors but returns a different response for rejections. See the [customize-extractor-error] example for more details. 
@@ -473,9 +454,7 @@ async fn handler(ExtractUserAgent(user_agent): ExtractUserAgent) { } let app = Router::new().route("/foo", get(handler)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` ## Implementing `FromRequest` @@ -485,30 +464,28 @@ If your extractor needs to consume the request body you must implement [`FromReq ```rust,no_run use axum::{ async_trait, - extract::FromRequest, + extract::{Request, FromRequest}, response::{Response, IntoResponse}, - body::Bytes, + body::{Bytes, Body}, routing::get, Router, http::{ StatusCode, header::{HeaderValue, USER_AGENT}, - Request, }, }; struct ValidatedBody(Bytes); #[async_trait] -impl FromRequest for ValidatedBody +impl FromRequest for ValidatedBody where - Bytes: FromRequest, - B: Send + 'static, + Bytes: FromRequest, S: Send + Sync, { type Rejection = Response; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { let body = Bytes::from_request(req, state) .await .map_err(IntoResponse::into_response)?; @@ -524,9 +501,7 @@ async fn handler(ValidatedBody(body): ValidatedBody) { } let app = Router::new().route("/foo", get(handler)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` ## Cannot implement both `FromRequest` and `FromRequestParts` @@ -539,8 +514,9 @@ wrapping another extractor: use axum::{ Router, routing::get, - extract::{FromRequest, FromRequestParts}, - http::{Request, request::Parts}, + extract::{FromRequest, Request, FromRequestParts}, + http::request::Parts, + body::Body, async_trait, }; use std::convert::Infallible; @@ -550,14 +526,13 @@ struct MyExtractor; // `MyExtractor` implements both `FromRequest` #[async_trait] -impl FromRequest for MyExtractor +impl FromRequest for MyExtractor where S: Send + Sync, - B: Send + 'static, { type Rejection = Infallible; - 
async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { // ... # todo!() } @@ -593,15 +568,14 @@ let app = Router::new().route( # Accessing other extractors in `FromRequest` or `FromRequestParts` implementations -When defining custom extractors you often need to access another extractors +When defining custom extractors you often need to access another extractor in your implementation. ```rust use axum::{ async_trait, - extract::{Extension, FromRequestParts, TypedHeader}, - headers::{authorization::Bearer, Authorization}, - http::{StatusCode, request::Parts}, + extract::{Extension, FromRequestParts}, + http::{StatusCode, HeaderMap, request::Parts}, response::{IntoResponse, Response}, routing::get, Router, @@ -625,10 +599,9 @@ where async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { // You can either call them directly... - let TypedHeader(Authorization(token)) = - TypedHeader::>::from_request_parts(parts, state) - .await - .map_err(|err| err.into_response())?; + let headers = HeaderMap::from_request_parts(parts, state) + .await + .map_err(|err| match err {})?; // ... or use `extract` / `extract_with_state` from `RequestExt` / `RequestPartsExt` use axum::RequestPartsExt; @@ -647,9 +620,7 @@ async fn handler(user: AuthenticatedUser) { let state = State { /* ... */ }; let app = Router::new().route("/", get(handler)).layer(Extension(state)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Request body limits @@ -660,129 +631,6 @@ For security reasons, [`Bytes`] will, by default, not accept bodies larger than For more details, including how to disable this limit, see [`DefaultBodyLimit`]. -# Request body extractors - -Most of the time your request body type will be [`body::Body`] (a re-export -of [`hyper::Body`]), which is directly supported by all extractors. 
- -However if you're applying a tower middleware that changes the request body type -you might have to apply a different body type to some extractors: - -```rust -use std::{ - task::{Context, Poll}, - pin::Pin, -}; -use tower_http::map_request_body::MapRequestBodyLayer; -use axum::{ - extract::{self, BodyStream}, - body::{Body, HttpBody}, - routing::get, - http::{header::HeaderMap, Request}, - Router, -}; - -struct MyBody(B); - -impl HttpBody for MyBody -where - B: HttpBody + Unpin, -{ - type Data = B::Data; - type Error = B::Error; - - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(&mut self.0).poll_data(cx) - } - - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::new(&mut self.0).poll_trailers(cx) - } -} - -let app = Router::new() - .route( - "/string", - // `String` works directly with any body type - get(|_: String| async {}) - ) - .route( - "/body", - // `extract::Body` defaults to `axum::body::Body` - // but can be customized - get(|_: extract::RawBody>| async {}) - ) - .route( - "/body-stream", - // same for `extract::BodyStream` - get(|_: extract::BodyStream| async {}), - ) - .route( - // and `Request<_>` - "/request", - get(|_: Request>| async {}) - ) - // middleware that changes the request body type - .layer(MapRequestBodyLayer::new(MyBody)); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; -``` - -# Running extractors from middleware - -Extractors can also be run from middleware: - -```rust -use axum::{ - middleware::{self, Next}, - extract::{TypedHeader, FromRequestParts}, - http::{Request, StatusCode}, - response::Response, - headers::authorization::{Authorization, Bearer}, - RequestPartsExt, Router, -}; - -async fn auth_middleware( - request: Request, - next: Next, -) -> Result -where - B: Send, -{ - // running extractors requires a `axum::http::request::Parts` - let (mut parts, body) = 
request.into_parts(); - - // `TypedHeader>` extracts the auth token - let auth: TypedHeader> = parts.extract() - .await - .map_err(|_| StatusCode::UNAUTHORIZED)?; - - if !token_is_valid(auth.token()) { - return Err(StatusCode::UNAUTHORIZED); - } - - // reconstruct the request - let request = Request::from_parts(parts, body); - - Ok(next.run(request).await) -} - -fn token_is_valid(token: &str) -> bool { - // ... - # false -} - -let app = Router::new().layer(middleware::from_fn(auth_middleware)); -# let _: Router<()> = app; -``` - # Wrapping extractors If you want write an extractor that generically wraps another extractor (that @@ -792,9 +640,10 @@ may or may not consume the request body) you should implement both ```rust use axum::{ Router, + body::Body, routing::get, - extract::{FromRequest, FromRequestParts}, - http::{Request, HeaderMap, request::Parts}, + extract::{Request, FromRequest, FromRequestParts}, + http::{HeaderMap, request::Parts}, async_trait, }; use std::time::{Instant, Duration}; @@ -827,15 +676,14 @@ where // and `FromRequest` #[async_trait] -impl FromRequest for Timing +impl FromRequest for Timing where - B: Send + 'static, S: Send + Sync, - T: FromRequest, + T: FromRequest, { type Rejection = T::Rejection; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { let start = Instant::now(); let extractor = T::from_request(req, state).await?; let duration = start.elapsed(); @@ -858,9 +706,9 @@ async fn handler( # Logging rejections All built-in extractors will log rejections for easier debugging. To see the -logs, enable the `tracing` feature for axum and the `axum::rejection=trace` -tracing target, for example with `RUST_LOG=info,axum::rejection=trace cargo -run`. +logs, enable the `tracing` feature for axum (enabled by default) and the +`axum::rejection=trace` tracing target, for example with +`RUST_LOG=info,axum::rejection=trace cargo run`. 
[`body::Body`]: crate::body::Body [`Bytes`]: crate::body::Bytes diff --git a/.cargo-vendor/axum/src/docs/method_routing/fallback.md b/.cargo-vendor/axum/src/docs/method_routing/fallback.md index 906cbb3b5d..e6f364a867 100644 --- a/.cargo-vendor/axum/src/docs/method_routing/fallback.md +++ b/.cargo-vendor/axum/src/docs/method_routing/fallback.md @@ -16,11 +16,9 @@ let handler = get(|| async {}).fallback(fallback); let app = Router::new().route("/", handler); async fn fallback(method: Method, uri: Uri) -> (StatusCode, String) { - (StatusCode::NOT_FOUND, format!("`{}` not allowed for {}", method, uri)) + (StatusCode::NOT_FOUND, format!("`{method}` not allowed for {uri}")) } -# async { -# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` ## When used with `MethodRouter::merge` @@ -44,10 +42,7 @@ let method_route = one.merge(two); async fn fallback_one() -> impl IntoResponse { /* ... */ } async fn fallback_two() -> impl IntoResponse { /* ... */ } -# let app = axum::Router::new().route("/", method_route); -# async { -# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let app: axum::Router = axum::Router::new().route("/", method_route); ``` ## Setting the `Allow` header diff --git a/.cargo-vendor/axum/src/docs/method_routing/layer.md b/.cargo-vendor/axum/src/docs/method_routing/layer.md index cdf6f93342..e155ee514a 100644 --- a/.cargo-vendor/axum/src/docs/method_routing/layer.md +++ b/.cargo-vendor/axum/src/docs/method_routing/layer.md @@ -16,14 +16,12 @@ more details. 
use axum::{routing::get, Router}; use tower::limit::ConcurrencyLimitLayer; -async fn hander() {} +async fn handler() {} let app = Router::new().route( "/", // All requests to `GET /` will be sent through `ConcurrencyLimitLayer` - get(hander).layer(ConcurrencyLimitLayer::new(64)), + get(handler).layer(ConcurrencyLimitLayer::new(64)), ); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` diff --git a/.cargo-vendor/axum/src/docs/method_routing/merge.md b/.cargo-vendor/axum/src/docs/method_routing/merge.md index 39d74d048a..a88ee2d7ae 100644 --- a/.cargo-vendor/axum/src/docs/method_routing/merge.md +++ b/.cargo-vendor/axum/src/docs/method_routing/merge.md @@ -19,7 +19,5 @@ let app = Router::new().route("/", merged); // Our app now accepts // - GET / // - POST / -# async { -# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` diff --git a/.cargo-vendor/axum/src/docs/method_routing/route_layer.md b/.cargo-vendor/axum/src/docs/method_routing/route_layer.md index f497e8b102..501b55174d 100644 --- a/.cargo-vendor/axum/src/docs/method_routing/route_layer.md +++ b/.cargo-vendor/axum/src/docs/method_routing/route_layer.md @@ -2,8 +2,9 @@ Apply a [`tower::Layer`] to the router that will only run if the request matches a route. Note that the middleware is only applied to existing routes. So you have to -first add your routes (and / or fallback) and then call `layer` afterwards. Additional -routes added after `layer` is called will not have the middleware added. +first add your routes (and / or fallback) and then call `route_layer` +afterwards. Additional routes added after `route_layer` is called will not have +the middleware added. This works similarly to [`MethodRouter::layer`] except the middleware will only run if the request matches a route. 
This is useful for middleware that return early @@ -28,7 +29,5 @@ let app = Router::new().route( // `GET /foo` with a valid token will receive `200 OK` // `GET /foo` with a invalid token will receive `401 Unauthorized` // `POST /FOO` with a invalid token will receive `405 Method Not Allowed` -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` diff --git a/.cargo-vendor/axum/src/docs/middleware.md b/.cargo-vendor/axum/src/docs/middleware.md index fd601fcfe8..1529ef0365 100644 --- a/.cargo-vendor/axum/src/docs/middleware.md +++ b/.cargo-vendor/axum/src/docs/middleware.md @@ -16,7 +16,7 @@ axum is unique in that it doesn't have its own bespoke middleware system and instead integrates with [`tower`]. This means the ecosystem of [`tower`] and [`tower-http`] middleware all work with axum. -While its not necessary to fully understand tower to write or use middleware +While it's not necessary to fully understand tower to write or use middleware with axum, having at least a basic understanding of tower's concepts is recommended. See [tower's guides][tower-guides] for a general introduction. Reading the documentation for [`tower::ServiceBuilder`] is also recommended. @@ -31,7 +31,7 @@ axum allows you to add middleware just about anywhere ## Applying multiple middleware -Its recommended to use [`tower::ServiceBuilder`] to apply multiple middleware at +It's recommended to use [`tower::ServiceBuilder`] to apply multiple middleware at once, instead of calling `layer` (or `route_layer`) repeatedly: ```rust @@ -55,9 +55,7 @@ let app = Router::new() .layer(TraceLayer::new_for_http()) .layer(Extension(State {})) ); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Commonly used middleware @@ -66,14 +64,11 @@ Some commonly used middleware are: - [`TraceLayer`](tower_http::trace) for high level tracing/logging. 
- [`CorsLayer`](tower_http::cors) for handling CORS. -- [`CompressionLayer`](tower_http::compression) for automatic compression of - responses. +- [`CompressionLayer`](tower_http::compression) for automatic compression of responses. - [`RequestIdLayer`](tower_http::request_id) and [`PropagateRequestIdLayer`](tower_http::request_id) set and propagate request ids. -- [`TimeoutLayer`](tower::timeout::TimeoutLayer) for timeouts. Note this - requires using [`HandleErrorLayer`](crate::error_handling::HandleErrorLayer) - to convert timeouts to responses. +- [`TimeoutLayer`](tower_http::timeout::TimeoutLayer) for timeouts. # Ordering @@ -97,7 +92,7 @@ let app = Router::new() .layer(layer_one) .layer(layer_two) .layer(layer_three); -# let _: Router<(), axum::body::Body> = app; +# let _: Router = app; ``` Think of the middleware as being layered like an onion where each new layer @@ -133,9 +128,9 @@ That is: It's a little more complicated in practice because any middleware is free to return early and not call the next layer, for example if a request cannot be -authorized, but its a useful mental model to have. +authorized, but it's a useful mental model to have. 
-As previously mentioned its recommended to add multiple middleware using +As previously mentioned it's recommended to add multiple middleware using `tower::ServiceBuilder`, however this impacts ordering: ```rust @@ -156,7 +151,7 @@ let app = Router::new() .layer(layer_two) .layer(layer_three), ); -# let _: Router<(), axum::body::Body> = app; +# let _: Router = app; ``` `ServiceBuilder` works by composing all layers into one such that they run top @@ -223,7 +218,7 @@ A decent template for such a middleware could be: use axum::{ response::Response, body::Body, - http::Request, + extract::Request, }; use futures_util::future::BoxFuture; use tower::{Service, Layer}; @@ -245,9 +240,9 @@ struct MyMiddleware { inner: S, } -impl Service> for MyMiddleware +impl Service for MyMiddleware where - S: Service, Response = Response> + Send + 'static, + S: Service + Send + 'static, S::Future: Send + 'static, { type Response = S::Response; @@ -259,7 +254,7 @@ where self.inner.poll_ready(cx) } - fn call(&mut self, request: Request) -> Self::Future { + fn call(&mut self, request: Request) -> Self::Future { let future = self.inner.call(request); Box::pin(async move { let response: Response = future.await?; @@ -269,6 +264,21 @@ where } ``` +Note that your error type being defined as `S::Error` means that your middleware typically _returns no errors_. As a principle always try to return a response and try not to bail out with a custom error type. For example, if a 3rd party library you are using inside your new middleware returns its own specialized error type, try to convert it to some reasonable response and return `Ok` with that response. 
+ +If you choose to implement a custom error type such as `type Error = BoxError` (a boxed opaque error), or any other error type that is not `Infallible`, you must use a `HandleErrorLayer`, here is an example using a `ServiceBuilder`: + +```ignore +ServiceBuilder::new() + .layer(HandleErrorLayer::new(|_: BoxError| async { + // because Axum uses infallible errors, you must handle your custom error type from your middleware here + StatusCode::BAD_REQUEST + })) + .layer( + // + ); +``` + ## `tower::Service` and custom futures If you're comfortable implementing your own futures (or want to learn it) and @@ -319,9 +329,7 @@ let app = Router::new() })) .layer(TimeoutLayer::new(Duration::from_secs(10))) ); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` See [`error_handling`](crate::error_handling) for more details on axum's error @@ -376,9 +384,7 @@ let app = Router::new().route("/", get(handler)); let app = ServiceBuilder::new() .layer(some_backpressure_sensitive_middleware) .service(app); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` However when applying middleware around your whole application in this way @@ -406,8 +412,7 @@ use axum::{ routing::get, middleware::{self, Next}, response::Response, - extract::State, - http::Request, + extract::{State, Request}, }; use tower::{Layer, Service}; use std::task::{Context, Poll}; @@ -477,17 +482,17 @@ State can be passed from middleware to handlers using [request extensions]: ```rust use axum::{ Router, - http::{Request, StatusCode}, + http::StatusCode, routing::get, response::{IntoResponse, Response}, middleware::{self, Next}, - extract::Extension, + extract::{Request, Extension}, }; #[derive(Clone)] struct CurrentUser { /* ... 
*/ } -async fn auth(mut req: Request, next: Next) -> Result { +async fn auth(mut req: Request, next: Next) -> Result { let auth_header = req.headers() .get(http::header::AUTHORIZATION) .and_then(|header| header.to_str().ok()); @@ -523,7 +528,7 @@ async fn handler( let app = Router::new() .route("/", get(handler)) .route_layer(middleware::from_fn(auth)); -# let _: Router<()> = app; +# let _: Router = app; ``` [Response extensions] can also be used but note that request extensions are not @@ -546,16 +551,16 @@ use axum::{ ServiceExt, // for `into_make_service` response::Response, middleware::Next, - http::Request, + extract::Request, }; -async fn rewrite_request_uri(req: Request, next: Next) -> Response { +fn rewrite_request_uri(req: Request) -> Request { // ... - # next.run(req).await + # req } // this can be any `tower::Layer` -let middleware = axum::middleware::from_fn(rewrite_request_uri); +let middleware = tower::util::MapRequestLayer::new(rewrite_request_uri); let app = Router::new(); @@ -564,10 +569,8 @@ let app = Router::new(); let app_with_middleware = middleware.layer(app); # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(app_with_middleware.into_make_service()) - .await - .unwrap(); +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, app_with_middleware.into_make_service()).await.unwrap(); # }; ``` diff --git a/.cargo-vendor/axum/src/docs/response.md b/.cargo-vendor/axum/src/docs/response.md index 2afe476046..a5761c34ed 100644 --- a/.cargo-vendor/axum/src/docs/response.md +++ b/.cargo-vendor/axum/src/docs/response.md @@ -127,6 +127,7 @@ async fn with_status_extensions() -> impl IntoResponse { ) } +#[derive(Clone)] struct Foo(&'static str); // Or mix and match all the things @@ -171,15 +172,15 @@ Use [`Response`](crate::response::Response) for more low level control: use axum::{ Json, response::{IntoResponse, Response}, - body::{Full, Bytes}, + body::Body, http::StatusCode, }; -async 
fn response() -> Response> { +async fn response() -> Response { Response::builder() .status(StatusCode::NOT_FOUND) .header("x-foo", "custom header") - .body(Full::from("not found")) + .body(Body::from("not found")) .unwrap() } ``` diff --git a/.cargo-vendor/axum/src/docs/routing/fallback.md b/.cargo-vendor/axum/src/docs/routing/fallback.md index 11b25896ef..27fb76a59e 100644 --- a/.cargo-vendor/axum/src/docs/routing/fallback.md +++ b/.cargo-vendor/axum/src/docs/routing/fallback.md @@ -16,11 +16,9 @@ let app = Router::new() .fallback(fallback); async fn fallback(uri: Uri) -> (StatusCode, String) { - (StatusCode::NOT_FOUND, format!("No route for {}", uri)) + (StatusCode::NOT_FOUND, format!("No route for {uri}")) } -# async { -# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` Fallbacks only apply to routes that aren't matched by anything in the @@ -40,10 +38,8 @@ async fn handler() {} let app = Router::new().fallback(handler); # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(app.into_make_service()) - .await - .unwrap(); +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, app).await.unwrap(); # }; ``` @@ -55,9 +51,7 @@ use axum::handler::HandlerWithoutStateExt; async fn handler() {} # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(handler.into_make_service()) - .await - .unwrap(); +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, handler.into_make_service()).await.unwrap(); # }; ``` diff --git a/.cargo-vendor/axum/src/docs/routing/into_make_service_with_connect_info.md b/.cargo-vendor/axum/src/docs/routing/into_make_service_with_connect_info.md index 05ee750c56..26d0602f31 100644 --- a/.cargo-vendor/axum/src/docs/routing/into_make_service_with_connect_info.md +++ b/.cargo-vendor/axum/src/docs/routing/into_make_service_with_connect_info.md @@ -17,16 
+17,12 @@ use std::net::SocketAddr; let app = Router::new().route("/", get(handler)); async fn handler(ConnectInfo(addr): ConnectInfo) -> String { - format!("Hello {}", addr) + format!("Hello {addr}") } # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve( - app.into_make_service_with_connect_info::() - ) - .await - .expect("server failed"); +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, app.into_make_service_with_connect_info::()).await.unwrap(); # }; ``` @@ -36,16 +32,16 @@ You can implement custom a [`Connected`] like so: use axum::{ extract::connect_info::{ConnectInfo, Connected}, routing::get, + serve::IncomingStream, Router, }; -use hyper::server::conn::AddrStream; let app = Router::new().route("/", get(handler)); async fn handler( ConnectInfo(my_connect_info): ConnectInfo, ) -> String { - format!("Hello {:?}", my_connect_info) + format!("Hello {my_connect_info:?}") } #[derive(Clone, Debug)] @@ -53,8 +49,8 @@ struct MyConnectInfo { // ... } -impl Connected<&AddrStream> for MyConnectInfo { - fn connect_info(target: &AddrStream) -> Self { +impl Connected> for MyConnectInfo { + fn connect_info(target: IncomingStream<'_>) -> Self { MyConnectInfo { // ... } @@ -62,12 +58,8 @@ impl Connected<&AddrStream> for MyConnectInfo { } # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve( - app.into_make_service_with_connect_info::() - ) - .await - .expect("server failed"); +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, app.into_make_service_with_connect_info::()).await.unwrap(); # }; ``` diff --git a/.cargo-vendor/axum/src/docs/routing/merge.md b/.cargo-vendor/axum/src/docs/routing/merge.md index b88175130b..e8f668712e 100644 --- a/.cargo-vendor/axum/src/docs/routing/merge.md +++ b/.cargo-vendor/axum/src/docs/routing/merge.md @@ -1,4 +1,4 @@ -Merge two routers into one. 
+Merge the paths and fallbacks of two routers into a single [`Router`]. This is useful for breaking apps into smaller pieces and combining them into one. @@ -32,9 +32,7 @@ let app = Router::new() // - GET /users // - GET /users/:id // - GET /teams -# async { -# hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Merging routers with state @@ -71,6 +69,11 @@ let app = Router::new() # let _: axum::Router = app; ``` +# Merging routers with fallbacks + +When combining [`Router`]s with this method, the [fallback](Router::fallback) is also merged. +However only one of the routers can have a fallback. + # Panics - If two routers that each have a [fallback](Router::fallback) are merged. This diff --git a/.cargo-vendor/axum/src/docs/routing/nest.md b/.cargo-vendor/axum/src/docs/routing/nest.md index b40d0fc951..c3f7308fdb 100644 --- a/.cargo-vendor/axum/src/docs/routing/nest.md +++ b/.cargo-vendor/axum/src/docs/routing/nest.md @@ -24,9 +24,7 @@ let app = Router::new().nest("/api", api_routes); // Our app now accepts // - GET /api/users/:id // - POST /api/teams -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # How the URI changes @@ -59,9 +57,7 @@ async fn users_get(Path(params): Path>) { let users_api = Router::new().route("/users/:id", get(users_get)); let app = Router::new().nest("/:version/api", users_api); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Differences from wildcard routes @@ -83,9 +79,7 @@ let app = Router::new() // `uri` will contain `/foo` })) .nest("/bar", nested_router); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Fallbacks diff --git a/.cargo-vendor/axum/src/docs/routing/route.md 
b/.cargo-vendor/axum/src/docs/routing/route.md index ac5ed9406b..eefbb21beb 100644 --- a/.cargo-vendor/axum/src/docs/routing/route.md +++ b/.cargo-vendor/axum/src/docs/routing/route.md @@ -22,7 +22,8 @@ be called. # Captures Paths can contain segments like `/:key` which matches any single segment and -will store the value captured at `key`. +will store the value captured at `key`. The value captured can be zero-length +except for in the invalid path `//`. Examples: @@ -77,9 +78,7 @@ async fn get_root() {} async fn post_root() {} async fn delete_root() {} -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` Or you can add them one by one: @@ -122,9 +121,7 @@ async fn show_user(Path(id): Path) {} async fn do_users_action(Path((version, id)): Path<(String, u64)>) {} async fn serve_asset(Path(path): Path) {} -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` # Panics @@ -137,9 +134,7 @@ use axum::{routing::get, Router}; let app = Router::new() .route("/", get(|| async {})) .route("/", get(|| async {})); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` The static route `/foo` and the dynamic route `/:key` are not considered to diff --git a/.cargo-vendor/axum/src/docs/routing/route_layer.md b/.cargo-vendor/axum/src/docs/routing/route_layer.md index fe5b8faa7d..bc7b219742 100644 --- a/.cargo-vendor/axum/src/docs/routing/route_layer.md +++ b/.cargo-vendor/axum/src/docs/routing/route_layer.md @@ -2,8 +2,9 @@ Apply a [`tower::Layer`] to the router that will only run if the request matches a route. Note that the middleware is only applied to existing routes. So you have to -first add your routes (and / or fallback) and then call `layer` afterwards. 
Additional -routes added after `layer` is called will not have the middleware added. +first add your routes (and / or fallback) and then call `route_layer` +afterwards. Additional routes added after `route_layer` is called will not have +the middleware added. This works similarly to [`Router::layer`] except the middleware will only run if the request matches a route. This is useful for middleware that return early @@ -26,7 +27,5 @@ let app = Router::new() // `GET /foo` with a valid token will receive `200 OK` // `GET /foo` with a invalid token will receive `401 Unauthorized` // `GET /not-found` with a invalid token will receive `404 Not Found` -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` diff --git a/.cargo-vendor/axum/src/docs/routing/route_service.md b/.cargo-vendor/axum/src/docs/routing/route_service.md index 1be229e4c6..7f016105df 100644 --- a/.cargo-vendor/axum/src/docs/routing/route_service.md +++ b/.cargo-vendor/axum/src/docs/routing/route_service.md @@ -7,7 +7,8 @@ use axum::{ Router, body::Body, routing::{any_service, get_service}, - http::{Request, StatusCode}, + extract::Request, + http::StatusCode, error_handling::HandleErrorLayer, }; use tower_http::services::ServeFile; @@ -22,7 +23,7 @@ let app = Router::new() // Services whose response body is not `axum::body::BoxBody` // can be wrapped in `axum::routing::any_service` (or one of the other routing filters) // to have the response body mapped - any_service(service_fn(|_: Request| async { + any_service(service_fn(|_: Request| async { let res = Response::new(Body::from("Hi from `GET /`")); Ok::<_, Infallible>(res) })) @@ -31,9 +32,8 @@ let app = Router::new() "/foo", // This service's response body is `axum::body::BoxBody` so // it can be routed to directly. 
- service_fn(|req: Request| async move { + service_fn(|req: Request| async move { let body = Body::from(format!("Hi from `{} /foo`", req.method())); - let body = axum::body::boxed(body); let res = Response::new(body); Ok::<_, Infallible>(res) }) @@ -43,9 +43,7 @@ let app = Router::new() "/static/Cargo.toml", ServeFile::new("Cargo.toml"), ); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` Routing to arbitrary services in this way has complications for backpressure @@ -64,9 +62,7 @@ let app = Router::new().route_service( "/", Router::new().route("/foo", get(|| async {})), ); -# async { -# axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -# }; +# let _: Router = app; ``` Use [`Router::nest`] instead. diff --git a/.cargo-vendor/axum/src/docs/routing/with_state.md b/.cargo-vendor/axum/src/docs/routing/with_state.md index 3f9c815132..bece920fe0 100644 --- a/.cargo-vendor/axum/src/docs/routing/with_state.md +++ b/.cargo-vendor/axum/src/docs/routing/with_state.md @@ -13,9 +13,8 @@ let routes = Router::new() .with_state(AppState {}); # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(routes.into_make_service()) - .await; +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, routes).await.unwrap(); # }; ``` @@ -40,9 +39,8 @@ fn routes() -> Router { let routes = routes().with_state(AppState {}); # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(routes.into_make_service()) - .await; +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, routes).await.unwrap(); # }; ``` @@ -64,9 +62,8 @@ fn routes(state: AppState) -> Router { let routes = routes(AppState {}); # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(routes.into_make_service()) - .await; +let listener = 
tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, routes).await.unwrap(); # }; ``` @@ -92,9 +89,8 @@ fn routes(state: AppState) -> Router { let routes = Router::new().nest("/api", routes(AppState {})); # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(routes.into_make_service()) - .await; +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, routes).await.unwrap(); # }; ``` @@ -133,9 +129,8 @@ let router: Router<()> = router.with_state(AppState {}); // You cannot call `into_make_service` on a `Router` // because it is still missing an `AppState`. # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(router.into_make_service()) - .await; +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, router).await.unwrap(); # }; ``` @@ -163,9 +158,8 @@ let final_router: Router<()> = string_router.with_state("foo".to_owned()); // Since we have a `Router<()>` we can run it. 
# async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(final_router.into_make_service()) - .await; +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, final_router).await.unwrap(); # }; ``` @@ -190,9 +184,8 @@ let app = routes(AppState {}); // We can only call `Router::into_make_service` on a `Router<()>` // but `app` is a `Router` # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(app.into_make_service()) - .await; +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, app).await.unwrap(); # }; ``` @@ -214,9 +207,8 @@ let app = routes(AppState {}); // We can now call `Router::into_make_service` # async { -axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - .serve(app.into_make_service()) - .await; +let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +axum::serve(listener, app).await.unwrap(); # }; ``` diff --git a/.cargo-vendor/axum/src/extension.rs b/.cargo-vendor/axum/src/extension.rs index d66c9466b9..e4d170fb6d 100644 --- a/.cargo-vendor/axum/src/extension.rs +++ b/.cargo-vendor/axum/src/extension.rs @@ -40,9 +40,7 @@ use tower_service::Service; /// // Add middleware that inserts the state into all incoming request's /// // extensions. /// .layer(Extension(state)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// If the extension is missing it will reject the request with a `500 Internal @@ -89,8 +87,7 @@ where "Extension of type `{}` was not found. Perhaps you forgot to add it? 
See `axum::Extension`.", std::any::type_name::() )) - }) - .map(|x| x.clone())?; + }).cloned()?; Ok(Extension(value)) } @@ -100,7 +97,7 @@ axum_core::__impl_deref!(Extension); impl IntoResponseParts for Extension where - T: Send + Sync + 'static, + T: Clone + Send + Sync + 'static, { type Error = Infallible; @@ -112,7 +109,7 @@ where impl IntoResponse for Extension where - T: Send + Sync + 'static, + T: Clone + Send + Sync + 'static, { fn into_response(self) -> Response { let mut res = ().into_response(); diff --git a/.cargo-vendor/axum/src/extract/connect_info.rs b/.cargo-vendor/axum/src/extract/connect_info.rs index f22b89815c..f77db6dd44 100644 --- a/.cargo-vendor/axum/src/extract/connect_info.rs +++ b/.cargo-vendor/axum/src/extract/connect_info.rs @@ -4,11 +4,11 @@ //! //! [`Router::into_make_service_with_connect_info`]: crate::routing::Router::into_make_service_with_connect_info +use crate::extension::AddExtension; + use super::{Extension, FromRequestParts}; -use crate::middleware::AddExtension; use async_trait::async_trait; use http::request::Parts; -use hyper::server::conn::AddrStream; use std::{ convert::Infallible, fmt, @@ -83,9 +83,20 @@ pub trait Connected: Clone + Send + Sync + 'static { fn connect_info(target: T) -> Self; } -impl Connected<&AddrStream> for SocketAddr { - fn connect_info(target: &AddrStream) -> Self { - target.remote_addr() +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +const _: () = { + use crate::serve::IncomingStream; + + impl Connected> for SocketAddr { + fn connect_info(target: IncomingStream<'_>) -> Self { + target.remote_addr() + } + } +}; + +impl Connected for SocketAddr { + fn connect_info(remote_addr: SocketAddr) -> Self { + remote_addr } } @@ -213,8 +224,8 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{routing::get, test_helpers::TestClient, Router, Server}; - use std::net::{SocketAddr, TcpListener}; + use crate::{routing::get, serve::IncomingStream, test_helpers::TestClient, 
Router}; + use tokio::net::TcpListener; #[crate::test] async fn socket_addr() { @@ -222,17 +233,19 @@ mod tests { format!("{addr}") } - let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let addr = listener.local_addr().unwrap(); let (tx, rx) = tokio::sync::oneshot::channel(); tokio::spawn(async move { let app = Router::new().route("/", get(handler)); - let server = Server::from_tcp(listener) - .unwrap() - .serve(app.into_make_service_with_connect_info::()); tx.send(()).unwrap(); - server.await.expect("server error"); + crate::serve( + listener, + app.into_make_service_with_connect_info::(), + ) + .await + .unwrap(); }); rx.await.unwrap(); @@ -250,8 +263,8 @@ mod tests { value: &'static str, } - impl Connected<&AddrStream> for MyConnectInfo { - fn connect_info(_target: &AddrStream) -> Self { + impl Connected> for MyConnectInfo { + fn connect_info(_target: IncomingStream<'_>) -> Self { Self { value: "it worked!", } @@ -262,17 +275,19 @@ mod tests { addr.value } - let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let addr = listener.local_addr().unwrap(); let (tx, rx) = tokio::sync::oneshot::channel(); tokio::spawn(async move { let app = Router::new().route("/", get(handler)); - let server = Server::from_tcp(listener) - .unwrap() - .serve(app.into_make_service_with_connect_info::()); tx.send(()).unwrap(); - server.await.expect("server error"); + crate::serve( + listener, + app.into_make_service_with_connect_info::(), + ) + .await + .unwrap(); }); rx.await.unwrap(); @@ -295,7 +310,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; let body = res.text().await; assert!(body.starts_with("0.0.0.0:1337")); } @@ -306,7 +321,7 @@ mod tests { format!("{addr}") } - let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let listener = 
TcpListener::bind("127.0.0.1:0").await.unwrap(); let addr = listener.local_addr().unwrap(); tokio::spawn(async move { @@ -314,10 +329,12 @@ mod tests { .route("/", get(handler)) .layer(MockConnectInfo(SocketAddr::from(([0, 0, 0, 0], 1337)))); - let server = Server::from_tcp(listener) - .unwrap() - .serve(app.into_make_service_with_connect_info::()); - server.await.expect("server error"); + crate::serve( + listener, + app.into_make_service_with_connect_info::(), + ) + .await + .unwrap(); }); let client = reqwest::Client::new(); diff --git a/.cargo-vendor/axum/src/extract/host.rs b/.cargo-vendor/axum/src/extract/host.rs index d5be6a978d..f1d179a545 100644 --- a/.cargo-vendor/axum/src/extract/host.rs +++ b/.cargo-vendor/axum/src/extract/host.rs @@ -96,7 +96,6 @@ mod tests { let host = test_client() .get("/") .header(http::header::HOST, original_host) - .send() .await .text() .await; @@ -109,7 +108,6 @@ mod tests { let host = test_client() .get("/") .header(X_FORWARDED_HOST_HEADER_KEY, original_host) - .send() .await .text() .await; @@ -124,7 +122,6 @@ mod tests { .get("/") .header(X_FORWARDED_HOST_HEADER_KEY, x_forwarded_host_header) .header(http::header::HOST, host_header) - .send() .await .text() .await; @@ -133,7 +130,7 @@ mod tests { #[crate::test] async fn uri_host() { - let host = test_client().get("/").send().await.text().await; + let host = test_client().get("/").await.text().await; assert!(host.contains("127.0.0.1")); } diff --git a/.cargo-vendor/axum/src/extract/matched_path.rs b/.cargo-vendor/axum/src/extract/matched_path.rs index c3bd7b4589..6ac0397c05 100644 --- a/.cargo-vendor/axum/src/extract/matched_path.rs +++ b/.cargo-vendor/axum/src/extract/matched_path.rs @@ -20,9 +20,7 @@ use std::{collections::HashMap, sync::Arc}; /// // `path` will be "/users/:id" /// }) /// ); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// # Accessing 
`MatchedPath` via extensions @@ -35,8 +33,7 @@ use std::{collections::HashMap, sync::Arc}; /// ``` /// use axum::{ /// Router, -/// extract::MatchedPath, -/// http::Request, +/// extract::{Request, MatchedPath}, /// routing::get, /// }; /// use tower_http::trace::TraceLayer; @@ -55,44 +52,6 @@ use std::{collections::HashMap, sync::Arc}; /// ); /// # let _: Router = app; /// ``` -/// -/// # Matched path in nested routers -/// -/// Because of how [nesting] works `MatchedPath` isn't accessible in middleware on nested routes: -/// -/// ``` -/// use axum::{ -/// Router, -/// RequestExt, -/// routing::get, -/// extract::{MatchedPath, rejection::MatchedPathRejection}, -/// middleware::map_request, -/// http::Request, -/// body::Body, -/// }; -/// -/// async fn access_matched_path(mut request: Request) -> Request { -/// // if `/foo/bar` is called this will be `Err(_)` since that matches -/// // a nested route -/// let matched_path: Result = -/// request.extract_parts::().await; -/// -/// request -/// } -/// -/// // `MatchedPath` is always accessible on handlers added via `Router::route` -/// async fn handler(matched_path: MatchedPath) {} -/// -/// let app = Router::new() -/// .nest( -/// "/foo", -/// Router::new().route("/bar", get(handler)), -/// ) -/// .layer(map_request(access_matched_path)); -/// # let _: Router = app; -/// ``` -/// -/// [nesting]: crate::Router::nest #[cfg_attr(docsrs, doc(cfg(feature = "matched-path")))] #[derive(Clone, Debug)] pub struct MatchedPath(pub(crate) Arc); @@ -172,14 +131,14 @@ fn append_nested_matched_path(matched_path: &Arc, extensions: &http::Extens mod tests { use super::*; use crate::{ - body::Body, + extract::Request, handler::HandlerWithoutStateExt, middleware::map_request, routing::{any, get}, test_helpers::*, Router, }; - use http::{Request, StatusCode}; + use http::StatusCode; #[crate::test] async fn extracting_on_handler() { @@ -190,7 +149,7 @@ mod tests { let client = TestClient::new(app); - let res = 
client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.text().await, "/:a"); } @@ -206,7 +165,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.text().await, "/:a/:b"); } @@ -225,7 +184,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.text().await, "/:a/:b/:c"); } @@ -245,7 +204,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -265,7 +224,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -282,7 +241,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -299,7 +258,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -319,7 +278,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -340,7 +299,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -354,7 +313,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -365,7 +324,7 @@ mod tests { let app = Router::new().route( 
"/*path", - any(|req: Request| { + any(|req: Request| { Router::new() .nest("/", Router::new().route("/foo", get(|| async {}))) .oneshot(req) @@ -374,13 +333,13 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); } #[crate::test] async fn cant_extract_in_fallback() { - async fn handler(path: Option, req: Request) { + async fn handler(path: Option, req: Request) { assert!(path.is_none()); assert!(req.extensions().get::().is_none()); } @@ -389,7 +348,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } } diff --git a/.cargo-vendor/axum/src/extract/mod.rs b/.cargo-vendor/axum/src/extract/mod.rs index cb4ebcd92c..c02bc6f0a6 100644 --- a/.cargo-vendor/axum/src/extract/mod.rs +++ b/.cargo-vendor/axum/src/extract/mod.rs @@ -11,13 +11,14 @@ pub mod rejection; pub mod ws; mod host; +pub(crate) mod nested_path; mod raw_form; mod raw_query; mod request_parts; mod state; #[doc(inline)] -pub use axum_core::extract::{DefaultBodyLimit, FromRef, FromRequest, FromRequestParts}; +pub use axum_core::extract::{DefaultBodyLimit, FromRef, FromRequest, FromRequestParts, Request}; #[cfg(feature = "macros")] pub use axum_macros::{FromRef, FromRequest, FromRequestParts}; @@ -26,10 +27,10 @@ pub use axum_macros::{FromRef, FromRequest, FromRequestParts}; #[allow(deprecated)] pub use self::{ host::Host, + nested_path::NestedPath, path::{Path, RawPathParams}, raw_form::RawForm, raw_query::RawQuery, - request_parts::{BodyStream, RawBody}, state::State, }; @@ -77,10 +78,6 @@ pub use self::request_parts::OriginalUri; #[doc(inline)] pub use self::ws::WebSocketUpgrade; -#[cfg(feature = "headers")] -#[doc(no_inline)] -pub use crate::TypedHeader; - // this is duplicated in `axum-extra/src/extract/form.rs` pub(super) fn has_content_type(headers: 
&HeaderMap, expected_content_type: &mime::Mime) -> bool { let content_type = if let Some(content_type) = headers.get(header::CONTENT_TYPE) { @@ -107,7 +104,7 @@ mod tests { let app = Router::new().route("/", get(|body: String| async { body })); let client = TestClient::new(app); - let res = client.get("/").body("foo").send().await; + let res = client.get("/").body("foo").await; let body = res.text().await; assert_eq!(body, "foo"); diff --git a/.cargo-vendor/axum/src/extract/multipart.rs b/.cargo-vendor/axum/src/extract/multipart.rs index 3827734f3e..7a303a4759 100644 --- a/.cargo-vendor/axum/src/extract/multipart.rs +++ b/.cargo-vendor/axum/src/extract/multipart.rs @@ -2,19 +2,21 @@ //! //! See [`Multipart`] for more details. -use super::{BodyStream, FromRequest}; -use crate::body::{Bytes, HttpBody}; -use crate::BoxError; +use super::{FromRequest, Request}; +use crate::body::Bytes; use async_trait::async_trait; -use axum_core::__composite_rejection as composite_rejection; -use axum_core::__define_rejection as define_rejection; -use axum_core::response::{IntoResponse, Response}; -use axum_core::RequestExt; +use axum_core::{ + __composite_rejection as composite_rejection, __define_rejection as define_rejection, + response::{IntoResponse, Response}, + RequestExt, +}; use futures_util::stream::Stream; -use http::header::{HeaderMap, CONTENT_TYPE}; -use http::{Request, StatusCode}; -use std::error::Error; +use http::{ + header::{HeaderMap, CONTENT_TYPE}, + StatusCode, +}; use std::{ + error::Error, fmt, pin::Pin, task::{Context, Poll}, @@ -48,10 +50,15 @@ use std::{ /// } /// /// let app = Router::new().route("/upload", post(upload)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` +/// +/// # Large Files +/// +/// For security reasons, by default, `Multipart` limits the request body size to 2MB. 
+/// See [`DefaultBodyLimit`][default-body-limit] for how to configure this limit. +/// +/// [default-body-limit]: crate::extract::DefaultBodyLimit #[cfg_attr(docsrs, doc(cfg(feature = "multipart")))] #[derive(Debug)] pub struct Multipart { @@ -59,23 +66,16 @@ pub struct Multipart { } #[async_trait] -impl FromRequest for Multipart +impl FromRequest for Multipart where - B: HttpBody + Send + 'static, - B::Data: Into, - B::Error: Into, S: Send + Sync, { type Rejection = MultipartRejection; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, _state: &S) -> Result { let boundary = parse_boundary(req.headers()).ok_or(InvalidBoundary)?; - let stream_result = match req.with_limited_body() { - Ok(limited) => BodyStream::from_request(limited, state).await, - Err(unlimited) => BodyStream::from_request(unlimited, state).await, - }; - let stream = stream_result.unwrap_or_else(|err| match err {}); - let multipart = multer::Multipart::new(stream, boundary); + let stream = req.with_limited_body().into_body(); + let multipart = multer::Multipart::new(stream.into_data_stream(), boundary); Ok(Self { inner: multipart }) } } @@ -248,7 +248,7 @@ fn status_code_from_multer_error(err: &multer::Error) -> StatusCode { if err .downcast_ref::() .and_then(|err| err.source()) - .and_then(|err| err.downcast_ref::()) + .and_then(|err| err.downcast_ref::()) .is_some() { return StatusCode::PAYLOAD_TOO_LARGE; @@ -274,12 +274,13 @@ impl std::error::Error for MultipartError { impl IntoResponse for MultipartError { fn into_response(self) -> Response { + let body = self.body_text(); axum_core::__log_rejection!( rejection_type = Self, - body_text = self.body_text(), + body_text = body, status = self.status(), ); - (self.status(), self.body_text()).into_response() + (self.status(), body).into_response() } } @@ -310,7 +311,7 @@ mod tests { use axum_core::extract::DefaultBodyLimit; use super::*; - use crate::{body::Body, response::IntoResponse, routing::post, 
test_helpers::*, Router}; + use crate::{routing::post, test_helpers::*, Router}; #[crate::test] async fn content_type_with_encoding() { @@ -323,6 +324,7 @@ mod tests { assert_eq!(field.file_name().unwrap(), FILE_NAME); assert_eq!(field.content_type().unwrap(), CONTENT_TYPE); + assert_eq!(field.headers()["foo"], "bar"); assert_eq!(field.bytes().await.unwrap(), BYTES); assert!(multipart.next_field().await.unwrap().is_none()); @@ -337,16 +339,22 @@ mod tests { reqwest::multipart::Part::bytes(BYTES) .file_name(FILE_NAME) .mime_str(CONTENT_TYPE) - .unwrap(), + .unwrap() + .headers(reqwest::header::HeaderMap::from_iter([( + reqwest::header::HeaderName::from_static("foo"), + reqwest::header::HeaderValue::from_static("bar"), + )])), ); - client.post("/").multipart(form).send().await; + client.post("/").multipart(form).await; } // No need for this to be a #[test], we just want to make sure it compiles fn _multipart_from_request_limited() { async fn handler(_: Multipart) {} - let _app: Router<(), http_body::Limited> = Router::new().route("/", post(handler)); + let _app: Router = Router::new() + .route("/", post(handler)) + .layer(tower_http::limit::RequestBodyLimitLayer::new(1024)); } #[crate::test] @@ -369,7 +377,7 @@ mod tests { let form = reqwest::multipart::Form::new().part("file", reqwest::multipart::Part::bytes(BYTES)); - let res = client.post("/").multipart(form).send().await; + let res = client.post("/").multipart(form).await; assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); } } diff --git a/.cargo-vendor/axum/src/extract/nested_path.rs b/.cargo-vendor/axum/src/extract/nested_path.rs new file mode 100644 index 0000000000..72712a4e9a --- /dev/null +++ b/.cargo-vendor/axum/src/extract/nested_path.rs @@ -0,0 +1,265 @@ +use std::{ + sync::Arc, + task::{Context, Poll}, +}; + +use crate::extract::Request; +use async_trait::async_trait; +use axum_core::extract::FromRequestParts; +use http::request::Parts; +use tower_layer::{layer_fn, Layer}; +use 
tower_service::Service; + +use super::rejection::NestedPathRejection; + +/// Access the path the matched the route is nested at. +/// +/// This can for example be used when doing redirects. +/// +/// # Example +/// +/// ``` +/// use axum::{ +/// Router, +/// extract::NestedPath, +/// routing::get, +/// }; +/// +/// let api = Router::new().route( +/// "/users", +/// get(|path: NestedPath| async move { +/// // `path` will be "/api" because thats what this +/// // router is nested at when we build `app` +/// let path = path.as_str(); +/// }) +/// ); +/// +/// let app = Router::new().nest("/api", api); +/// # let _: Router = app; +/// ``` +#[derive(Debug, Clone)] +pub struct NestedPath(Arc); + +impl NestedPath { + /// Returns a `str` representation of the path. + pub fn as_str(&self) -> &str { + &self.0 + } +} + +#[async_trait] +impl FromRequestParts for NestedPath +where + S: Send + Sync, +{ + type Rejection = NestedPathRejection; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + match parts.extensions.get::() { + Some(nested_path) => Ok(nested_path.clone()), + None => Err(NestedPathRejection), + } + } +} + +#[derive(Clone)] +pub(crate) struct SetNestedPath { + inner: S, + path: Arc, +} + +impl SetNestedPath { + pub(crate) fn layer(path: &str) -> impl Layer + Clone { + let path = Arc::from(path); + layer_fn(move |inner| Self { + inner, + path: Arc::clone(&path), + }) + } +} + +impl Service> for SetNestedPath +where + S: Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + if let Some(prev) = req.extensions_mut().get_mut::() { + let new_path = if prev.as_str() == "/" { + Arc::clone(&self.path) + } else { + format!("{}{}", prev.as_str().trim_end_matches('/'), self.path).into() + }; + prev.0 = new_path; + } else { + req.extensions_mut() + 
.insert(NestedPath(Arc::clone(&self.path))); + }; + + self.inner.call(req) + } +} + +#[cfg(test)] +mod tests { + use axum_core::response::Response; + use http::StatusCode; + + use crate::{ + extract::{NestedPath, Request}, + middleware::{from_fn, Next}, + routing::get, + test_helpers::*, + Router, + }; + + #[crate::test] + async fn one_level_of_nesting() { + let api = Router::new().route( + "/users", + get(|nested_path: NestedPath| { + assert_eq!(nested_path.as_str(), "/api"); + async {} + }), + ); + + let app = Router::new().nest("/api", api); + + let client = TestClient::new(app); + + let res = client.get("/api/users").await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn one_level_of_nesting_with_trailing_slash() { + let api = Router::new().route( + "/users", + get(|nested_path: NestedPath| { + assert_eq!(nested_path.as_str(), "/api/"); + async {} + }), + ); + + let app = Router::new().nest("/api/", api); + + let client = TestClient::new(app); + + let res = client.get("/api/users").await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn two_levels_of_nesting() { + let api = Router::new().route( + "/users", + get(|nested_path: NestedPath| { + assert_eq!(nested_path.as_str(), "/api/v2"); + async {} + }), + ); + + let app = Router::new().nest("/api", Router::new().nest("/v2", api)); + + let client = TestClient::new(app); + + let res = client.get("/api/v2/users").await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn two_levels_of_nesting_with_trailing_slash() { + let api = Router::new().route( + "/users", + get(|nested_path: NestedPath| { + assert_eq!(nested_path.as_str(), "/api/v2"); + async {} + }), + ); + + let app = Router::new().nest("/api/", Router::new().nest("/v2", api)); + + let client = TestClient::new(app); + + let res = client.get("/api/v2/users").await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn nested_at_root() { + let api = 
Router::new().route( + "/users", + get(|nested_path: NestedPath| { + assert_eq!(nested_path.as_str(), "/"); + async {} + }), + ); + + let app = Router::new().nest("/", api); + + let client = TestClient::new(app); + + let res = client.get("/users").await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn deeply_nested_from_root() { + let api = Router::new().route( + "/users", + get(|nested_path: NestedPath| { + assert_eq!(nested_path.as_str(), "/api"); + async {} + }), + ); + + let app = Router::new().nest("/", Router::new().nest("/api", api)); + + let client = TestClient::new(app); + + let res = client.get("/api/users").await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn in_fallbacks() { + let api = Router::new().fallback(get(|nested_path: NestedPath| { + assert_eq!(nested_path.as_str(), "/api"); + async {} + })); + + let app = Router::new().nest("/api", api); + + let client = TestClient::new(app); + + let res = client.get("/api/doesnt-exist").await; + assert_eq!(res.status(), StatusCode::OK); + } + + #[crate::test] + async fn in_middleware() { + async fn middleware(nested_path: NestedPath, req: Request, next: Next) -> Response { + assert_eq!(nested_path.as_str(), "/api"); + next.run(req).await + } + + let api = Router::new() + .route("/users", get(|| async {})) + .layer(from_fn(middleware)); + + let app = Router::new().nest("/api", api); + + let client = TestClient::new(app); + + let res = client.get("/api/users").await; + assert_eq!(res.status(), StatusCode::OK); + } +} diff --git a/.cargo-vendor/axum/src/extract/path/mod.rs b/.cargo-vendor/axum/src/extract/path/mod.rs index 189e476e5c..330e270ebc 100644 --- a/.cargo-vendor/axum/src/extract/path/mod.rs +++ b/.cargo-vendor/axum/src/extract/path/mod.rs @@ -25,6 +25,9 @@ use std::{fmt, sync::Arc}; /// /// These examples assume the `serde` feature of the [`uuid`] crate is enabled. /// +/// One `Path` can extract multiple captures. 
It is not necessary (and does +/// not work) to give a handler more than one `Path` argument. +/// /// [`uuid`]: https://crates.io/crates/uuid /// /// ```rust,no_run @@ -42,9 +45,7 @@ use std::{fmt, sync::Arc}; /// } /// /// let app = Router::new().route("/users/:user_id/team/:team_id", get(users_teams_show)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// If the path contains only one parameter, then you can omit the tuple. @@ -62,9 +63,7 @@ use std::{fmt, sync::Arc}; /// } /// /// let app = Router::new().route("/users/:user_id", get(user_info)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// Path segments also can be deserialized into any type that implements @@ -103,9 +102,7 @@ use std::{fmt, sync::Arc}; /// "/users/:user_id/team/:team_id", /// get(users_teams_show).post(users_teams_create), /// ); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// If you wish to capture all path parameters you can use `HashMap` or `Vec`: @@ -132,9 +129,7 @@ use std::{fmt, sync::Arc}; /// /// let app = Router::new() /// .route("/users/:user_id/team/:team_id", get(params_map).post(params_vec)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// # Providing detailed rejection output @@ -356,7 +351,7 @@ impl fmt::Display for ErrorKind { } } -/// Rejection type for [`Path`](super::Path) if the captured routes params couldn't be deserialized +/// Rejection type for [`Path`] if the captured routes params couldn't be deserialized /// into the expected type. 
#[derive(Debug)] pub struct FailedToDeserializePathParams(PathDeserializationError); @@ -403,12 +398,13 @@ impl FailedToDeserializePathParams { impl IntoResponse for FailedToDeserializePathParams { fn into_response(self) -> Response { + let body = self.body_text(); axum_core::__log_rejection!( rejection_type = Self, - body_text = self.body_text(), + body_text = body, status = self.status(), ); - (self.status(), self.body_text()).into_response() + (self.status(), body).into_response() } } @@ -535,7 +531,13 @@ impl std::error::Error for InvalidUtf8InPathParam {} impl IntoResponse for InvalidUtf8InPathParam { fn into_response(self) -> Response { - (self.status(), self.body_text()).into_response() + let body = self.body_text(); + axum_core::__log_rejection!( + rejection_type = Self, + body_text = body, + status = self.status(), + ); + (self.status(), body).into_response() } } @@ -543,7 +545,6 @@ impl IntoResponse for InvalidUtf8InPathParam { mod tests { use super::*; use crate::{routing::get, test_helpers::*, Router}; - use http::StatusCode; use serde::Deserialize; use std::collections::HashMap; @@ -561,10 +562,10 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/users/42").send().await; + let res = client.get("/users/42").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.post("/users/1337").send().await; + let res = client.post("/users/1337").await; assert_eq!(res.status(), StatusCode::OK); } @@ -574,7 +575,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/users/42").send().await; + let res = client.get("/users/42").await; assert_eq!(res.status(), StatusCode::OK); } @@ -587,7 +588,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/one%20two").send().await; + let res = client.get("/one%20two").await; assert_eq!(res.text().await, "one two"); } @@ -606,10 +607,10 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/i/123").send().await; + let res = 
client.get("/i/123").await; assert_eq!(res.text().await, "123"); - let res = client.get("/u/123").send().await; + let res = client.get("/u/123").await; assert_eq!(res.text().await, "123"); } @@ -629,26 +630,83 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.text().await, "bar/baz"); - let res = client.get("/bar/baz/qux").send().await; + let res = client.get("/bar/baz/qux").await; assert_eq!(res.text().await, "baz/qux"); } #[crate::test] - async fn captures_dont_match_empty_segments() { + async fn captures_dont_match_empty_path() { let app = Router::new().route("/:key", get(|| async {})); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); } + #[crate::test] + async fn captures_match_empty_inner_segments() { + let app = Router::new().route( + "/:key/method", + get(|Path(param): Path| async move { param.to_string() }), + ); + + let client = TestClient::new(app); + + let res = client.get("/abc/method").await; + assert_eq!(res.text().await, "abc"); + + let res = client.get("//method").await; + assert_eq!(res.text().await, ""); + } + + #[crate::test] + async fn captures_match_empty_inner_segments_near_end() { + let app = Router::new().route( + "/method/:key/", + get(|Path(param): Path| async move { param.to_string() }), + ); + + let client = TestClient::new(app); + + let res = client.get("/method/abc").await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/method/abc/").await; + assert_eq!(res.text().await, "abc"); + + let res = client.get("/method//").await; + assert_eq!(res.text().await, ""); + } + + #[crate::test] + async fn captures_match_empty_trailing_segment() { + let app = 
Router::new().route( + "/method/:key", + get(|Path(param): Path| async move { param.to_string() }), + ); + + let client = TestClient::new(app); + + let res = client.get("/method/abc/").await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + let res = client.get("/method/abc").await; + assert_eq!(res.text().await, "abc"); + + let res = client.get("/method/").await; + assert_eq!(res.text().await, ""); + + let res = client.get("/method").await; + assert_eq!(res.status(), StatusCode::NOT_FOUND); + } + #[crate::test] async fn str_reference_deserialize() { struct Param(String); @@ -666,11 +724,11 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.text().await, "foo"); // percent decoding should also work - let res = client.get("/foo%20bar").send().await; + let res = client.get("/foo%20bar").await; assert_eq!(res.text().await, "foo bar"); } @@ -680,7 +738,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/a/b").send().await; + let res = client.get("/a/b").await; assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); assert_eq!( res.text().await, @@ -706,7 +764,7 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -764,40 +822,27 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/single/2023-01-01").send().await; + let res = client.get("/single/2023-01-01").await; assert_eq!(res.text().await, "single: 2023-01-01"); - let res = client - .get("/tuple/2023-01-01/2023-01-02/2023-01-03") - .send() - .await; + let res = client.get("/tuple/2023-01-01/2023-01-02/2023-01-03").await; assert_eq!(res.text().await, "tuple: 2023-01-01 2023-01-02 2023-01-03"); - let res = client - .get("/vec/2023-01-01/2023-01-02/2023-01-03") - .send() - .await; + let res = 
client.get("/vec/2023-01-01/2023-01-02/2023-01-03").await; assert_eq!(res.text().await, "vec: 2023-01-01 2023-01-02 2023-01-03"); let res = client .get("/vec_pairs/2023-01-01/2023-01-02/2023-01-03") - .send() .await; assert_eq!( res.text().await, "vec_pairs: 2023-01-01 2023-01-02 2023-01-03", ); - let res = client - .get("/map/2023-01-01/2023-01-02/2023-01-03") - .send() - .await; + let res = client.get("/map/2023-01-01/2023-01-02/2023-01-03").await; assert_eq!(res.text().await, "map: 2023-01-01 2023-01-02 2023-01-03"); - let res = client - .get("/struct/2023-01-01/2023-01-02/2023-01-03") - .send() - .await; + let res = client.get("/struct/2023-01-01/2023-01-02/2023-01-03").await; assert_eq!(res.text().await, "struct: 2023-01-01 2023-01-02 2023-01-03"); } @@ -811,13 +856,13 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/one/1").send().await; + let res = client.get("/one/1").await; assert!(res .text() .await .starts_with("Wrong number of path arguments for `Path`. 
Expected 2 but got 1")); - let res = client.get("/two/1/2").send().await; + let res = client.get("/two/1/2").await; assert!(res .text() .await @@ -838,7 +883,7 @@ mod tests { ); let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; let body = res.text().await; assert_eq!(body, "a=foo b=bar c=baz"); } diff --git a/.cargo-vendor/axum/src/extract/query.rs b/.cargo-vendor/axum/src/extract/query.rs index 6f8cb89dc1..a331b68ca5 100644 --- a/.cargo-vendor/axum/src/extract/query.rs +++ b/.cargo-vendor/axum/src/extract/query.rs @@ -32,9 +32,7 @@ use serde::de::DeserializeOwned; /// } /// /// let app = Router::new().route("/list_things", get(list_things)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// If the query string cannot be parsed it will reject the request with a `400 @@ -44,6 +42,11 @@ use serde::de::DeserializeOwned; /// example. /// /// [example]: https://github.com/tokio-rs/axum/blob/main/examples/query-params-with-empty-strings/src/main.rs +/// +/// For handling multiple values for the same query parameter, in a `?foo=1&foo=2&foo=3` +/// fashion, use [`axum_extra::extract::Query`] instead. 
+/// +/// [`axum_extra::extract::Query`]: https://docs.rs/axum-extra/latest/axum_extra/extract/struct.Query.html #[cfg_attr(docsrs, doc(cfg(feature = "query")))] #[derive(Debug, Clone, Copy, Default)] pub struct Query(pub T); @@ -99,7 +102,7 @@ mod tests { use crate::{routing::get, test_helpers::TestClient, Router}; use super::*; - use axum_core::extract::FromRequest; + use axum_core::{body::Body, extract::FromRequest}; use http::{Request, StatusCode}; use serde::Deserialize; use std::fmt::Debug; @@ -108,7 +111,10 @@ mod tests { where T: DeserializeOwned + PartialEq + Debug, { - let req = Request::builder().uri(uri.as_ref()).body(()).unwrap(); + let req = Request::builder() + .uri(uri.as_ref()) + .body(Body::empty()) + .unwrap(); assert_eq!(Query::::from_request(req, &()).await.unwrap().0, value); } @@ -161,7 +167,7 @@ mod tests { let app = Router::new().route("/", get(handler)); let client = TestClient::new(app); - let res = client.get("/?n=hi").send().await; + let res = client.get("/?n=hi").await; assert_eq!(res.status(), StatusCode::BAD_REQUEST); } diff --git a/.cargo-vendor/axum/src/extract/raw_form.rs b/.cargo-vendor/axum/src/extract/raw_form.rs index 830d8b62ae..a4e0d6c57c 100644 --- a/.cargo-vendor/axum/src/extract/raw_form.rs +++ b/.cargo-vendor/axum/src/extract/raw_form.rs @@ -1,15 +1,13 @@ use async_trait::async_trait; -use axum_core::extract::FromRequest; -use bytes::{Bytes, BytesMut}; -use http::{Method, Request}; +use axum_core::extract::{FromRequest, Request}; +use bytes::Bytes; +use http::Method; use super::{ has_content_type, rejection::{InvalidFormContentType, RawFormRejection}, }; -use crate::{body::HttpBody, BoxError}; - /// Extractor that extracts raw form requests. /// /// For `GET` requests it will extract the raw query. 
For other methods it extracts the raw @@ -27,32 +25,25 @@ use crate::{body::HttpBody, BoxError}; /// async fn handler(RawForm(form): RawForm) {} /// /// let app = Router::new().route("/", get(handler)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` #[derive(Debug)] pub struct RawForm(pub Bytes); #[async_trait] -impl FromRequest for RawForm +impl FromRequest for RawForm where - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into, S: Send + Sync, { type Rejection = RawFormRejection; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { if req.method() == Method::GET { - let mut bytes = BytesMut::new(); - if let Some(query) = req.uri().query() { - bytes.extend(query.as_bytes()); + return Ok(Self(Bytes::copy_from_slice(query.as_bytes()))); } - Ok(Self(bytes.freeze())) + Ok(Self(Bytes::new())) } else { if !has_content_type(req.headers(), &mime::APPLICATION_WWW_FORM_URLENCODED) { return Err(InvalidFormContentType.into()); @@ -65,20 +56,15 @@ where #[cfg(test)] mod tests { + use axum_core::body::Body; use http::{header::CONTENT_TYPE, Request}; use super::{InvalidFormContentType, RawForm, RawFormRejection}; - use crate::{ - body::{Bytes, Empty, Full}, - extract::FromRequest, - }; + use crate::extract::FromRequest; async fn check_query(uri: &str, value: &[u8]) { - let req = Request::builder() - .uri(uri) - .body(Empty::::new()) - .unwrap(); + let req = Request::builder().uri(uri).body(Body::empty()).unwrap(); assert_eq!(RawForm::from_request(req, &()).await.unwrap().0, value); } @@ -86,7 +72,7 @@ mod tests { async fn check_body(body: &'static [u8]) { let req = Request::post("http://example.com/test") .header(CONTENT_TYPE, mime::APPLICATION_WWW_FORM_URLENCODED.as_ref()) - .body(Full::new(Bytes::from(body))) + .body(Body::from(body)) .unwrap(); assert_eq!(RawForm::from_request(req, 
&()).await.unwrap().0, body); @@ -109,7 +95,7 @@ mod tests { #[crate::test] async fn test_incorrect_content_type() { let req = Request::post("http://example.com/test") - .body(Full::::from(Bytes::from("page=0&size=10"))) + .body(Body::from("page=0&size=10")) .unwrap(); assert!(matches!( diff --git a/.cargo-vendor/axum/src/extract/raw_query.rs b/.cargo-vendor/axum/src/extract/raw_query.rs index 98a60b0930..d8c56f84a4 100644 --- a/.cargo-vendor/axum/src/extract/raw_query.rs +++ b/.cargo-vendor/axum/src/extract/raw_query.rs @@ -20,9 +20,7 @@ use std::convert::Infallible; /// } /// /// let app = Router::new().route("/users", get(handler)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` #[derive(Debug)] pub struct RawQuery(pub Option); diff --git a/.cargo-vendor/axum/src/extract/rejection.rs b/.cargo-vendor/axum/src/extract/rejection.rs index 07c322e94d..cba76af054 100644 --- a/.cargo-vendor/axum/src/extract/rejection.rs +++ b/.cargo-vendor/axum/src/extract/rejection.rs @@ -208,5 +208,11 @@ composite_rejection! { } } -#[cfg(feature = "headers")] -pub use crate::typed_header::{TypedHeaderRejection, TypedHeaderRejectionReason}; +define_rejection! { + #[status = INTERNAL_SERVER_ERROR] + #[body = "The matched route is not nested"] + /// Rejection type for [`NestedPath`](super::NestedPath). + /// + /// This rejection is used if the matched route wasn't nested. 
+ pub struct NestedPathRejection; +} diff --git a/.cargo-vendor/axum/src/extract/request_parts.rs b/.cargo-vendor/axum/src/extract/request_parts.rs index 9af618fa20..55bc340b20 100644 --- a/.cargo-vendor/axum/src/extract/request_parts.rs +++ b/.cargo-vendor/axum/src/extract/request_parts.rs @@ -1,18 +1,7 @@ -use super::{Extension, FromRequest, FromRequestParts}; -use crate::{ - body::{Body, Bytes, HttpBody}, - BoxError, Error, -}; +use super::{Extension, FromRequestParts}; use async_trait::async_trait; -use futures_util::stream::Stream; -use http::{request::Parts, Request, Uri}; -use std::{ - convert::Infallible, - fmt, - pin::Pin, - task::{Context, Poll}, -}; -use sync_wrapper::SyncWrapper; +use http::{request::Parts, Uri}; +use std::convert::Infallible; /// Extractor that gets the original request URI regardless of nesting. /// @@ -39,9 +28,7 @@ use sync_wrapper::SyncWrapper; /// ); /// /// let app = Router::new().nest("/api", api_routes); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// # Extracting via request extensions @@ -76,9 +63,7 @@ use sync_wrapper::SyncWrapper; /// ); /// /// let app = Router::new().nest("/api", api_routes); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` #[cfg(feature = "original-uri")] #[derive(Debug, Clone)] @@ -104,128 +89,6 @@ where #[cfg(feature = "original-uri")] axum_core::__impl_deref!(OriginalUri: Uri); -/// Extractor that extracts the request body as a [`Stream`]. -/// -/// Since extracting the request body requires consuming it, the `BodyStream` extractor must be -/// *last* if there are multiple extractors in a handler. 
-/// See ["the order of extractors"][order-of-extractors] -/// -/// [order-of-extractors]: crate::extract#the-order-of-extractors -/// -/// # Example -/// -/// ```rust,no_run -/// use axum::{ -/// extract::BodyStream, -/// routing::get, -/// Router, -/// }; -/// use futures_util::StreamExt; -/// -/// async fn handler(mut stream: BodyStream) { -/// while let Some(chunk) = stream.next().await { -/// // ... -/// } -/// } -/// -/// let app = Router::new().route("/users", get(handler)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; -/// ``` -/// -/// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html -/// [`body::Body`]: crate::body::Body -pub struct BodyStream( - SyncWrapper + Send + 'static>>>, -); - -impl Stream for BodyStream { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(self.0.get_mut()).poll_data(cx) - } -} - -#[async_trait] -impl FromRequest for BodyStream -where - B: HttpBody + Send + 'static, - B::Data: Into, - B::Error: Into, - S: Send + Sync, -{ - type Rejection = Infallible; - - async fn from_request(req: Request, _state: &S) -> Result { - let body = req - .into_body() - .map_data(Into::into) - .map_err(|err| Error::new(err.into())); - let stream = BodyStream(SyncWrapper::new(Box::pin(body))); - Ok(stream) - } -} - -impl fmt::Debug for BodyStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("BodyStream").finish() - } -} - -#[test] -fn body_stream_traits() { - crate::test_helpers::assert_send::(); - crate::test_helpers::assert_sync::(); -} - -/// Extractor that extracts the raw request body. -/// -/// Since extracting the raw request body requires consuming it, the `RawBody` extractor must be -/// *last* if there are multiple extractors in a handler. 
See ["the order of extractors"][order-of-extractors] -/// -/// [order-of-extractors]: crate::extract#the-order-of-extractors -/// -/// # Example -/// -/// ```rust,no_run -/// use axum::{ -/// extract::RawBody, -/// routing::get, -/// Router, -/// }; -/// use futures_util::StreamExt; -/// -/// async fn handler(RawBody(body): RawBody) { -/// // ... -/// } -/// -/// let app = Router::new().route("/users", get(handler)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; -/// ``` -/// -/// [`body::Body`]: crate::body::Body -#[derive(Debug, Default, Clone)] -pub struct RawBody(pub B); - -#[async_trait] -impl FromRequest for RawBody -where - B: Send, - S: Send + Sync, -{ - type Rejection = Infallible; - - async fn from_request(req: Request, _state: &S) -> Result { - Ok(Self(req.into_body())) - } -} - -axum_core::__impl_deref!(RawBody); - #[cfg(test)] mod tests { use crate::{extract::Extension, routing::get, test_helpers::*, Router}; @@ -246,7 +109,7 @@ mod tests { let client = TestClient::new(Router::new().route("/", get(handler)).layer(Extension(Ext))); - let res = client.get("/").header("x-foo", "123").send().await; + let res = client.get("/").header("x-foo", "123").await; assert_eq!(res.status(), StatusCode::OK); } } diff --git a/.cargo-vendor/axum/src/extract/state.rs b/.cargo-vendor/axum/src/extract/state.rs index e2307d391c..fb401c00d8 100644 --- a/.cargo-vendor/axum/src/extract/state.rs +++ b/.cargo-vendor/axum/src/extract/state.rs @@ -131,13 +131,11 @@ use std::{ /// let method_router_with_state = get(handler) /// // provide the state so the handler can access it /// .with_state(state); +/// # let _: axum::routing::MethodRouter = method_router_with_state; /// /// async fn handler(State(state): State) { /// // use `state`... 
/// } -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(method_router_with_state.into_make_service()).await.unwrap(); -/// # }; /// ``` /// /// # With `Handler` @@ -158,10 +156,8 @@ use std::{ /// let handler_with_state = handler.with_state(state); /// /// # async { -/// axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) -/// .serve(handler_with_state.into_make_service()) -/// .await -/// .expect("server failed"); +/// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +/// axum::serve(listener, handler_with_state.into_make_service()).await.unwrap(); /// # }; /// ``` /// @@ -325,8 +321,10 @@ use std::{ /// } /// /// async fn handler(State(state): State) { -/// let mut data = state.data.lock().expect("mutex was poisoned"); -/// *data = "updated foo".to_owned(); +/// { +/// let mut data = state.data.lock().expect("mutex was poisoned"); +/// *data = "updated foo".to_owned(); +/// } /// /// // ... /// } diff --git a/.cargo-vendor/axum/src/extract/ws.rs b/.cargo-vendor/axum/src/extract/ws.rs index 26f28609ad..cfd10c5642 100644 --- a/.cargo-vendor/axum/src/extract/ws.rs +++ b/.cargo-vendor/axum/src/extract/ws.rs @@ -31,9 +31,7 @@ //! } //! } //! } -//! # async { -//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` //! //! # Passing data and/or state to an `on_upgrade` callback @@ -62,9 +60,7 @@ //! let app = Router::new() //! .route("/ws", get(handler)) //! .with_state(AppState { /* ... */ }); -//! # async { -//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` //! //! 
# Read and write concurrently @@ -96,12 +92,9 @@ use self::rejection::*; use super::FromRequestParts; -use crate::{ - body::{self, Bytes}, - response::Response, - Error, -}; +use crate::{body::Bytes, response::Response, Error}; use async_trait::async_trait; +use axum_core::body::Body; use futures_util::{ sink::{Sink, SinkExt}, stream::{Stream, StreamExt}, @@ -111,7 +104,7 @@ use http::{ request::Parts, Method, StatusCode, }; -use hyper::upgrade::{OnUpgrade, Upgraded}; +use hyper_util::rt::TokioIo; use sha1::{Digest, Sha1}; use std::{ borrow::Cow, @@ -135,12 +128,12 @@ use tokio_tungstenite::{ /// /// See the [module docs](self) for an example. #[cfg_attr(docsrs, doc(cfg(feature = "ws")))] -pub struct WebSocketUpgrade { +pub struct WebSocketUpgrade { config: WebSocketConfig, /// The chosen protocol sent in the `Sec-WebSocket-Protocol` header of the response. protocol: Option, sec_websocket_key: HeaderValue, - on_upgrade: OnUpgrade, + on_upgrade: hyper::upgrade::OnUpgrade, on_failed_upgrade: F, sec_websocket_protocol: Option, } @@ -157,12 +150,6 @@ impl std::fmt::Debug for WebSocketUpgrade { } impl WebSocketUpgrade { - /// Does nothing, instead use `max_write_buffer_size`. - #[deprecated] - pub fn max_send_queue(self, _: usize) -> Self { - self - } - /// The target minimum size of the write buffer to reach before writing the data /// to the underlying stream. /// @@ -239,9 +226,7 @@ impl WebSocketUpgrade { /// // ... 
/// }) /// } - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` pub fn protocols(mut self, protocols: I) -> Self where @@ -298,7 +283,7 @@ impl WebSocketUpgrade { /// ``` pub fn on_failed_upgrade(self, callback: C) -> WebSocketUpgrade where - C: OnFailedUpdgrade, + C: OnFailedUpgrade, { WebSocketUpgrade { config: self.config, @@ -312,12 +297,12 @@ impl WebSocketUpgrade { /// Finalize upgrading the connection and call the provided callback with /// the stream. - #[must_use = "to setup the WebSocket connection, this response must be returned"] + #[must_use = "to set up the WebSocket connection, this response must be returned"] pub fn on_upgrade(self, callback: C) -> Response where C: FnOnce(WebSocket) -> Fut + Send + 'static, Fut: Future + Send + 'static, - F: OnFailedUpdgrade, + F: OnFailedUpgrade, { let on_upgrade = self.on_upgrade; let config = self.config; @@ -333,6 +318,7 @@ impl WebSocketUpgrade { return; } }; + let upgraded = TokioIo::new(upgraded); let socket = WebSocketStream::from_raw_socket(upgraded, protocol::Role::Server, Some(config)) @@ -362,19 +348,19 @@ impl WebSocketUpgrade { builder = builder.header(header::SEC_WEBSOCKET_PROTOCOL, protocol); } - builder.body(body::boxed(body::Empty::new())).unwrap() + builder.body(Body::empty()).unwrap() } } /// What to do when a connection upgrade fails. /// /// See [`WebSocketUpgrade::on_failed_upgrade`] for more details. -pub trait OnFailedUpdgrade: Send + 'static { +pub trait OnFailedUpgrade: Send + 'static { /// Call the callback. fn call(self, error: Error); } -impl OnFailedUpdgrade for F +impl OnFailedUpgrade for F where F: FnOnce(Error) + Send + 'static, { @@ -383,20 +369,20 @@ where } } -/// The default `OnFailedUpdgrade` used by `WebSocketUpgrade`. +/// The default `OnFailedUpgrade` used by `WebSocketUpgrade`. /// /// It simply ignores the error. 
#[non_exhaustive] #[derive(Debug)] -pub struct DefaultOnFailedUpdgrade; +pub struct DefaultOnFailedUpgrade; -impl OnFailedUpdgrade for DefaultOnFailedUpdgrade { +impl OnFailedUpgrade for DefaultOnFailedUpgrade { #[inline] fn call(self, _error: Error) {} } #[async_trait] -impl FromRequestParts for WebSocketUpgrade +impl FromRequestParts for WebSocketUpgrade where S: Send + Sync, { @@ -427,7 +413,7 @@ where let on_upgrade = parts .extensions - .remove::() + .remove::() .ok_or(ConnectionNotUpgradable)?; let sec_websocket_protocol = parts.headers.get(header::SEC_WEBSOCKET_PROTOCOL).cloned(); @@ -438,7 +424,7 @@ where sec_websocket_key, on_upgrade, sec_websocket_protocol, - on_failed_upgrade: DefaultOnFailedUpdgrade, + on_failed_upgrade: DefaultOnFailedUpgrade, }) } } @@ -470,7 +456,7 @@ fn header_contains(headers: &HeaderMap, key: HeaderName, value: &'static str) -> /// See [the module level documentation](self) for more details. #[derive(Debug)] pub struct WebSocket { - inner: WebSocketStream, + inner: WebSocketStream>, protocol: Option, } @@ -844,9 +830,12 @@ pub mod close_code { #[cfg(test)] mod tests { + use std::future::ready; + use super::*; - use crate::{body::Body, routing::get, Router}; + use crate::{routing::get, test_helpers::spawn_service, Router}; use http::{Request, Version}; + use tokio_tungstenite::tungstenite; use tower::ServiceExt; #[crate::test] @@ -891,4 +880,47 @@ mod tests { } let _: Router = Router::new().route("/", get(handler)); } + + #[crate::test] + async fn integration_test() { + let app = Router::new().route( + "/echo", + get(|ws: WebSocketUpgrade| ready(ws.on_upgrade(handle_socket))), + ); + + async fn handle_socket(mut socket: WebSocket) { + while let Some(Ok(msg)) = socket.recv().await { + match msg { + Message::Text(_) | Message::Binary(_) | Message::Close(_) => { + if socket.send(msg).await.is_err() { + break; + } + } + Message::Ping(_) | Message::Pong(_) => { + // tungstenite will respond to pings automatically + } + } + } + } + + let 
addr = spawn_service(app); + let (mut socket, _response) = tokio_tungstenite::connect_async(format!("ws://{addr}/echo")) + .await + .unwrap(); + + let input = tungstenite::Message::Text("foobar".to_owned()); + socket.send(input.clone()).await.unwrap(); + let output = socket.next().await.unwrap().unwrap(); + assert_eq!(input, output); + + socket + .send(tungstenite::Message::Ping("ping".to_owned().into_bytes())) + .await + .unwrap(); + let output = socket.next().await.unwrap().unwrap(); + assert_eq!( + output, + tungstenite::Message::Pong("ping".to_owned().into_bytes()) + ); + } } diff --git a/.cargo-vendor/axum/src/form.rs b/.cargo-vendor/axum/src/form.rs index c690d48ef3..966517a124 100644 --- a/.cargo-vendor/axum/src/form.rs +++ b/.cargo-vendor/axum/src/form.rs @@ -1,11 +1,10 @@ -use crate::body::HttpBody; +use crate::extract::Request; use crate::extract::{rejection::*, FromRequest, RawForm}; -use crate::BoxError; use async_trait::async_trait; use axum_core::response::{IntoResponse, Response}; use axum_core::RequestExt; use http::header::CONTENT_TYPE; -use http::{Request, StatusCode}; +use http::StatusCode; use serde::de::DeserializeOwned; use serde::Serialize; @@ -13,9 +12,18 @@ use serde::Serialize; /// /// # As extractor /// -/// If used as an extractor `Form` will deserialize the query parameters for `GET` and `HEAD` -/// requests and `application/x-www-form-urlencoded` encoded request bodies for other methods. It -/// supports any type that implements [`serde::Deserialize`]. +/// If used as an extractor, `Form` will deserialize form data from the request, +/// specifically: +/// +/// - If the request has a method of `GET` or `HEAD`, the form data will be read +/// from the query string (same as with [`Query`]) +/// - If the request has a different method, the form will be read from the body +/// of the request. It must have a `content-type` of +/// `application/x-www-form-urlencoded` for this to work. 
If you want to parse +/// `multipart/form-data` request bodies, use [`Multipart`] instead. +/// +/// This matches how HTML forms are sent by browsers by default. +/// In both cases, the inner type `T` must implement [`serde::Deserialize`]. /// /// ⚠️ Since parsing form data might require consuming the request body, the `Form` extractor must be /// *last* if there are multiple extractors in a handler. See ["the order of @@ -38,11 +46,11 @@ use serde::Serialize; /// } /// ``` /// -/// Note that `Content-Type: multipart/form-data` requests are not supported. Use [`Multipart`] -/// instead. -/// /// # As response /// +/// `Form` can also be used to encode any type that implements +/// [`serde::Serialize`] as `application/x-www-form-urlencoded` +/// /// ```rust /// use axum::Form; /// use serde::Serialize; @@ -57,6 +65,7 @@ use serde::Serialize; /// } /// ``` /// +/// [`Query`]: crate::extract::Query /// [`Multipart`]: crate::extract::Multipart #[cfg_attr(docsrs, doc(cfg(feature = "form")))] #[derive(Debug, Clone, Copy, Default)] @@ -64,17 +73,14 @@ use serde::Serialize; pub struct Form(pub T); #[async_trait] -impl FromRequest for Form +impl FromRequest for Form where T: DeserializeOwned, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into, S: Send + Sync, { type Rejection = FormRejection; - async fn from_request(req: Request, _state: &S) -> Result { + async fn from_request(req: Request, _state: &S) -> Result { let is_get_or_head = req.method() == http::Method::GET || req.method() == http::Method::HEAD; @@ -118,15 +124,15 @@ axum_core::__impl_deref!(Form); #[cfg(test)] mod tests { - use super::*; use crate::{ - body::{Empty, Full}, routing::{on, MethodFilter}, test_helpers::TestClient, Router, }; - use bytes::Bytes; - use http::{header::CONTENT_TYPE, Method, Request}; + + use super::*; + use axum_core::body::Body; + use http::{Method, Request}; use mime::APPLICATION_WWW_FORM_URLENCODED; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -140,7 
+146,7 @@ mod tests { async fn check_query(uri: impl AsRef, value: T) { let req = Request::builder() .uri(uri.as_ref()) - .body(Empty::::new()) + .body(Body::empty()) .unwrap(); assert_eq!(Form::::from_request(req, &()).await.unwrap().0, value); } @@ -150,9 +156,7 @@ mod tests { .uri("http://example.com/test") .method(Method::POST) .header(CONTENT_TYPE, APPLICATION_WWW_FORM_URLENCODED.as_ref()) - .body(Full::::new( - serde_urlencoded::to_string(&value).unwrap().into(), - )) + .body(Body::from(serde_urlencoded::to_string(&value).unwrap())) .unwrap(); assert_eq!(Form::::from_request(req, &()).await.unwrap().0, value); } @@ -214,13 +218,12 @@ mod tests { .uri("http://example.com/test") .method(Method::POST) .header(CONTENT_TYPE, mime::APPLICATION_JSON.as_ref()) - .body(Full::::new( + .body(Body::from( serde_urlencoded::to_string(&Pagination { size: Some(10), page: None, }) - .unwrap() - .into(), + .unwrap(), )) .unwrap(); assert!(matches!( @@ -242,21 +245,20 @@ mod tests { let app = Router::new().route( "/", on( - MethodFilter::GET | MethodFilter::POST, + MethodFilter::GET.or(MethodFilter::POST), |_: Form| async {}, ), ); let client = TestClient::new(app); - let res = client.get("/?a=false").send().await; + let res = client.get("/?a=false").await; assert_eq!(res.status(), StatusCode::BAD_REQUEST); let res = client .post("/") .header(CONTENT_TYPE, APPLICATION_WWW_FORM_URLENCODED.as_ref()) .body("a=false") - .send() .await; assert_eq!(res.status(), StatusCode::UNPROCESSABLE_ENTITY); } diff --git a/.cargo-vendor/axum/src/handler/future.rs b/.cargo-vendor/axum/src/handler/future.rs index 59487c31b2..751984d0c6 100644 --- a/.cargo-vendor/axum/src/handler/future.rs +++ b/.cargo-vendor/axum/src/handler/future.rs @@ -1,8 +1,8 @@ //! Handler future types. 
use crate::response::Response; +use axum_core::extract::Request; use futures_util::future::Map; -use http::Request; use pin_project_lite::pin_project; use std::{convert::Infallible, future::Future, pin::Pin, task::Context}; use tower::util::Oneshot; @@ -19,29 +19,29 @@ opaque_future! { pin_project! { /// The response future for [`Layered`](super::Layered). - pub struct LayeredFuture + pub struct LayeredFuture where - S: Service>, + S: Service, { #[pin] - inner: Map>, fn(Result) -> Response>, + inner: Map, fn(Result) -> Response>, } } -impl LayeredFuture +impl LayeredFuture where - S: Service>, + S: Service, { pub(super) fn new( - inner: Map>, fn(Result) -> Response>, + inner: Map, fn(Result) -> Response>, ) -> Self { Self { inner } } } -impl Future for LayeredFuture +impl Future for LayeredFuture where - S: Service>, + S: Service, { type Output = Response; diff --git a/.cargo-vendor/axum/src/handler/mod.rs b/.cargo-vendor/axum/src/handler/mod.rs index 5087ac7c93..579fdbbad4 100644 --- a/.cargo-vendor/axum/src/handler/mod.rs +++ b/.cargo-vendor/axum/src/handler/mod.rs @@ -37,22 +37,20 @@ //! in handlers. See those examples: //! //! * [`anyhow-error-response`][anyhow] for generic boxed errors -//! * [`error-handling-and-dependency-injection`][ehdi] for application-specific detailed errors +//! * [`error-handling`][error-handling] for application-specific detailed errors //! //! [anyhow]: https://github.com/tokio-rs/axum/blob/main/examples/anyhow-error-response/src/main.rs -//! [ehdi]: https://github.com/tokio-rs/axum/blob/main/examples/error-handling-and-dependency-injection/src/main.rs +//! [error-handling]: https://github.com/tokio-rs/axum/blob/main/examples/error-handling/src/main.rs //! 
#![doc = include_str!("../docs/debugging_handler_type_errors.md")] #[cfg(feature = "tokio")] use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; use crate::{ - body::Body, - extract::{FromRequest, FromRequestParts}, + extract::{FromRequest, FromRequestParts, Request}, response::{IntoResponse, Response}, routing::IntoMakeService, }; -use http::Request; use std::{convert::Infallible, fmt, future::Future, marker::PhantomData, pin::Pin}; use tower::ServiceExt; use tower_layer::Layer; @@ -78,9 +76,8 @@ pub use self::service::HandlerService; /// ``` /// use tower::Service; /// use axum::{ -/// extract::State, +/// extract::{State, Request}, /// body::Body, -/// http::Request, /// handler::{HandlerWithoutStateExt, Handler}, /// }; /// @@ -99,7 +96,7 @@ pub use self::service::HandlerService; /// // helper to check that a value implements `Service` /// fn assert_service(service: S) /// where -/// S: Service>, +/// S: Service, /// {} /// ``` #[doc = include_str!("../docs/debugging_handler_type_errors.md")] @@ -130,16 +127,16 @@ pub use self::service::HandlerService; /// ``` #[cfg_attr( nightly_error_messages, - rustc_on_unimplemented( + diagnostic::on_unimplemented( note = "Consider using `#[axum::debug_handler]` to improve the error message" ) )] -pub trait Handler: Clone + Send + Sized + 'static { +pub trait Handler: Clone + Send + Sized + 'static { /// The type of future calling this handler returns. type Future: Future + Send + 'static; /// Call the handler with the given request. - fn call(self, req: Request, state: S) -> Self::Future; + fn call(self, req: Request, state: S) -> Self::Future; /// Apply a [`tower::Layer`] to the handler. 
/// @@ -173,14 +170,12 @@ pub trait Handler: Clone + Send + Sized + 'static { /// /// let layered_handler = handler.layer(ConcurrencyLimitLayer::new(64)); /// let app = Router::new().route("/", get(layered_handler)); - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` - fn layer(self, layer: L) -> Layered + fn layer(self, layer: L) -> Layered where - L: Layer> + Clone, - L::Service: Service>, + L: Layer> + Clone, + L::Service: Service, { Layered { layer, @@ -190,21 +185,20 @@ pub trait Handler: Clone + Send + Sized + 'static { } /// Convert the handler into a [`Service`] by providing the state - fn with_state(self, state: S) -> HandlerService { + fn with_state(self, state: S) -> HandlerService { HandlerService::new(self, state) } } -impl Handler<((),), S, B> for F +impl Handler<((),), S> for F where F: FnOnce() -> Fut + Clone + Send + 'static, Fut: Future + Send, Res: IntoResponse, - B: Send + 'static, { type Future = Pin + Send>>; - fn call(self, _req: Request, _state: S) -> Self::Future { + fn call(self, _req: Request, _state: S) -> Self::Future { Box::pin(async move { self().await.into_response() }) } } @@ -214,19 +208,18 @@ macro_rules! 
impl_handler { [$($ty:ident),*], $last:ident ) => { #[allow(non_snake_case, unused_mut)] - impl Handler<(M, $($ty,)* $last,), S, B> for F + impl Handler<(M, $($ty,)* $last,), S> for F where F: FnOnce($($ty,)* $last,) -> Fut + Clone + Send + 'static, Fut: Future + Send, - B: Send + 'static, S: Send + Sync + 'static, Res: IntoResponse, $( $ty: FromRequestParts + Send, )* - $last: FromRequest + Send, + $last: FromRequest + Send, { type Future = Pin + Send>>; - fn call(self, req: Request, state: S) -> Self::Future { + fn call(self, req: Request, state: S) -> Self::Future { Box::pin(async move { let (mut parts, body) = req.into_parts(); let state = &state; @@ -262,14 +255,13 @@ mod private { pub enum IntoResponseHandler {} } -impl Handler for T +impl Handler for T where T: IntoResponse + Clone + Send + 'static, - B: Send + 'static, { type Future = std::future::Ready; - fn call(self, _req: Request, _state: S) -> Self::Future { + fn call(self, _req: Request, _state: S) -> Self::Future { std::future::ready(self.into_response()) } } @@ -277,13 +269,13 @@ where /// A [`Service`] created from a [`Handler`] by applying a Tower middleware. /// /// Created with [`Handler::layer`]. See that method for more details. 
-pub struct Layered { +pub struct Layered { layer: L, handler: H, - _marker: PhantomData (T, S, B, B2)>, + _marker: PhantomData (T, S)>, } -impl fmt::Debug for Layered +impl fmt::Debug for Layered where L: fmt::Debug, { @@ -294,7 +286,7 @@ where } } -impl Clone for Layered +impl Clone for Layered where L: Clone, H: Clone, @@ -308,21 +300,19 @@ where } } -impl Handler for Layered +impl Handler for Layered where - L: Layer> + Clone + Send + 'static, - H: Handler, - L::Service: Service, Error = Infallible> + Clone + Send + 'static, - >>::Response: IntoResponse, - >>::Future: Send, + L: Layer> + Clone + Send + 'static, + H: Handler, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse, + >::Future: Send, T: 'static, S: 'static, - B: Send + 'static, - B2: Send + 'static, { - type Future = future::LayeredFuture; + type Future = future::LayeredFuture; - fn call(self, req: Request, state: S) -> Self::Future { + fn call(self, req: Request, state: S) -> Self::Future { use futures_util::future::{FutureExt, Map}; let svc = self.handler.with_state(state); @@ -332,8 +322,8 @@ where _, fn( Result< - >>::Response, - >>::Error, + >::Response, + >::Error, >, ) -> _, > = svc.oneshot(req).map(|result| match result { @@ -350,16 +340,16 @@ where /// This provides convenience methods to convert the [`Handler`] into a [`Service`] or [`MakeService`]. /// /// [`MakeService`]: tower::make::MakeService -pub trait HandlerWithoutStateExt: Handler { +pub trait HandlerWithoutStateExt: Handler { /// Convert the handler into a [`Service`] and no state. - fn into_service(self) -> HandlerService; + fn into_service(self) -> HandlerService; /// Convert the handler into a [`MakeService`] and no state. /// /// See [`HandlerService::into_make_service`] for more details. 
/// /// [`MakeService`]: tower::make::MakeService - fn into_make_service(self) -> IntoMakeService>; + fn into_make_service(self) -> IntoMakeService>; /// Convert the handler into a [`MakeService`] which stores information /// about the incoming connection and has no state. @@ -370,25 +360,25 @@ pub trait HandlerWithoutStateExt: Handler { #[cfg(feature = "tokio")] fn into_make_service_with_connect_info( self, - ) -> IntoMakeServiceWithConnectInfo, C>; + ) -> IntoMakeServiceWithConnectInfo, C>; } -impl HandlerWithoutStateExt for H +impl HandlerWithoutStateExt for H where - H: Handler, + H: Handler, { - fn into_service(self) -> HandlerService { + fn into_service(self) -> HandlerService { self.with_state(()) } - fn into_make_service(self) -> IntoMakeService> { + fn into_make_service(self) -> IntoMakeService> { self.into_service().into_make_service() } #[cfg(feature = "tokio")] fn into_make_service_with_connect_info( self, - ) -> IntoMakeServiceWithConnectInfo, C> { + ) -> IntoMakeServiceWithConnectInfo, C> { self.into_service().into_make_service_with_connect_info() } } @@ -396,13 +386,13 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{body, extract::State, test_helpers::*}; + use crate::{extract::State, test_helpers::*}; + use axum_core::body::Body; use http::StatusCode; use std::time::Duration; use tower_http::{ - compression::CompressionLayer, limit::RequestBodyLimitLayer, - map_request_body::MapRequestBodyLayer, map_response_body::MapResponseBodyLayer, - timeout::TimeoutLayer, + limit::RequestBodyLimitLayer, map_request_body::MapRequestBodyLayer, + map_response_body::MapResponseBodyLayer, timeout::TimeoutLayer, }; #[crate::test] @@ -413,7 +403,7 @@ mod tests { let client = TestClient::new(handle.into_service()); - let res = client.post("/").body("hi there!").send().await; + let res = client.post("/").body("hi there!").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "you said: hi there!"); } @@ -428,14 +418,13 @@ mod tests 
{ .layer(( RequestBodyLimitLayer::new(1024), TimeoutLayer::new(Duration::from_secs(10)), - MapResponseBodyLayer::new(body::boxed), - CompressionLayer::new(), + MapResponseBodyLayer::new(Body::new), )) - .layer(MapRequestBodyLayer::new(body::boxed)) + .layer(MapRequestBodyLayer::new(Body::new)) .with_state("foo"); let client = TestClient::new(svc); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.text().await, "foo"); } } diff --git a/.cargo-vendor/axum/src/handler/service.rs b/.cargo-vendor/axum/src/handler/service.rs index 52fd5de67d..e6b8df9316 100644 --- a/.cargo-vendor/axum/src/handler/service.rs +++ b/.cargo-vendor/axum/src/handler/service.rs @@ -1,8 +1,10 @@ use super::Handler; +use crate::body::{Body, Bytes, HttpBody}; #[cfg(feature = "tokio")] use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; use crate::response::Response; use crate::routing::IntoMakeService; +use crate::BoxError; use http::Request; use std::{ convert::Infallible, @@ -17,13 +19,13 @@ use tower_service::Service; /// Created with [`Handler::with_state`] or [`HandlerWithoutStateExt::into_service`]. /// /// [`HandlerWithoutStateExt::into_service`]: super::HandlerWithoutStateExt::into_service -pub struct HandlerService { +pub struct HandlerService { handler: H, state: S, - _marker: PhantomData (T, B)>, + _marker: PhantomData T>, } -impl HandlerService { +impl HandlerService { /// Get a reference to the state. 
pub fn state(&self) -> &S { &self.state @@ -35,7 +37,6 @@ impl HandlerService { /// /// ```rust /// use axum::{ - /// Server, /// handler::Handler, /// extract::State, /// http::{Uri, Method}, @@ -53,15 +54,13 @@ impl HandlerService { /// let app = handler.with_state(AppState {}); /// /// # async { - /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) - /// .serve(app.into_make_service()) - /// .await?; - /// # Ok::<_, hyper::Error>(()) + /// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); + /// axum::serve(listener, app.into_make_service()).await.unwrap(); /// # }; /// ``` /// /// [`MakeService`]: tower::make::MakeService - pub fn into_make_service(self) -> IntoMakeService> { + pub fn into_make_service(self) -> IntoMakeService> { IntoMakeService::new(self) } @@ -72,7 +71,6 @@ impl HandlerService { /// /// ```rust /// use axum::{ - /// Server, /// handler::Handler, /// response::IntoResponse, /// extract::{ConnectInfo, State}, @@ -86,16 +84,17 @@ impl HandlerService { /// ConnectInfo(addr): ConnectInfo, /// State(state): State, /// ) -> String { - /// format!("Hello {}", addr) + /// format!("Hello {addr}") /// } /// /// let app = handler.with_state(AppState {}); /// /// # async { - /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) - /// .serve(app.into_make_service_with_connect_info::()) - /// .await?; - /// # Ok::<_, hyper::Error>(()) + /// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); + /// axum::serve( + /// listener, + /// app.into_make_service_with_connect_info::(), + /// ).await.unwrap(); /// # }; /// ``` /// @@ -104,7 +103,7 @@ impl HandlerService { #[cfg(feature = "tokio")] pub fn into_make_service_with_connect_info( self, - ) -> IntoMakeServiceWithConnectInfo, C> { + ) -> IntoMakeServiceWithConnectInfo, C> { IntoMakeServiceWithConnectInfo::new(self) } } @@ -112,11 +111,11 @@ impl HandlerService { #[test] fn traits() { use crate::test_helpers::*; - assert_send::>(); - 
assert_sync::>(); + assert_send::>(); + assert_sync::>(); } -impl HandlerService { +impl HandlerService { pub(super) fn new(handler: H, state: S) -> Self { Self { handler, @@ -126,13 +125,13 @@ impl HandlerService { } } -impl fmt::Debug for HandlerService { +impl fmt::Debug for HandlerService { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("IntoService").finish_non_exhaustive() } } -impl Clone for HandlerService +impl Clone for HandlerService where H: Clone, S: Clone, @@ -146,10 +145,11 @@ where } } -impl Service> for HandlerService +impl Service> for HandlerService where - H: Handler + Clone + Send + 'static, - B: Send + 'static, + H: Handler + Clone + Send + 'static, + B: HttpBody + Send + 'static, + B::Error: Into, S: Clone + Send + Sync, { type Response = Response; @@ -167,6 +167,8 @@ where fn call(&mut self, req: Request) -> Self::Future { use futures_util::future::FutureExt; + let req = req.map(Body::new); + let handler = self.handler.clone(); let future = Handler::call(handler, req, self.state.clone()); let future = future.map(Ok as _); @@ -174,3 +176,27 @@ where super::future::IntoServiceFuture::new(future) } } + +// for `axum::serve(listener, handler)` +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +const _: () = { + use crate::serve::IncomingStream; + + impl Service> for HandlerService + where + H: Clone, + S: Clone, + { + type Response = Self; + type Error = Infallible; + type Future = std::future::Ready>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: IncomingStream<'_>) -> Self::Future { + std::future::ready(Ok(self.clone())) + } + } +}; diff --git a/.cargo-vendor/axum/src/json.rs b/.cargo-vendor/axum/src/json.rs index 39fd7f4280..c4435922a4 100644 --- a/.cargo-vendor/axum/src/json.rs +++ b/.cargo-vendor/axum/src/json.rs @@ -1,26 +1,23 @@ -use crate::{ - body::{Bytes, HttpBody}, - extract::{rejection::*, FromRequest}, - 
BoxError, -}; +use crate::extract::Request; +use crate::extract::{rejection::*, FromRequest}; use async_trait::async_trait; use axum_core::response::{IntoResponse, Response}; -use bytes::{BufMut, BytesMut}; +use bytes::{BufMut, Bytes, BytesMut}; use http::{ header::{self, HeaderMap, HeaderValue}, - Request, StatusCode, + StatusCode, }; use serde::{de::DeserializeOwned, Serialize}; /// JSON Extractor / Response. /// /// When used as an extractor, it can deserialize request bodies into some type that -/// implements [`serde::Deserialize`]. The request will be rejected (and a [`JsonRejection`] will +/// implements [`serde::de::DeserializeOwned`]. The request will be rejected (and a [`JsonRejection`] will /// be returned) if: /// /// - The request doesn't have a `Content-Type: application/json` (or similar) header. /// - The body doesn't contain syntactically valid JSON. -/// - The body contains syntactically valid JSON but it couldn't be deserialized into the target +/// - The body contains syntactically valid JSON, but it couldn't be deserialized into the target /// type. /// - Buffering the request body fails. 
/// @@ -53,9 +50,7 @@ use serde::{de::DeserializeOwned, Serialize}; /// } /// /// let app = Router::new().route("/users", post(create_user)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// When used as a response, it can serialize any type that implements [`serde::Serialize`] to @@ -90,9 +85,7 @@ use serde::{de::DeserializeOwned, Serialize}; /// } /// /// let app = Router::new().route("/users/:id", get(get_user)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` #[derive(Debug, Clone, Copy, Default)] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] @@ -100,44 +93,17 @@ use serde::{de::DeserializeOwned, Serialize}; pub struct Json(pub T); #[async_trait] -impl FromRequest for Json +impl FromRequest for Json where T: DeserializeOwned, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into, S: Send + Sync, { type Rejection = JsonRejection; - async fn from_request(req: Request, state: &S) -> Result { + async fn from_request(req: Request, state: &S) -> Result { if json_content_type(req.headers()) { let bytes = Bytes::from_request(req, state).await?; - let deserializer = &mut serde_json::Deserializer::from_slice(&bytes); - - let value = match serde_path_to_error::deserialize(deserializer) { - Ok(value) => value, - Err(err) => { - let rejection = match err.inner().classify() { - serde_json::error::Category::Data => JsonDataError::from_err(err).into(), - serde_json::error::Category::Syntax | serde_json::error::Category::Eof => { - JsonSyntaxError::from_err(err).into() - } - serde_json::error::Category::Io => { - if cfg!(debug_assertions) { - // we don't use `serde_json::from_reader` and instead always buffer - // bodies first, so we shouldn't encounter any IO errors - unreachable!() - } else { - JsonSyntaxError::from_err(err).into() - } - } - 
}; - return Err(rejection); - } - }; - - Ok(Json(value)) + Self::from_bytes(&bytes) } else { Err(MissingJsonContentType.into()) } @@ -177,6 +143,42 @@ impl From for Json { } } +impl Json +where + T: DeserializeOwned, +{ + /// Construct a `Json` from a byte slice. Most users should prefer to use the `FromRequest` impl + /// but special cases may require first extracting a `Request` into `Bytes` then optionally + /// constructing a `Json`. + pub fn from_bytes(bytes: &[u8]) -> Result { + let deserializer = &mut serde_json::Deserializer::from_slice(bytes); + + let value = match serde_path_to_error::deserialize(deserializer) { + Ok(value) => value, + Err(err) => { + let rejection = match err.inner().classify() { + serde_json::error::Category::Data => JsonDataError::from_err(err).into(), + serde_json::error::Category::Syntax | serde_json::error::Category::Eof => { + JsonSyntaxError::from_err(err).into() + } + serde_json::error::Category::Io => { + if cfg!(debug_assertions) { + // we don't use `serde_json::from_reader` and instead always buffer + // bodies first, so we shouldn't encounter any IO errors + unreachable!() + } else { + JsonSyntaxError::from_err(err).into() + } + } + }; + return Err(rejection); + } + }; + + Ok(Json(value)) + } +} + impl IntoResponse for Json where T: Serialize, @@ -224,7 +226,7 @@ mod tests { let app = Router::new().route("/", post(|input: Json| async { input.0.foo })); let client = TestClient::new(app); - let res = client.post("/").json(&json!({ "foo": "bar" })).send().await; + let res = client.post("/").json(&json!({ "foo": "bar" })).await; let body = res.text().await; assert_eq!(body, "bar"); @@ -240,7 +242,7 @@ mod tests { let app = Router::new().route("/", post(|input: Json| async { input.0.foo })); let client = TestClient::new(app); - let res = client.post("/").body(r#"{ "foo": "bar" }"#).send().await; + let res = client.post("/").body(r#"{ "foo": "bar" }"#).await; let status = res.status(); @@ -258,7 +260,6 @@ mod tests { .post("/") 
.header("content-type", content_type) .body("{}") - .send() .await; res.status() == StatusCode::OK @@ -280,7 +281,6 @@ mod tests { .post("/") .body("{") .header("content-type", "application/json") - .send() .await; assert_eq!(res.status(), StatusCode::BAD_REQUEST); @@ -311,7 +311,6 @@ mod tests { .post("/") .body("{\"a\": 1, \"b\": [{\"x\": 2}]}") .header("content-type", "application/json") - .send() .await; assert_eq!(res.status(), StatusCode::UNPROCESSABLE_ENTITY); diff --git a/.cargo-vendor/axum/src/lib.rs b/.cargo-vendor/axum/src/lib.rs index da60aef5ae..601c14ae74 100644 --- a/.cargo-vendor/axum/src/lib.rs +++ b/.cargo-vendor/axum/src/lib.rs @@ -1,4 +1,3 @@ -#![cfg_attr(nightly_error_messages, feature(rustc_attrs))] //! axum is a web application framework that focuses on ergonomics and modularity. //! //! # Table of contents @@ -53,11 +52,9 @@ //! // build our application with a single route //! let app = Router::new().route("/", get(|| async { "Hello, World!" })); //! -//! // run it with hyper on localhost:3000 -//! axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) -//! .serve(app.into_make_service()) -//! .await -//! .unwrap(); +//! // run our app with hyper, listening globally on port 3000 +//! let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +//! axum::serve(listener, app).await.unwrap(); //! } //! ``` //! @@ -66,7 +63,7 @@ //! //! # Routing //! -//! [`Router`] is used to setup which paths goes to which services: +//! [`Router`] is used to set up which paths goes to which services: //! //! ```rust //! use axum::{Router, routing::get}; @@ -82,9 +79,7 @@ //! async fn get_foo() {} //! async fn post_foo() {} //! async fn foo_bar() {} -//! # async { -//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` //! //! See [`Router`] for more details on routing. @@ -145,9 +140,7 @@ //! let app = Router::new() //! 
.route("/plain_text", get(plain_text)) //! .route("/json", get(json)); -//! # async { -//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` //! //! See [`response`](crate::response) for more details on building responses. @@ -202,9 +195,7 @@ //! ) { //! // ... //! } -//! # async { -//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` //! //! You should prefer using [`State`] if possible since it's more type safe. The downside is that @@ -240,9 +231,7 @@ //! ) { //! // ... //! } -//! # async { -//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` //! //! The downside to this approach is that you'll get runtime errors @@ -298,9 +287,7 @@ //! struct CreateUserPayload { //! // ... //! } -//! # async { -//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` //! //! The downside to this approach is that it's a little more verbose than using @@ -320,16 +307,11 @@ //! ```toml //! [dependencies] //! axum = "" -//! hyper = { version = "", features = ["full"] } //! tokio = { version = "", features = ["full"] } //! tower = "" //! ``` //! -//! The `"full"` feature for hyper and tokio isn't strictly necessary but it's -//! the easiest way to get started. -//! -//! Note that [`hyper::Server`] is re-exported by axum so if that's all you need -//! then you don't have to explicitly depend on hyper. +//! The `"full"` feature for tokio isn't necessary but it's the easiest way to get started. //! //! Tower isn't strictly necessary either but helpful for testing. See the //! testing example in the repo to learn more about testing axum apps. @@ -348,7 +330,6 @@ //! //! Name | Description | Default? //! ---|---|--- -//! 
`headers` | Enables extracting typed headers via [`TypedHeader`] | No //! `http1` | Enables hyper's `http1` feature | Yes //! `http2` | Enables hyper's `http2` feature | No //! `json` | Enables the [`Json`] type and some similar convenience functionality | Yes @@ -356,14 +337,13 @@ //! `matched-path` | Enables capturing of every request's router path and the [`MatchedPath`] extractor | Yes //! `multipart` | Enables parsing `multipart/form-data` requests with [`Multipart`] | No //! `original-uri` | Enables capturing of every request's original URI and the [`OriginalUri`] extractor | Yes -//! `tokio` | Enables `tokio` as a dependency and `axum::Server`, `SSE` and `extract::connect_info` types. | Yes +//! `tokio` | Enables `tokio` as a dependency and `axum::serve`, `SSE` and `extract::connect_info` types. | Yes //! `tower-log` | Enables `tower`'s `log` feature | Yes -//! `tracing` | Log rejections from built-in extractors | No +//! `tracing` | Log rejections from built-in extractors | Yes //! `ws` | Enables WebSockets support via [`extract::ws`] | No //! `form` | Enables the `Form` extractor | Yes //! `query` | Enables the `Query` extractor | Yes //! -//! [`TypedHeader`]: crate::extract::TypedHeader //! [`MatchedPath`]: crate::extract::MatchedPath //! [`Multipart`]: crate::extract::Multipart //! [`OriginalUri`]: crate::extract::OriginalUri @@ -377,7 +357,6 @@ //! [`Timeout`]: tower::timeout::Timeout //! [examples]: https://github.com/tokio-rs/axum/tree/main/examples //! [`Router::merge`]: crate::routing::Router::merge -//! [`axum::Server`]: hyper::server::Server //! [`Service`]: tower::Service //! [`Service::poll_ready`]: tower::Service::poll_ready //! 
[`Service`'s]: tower::Service @@ -431,7 +410,7 @@ missing_debug_implementations, missing_docs )] -#![deny(unreachable_pub, private_in_public)] +#![deny(unreachable_pub)] #![allow(elided_lifetimes_in_paths, clippy::type_complexity)] #![forbid(unsafe_code)] #![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))] @@ -448,8 +427,6 @@ mod form; #[cfg(feature = "json")] mod json; mod service_ext; -#[cfg(feature = "headers")] -mod typed_header; mod util; pub mod body; @@ -459,20 +436,16 @@ pub mod handler; pub mod middleware; pub mod response; pub mod routing; +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +pub mod serve; #[cfg(test)] mod test_helpers; #[doc(no_inline)] pub use async_trait::async_trait; -#[cfg(feature = "headers")] -#[doc(no_inline)] -pub use headers; #[doc(no_inline)] pub use http; -#[cfg(feature = "tokio")] -#[doc(no_inline)] -pub use hyper::Server; #[doc(inline)] pub use self::extension::Extension; @@ -482,10 +455,6 @@ pub use self::json::Json; #[doc(inline)] pub use self::routing::Router; -#[doc(inline)] -#[cfg(feature = "headers")] -pub use self::typed_header::TypedHeader; - #[doc(inline)] #[cfg(feature = "form")] pub use self::form::Form; @@ -496,6 +465,10 @@ pub use axum_core::{BoxError, Error, RequestExt, RequestPartsExt}; #[cfg(feature = "macros")] pub use axum_macros::debug_handler; +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +#[doc(inline)] +pub use self::serve::serve; + pub use self::service_ext::ServiceExt; #[cfg(test)] diff --git a/.cargo-vendor/axum/src/macros.rs b/.cargo-vendor/axum/src/macros.rs index 180c3c05a5..5b8a335ef4 100644 --- a/.cargo-vendor/axum/src/macros.rs +++ b/.cargo-vendor/axum/src/macros.rs @@ -66,3 +66,27 @@ macro_rules! all_the_tuples { $name!([T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15], T16); }; } + +#[cfg(feature = "tracing")] +macro_rules! 
trace { + ($($tt:tt)*) => { + tracing::trace!($($tt)*) + } +} + +#[cfg(feature = "tracing")] +macro_rules! error { + ($($tt:tt)*) => { + tracing::error!($($tt)*) + }; +} + +#[cfg(not(feature = "tracing"))] +macro_rules! trace { + ($($tt:tt)*) => {}; +} + +#[cfg(not(feature = "tracing"))] +macro_rules! error { + ($($tt:tt)*) => {}; +} diff --git a/.cargo-vendor/axum/src/middleware/from_extractor.rs b/.cargo-vendor/axum/src/middleware/from_extractor.rs index 8c9a24833f..63ef85800a 100644 --- a/.cargo-vendor/axum/src/middleware/from_extractor.rs +++ b/.cargo-vendor/axum/src/middleware/from_extractor.rs @@ -84,9 +84,7 @@ use tower_service::Service; /// .route("/foo", post(other_handler)) /// // The extractor will run before all routes /// .route_layer(from_extractor::()); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// [`Bytes`]: bytes::Bytes @@ -354,13 +352,12 @@ mod tests { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::UNAUTHORIZED); let res = client .get("/") .header(http::header::AUTHORIZATION, "secret") - .send() .await; assert_eq!(res.status(), StatusCode::OK); } diff --git a/.cargo-vendor/axum/src/middleware/from_fn.rs b/.cargo-vendor/axum/src/middleware/from_fn.rs index f380a580ad..e4c44c74f5 100644 --- a/.cargo-vendor/axum/src/middleware/from_fn.rs +++ b/.cargo-vendor/axum/src/middleware/from_fn.rs @@ -1,7 +1,6 @@ use crate::response::{IntoResponse, Response}; -use axum_core::extract::{FromRequest, FromRequestParts}; +use axum_core::extract::{FromRequest, FromRequestParts, Request}; use futures_util::future::BoxFuture; -use http::Request; use std::{ any::type_name, convert::Infallible, @@ -21,7 +20,7 @@ use tower_service::Service; /// /// 1. Be an `async fn`. /// 2. Take one or more [extractors] as the first arguments. -/// 3. 
Take [`Next`](Next) as the final argument. +/// 3. Take [`Next`](Next) as the final argument. /// 4. Return something that implements [`IntoResponse`]. /// /// Note that this function doesn't support extracting [`State`]. For that, use [`from_fn_with_state`]. @@ -31,15 +30,16 @@ use tower_service::Service; /// ```rust /// use axum::{ /// Router, -/// http::{self, Request}, +/// http, /// routing::get, /// response::Response, /// middleware::{self, Next}, +/// extract::Request, /// }; /// -/// async fn my_middleware( -/// request: Request, -/// next: Next, +/// async fn my_middleware( +/// request: Request, +/// next: Next, /// ) -> Response { /// // do something with `request`... /// @@ -61,32 +61,38 @@ use tower_service::Service; /// ```rust /// use axum::{ /// Router, -/// extract::TypedHeader, -/// http::StatusCode, -/// headers::authorization::{Authorization, Bearer}, -/// http::Request, +/// extract::Request, +/// http::{StatusCode, HeaderMap}, /// middleware::{self, Next}, /// response::Response, /// routing::get, /// }; /// -/// async fn auth( -/// // run the `TypedHeader` extractor -/// TypedHeader(auth): TypedHeader>, +/// async fn auth( +/// // run the `HeaderMap` extractor +/// headers: HeaderMap, /// // you can also add more extractors here but the last /// // extractor must implement `FromRequest` which /// // `Request` does -/// request: Request, -/// next: Next, +/// request: Request, +/// next: Next, /// ) -> Result { -/// if token_is_valid(auth.token()) { -/// let response = next.run(request).await; -/// Ok(response) -/// } else { -/// Err(StatusCode::UNAUTHORIZED) +/// match get_token(&headers) { +/// Some(token) if token_is_valid(token) => { +/// let response = next.run(request).await; +/// Ok(response) +/// } +/// _ => { +/// Err(StatusCode::UNAUTHORIZED) +/// } /// } /// } /// +/// fn get_token(headers: &HeaderMap) -> Option<&str> { +/// // ... +/// # None +/// } +/// /// fn token_is_valid(token: &str) -> bool { /// // ... 
/// # false @@ -113,23 +119,23 @@ pub fn from_fn(f: F) -> FromFnLayer { /// ```rust /// use axum::{ /// Router, -/// http::{Request, StatusCode}, +/// http::StatusCode, /// routing::get, /// response::{IntoResponse, Response}, /// middleware::{self, Next}, -/// extract::State, +/// extract::{Request, State}, /// }; /// /// #[derive(Clone)] /// struct AppState { /* ... */ } /// -/// async fn my_middleware( +/// async fn my_middleware( /// State(state): State, /// // you can add more extractors here but the last /// // extractor must implement `FromRequest` which /// // `Request` does -/// request: Request, -/// next: Next, +/// request: Request, +/// next: Next, /// ) -> Response { /// // do something with `request`... /// @@ -243,20 +249,19 @@ macro_rules! impl_service { [$($ty:ident),*], $last:ident ) => { #[allow(non_snake_case, unused_mut)] - impl Service> for FromFn + impl Service for FromFn where - F: FnMut($($ty,)* $last, Next) -> Fut + Clone + Send + 'static, + F: FnMut($($ty,)* $last, Next) -> Fut + Clone + Send + 'static, $( $ty: FromRequestParts + Send, )* - $last: FromRequest + Send, + $last: FromRequest + Send, Fut: Future + Send + 'static, Out: IntoResponse + 'static, - I: Service, Error = Infallible> + I: Service + Clone + Send + 'static, I::Response: IntoResponse, I::Future: Send + 'static, - B: Send + 'static, S: Clone + Send + Sync + 'static, { type Response = Response; @@ -267,7 +272,7 @@ macro_rules! impl_service { self.inner.poll_ready(cx) } - fn call(&mut self, req: Request) -> Self::Future { + fn call(&mut self, req: Request) -> Self::Future { let not_ready_inner = self.inner.clone(); let ready_inner = std::mem::replace(&mut self.inner, not_ready_inner); @@ -325,13 +330,14 @@ where } /// The remainder of a middleware stack, including the handler. 
-pub struct Next { - inner: BoxCloneService, Response, Infallible>, +#[derive(Debug, Clone)] +pub struct Next { + inner: BoxCloneService, } -impl Next { +impl Next { /// Execute the remaining middleware stack. - pub async fn run(mut self, req: Request) -> Response { + pub async fn run(mut self, req: Request) -> Response { match self.inner.call(req).await { Ok(res) => res, Err(err) => match err {}, @@ -339,23 +345,7 @@ impl Next { } } -impl fmt::Debug for Next { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FromFnLayer") - .field("inner", &self.inner) - .finish() - } -} - -impl Clone for Next { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -impl Service> for Next { +impl Service for Next { type Response = Response; type Error = Infallible; type Future = Pin> + Send>>; @@ -364,7 +354,7 @@ impl Service> for Next { self.inner.poll_ready(cx) } - fn call(&mut self, req: Request) -> Self::Future { + fn call(&mut self, req: Request) -> Self::Future { self.inner.call(req) } } @@ -393,11 +383,12 @@ mod tests { use super::*; use crate::{body::Body, routing::get, Router}; use http::{HeaderMap, StatusCode}; + use http_body_util::BodyExt; use tower::ServiceExt; #[crate::test] async fn basic() { - async fn insert_header(mut req: Request, next: Next) -> impl IntoResponse { + async fn insert_header(mut req: Request, next: Next) -> impl IntoResponse { req.headers_mut() .insert("x-axum-test", "ok".parse().unwrap()); @@ -417,7 +408,7 @@ mod tests { .await .unwrap(); assert_eq!(res.status(), StatusCode::OK); - let body = hyper::body::to_bytes(res).await.unwrap(); + let body = res.collect().await.unwrap().to_bytes(); assert_eq!(&body[..], b"ok"); } } diff --git a/.cargo-vendor/axum/src/middleware/map_request.rs b/.cargo-vendor/axum/src/middleware/map_request.rs index 5d1801ac7c..596b6c3c87 100644 --- a/.cargo-vendor/axum/src/middleware/map_request.rs +++ b/.cargo-vendor/axum/src/middleware/map_request.rs @@ -1,4 +1,6 @@ 
+use crate::body::{Body, Bytes, HttpBody}; use crate::response::{IntoResponse, Response}; +use crate::BoxError; use axum_core::extract::{FromRequest, FromRequestParts}; use futures_util::future::BoxFuture; use http::Request; @@ -251,7 +253,7 @@ macro_rules! impl_service { where F: FnMut($($ty,)* $last) -> Fut + Clone + Send + 'static, $( $ty: FromRequestParts + Send, )* - $last: FromRequest + Send, + $last: FromRequest + Send, Fut: Future + Send + 'static, Fut::Output: IntoMapRequestResult + Send + 'static, I: Service, Error = Infallible> @@ -260,7 +262,8 @@ macro_rules! impl_service { + 'static, I::Response: IntoResponse, I::Future: Send + 'static, - B: Send + 'static, + B: HttpBody + Send + 'static, + B::Error: Into, S: Clone + Send + Sync + 'static, { type Response = Response; @@ -272,6 +275,8 @@ macro_rules! impl_service { } fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(Body::new); + let not_ready_inner = self.inner.clone(); let mut ready_inner = std::mem::replace(&mut self.inner, not_ready_inner); @@ -406,7 +411,7 @@ mod tests { .layer(map_request(add_header)); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.text().await, "foo"); } @@ -426,7 +431,7 @@ mod tests { .layer(map_request(add_header)); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); assert_eq!(res.text().await, "something went wrong"); diff --git a/.cargo-vendor/axum/src/middleware/map_response.rs b/.cargo-vendor/axum/src/middleware/map_response.rs index 06f9825740..2510cdc256 100644 --- a/.cargo-vendor/axum/src/middleware/map_response.rs +++ b/.cargo-vendor/axum/src/middleware/map_response.rs @@ -357,7 +357,7 @@ mod tests { let app = Router::new().layer(map_response(add_header)); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = 
client.get("/").await; assert_eq!(res.headers()["x-foo"], "foo"); } diff --git a/.cargo-vendor/axum/src/response/mod.rs b/.cargo-vendor/axum/src/response/mod.rs index 2c149748a6..6cfd9b0763 100644 --- a/.cargo-vendor/axum/src/response/mod.rs +++ b/.cargo-vendor/axum/src/response/mod.rs @@ -1,6 +1,6 @@ #![doc = include_str!("../docs/response.md")] -use crate::body::{Bytes, Full}; +use axum_core::body::Body; use http::{header, HeaderValue}; mod redirect; @@ -12,10 +12,6 @@ pub mod sse; #[cfg(feature = "json")] pub use crate::Json; -#[doc(no_inline)] -#[cfg(feature = "headers")] -pub use crate::TypedHeader; - #[cfg(feature = "form")] #[doc(no_inline)] pub use crate::form::Form; @@ -44,7 +40,7 @@ pub struct Html(pub T); impl IntoResponse for Html where - T: Into>, + T: Into, { fn into_response(self) -> Response { ( @@ -67,7 +63,7 @@ impl From for Html { #[cfg(test)] mod tests { use crate::extract::Extension; - use crate::{body::Body, routing::get, Router}; + use crate::{routing::get, Router}; use axum_core::response::IntoResponse; use http::HeaderMap; use http::{StatusCode, Uri}; @@ -99,7 +95,7 @@ mod tests { } } - _ = Router::<(), Body>::new() + _ = Router::<()>::new() .route("/", get(impl_trait_ok)) .route("/", get(impl_trait_err)) .route("/", get(impl_trait_both)) @@ -209,7 +205,7 @@ mod tests { ) } - _ = Router::<(), Body>::new() + _ = Router::<()>::new() .route("/", get(status)) .route("/", get(status_headermap)) .route("/", get(status_header_array)) diff --git a/.cargo-vendor/axum/src/response/redirect.rs b/.cargo-vendor/axum/src/response/redirect.rs index 4dee5b5c82..8bc6eb5e15 100644 --- a/.cargo-vendor/axum/src/response/redirect.rs +++ b/.cargo-vendor/axum/src/response/redirect.rs @@ -15,9 +15,7 @@ use http::{header::LOCATION, HeaderValue, StatusCode}; /// let app = Router::new() /// .route("/old", get(|| async { Redirect::permanent("/new") })) /// .route("/new", get(|| async { "Hello!" 
})); -/// # async { -/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` #[must_use = "needs to be returned from a handler or otherwise turned into a Response to be useful"] #[derive(Debug, Clone)] diff --git a/.cargo-vendor/axum/src/response/sse.rs b/.cargo-vendor/axum/src/response/sse.rs index 2e9e28535e..e77b8c78a8 100644 --- a/.cargo-vendor/axum/src/response/sse.rs +++ b/.cargo-vendor/axum/src/response/sse.rs @@ -22,9 +22,7 @@ //! //! Sse::new(stream).keep_alive(KeepAlive::default()) //! } -//! # async { -//! # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -//! # }; +//! # let _: Router = app; //! ``` use crate::{ @@ -32,7 +30,7 @@ use crate::{ BoxError, }; use axum_core::{ - body, + body::Body, response::{IntoResponse, Response}, }; use bytes::{BufMut, BytesMut}; @@ -40,6 +38,7 @@ use futures_util::{ ready, stream::{Stream, TryStream}, }; +use http_body::Frame; use pin_project_lite::pin_project; use std::{ fmt, @@ -104,7 +103,7 @@ where (http::header::CONTENT_TYPE, mime::TEXT_EVENT_STREAM.as_ref()), (http::header::CACHE_CONTROL, "no-cache"), ], - body::boxed(Body { + Body::new(SseBody { event_stream: SyncWrapper::new(self.stream), keep_alive: self.keep_alive.map(KeepAliveStream::new), }), @@ -114,7 +113,7 @@ where } pin_project! { - struct Body { + struct SseBody { #[pin] event_stream: SyncWrapper, #[pin] @@ -122,23 +121,23 @@ pin_project! 
{ } } -impl HttpBody for Body +impl HttpBody for SseBody where S: Stream>, { type Data = Bytes; type Error = E; - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { let this = self.project(); match this.event_stream.get_pin_mut().poll_next(cx) { Poll::Pending => { if let Some(keep_alive) = this.keep_alive.as_pin_mut() { - keep_alive.poll_event(cx).map(|e| Some(Ok(e))) + keep_alive.poll_event(cx).map(|e| Some(Ok(Frame::data(e)))) } else { Poll::Pending } @@ -147,19 +146,12 @@ where if let Some(keep_alive) = this.keep_alive.as_pin_mut() { keep_alive.reset(); } - Poll::Ready(Some(Ok(event.finalize()))) + Poll::Ready(Some(Ok(Frame::data(event.finalize())))) } Poll::Ready(Some(Err(error))) => Poll::Ready(Some(Err(error))), Poll::Ready(None) => Poll::Ready(None), } } - - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(None)) - } } /// Server-sent event @@ -171,9 +163,9 @@ pub struct Event { } impl Event { - /// Set the event's data data field(s) (`data:`) + /// Set the event's data data field(s) (`data: `) /// - /// Newlines in `data` will automatically be broken across `data:` fields. + /// Newlines in `data` will automatically be broken across `data: ` fields. /// /// This corresponds to [`MessageEvent`'s data field]. /// @@ -202,7 +194,7 @@ impl Event { self } - /// Set the event's data field to a value serialized as unformatted JSON (`data:`). + /// Set the event's data field to a value serialized as unformatted JSON (`data: `). /// /// This corresponds to [`MessageEvent`'s data field]. 
/// @@ -212,7 +204,7 @@ impl Event { /// /// [`MessageEvent`'s data field]: https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent/data #[cfg(feature = "json")] - pub fn json_data(mut self, data: T) -> serde_json::Result + pub fn json_data(mut self, data: T) -> Result where T: serde::Serialize, { @@ -220,8 +212,8 @@ impl Event { panic!("Called `EventBuilder::json_data` multiple times"); } - self.buffer.extend_from_slice(b"data:"); - serde_json::to_writer((&mut self.buffer).writer(), &data)?; + self.buffer.extend_from_slice(b"data: "); + serde_json::to_writer((&mut self.buffer).writer(), &data).map_err(axum_core::Error::new)?; self.buffer.put_u8(b'\n'); self.flags.insert(EventFlags::HAS_DATA); @@ -358,10 +350,7 @@ impl Event { ); self.buffer.extend_from_slice(name.as_bytes()); self.buffer.put_u8(b':'); - // Prevent values that start with spaces having that space stripped - if value.starts_with(b" ") { - self.buffer.put_u8(b' '); - } + self.buffer.put_u8(b' '); self.buffer.extend_from_slice(value); self.buffer.put_u8(b'\n'); } @@ -372,13 +361,29 @@ impl Event { } } -bitflags::bitflags! 
{ - #[derive(Default)] - struct EventFlags: u8 { - const HAS_DATA = 0b0001; - const HAS_EVENT = 0b0010; - const HAS_RETRY = 0b0100; - const HAS_ID = 0b1000; +#[derive(Default, Debug, Copy, Clone, PartialEq)] +struct EventFlags(u8); + +impl EventFlags { + const HAS_DATA: Self = Self::from_bits(0b0001); + const HAS_EVENT: Self = Self::from_bits(0b0010); + const HAS_RETRY: Self = Self::from_bits(0b0100); + const HAS_ID: Self = Self::from_bits(0b1000); + + const fn bits(&self) -> u8 { + self.0 + } + + const fn from_bits(bits: u8) -> Self { + Self(bits) + } + + const fn contains(&self, other: Self) -> bool { + self.bits() & other.bits() == other.bits() + } + + fn insert(&mut self, other: Self) { + *self = Self::from_bits(self.bits() | other.bits()); } } @@ -516,7 +521,7 @@ mod tests { #[test] fn leading_space_is_not_stripped() { let no_leading_space = Event::default().data("\tfoobar"); - assert_eq!(&*no_leading_space.finalize(), b"data:\tfoobar\n\n"); + assert_eq!(&*no_leading_space.finalize(), b"data: \tfoobar\n\n"); let leading_space = Event::default().data(" foobar"); assert_eq!(&*leading_space.finalize(), b"data: foobar\n\n"); @@ -543,7 +548,7 @@ mod tests { ); let client = TestClient::new(app); - let mut stream = client.get("/").send().await; + let mut stream = client.get("/").await; assert_eq!(stream.headers()["content-type"], "text/event-stream"); assert_eq!(stream.headers()["cache-control"], "no-cache"); @@ -554,13 +559,13 @@ mod tests { let event_fields = parse_event(&stream.chunk_text().await.unwrap()); assert_eq!(event_fields.get("data").unwrap(), "{\"foo\":\"bar\"}"); - assert!(event_fields.get("comment").is_none()); + assert!(!event_fields.contains_key("comment")); let event_fields = parse_event(&stream.chunk_text().await.unwrap()); assert_eq!(event_fields.get("event").unwrap(), "three"); assert_eq!(event_fields.get("retry").unwrap(), "30000"); assert_eq!(event_fields.get("id").unwrap(), "unique-id"); - assert!(event_fields.get("comment").is_none()); + 
assert!(!event_fields.contains_key("comment")); assert!(stream.chunk_text().await.is_none()); } @@ -585,7 +590,7 @@ mod tests { ); let client = TestClient::new(app); - let mut stream = client.get("/").send().await; + let mut stream = client.get("/").await; for _ in 0..5 { // first message should be an event @@ -622,7 +627,7 @@ mod tests { ); let client = TestClient::new(app); - let mut stream = client.get("/").send().await; + let mut stream = client.get("/").await; // first message should be an event let event_fields = parse_event(&stream.chunk_text().await.unwrap()); @@ -665,7 +670,7 @@ mod tests { } #[test] - fn memchr_spliting() { + fn memchr_splitting() { assert_eq!( memchr_split(2, &[]).collect::>(), [&[]] as [&[u8]; 1] diff --git a/.cargo-vendor/axum/src/routing/into_make_service.rs b/.cargo-vendor/axum/src/routing/into_make_service.rs index fbc57c4acc..36da73a21e 100644 --- a/.cargo-vendor/axum/src/routing/into_make_service.rs +++ b/.cargo-vendor/axum/src/routing/into_make_service.rs @@ -46,12 +46,11 @@ opaque_future! { #[cfg(test)] mod tests { use super::*; - use crate::body::Body; #[test] fn traits() { use crate::test_helpers::*; - assert_send::>(); + assert_send::>(); } } diff --git a/.cargo-vendor/axum/src/routing/method_filter.rs b/.cargo-vendor/axum/src/routing/method_filter.rs index ca9b0c06e3..1cea4235e5 100644 --- a/.cargo-vendor/axum/src/routing/method_filter.rs +++ b/.cargo-vendor/axum/src/routing/method_filter.rs @@ -1,29 +1,47 @@ -use bitflags::bitflags; use http::Method; use std::{ fmt, fmt::{Debug, Formatter}, }; -bitflags! { - /// A filter that matches one or more HTTP methods. - pub struct MethodFilter: u16 { - /// Match `DELETE` requests. - const DELETE = 0b000000010; - /// Match `GET` requests. - const GET = 0b000000100; - /// Match `HEAD` requests. - const HEAD = 0b000001000; - /// Match `OPTIONS` requests. - const OPTIONS = 0b000010000; - /// Match `PATCH` requests. - const PATCH = 0b000100000; - /// Match `POST` requests. 
- const POST = 0b001000000; - /// Match `PUT` requests. - const PUT = 0b010000000; - /// Match `TRACE` requests. - const TRACE = 0b100000000; +/// A filter that matches one or more HTTP methods. +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct MethodFilter(u16); + +impl MethodFilter { + /// Match `DELETE` requests. + pub const DELETE: Self = Self::from_bits(0b0_0000_0010); + /// Match `GET` requests. + pub const GET: Self = Self::from_bits(0b0_0000_0100); + /// Match `HEAD` requests. + pub const HEAD: Self = Self::from_bits(0b0_0000_1000); + /// Match `OPTIONS` requests. + pub const OPTIONS: Self = Self::from_bits(0b0_0001_0000); + /// Match `PATCH` requests. + pub const PATCH: Self = Self::from_bits(0b0_0010_0000); + /// Match `POST` requests. + pub const POST: Self = Self::from_bits(0b0_0100_0000); + /// Match `PUT` requests. + pub const PUT: Self = Self::from_bits(0b0_1000_0000); + /// Match `TRACE` requests. + pub const TRACE: Self = Self::from_bits(0b1_0000_0000); + + const fn bits(&self) -> u16 { + let bits = self; + bits.0 + } + + const fn from_bits(bits: u16) -> Self { + Self(bits) + } + + pub(crate) const fn contains(&self, other: Self) -> bool { + self.bits() & other.bits() == other.bits() + } + + /// Performs the OR operation between the [`MethodFilter`] in `self` with `other`. 
+ pub const fn or(self, other: Self) -> Self { + Self(self.0 | other.0) } } diff --git a/.cargo-vendor/axum/src/routing/method_routing.rs b/.cargo-vendor/axum/src/routing/method_routing.rs index cdc7a11cd3..1eb6075b22 100644 --- a/.cargo-vendor/axum/src/routing/method_routing.rs +++ b/.cargo-vendor/axum/src/routing/method_routing.rs @@ -8,11 +8,11 @@ use crate::{ boxed::BoxedIntoRoute, error_handling::{HandleError, HandleErrorLayer}, handler::Handler, - http::{Method, Request, StatusCode}, + http::{Method, StatusCode}, response::Response, routing::{future::RouteFuture, Fallback, MethodFilter, Route}, }; -use axum_core::response::IntoResponse; +use axum_core::{extract::Request, response::IntoResponse, BoxError}; use bytes::BytesMut; use std::{ convert::Infallible, @@ -34,23 +34,21 @@ macro_rules! top_level_service_fn { /// /// ```rust /// use axum::{ - /// http::Request, + /// extract::Request, /// Router, /// routing::get_service, + /// body::Body, /// }; /// use http::Response; /// use std::convert::Infallible; - /// use hyper::Body; /// - /// let service = tower::service_fn(|request: Request| async { + /// let service = tower::service_fn(|request: Request| async { /// Ok::<_, Infallible>(Response::new(Body::empty())) /// }); /// /// // Requests to `GET /` will go to `service`. /// let app = Router::new().route("/", get_service(service)); - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` /// /// Note that `get` routes will also be called for `HEAD` requests but will have @@ -78,12 +76,11 @@ macro_rules! 
top_level_service_fn { $name:ident, $method:ident ) => { $(#[$m])+ - pub fn $name(svc: T) -> MethodRouter + pub fn $name(svc: T) -> MethodRouter where - T: Service> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse + 'static, T::Future: Send + 'static, - B: HttpBody + Send + 'static, S: Clone, { on_service(MethodFilter::$method, svc) @@ -110,9 +107,7 @@ macro_rules! top_level_handler_fn { /// /// // Requests to `GET /` will go to `handler`. /// let app = Router::new().route("/", get(handler)); - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` /// /// Note that `get` routes will also be called for `HEAD` requests but will have @@ -140,10 +135,9 @@ macro_rules! top_level_handler_fn { $name:ident, $method:ident ) => { $(#[$m])+ - pub fn $name(handler: H) -> MethodRouter + pub fn $name(handler: H) -> MethodRouter where - H: Handler, - B: HttpBody + Send + 'static, + H: Handler, T: 'static, S: Clone + Send + Sync + 'static, { @@ -163,28 +157,26 @@ macro_rules! chained_service_fn { /// /// ```rust /// use axum::{ - /// http::Request, + /// extract::Request, /// Router, /// routing::post_service, + /// body::Body, /// }; /// use http::Response; /// use std::convert::Infallible; - /// use hyper::Body; /// - /// let service = tower::service_fn(|request: Request| async { + /// let service = tower::service_fn(|request: Request| async { /// Ok::<_, Infallible>(Response::new(Body::empty())) /// }); /// - /// let other_service = tower::service_fn(|request: Request| async { + /// let other_service = tower::service_fn(|request: Request| async { /// Ok::<_, Infallible>(Response::new(Body::empty())) /// }); /// /// // Requests to `POST /` will go to `service` and `GET /` will go to /// // `other_service`. 
/// let app = Router::new().route("/", post_service(service).get_service(other_service)); - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` /// /// Note that `get` routes will also be called for `HEAD` requests but will have @@ -215,7 +207,7 @@ macro_rules! chained_service_fn { #[track_caller] pub fn $name(self, svc: T) -> Self where - T: Service, Error = E> + T: Service + Clone + Send + 'static, @@ -246,9 +238,7 @@ macro_rules! chained_handler_fn { /// // Requests to `POST /` will go to `handler` and `GET /` will go to /// // `other_handler`. /// let app = Router::new().route("/", post(handler).get(other_handler)); - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` /// /// Note that `get` routes will also be called for `HEAD` requests but will have @@ -279,7 +269,7 @@ macro_rules! chained_handler_fn { #[track_caller] pub fn $name(self, handler: H) -> Self where - H: Handler, + H: Handler, T: 'static, S: Send + Sync + 'static, { @@ -303,31 +293,28 @@ top_level_service_fn!(trace_service, TRACE); /// /// ```rust /// use axum::{ -/// http::Request, +/// extract::Request, /// routing::on, /// Router, +/// body::Body, /// routing::{MethodFilter, on_service}, /// }; /// use http::Response; /// use std::convert::Infallible; -/// use hyper::Body; /// -/// let service = tower::service_fn(|request: Request| async { +/// let service = tower::service_fn(|request: Request| async { /// Ok::<_, Infallible>(Response::new(Body::empty())) /// }); /// /// // Requests to `POST /` will go to `service`. 
/// let app = Router::new().route("/", on_service(MethodFilter::POST, service)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` -pub fn on_service(filter: MethodFilter, svc: T) -> MethodRouter +pub fn on_service(filter: MethodFilter, svc: T) -> MethodRouter where - T: Service> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse + 'static, T::Future: Send + 'static, - B: HttpBody + Send + 'static, S: Clone, { MethodRouter::new().on_service(filter, svc) @@ -339,59 +326,54 @@ where /// /// ```rust /// use axum::{ -/// http::Request, +/// extract::Request, /// Router, /// routing::any_service, +/// body::Body, /// }; /// use http::Response; /// use std::convert::Infallible; -/// use hyper::Body; /// -/// let service = tower::service_fn(|request: Request| async { +/// let service = tower::service_fn(|request: Request| async { /// Ok::<_, Infallible>(Response::new(Body::empty())) /// }); /// /// // All requests to `/` will go to `service`. /// let app = Router::new().route("/", any_service(service)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// Additional methods can still be chained: /// /// ```rust /// use axum::{ -/// http::Request, +/// extract::Request, /// Router, /// routing::any_service, +/// body::Body, /// }; /// use http::Response; /// use std::convert::Infallible; -/// use hyper::Body; /// -/// let service = tower::service_fn(|request: Request| async { +/// let service = tower::service_fn(|request: Request| async { /// # Ok::<_, Infallible>(Response::new(Body::empty())) /// // ... /// }); /// -/// let other_service = tower::service_fn(|request: Request| async { +/// let other_service = tower::service_fn(|request: Request| async { /// # Ok::<_, Infallible>(Response::new(Body::empty())) /// // ... 
/// }); /// /// // `POST /` goes to `other_service`. All other requests go to `service` /// let app = Router::new().route("/", any_service(service).post_service(other_service)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` -pub fn any_service(svc: T) -> MethodRouter +pub fn any_service(svc: T) -> MethodRouter where - T: Service> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse + 'static, T::Future: Send + 'static, - B: HttpBody + Send + 'static, S: Clone, { MethodRouter::new() @@ -423,14 +405,11 @@ top_level_handler_fn!(trace, TRACE); /// /// // Requests to `POST /` will go to `handler`. /// let app = Router::new().route("/", on(MethodFilter::POST, handler)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` -pub fn on(filter: MethodFilter, handler: H) -> MethodRouter +pub fn on(filter: MethodFilter, handler: H) -> MethodRouter where - H: Handler, - B: HttpBody + Send + 'static, + H: Handler, T: 'static, S: Clone + Send + Sync + 'static, { @@ -451,9 +430,7 @@ where /// /// // All requests to `/` will go to `handler`. /// let app = Router::new().route("/", any(handler)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` /// /// Additional methods can still be chained: @@ -470,14 +447,11 @@ where /// /// // `POST /` goes to `other_handler`. 
All other requests go to `handler` /// let app = Router::new().route("/", any(handler).post(other_handler)); -/// # async { -/// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); -/// # }; +/// # let _: Router = app; /// ``` -pub fn any(handler: H) -> MethodRouter +pub fn any(handler: H) -> MethodRouter where - H: Handler, - B: HttpBody + Send + 'static, + H: Handler, T: 'static, S: Clone + Send + Sync + 'static, { @@ -493,7 +467,7 @@ where /// /// ``` /// use tower::Service; -/// use axum::{routing::get, extract::State, body::Body, http::Request}; +/// use axum::{routing::get, extract::{State, Request}, body::Body}; /// /// // this `MethodRouter` doesn't require any state, i.e. the state is `()`, /// let method_router = get(|| async {}); @@ -510,20 +484,20 @@ where /// // helper to check that a value implements `Service` /// fn assert_service(service: S) /// where -/// S: Service>, +/// S: Service, /// {} /// ``` #[must_use] -pub struct MethodRouter { - get: MethodEndpoint, - head: MethodEndpoint, - delete: MethodEndpoint, - options: MethodEndpoint, - patch: MethodEndpoint, - post: MethodEndpoint, - put: MethodEndpoint, - trace: MethodEndpoint, - fallback: Fallback, +pub struct MethodRouter { + get: MethodEndpoint, + head: MethodEndpoint, + delete: MethodEndpoint, + options: MethodEndpoint, + patch: MethodEndpoint, + post: MethodEndpoint, + put: MethodEndpoint, + trace: MethodEndpoint, + fallback: Fallback, allow_header: AllowHeader, } @@ -553,7 +527,7 @@ impl AllowHeader { } } -impl fmt::Debug for MethodRouter { +impl fmt::Debug for MethodRouter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MethodRouter") .field("get", &self.get) @@ -570,9 +544,8 @@ impl fmt::Debug for MethodRouter { } } -impl MethodRouter +impl MethodRouter where - B: HttpBody + Send + 'static, S: Clone, { /// Chain an additional handler that will accept requests matching the given @@ -594,14 +567,12 @@ where /// // Requests 
to `GET /` will go to `handler` and `DELETE /` will go to /// // `other_handler` /// let app = Router::new().route("/", get(handler).on(MethodFilter::DELETE, other_handler)); - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` #[track_caller] pub fn on(self, filter: MethodFilter, handler: H) -> Self where - H: Handler, + H: Handler, T: 'static, S: Send + Sync + 'static, { @@ -623,7 +594,7 @@ where /// Add a fallback [`Handler`] to the router. pub fn fallback(mut self, handler: H) -> Self where - H: Handler, + H: Handler, T: 'static, S: Send + Sync + 'static, { @@ -632,17 +603,14 @@ where } } -impl MethodRouter<(), B, Infallible> -where - B: HttpBody + Send + 'static, -{ - /// Convert the handler into a [`MakeService`]. +impl MethodRouter<(), Infallible> { + /// Convert the router into a [`MakeService`]. /// - /// This allows you to serve a single handler if you don't need any routing: + /// This allows you to serve a single `MethodRouter` if you don't need any + /// routing based on the path: /// /// ```rust /// use axum::{ - /// Server, /// handler::Handler, /// http::{Uri, Method}, /// response::IntoResponse, @@ -651,16 +619,14 @@ where /// use std::net::SocketAddr; /// /// async fn handler(method: Method, uri: Uri, body: String) -> String { - /// format!("received `{} {}` with body `{:?}`", method, uri, body) + /// format!("received `{method} {uri}` with body `{body:?}`") /// } /// /// let router = get(handler).post(handler); /// /// # async { - /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) - /// .serve(router.into_make_service()) - /// .await?; - /// # Ok::<_, hyper::Error>(()) + /// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); + /// axum::serve(listener, router.into_make_service()).await.unwrap(); /// # }; /// ``` /// @@ -676,7 +642,6 @@ where /// /// ```rust /// use axum::{ - /// Server, /// 
handler::Handler, /// response::IntoResponse, /// extract::ConnectInfo, @@ -685,16 +650,14 @@ where /// use std::net::SocketAddr; /// /// async fn handler(ConnectInfo(addr): ConnectInfo) -> String { - /// format!("Hello {}", addr) + /// format!("Hello {addr}") /// } /// /// let router = get(handler).post(handler); /// /// # async { - /// Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))) - /// .serve(router.into_make_service_with_connect_info::()) - /// .await?; - /// # Ok::<_, hyper::Error>(()) + /// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); + /// axum::serve(listener, router.into_make_service()).await.unwrap(); /// # }; /// ``` /// @@ -706,15 +669,14 @@ where } } -impl MethodRouter +impl MethodRouter where - B: HttpBody + Send + 'static, S: Clone, { /// Create a default `MethodRouter` that will respond with `405 Method Not Allowed` to all /// requests. pub fn new() -> Self { - let fallback = Route::new(service_fn(|_: Request| async { + let fallback = Route::new(service_fn(|_: Request| async { Ok(StatusCode::METHOD_NOT_ALLOWED.into_response()) })); @@ -733,7 +695,7 @@ where } /// Provide the state for the router. 
- pub fn with_state(self, state: S) -> MethodRouter { + pub fn with_state(self, state: S) -> MethodRouter { MethodRouter { get: self.get.with_state(&state), head: self.head.with_state(&state), @@ -755,28 +717,26 @@ where /// /// ```rust /// use axum::{ - /// http::Request, + /// extract::Request, /// Router, /// routing::{MethodFilter, on_service}, + /// body::Body, /// }; /// use http::Response; /// use std::convert::Infallible; - /// use hyper::Body; /// - /// let service = tower::service_fn(|request: Request| async { + /// let service = tower::service_fn(|request: Request| async { /// Ok::<_, Infallible>(Response::new(Body::empty())) /// }); /// /// // Requests to `DELETE /` will go to `service` /// let app = Router::new().route("/", on_service(MethodFilter::DELETE, service)); - /// # async { - /// # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); - /// # }; + /// # let _: Router = app; /// ``` #[track_caller] pub fn on_service(self, filter: MethodFilter, svc: T) -> Self where - T: Service, Error = E> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse + 'static, T::Future: Send + 'static, { @@ -784,19 +744,19 @@ where } #[track_caller] - fn on_endpoint(mut self, filter: MethodFilter, endpoint: MethodEndpoint) -> Self { + fn on_endpoint(mut self, filter: MethodFilter, endpoint: MethodEndpoint) -> Self { // written as a separate function to generate less IR #[track_caller] - fn set_endpoint( + fn set_endpoint( method_name: &str, - out: &mut MethodEndpoint, - endpoint: &MethodEndpoint, + out: &mut MethodEndpoint, + endpoint: &MethodEndpoint, endpoint_filter: MethodFilter, filter: MethodFilter, allow_header: &mut AllowHeader, methods: &[&'static str], ) where - MethodEndpoint: Clone, + MethodEndpoint: Clone, S: Clone, { if endpoint_filter.contains(filter) { @@ -908,7 +868,7 @@ where #[doc = include_str!("../docs/method_routing/fallback.md")] pub fn fallback_service(mut self, svc: T) -> 
Self where - T: Service, Error = E> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse + 'static, T::Future: Send + 'static, { @@ -917,19 +877,18 @@ where } #[doc = include_str!("../docs/method_routing/layer.md")] - pub fn layer(self, layer: L) -> MethodRouter + pub fn layer(self, layer: L) -> MethodRouter where - L: Layer> + Clone + Send + 'static, - L::Service: Service> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Error: Into + 'static, - >>::Future: Send + 'static, + L: Layer> + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Error: Into + 'static, + >::Future: Send + 'static, E: 'static, S: 'static, - NewReqBody: HttpBody + 'static, NewError: 'static, { - let layer_fn = move |route: Route| route.layer(layer.clone()); + let layer_fn = move |route: Route| route.layer(layer.clone()); MethodRouter { get: self.get.map(layer_fn.clone()), @@ -947,12 +906,12 @@ where #[doc = include_str!("../docs/method_routing/route_layer.md")] #[track_caller] - pub fn route_layer(mut self, layer: L) -> MethodRouter + pub fn route_layer(mut self, layer: L) -> MethodRouter where - L: Layer> + Clone + Send + 'static, - L::Service: Service, Error = E> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Future: Send + 'static, + L: Layer> + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Future: Send + 'static, E: 'static, S: 'static, { @@ -990,19 +949,15 @@ where } #[track_caller] - pub(crate) fn merge_for_path( - mut self, - path: Option<&str>, - other: MethodRouter, - ) -> Self { + pub(crate) fn merge_for_path(mut self, path: Option<&str>, other: MethodRouter) -> Self { // written using inner functions to generate less IR #[track_caller] - fn merge_inner( + fn merge_inner( path: Option<&str>, name: &str, - first: MethodEndpoint, - second: MethodEndpoint, - ) -> 
MethodEndpoint { + first: MethodEndpoint, + second: MethodEndpoint, + ) -> MethodEndpoint { match (first, second) { (MethodEndpoint::None, MethodEndpoint::None) => MethodEndpoint::None, (pick, MethodEndpoint::None) | (MethodEndpoint::None, pick) => pick, @@ -1042,22 +997,21 @@ where #[doc = include_str!("../docs/method_routing/merge.md")] #[track_caller] - pub fn merge(self, other: MethodRouter) -> Self { + pub fn merge(self, other: MethodRouter) -> Self { self.merge_for_path(None, other) } /// Apply a [`HandleErrorLayer`]. /// /// This is a convenience method for doing `self.layer(HandleErrorLayer::new(f))`. - pub fn handle_error(self, f: F) -> MethodRouter + pub fn handle_error(self, f: F) -> MethodRouter where F: Clone + Send + Sync + 'static, - HandleError, F, T>: Service, Error = Infallible>, - , F, T> as Service>>::Future: Send, - , F, T> as Service>>::Response: IntoResponse + Send, + HandleError, F, T>: Service, + , F, T> as Service>::Future: Send, + , F, T> as Service>::Response: IntoResponse + Send, T: 'static, E: 'static, - B: 'static, S: 'static, { self.layer(HandleErrorLayer::new(f)) @@ -1068,7 +1022,7 @@ where self } - pub(crate) fn call_with_state(&mut self, req: Request, state: S) -> RouteFuture { + pub(crate) fn call_with_state(&self, req: Request, state: S) -> RouteFuture { macro_rules! 
call { ( $req:expr, @@ -1080,12 +1034,12 @@ where match $svc { MethodEndpoint::None => {} MethodEndpoint::Route(route) => { - return RouteFuture::from_future(route.oneshot_inner($req)) + return RouteFuture::from_future(route.clone().oneshot_inner($req)) .strip_body($method == Method::HEAD); } MethodEndpoint::BoxedHandler(handler) => { - let mut route = handler.clone().into_route(state); - return RouteFuture::from_future(route.oneshot_inner($req)) + let route = handler.clone().into_route(state); + return RouteFuture::from_future(route.clone().oneshot_inner($req)) .strip_body($method == Method::HEAD); } } @@ -1119,7 +1073,7 @@ where call!(req, method, DELETE, delete); call!(req, method, TRACE, trace); - let future = fallback.call_with_state(req, state); + let future = fallback.clone().call_with_state(req, state); match allow_header { AllowHeader::None => future.allow_header(Bytes::new()), @@ -1149,7 +1103,7 @@ fn append_allow_header(allow_header: &mut AllowHeader, method: &'static str) { } } -impl Clone for MethodRouter { +impl Clone for MethodRouter { fn clone(&self) -> Self { Self { get: self.get.clone(), @@ -1166,9 +1120,8 @@ impl Clone for MethodRouter { } } -impl Default for MethodRouter +impl Default for MethodRouter where - B: HttpBody + Send + 'static, S: Clone, { fn default() -> Self { @@ -1176,13 +1129,13 @@ where } } -enum MethodEndpoint { +enum MethodEndpoint { None, - Route(Route), - BoxedHandler(BoxedIntoRoute), + Route(Route), + BoxedHandler(BoxedIntoRoute), } -impl MethodEndpoint +impl MethodEndpoint where S: Clone, { @@ -1194,13 +1147,11 @@ where matches!(self, Self::None) } - fn map(self, f: F) -> MethodEndpoint + fn map(self, f: F) -> MethodEndpoint where S: 'static, - B: 'static, E: 'static, - F: FnOnce(Route) -> Route + Clone + Send + 'static, - B2: HttpBody + 'static, + F: FnOnce(Route) -> Route + Clone + Send + 'static, E2: 'static, { match self { @@ -1210,7 +1161,7 @@ where } } - fn with_state(self, state: &S) -> MethodEndpoint { + fn 
with_state(self, state: &S) -> MethodEndpoint { match self { MethodEndpoint::None => MethodEndpoint::None, MethodEndpoint::Route(route) => MethodEndpoint::Route(route), @@ -1221,7 +1172,7 @@ where } } -impl Clone for MethodEndpoint { +impl Clone for MethodEndpoint { fn clone(&self) -> Self { match self { Self::None => Self::None, @@ -1231,7 +1182,7 @@ impl Clone for MethodEndpoint { } } -impl fmt::Debug for MethodEndpoint { +impl fmt::Debug for MethodEndpoint { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::None => f.debug_tuple("None").finish(), @@ -1241,13 +1192,14 @@ impl fmt::Debug for MethodEndpoint { } } -impl Service> for MethodRouter<(), B, E> +impl Service> for MethodRouter<(), E> where - B: HttpBody + Send + 'static, + B: HttpBody + Send + 'static, + B::Error: Into, { type Response = Response; type Error = E; - type Future = RouteFuture; + type Future = RouteFuture; #[inline] fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { @@ -1256,34 +1208,53 @@ where #[inline] fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(Body::new); self.call_with_state(req, ()) } } -impl Handler<(), S, B> for MethodRouter +impl Handler<(), S> for MethodRouter where S: Clone + 'static, - B: HttpBody + Send + 'static, { - type Future = InfallibleRouteFuture; + type Future = InfallibleRouteFuture; - fn call(mut self, req: Request, state: S) -> Self::Future { + fn call(self, req: Request, state: S) -> Self::Future { InfallibleRouteFuture::new(self.call_with_state(req, state)) } } +// for `axum::serve(listener, router)` +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +const _: () = { + use crate::serve::IncomingStream; + + impl Service> for MethodRouter<()> { + type Response = Self; + type Error = Infallible; + type Future = std::future::Ready>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: IncomingStream<'_>) -> Self::Future 
{ + std::future::ready(Ok(self.clone().with_state(()))) + } + } +}; + #[cfg(test)] mod tests { use super::*; - use crate::{ - body::Body, error_handling::HandleErrorLayer, extract::State, - handler::HandlerWithoutStateExt, - }; - use axum_core::response::IntoResponse; + use crate::{extract::State, handler::HandlerWithoutStateExt}; use http::{header::ALLOW, HeaderMap}; + use http_body_util::BodyExt; use std::time::Duration; - use tower::{timeout::TimeoutLayer, Service, ServiceBuilder, ServiceExt}; - use tower_http::{services::fs::ServeDir, validate_request::ValidateRequestHeaderLayer}; + use tower::ServiceExt; + use tower_http::{ + services::fs::ServeDir, timeout::TimeoutLayer, validate_request::ValidateRequestHeaderLayer, + }; #[crate::test] async fn method_not_allowed_by_default() { @@ -1295,7 +1266,7 @@ mod tests { #[crate::test] async fn get_service_fn() { - async fn handle(_req: Request) -> Result, Infallible> { + async fn handle(_req: Request) -> Result, Infallible> { Ok(Response::new(Body::from("ok"))) } @@ -1372,7 +1343,7 @@ mod tests { } #[allow(dead_code)] - fn buiding_complex_router() { + async fn building_complex_router() { let app = crate::Router::new().route( "/", // use the all the things 💣️ @@ -1382,16 +1353,11 @@ mod tests { .merge(delete_service(ServeDir::new("."))) .fallback(|| async { StatusCode::NOT_FOUND }) .put(ok) - .layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|_| async { - StatusCode::REQUEST_TIMEOUT - })) - .layer(TimeoutLayer::new(Duration::from_secs(10))), - ), + .layer(TimeoutLayer::new(Duration::from_secs(10))), ); - crate::Server::bind(&"0.0.0.0:0".parse().unwrap()).serve(app.into_make_service()); + let listener = tokio::net::TcpListener::bind("0.0.0.0:0").await.unwrap(); + crate::serve(listener, app).await.unwrap(); } #[crate::test] @@ -1556,7 +1522,7 @@ mod tests { async fn call(method: Method, svc: &mut S) -> (StatusCode, HeaderMap, String) where - S: Service, Error = Infallible>, + S: Service, S::Response: 
IntoResponse, { let request = Request::builder() @@ -1573,7 +1539,8 @@ mod tests { .unwrap() .into_response(); let (parts, body) = response.into_parts(); - let body = String::from_utf8(hyper::body::to_bytes(body).await.unwrap().to_vec()).unwrap(); + let body = + String::from_utf8(BodyExt::collect(body).await.unwrap().to_bytes().to_vec()).unwrap(); (parts.status, parts.headers, body) } diff --git a/.cargo-vendor/axum/src/routing/mod.rs b/.cargo-vendor/axum/src/routing/mod.rs index 1760157cbc..6564df7d62 100644 --- a/.cargo-vendor/axum/src/routing/mod.rs +++ b/.cargo-vendor/axum/src/routing/mod.rs @@ -9,14 +9,17 @@ use crate::{ handler::Handler, util::try_downcast, }; -use axum_core::response::{IntoResponse, Response}; -use http::Request; +use axum_core::{ + extract::Request, + response::{IntoResponse, Response}, +}; use std::{ convert::Infallible, fmt, + marker::PhantomData, + sync::Arc, task::{Context, Poll}, }; -use sync_wrapper::SyncWrapper; use tower_layer::Layer; use tower_service::Service; @@ -56,27 +59,27 @@ pub(crate) struct RouteId(u32); /// The router type for composing handlers and services. 
#[must_use] -pub struct Router { - path_router: PathRouter, - fallback_router: PathRouter, - default_fallback: bool, - catch_all_fallback: Fallback, +pub struct Router { + inner: Arc>, } -impl Clone for Router { +impl Clone for Router { fn clone(&self) -> Self { Self { - path_router: self.path_router.clone(), - fallback_router: self.fallback_router.clone(), - default_fallback: self.default_fallback, - catch_all_fallback: self.catch_all_fallback.clone(), + inner: Arc::clone(&self.inner), } } } -impl Default for Router +struct RouterInner { + path_router: PathRouter, + fallback_router: PathRouter, + default_fallback: bool, + catch_all_fallback: Fallback, +} + +impl Default for Router where - B: HttpBody + Send + 'static, S: Clone + Send + Sync + 'static, { fn default() -> Self { @@ -84,13 +87,13 @@ where } } -impl fmt::Debug for Router { +impl fmt::Debug for Router { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Router") - .field("path_router", &self.path_router) - .field("fallback_router", &self.fallback_router) - .field("default_fallback", &self.default_fallback) - .field("catch_all_fallback", &self.catch_all_fallback) + .field("path_router", &self.inner.path_router) + .field("fallback_router", &self.inner.fallback_router) + .field("default_fallback", &self.inner.default_fallback) + .field("catch_all_fallback", &self.inner.catch_all_fallback) .finish() } } @@ -100,9 +103,8 @@ pub(crate) const NEST_TAIL_PARAM_CAPTURE: &str = "/*__private__axum_nest_tail_pa pub(crate) const FALLBACK_PARAM: &str = "__private__axum_fallback"; pub(crate) const FALLBACK_PARAM_PATH: &str = "/*__private__axum_fallback"; -impl Router +impl Router where - B: HttpBody + Send + 'static, S: Clone + Send + Sync + 'static, { /// Create a new `Router`. @@ -111,28 +113,63 @@ where /// all requests. 
pub fn new() -> Self { Self { - path_router: Default::default(), - fallback_router: PathRouter::new_fallback(), - default_fallback: true, - catch_all_fallback: Fallback::Default(Route::new(NotFound)), + inner: Arc::new(RouterInner { + path_router: Default::default(), + fallback_router: PathRouter::new_fallback(), + default_fallback: true, + catch_all_fallback: Fallback::Default(Route::new(NotFound)), + }), + } + } + + fn map_inner(self, f: F) -> Router + where + F: FnOnce(RouterInner) -> RouterInner, + { + Router { + inner: Arc::new(f(self.into_inner())), + } + } + + fn tap_inner_mut(self, f: F) -> Self + where + F: FnOnce(&mut RouterInner), + { + let mut inner = self.into_inner(); + f(&mut inner); + Router { + inner: Arc::new(inner), + } + } + + fn into_inner(self) -> RouterInner { + match Arc::try_unwrap(self.inner) { + Ok(inner) => inner, + Err(arc) => RouterInner { + path_router: arc.path_router.clone(), + fallback_router: arc.fallback_router.clone(), + default_fallback: arc.default_fallback, + catch_all_fallback: arc.catch_all_fallback.clone(), + }, } } #[doc = include_str!("../docs/routing/route.md")] #[track_caller] - pub fn route(mut self, path: &str, method_router: MethodRouter) -> Self { - panic_on_err!(self.path_router.route(path, method_router)); - self + pub fn route(self, path: &str, method_router: MethodRouter) -> Self { + self.tap_inner_mut(|this| { + panic_on_err!(this.path_router.route(path, method_router)); + }) } #[doc = include_str!("../docs/routing/route_service.md")] - pub fn route_service(mut self, path: &str, service: T) -> Self + pub fn route_service(self, path: &str, service: T) -> Self where - T: Service, Error = Infallible> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse, T::Future: Send + 'static, { - let service = match try_downcast::, _>(service) { + let service = match try_downcast::, _>(service) { Ok(_) => { panic!( "Invalid route: `Router::route_service` cannot be used with `Router`s. 
\ @@ -142,14 +179,15 @@ where Err(service) => service, }; - panic_on_err!(self.path_router.route_service(path, service)); - self + self.tap_inner_mut(|this| { + panic_on_err!(this.path_router.route_service(path, service)); + }) } #[doc = include_str!("../docs/routing/nest.md")] #[track_caller] - pub fn nest(mut self, path: &str, router: Router) -> Self { - let Router { + pub fn nest(self, path: &str, router: Router) -> Self { + let RouterInner { path_router, fallback_router, default_fallback, @@ -157,205 +195,258 @@ where // requests with an empty path. If we were to inherit the catch-all fallback // it would end up matching `/{path}/*` which doesn't match empty paths. catch_all_fallback: _, - } = router; + } = router.into_inner(); - panic_on_err!(self.path_router.nest(path, path_router)); + self.tap_inner_mut(|this| { + panic_on_err!(this.path_router.nest(path, path_router)); - if !default_fallback { - panic_on_err!(self.fallback_router.nest(path, fallback_router)); - } - - self + if !default_fallback { + panic_on_err!(this.fallback_router.nest(path, fallback_router)); + } + }) } /// Like [`nest`](Self::nest), but accepts an arbitrary `Service`. #[track_caller] - pub fn nest_service(mut self, path: &str, service: T) -> Self + pub fn nest_service(self, path: &str, service: T) -> Self where - T: Service, Error = Infallible> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse, T::Future: Send + 'static, { - panic_on_err!(self.path_router.nest_service(path, service)); - self + self.tap_inner_mut(|this| { + panic_on_err!(this.path_router.nest_service(path, service)); + }) } #[doc = include_str!("../docs/routing/merge.md")] #[track_caller] - pub fn merge(mut self, other: R) -> Self + pub fn merge(self, other: R) -> Self where - R: Into>, + R: Into>, { const PANIC_MSG: &str = "Failed to merge fallbacks. This is a bug in axum. 
Please file an issue"; - let Router { + let other: Router = other.into(); + let RouterInner { path_router, fallback_router: mut other_fallback, default_fallback, catch_all_fallback, - } = other.into(); + } = other.into_inner(); - panic_on_err!(self.path_router.merge(path_router)); + self.map_inner(|mut this| { + panic_on_err!(this.path_router.merge(path_router)); - match (self.default_fallback, default_fallback) { - // both have the default fallback - // use the one from other - (true, true) => { - self.fallback_router.merge(other_fallback).expect(PANIC_MSG); - } - // self has default fallback, other has a custom fallback - (true, false) => { - self.fallback_router.merge(other_fallback).expect(PANIC_MSG); - self.default_fallback = false; - } - // self has a custom fallback, other has a default - (false, true) => { - let fallback_router = std::mem::take(&mut self.fallback_router); - other_fallback.merge(fallback_router).expect(PANIC_MSG); - self.fallback_router = other_fallback; - } - // both have a custom fallback, not allowed - (false, false) => { - panic!("Cannot merge two `Router`s that both have a fallback") - } - }; + match (this.default_fallback, default_fallback) { + // both have the default fallback + // use the one from other + (true, true) => { + this.fallback_router.merge(other_fallback).expect(PANIC_MSG); + } + // this has default fallback, other has a custom fallback + (true, false) => { + this.fallback_router.merge(other_fallback).expect(PANIC_MSG); + this.default_fallback = false; + } + // this has a custom fallback, other has a default + (false, true) => { + let fallback_router = std::mem::take(&mut this.fallback_router); + other_fallback.merge(fallback_router).expect(PANIC_MSG); + this.fallback_router = other_fallback; + } + // both have a custom fallback, not allowed + (false, false) => { + panic!("Cannot merge two `Router`s that both have a fallback") + } + }; - self.catch_all_fallback = self - .catch_all_fallback - .merge(catch_all_fallback) - 
.unwrap_or_else(|| panic!("Cannot merge two `Router`s that both have a fallback")); + this.catch_all_fallback = this + .catch_all_fallback + .merge(catch_all_fallback) + .unwrap_or_else(|| panic!("Cannot merge two `Router`s that both have a fallback")); - self + this + }) } #[doc = include_str!("../docs/routing/layer.md")] - pub fn layer(self, layer: L) -> Router + pub fn layer(self, layer: L) -> Router where - L: Layer> + Clone + Send + 'static, - L::Service: Service> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Error: Into + 'static, - >>::Future: Send + 'static, - NewReqBody: HttpBody + 'static, + L: Layer + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Error: Into + 'static, + >::Future: Send + 'static, { - Router { - path_router: self.path_router.layer(layer.clone()), - fallback_router: self.fallback_router.layer(layer.clone()), - default_fallback: self.default_fallback, - catch_all_fallback: self.catch_all_fallback.map(|route| route.layer(layer)), - } + self.map_inner(|this| RouterInner { + path_router: this.path_router.layer(layer.clone()), + fallback_router: this.fallback_router.layer(layer.clone()), + default_fallback: this.default_fallback, + catch_all_fallback: this.catch_all_fallback.map(|route| route.layer(layer)), + }) } #[doc = include_str!("../docs/routing/route_layer.md")] #[track_caller] pub fn route_layer(self, layer: L) -> Self where - L: Layer> + Clone + Send + 'static, - L::Service: Service> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Error: Into + 'static, - >>::Future: Send + 'static, + L: Layer + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Error: Into + 'static, + >::Future: Send + 'static, { - Router { - path_router: self.path_router.route_layer(layer), - fallback_router: self.fallback_router, - default_fallback: self.default_fallback, - 
catch_all_fallback: self.catch_all_fallback, - } + self.map_inner(|this| RouterInner { + path_router: this.path_router.route_layer(layer), + fallback_router: this.fallback_router, + default_fallback: this.default_fallback, + catch_all_fallback: this.catch_all_fallback, + }) } #[track_caller] #[doc = include_str!("../docs/routing/fallback.md")] - pub fn fallback(mut self, handler: H) -> Self + pub fn fallback(self, handler: H) -> Self where - H: Handler, + H: Handler, T: 'static, { - self.catch_all_fallback = - Fallback::BoxedHandler(BoxedIntoRoute::from_handler(handler.clone())); - self.fallback_endpoint(Endpoint::MethodRouter(any(handler))) + self.tap_inner_mut(|this| { + this.catch_all_fallback = + Fallback::BoxedHandler(BoxedIntoRoute::from_handler(handler.clone())); + }) + .fallback_endpoint(Endpoint::MethodRouter(any(handler))) } /// Add a fallback [`Service`] to the router. /// /// See [`Router::fallback`] for more details. - pub fn fallback_service(mut self, service: T) -> Self + pub fn fallback_service(self, service: T) -> Self where - T: Service, Error = Infallible> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse, T::Future: Send + 'static, { let route = Route::new(service); - self.catch_all_fallback = Fallback::Service(route.clone()); - self.fallback_endpoint(Endpoint::Route(route)) + self.tap_inner_mut(|this| { + this.catch_all_fallback = Fallback::Service(route.clone()); + }) + .fallback_endpoint(Endpoint::Route(route)) } - fn fallback_endpoint(mut self, endpoint: Endpoint) -> Self { - self.fallback_router.set_fallback(endpoint); - self.default_fallback = false; - self + fn fallback_endpoint(self, endpoint: Endpoint) -> Self { + self.tap_inner_mut(|this| { + this.fallback_router.set_fallback(endpoint); + this.default_fallback = false; + }) } #[doc = include_str!("../docs/routing/with_state.md")] - pub fn with_state(self, state: S) -> Router { - Router { - path_router: self.path_router.with_state(state.clone()), 
- fallback_router: self.fallback_router.with_state(state.clone()), - default_fallback: self.default_fallback, - catch_all_fallback: self.catch_all_fallback.with_state(state), - } + pub fn with_state(self, state: S) -> Router { + self.map_inner(|this| RouterInner { + path_router: this.path_router.with_state(state.clone()), + fallback_router: this.fallback_router.with_state(state.clone()), + default_fallback: this.default_fallback, + catch_all_fallback: this.catch_all_fallback.with_state(state), + }) } - pub(crate) fn call_with_state( - &mut self, - mut req: Request, - state: S, - ) -> RouteFuture { - // required for opaque routers to still inherit the fallback - // TODO(david): remove this feature in 0.7 - if !self.default_fallback { - req.extensions_mut().insert(SuperFallback(SyncWrapper::new( - self.fallback_router.clone(), - ))); - } + pub(crate) fn call_with_state(&self, req: Request, state: S) -> RouteFuture { + let (req, state) = match self.inner.path_router.call_with_state(req, state) { + Ok(future) => return future, + Err((req, state)) => (req, state), + }; - match self.path_router.call_with_state(req, state) { - Ok(future) => future, - Err((mut req, state)) => { - let super_fallback = req - .extensions_mut() - .remove::>() - .map(|SuperFallback(path_router)| path_router.into_inner()); - - if let Some(mut super_fallback) = super_fallback { - match super_fallback.call_with_state(req, state) { - Ok(future) => return future, - Err((req, state)) => { - return self.catch_all_fallback.call_with_state(req, state); - } - } - } + let (req, state) = match self.inner.fallback_router.call_with_state(req, state) { + Ok(future) => return future, + Err((req, state)) => (req, state), + }; - match self.fallback_router.call_with_state(req, state) { - Ok(future) => future, - Err((req, state)) => self.catch_all_fallback.call_with_state(req, state), - } - } + self.inner + .catch_all_fallback + .clone() + .call_with_state(req, state) + } + + /// Convert the router into a borrowed 
[`Service`] with a fixed request body type, to aid type + /// inference. + /// + /// In some cases when calling methods from [`tower::ServiceExt`] on a [`Router`] you might get + /// type inference errors along the lines of + /// + /// ```not_rust + /// let response = router.ready().await?.call(request).await?; + /// ^^^^^ cannot infer type for type parameter `B` + /// ``` + /// + /// This happens because `Router` implements [`Service`] with `impl Service> for Router<()>`. + /// + /// For example: + /// + /// ```compile_fail + /// use axum::{ + /// Router, + /// routing::get, + /// http::Request, + /// body::Body, + /// }; + /// use tower::{Service, ServiceExt}; + /// + /// # async fn async_main() -> Result<(), Box> { + /// let mut router = Router::new().route("/", get(|| async {})); + /// let request = Request::new(Body::empty()); + /// let response = router.ready().await?.call(request).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// Calling `Router::as_service` fixes that: + /// + /// ``` + /// use axum::{ + /// Router, + /// routing::get, + /// http::Request, + /// body::Body, + /// }; + /// use tower::{Service, ServiceExt}; + /// + /// # async fn async_main() -> Result<(), Box> { + /// let mut router = Router::new().route("/", get(|| async {})); + /// let request = Request::new(Body::empty()); + /// let response = router.as_service().ready().await?.call(request).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// This is mainly used when calling `Router` in tests. It shouldn't be necessary when running + /// the `Router` normally via [`Router::into_make_service`]. + pub fn as_service(&mut self) -> RouterAsService<'_, B, S> { + RouterAsService { + router: self, + _marker: PhantomData, + } + } + + /// Convert the router into an owned [`Service`] with a fixed request body type, to aid type + /// inference. + /// + /// This is the same as [`Router::as_service`] instead it returns an owned [`Service`]. See + /// that method for more details. 
+ pub fn into_service(self) -> RouterIntoService { + RouterIntoService { + router: self, + _marker: PhantomData, } } } -impl Router<(), B> -where - B: HttpBody + Send + 'static, -{ +impl Router { /// Convert this router into a [`MakeService`], that is a [`Service`] whose /// response is another service. /// - /// This is useful when running your application with hyper's - /// [`Server`](hyper::server::Server): - /// /// ``` /// use axum::{ /// routing::get, @@ -365,10 +456,8 @@ where /// let app = Router::new().route("/", get(|| async { "Hi!" })); /// /// # async { - /// axum::Server::bind(&"0.0.0.0:3000".parse().unwrap()) - /// .serve(app.into_make_service()) - /// .await - /// .expect("server failed"); + /// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); + /// axum::serve(listener, app).await.unwrap(); /// # }; /// ``` /// @@ -388,13 +477,36 @@ where } } -impl Service> for Router<(), B> +// for `axum::serve(listener, router)` +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +const _: () = { + use crate::serve::IncomingStream; + + impl Service> for Router<()> { + type Response = Self; + type Error = Infallible; + type Future = std::future::Ready>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: IncomingStream<'_>) -> Self::Future { + // call `Router::with_state` such that everything is turned into `Route` eagerly + // rather than doing that per request + std::future::ready(Ok(self.clone().with_state(()))) + } + } +}; + +impl Service> for Router<()> where - B: HttpBody + Send + 'static, + B: HttpBody + Send + 'static, + B::Error: Into, { type Response = Response; type Error = Infallible; - type Future = RouteFuture; + type Future = RouteFuture; #[inline] fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { @@ -403,17 +515,108 @@ where #[inline] fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(Body::new); 
self.call_with_state(req, ()) } } -enum Fallback { - Default(Route), - Service(Route), - BoxedHandler(BoxedIntoRoute), +/// A [`Router`] converted into a borrowed [`Service`] with a fixed body type. +/// +/// See [`Router::as_service`] for more details. +pub struct RouterAsService<'a, B, S = ()> { + router: &'a mut Router, + _marker: PhantomData, } -impl Fallback +impl<'a, B> Service> for RouterAsService<'a, B, ()> +where + B: HttpBody + Send + 'static, + B::Error: Into, +{ + type Response = Response; + type Error = Infallible; + type Future = RouteFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + >>::poll_ready(self.router, cx) + } + + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + self.router.call(req) + } +} + +impl<'a, B, S> fmt::Debug for RouterAsService<'a, B, S> +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RouterAsService") + .field("router", &self.router) + .finish() + } +} + +/// A [`Router`] converted into an owned [`Service`] with a fixed body type. +/// +/// See [`Router::into_service`] for more details. 
+pub struct RouterIntoService { + router: Router, + _marker: PhantomData, +} + +impl Clone for RouterIntoService +where + Router: Clone, +{ + fn clone(&self) -> Self { + Self { + router: self.router.clone(), + _marker: PhantomData, + } + } +} + +impl Service> for RouterIntoService +where + B: HttpBody + Send + 'static, + B::Error: Into, +{ + type Response = Response; + type Error = Infallible; + type Future = RouteFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + >>::poll_ready(&mut self.router, cx) + } + + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + self.router.call(req) + } +} + +impl fmt::Debug for RouterIntoService +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RouterIntoService") + .field("router", &self.router) + .finish() + } +} + +enum Fallback { + Default(Route), + Service(Route), + BoxedHandler(BoxedIntoRoute), +} + +impl Fallback where S: Clone, { @@ -425,13 +628,11 @@ where } } - fn map(self, f: F) -> Fallback + fn map(self, f: F) -> Fallback where S: 'static, - B: 'static, E: 'static, - F: FnOnce(Route) -> Route + Clone + Send + 'static, - B2: HttpBody + 'static, + F: FnOnce(Route) -> Route + Clone + Send + 'static, E2: 'static, { match self { @@ -441,7 +642,7 @@ where } } - fn with_state(self, state: S) -> Fallback { + fn with_state(self, state: S) -> Fallback { match self { Fallback::Default(route) => Fallback::Default(route), Fallback::Service(route) => Fallback::Service(route), @@ -449,7 +650,7 @@ where } } - fn call_with_state(&mut self, req: Request, state: S) -> RouteFuture { + fn call_with_state(&mut self, req: Request, state: S) -> RouteFuture { match self { Fallback::Default(route) | Fallback::Service(route) => { RouteFuture::from_future(route.oneshot_inner(req)) @@ -462,7 +663,7 @@ where } } -impl Clone for Fallback { +impl Clone for Fallback { fn clone(&self) -> Self { match self { Self::Default(inner) => 
Self::Default(inner.clone()), @@ -472,7 +673,7 @@ impl Clone for Fallback { } } -impl fmt::Debug for Fallback { +impl fmt::Debug for Fallback { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Default(inner) => f.debug_tuple("Default").field(inner).finish(), @@ -483,24 +684,22 @@ impl fmt::Debug for Fallback { } #[allow(clippy::large_enum_variant)] -enum Endpoint { - MethodRouter(MethodRouter), - Route(Route), +enum Endpoint { + MethodRouter(MethodRouter), + Route(Route), } -impl Endpoint +impl Endpoint where - B: HttpBody + Send + 'static, S: Clone + Send + Sync + 'static, { - fn layer(self, layer: L) -> Endpoint + fn layer(self, layer: L) -> Endpoint where - L: Layer> + Clone + Send + 'static, - L::Service: Service> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Error: Into + 'static, - >>::Future: Send + 'static, - NewReqBody: HttpBody + 'static, + L: Layer + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Error: Into + 'static, + >::Future: Send + 'static, { match self { Endpoint::MethodRouter(method_router) => { @@ -511,7 +710,7 @@ where } } -impl Clone for Endpoint { +impl Clone for Endpoint { fn clone(&self) -> Self { match self { Self::MethodRouter(inner) => Self::MethodRouter(inner.clone()), @@ -520,7 +719,7 @@ impl Clone for Endpoint { } } -impl fmt::Debug for Endpoint { +impl fmt::Debug for Endpoint { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::MethodRouter(method_router) => { @@ -531,11 +730,9 @@ impl fmt::Debug for Endpoint { } } -struct SuperFallback(SyncWrapper>); - #[test] -#[allow(warnings)] fn traits() { use crate::test_helpers::*; - assert_send::>(); + assert_send::>(); + assert_sync::>(); } diff --git a/.cargo-vendor/axum/src/routing/path_router.rs b/.cargo-vendor/axum/src/routing/path_router.rs index b415f4f7b2..e9353dc748 100644 --- a/.cargo-vendor/axum/src/routing/path_router.rs +++ 
b/.cargo-vendor/axum/src/routing/path_router.rs @@ -1,6 +1,5 @@ -use crate::body::HttpBody; +use crate::extract::{nested_path::SetNestedPath, Request}; use axum_core::response::IntoResponse; -use http::Request; use matchit::MatchError; use std::{borrow::Cow, collections::HashMap, convert::Infallible, fmt, sync::Arc}; use tower_layer::Layer; @@ -11,15 +10,14 @@ use super::{ MethodRouter, Route, RouteId, FALLBACK_PARAM_PATH, NEST_TAIL_PARAM, }; -pub(super) struct PathRouter { - routes: HashMap>, +pub(super) struct PathRouter { + routes: HashMap>, node: Arc, prev_route_id: RouteId, } -impl PathRouter +impl PathRouter where - B: HttpBody + Send + 'static, S: Clone + Send + Sync + 'static, { pub(super) fn new_fallback() -> Self { @@ -28,21 +26,20 @@ where this } - pub(super) fn set_fallback(&mut self, endpoint: Endpoint) { + pub(super) fn set_fallback(&mut self, endpoint: Endpoint) { self.replace_endpoint("/", endpoint.clone()); self.replace_endpoint(FALLBACK_PARAM_PATH, endpoint); } } -impl PathRouter +impl PathRouter where - B: HttpBody + Send + 'static, S: Clone + Send + Sync + 'static, { pub(super) fn route( &mut self, path: &str, - method_router: MethodRouter, + method_router: MethodRouter, ) -> Result<(), Cow<'static, str>> { fn validate_path(path: &str) -> Result<(), &'static str> { if path.is_empty() { @@ -56,8 +53,6 @@ where validate_path(path)?; - let id = self.next_route_id(); - let endpoint = if let Some((route_id, Endpoint::MethodRouter(prev_method_router))) = self .node .path_to_route_id @@ -77,6 +72,7 @@ where Endpoint::MethodRouter(method_router) }; + let id = self.next_route_id(); self.set_node(path, id)?; self.routes.insert(id, endpoint); @@ -89,7 +85,7 @@ where service: T, ) -> Result<(), Cow<'static, str>> where - T: Service, Error = Infallible> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse, T::Future: Send + 'static, { @@ -99,7 +95,7 @@ where pub(super) fn route_endpoint( &mut self, path: &str, - 
endpoint: Endpoint, + endpoint: Endpoint, ) -> Result<(), Cow<'static, str>> { if path.is_empty() { return Err("Paths must start with a `/`. Use \"/\" for root routes".into()); @@ -126,7 +122,7 @@ where pub(super) fn merge( &mut self, - other: PathRouter, + other: PathRouter, ) -> Result<(), Cow<'static, str>> { let PathRouter { routes, @@ -166,10 +162,10 @@ where pub(super) fn nest( &mut self, - path: &str, - router: PathRouter, + path_to_nest_at: &str, + router: PathRouter, ) -> Result<(), Cow<'static, str>> { - let prefix = validate_nest_path(path); + let prefix = validate_nest_path(path_to_nest_at); let PathRouter { routes, @@ -185,7 +181,11 @@ where let path = path_for_nested_route(prefix, inner_path); - match endpoint.layer(StripPrefix::layer(prefix)) { + let layer = ( + StripPrefix::layer(prefix), + SetNestedPath::layer(path_to_nest_at), + ); + match endpoint.layer(layer) { Endpoint::MethodRouter(method_router) => { self.route(&path, method_router)?; } @@ -198,13 +198,17 @@ where Ok(()) } - pub(super) fn nest_service(&mut self, path: &str, svc: T) -> Result<(), Cow<'static, str>> + pub(super) fn nest_service( + &mut self, + path_to_nest_at: &str, + svc: T, + ) -> Result<(), Cow<'static, str>> where - T: Service, Error = Infallible> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse, T::Future: Send + 'static, { - let path = validate_nest_path(path); + let path = validate_nest_path(path_to_nest_at); let prefix = path; let path = if path.ends_with('/') { @@ -213,7 +217,11 @@ where format!("{path}/*{NEST_TAIL_PARAM}") }; - let endpoint = Endpoint::Route(Route::new(StripPrefix::new(svc, prefix))); + let layer = ( + StripPrefix::layer(prefix), + SetNestedPath::layer(path_to_nest_at), + ); + let endpoint = Endpoint::Route(Route::new(layer.layer(svc))); self.route_endpoint(&path, endpoint.clone())?; @@ -229,14 +237,13 @@ where Ok(()) } - pub(super) fn layer(self, layer: L) -> PathRouter + pub(super) fn layer(self, layer: L) 
-> PathRouter where - L: Layer> + Clone + Send + 'static, - L::Service: Service> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Error: Into + 'static, - >>::Future: Send + 'static, - NewReqBody: HttpBody + 'static, + L: Layer + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Error: Into + 'static, + >::Future: Send + 'static, { let routes = self .routes @@ -257,11 +264,11 @@ where #[track_caller] pub(super) fn route_layer(self, layer: L) -> Self where - L: Layer> + Clone + Send + 'static, - L::Service: Service> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Error: Into + 'static, - >>::Future: Send + 'static, + L: Layer + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Error: Into + 'static, + >::Future: Send + 'static, { if self.routes.is_empty() { panic!( @@ -286,12 +293,12 @@ where } } - pub(super) fn with_state(self, state: S) -> PathRouter { + pub(super) fn with_state(self, state: S) -> PathRouter { let routes = self .routes .into_iter() .map(|(id, endpoint)| { - let endpoint: Endpoint = match endpoint { + let endpoint: Endpoint = match endpoint { Endpoint::MethodRouter(method_router) => { Endpoint::MethodRouter(method_router.with_state(state.clone())) } @@ -309,10 +316,10 @@ where } pub(super) fn call_with_state( - &mut self, - mut req: Request, + &self, + mut req: Request, state: S, - ) -> Result, (Request, S)> { + ) -> Result, (Request, S)> { #[cfg(feature = "original-uri")] { use crate::extract::OriginalUri; @@ -340,12 +347,12 @@ where url_params::insert_url_params(req.extensions_mut(), match_.params); - let endpont = self + let endpoint = self .routes - .get_mut(&id) + .get(&id) .expect("no route for id. This is a bug in axum. 
Please file an issue"); - match endpont { + match endpoint { Endpoint::MethodRouter(method_router) => { Ok(method_router.call_with_state(req, state)) } @@ -362,7 +369,7 @@ where } } - pub(super) fn replace_endpoint(&mut self, path: &str, endpoint: Endpoint) { + pub(super) fn replace_endpoint(&mut self, path: &str, endpoint: Endpoint) { match self.node.at(path) { Ok(match_) => { let id = *match_.value; @@ -385,7 +392,7 @@ where } } -impl Default for PathRouter { +impl Default for PathRouter { fn default() -> Self { Self { routes: Default::default(), @@ -395,7 +402,7 @@ impl Default for PathRouter { } } -impl fmt::Debug for PathRouter { +impl fmt::Debug for PathRouter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PathRouter") .field("routes", &self.routes) @@ -404,7 +411,7 @@ impl fmt::Debug for PathRouter } } -impl Clone for PathRouter { +impl Clone for PathRouter { fn clone(&self) -> Self { Self { routes: self.routes.clone(), diff --git a/.cargo-vendor/axum/src/routing/route.rs b/.cargo-vendor/axum/src/routing/route.rs index 1667db1607..2bde8c8c5f 100644 --- a/.cargo-vendor/axum/src/routing/route.rs +++ b/.cargo-vendor/axum/src/routing/route.rs @@ -1,12 +1,13 @@ use crate::{ - body::{boxed, Body, Empty, HttpBody}, + body::{Body, HttpBody}, response::Response, + util::AxumMutex, }; -use axum_core::response::IntoResponse; +use axum_core::{extract::Request, response::IntoResponse}; use bytes::Bytes; use http::{ header::{self, CONTENT_LENGTH}, - HeaderMap, HeaderValue, Request, + HeaderMap, HeaderValue, }; use pin_project_lite::pin_project; use std::{ @@ -17,8 +18,8 @@ use std::{ task::{Context, Poll}, }; use tower::{ - util::{BoxCloneService, MapResponseLayer, Oneshot}, - ServiceBuilder, ServiceExt, + util::{BoxCloneService, MapErrLayer, MapRequestLayer, MapResponseLayer, Oneshot}, + ServiceExt, }; use tower_layer::Layer; use tower_service::Service; @@ -27,66 +28,68 @@ use tower_service::Service; /// /// You normally shouldn't need to 
care about this type. It's used in /// [`Router::layer`](super::Router::layer). -pub struct Route(BoxCloneService, Response, E>); +pub struct Route(AxumMutex>); -impl Route { +impl Route { pub(crate) fn new(svc: T) -> Self where - T: Service, Error = E> + Clone + Send + 'static, + T: Service + Clone + Send + 'static, T::Response: IntoResponse + 'static, T::Future: Send + 'static, { - Self(BoxCloneService::new( + Self(AxumMutex::new(BoxCloneService::new( svc.map_response(IntoResponse::into_response), - )) + ))) } pub(crate) fn oneshot_inner( &mut self, - req: Request, - ) -> Oneshot, Response, E>, Request> { - self.0.clone().oneshot(req) + req: Request, + ) -> Oneshot, Request> { + self.0.get_mut().unwrap().clone().oneshot(req) } - pub(crate) fn layer(self, layer: L) -> Route + pub(crate) fn layer(self, layer: L) -> Route where - L: Layer> + Clone + Send + 'static, - L::Service: Service> + Clone + Send + 'static, - >>::Response: IntoResponse + 'static, - >>::Error: Into + 'static, - >>::Future: Send + 'static, - NewReqBody: 'static, + L: Layer> + Clone + Send + 'static, + L::Service: Service + Clone + Send + 'static, + >::Response: IntoResponse + 'static, + >::Error: Into + 'static, + >::Future: Send + 'static, NewError: 'static, { - let layer = ServiceBuilder::new() - .map_err(Into::into) - .layer(MapResponseLayer::new(IntoResponse::into_response)) - .layer(layer) - .into_inner(); + let layer = ( + MapRequestLayer::new(|req: Request<_>| req.map(Body::new)), + MapErrLayer::new(Into::into), + MapResponseLayer::new(IntoResponse::into_response), + layer, + ); Route::new(layer.layer(self)) } } -impl Clone for Route { +impl Clone for Route { + #[track_caller] fn clone(&self) -> Self { - Self(self.0.clone()) + Self(AxumMutex::new(self.0.lock().unwrap().clone())) } } -impl fmt::Debug for Route { +impl fmt::Debug for Route { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Route").finish() } } -impl Service> for Route +impl Service> for Route where 
- B: HttpBody, + B: HttpBody + Send + 'static, + B::Error: Into, { type Response = Response; type Error = E; - type Future = RouteFuture; + type Future = RouteFuture; #[inline] fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { @@ -95,15 +98,16 @@ where #[inline] fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(Body::new); RouteFuture::from_future(self.oneshot_inner(req)) } } pin_project! { /// Response future for [`Route`]. - pub struct RouteFuture { + pub struct RouteFuture { #[pin] - kind: RouteFutureKind, + kind: RouteFutureKind, strip_body: bool, allow_header: Option, } @@ -111,12 +115,12 @@ pin_project! { pin_project! { #[project = RouteFutureKindProj] - enum RouteFutureKind { + enum RouteFutureKind { Future { #[pin] future: Oneshot< - BoxCloneService, Response, E>, - Request, + BoxCloneService, + Request, >, }, Response { @@ -125,9 +129,9 @@ pin_project! { } } -impl RouteFuture { +impl RouteFuture { pub(crate) fn from_future( - future: Oneshot, Response, E>, Request>, + future: Oneshot, Request>, ) -> Self { Self { kind: RouteFutureKind::Future { future }, @@ -147,10 +151,7 @@ impl RouteFuture { } } -impl Future for RouteFuture -where - B: HttpBody, -{ +impl Future for RouteFuture { type Output = Result; #[inline] @@ -174,7 +175,7 @@ where set_content_length(res.size_hint(), res.headers_mut()); let res = if *this.strip_body { - res.map(|_| boxed(Empty::new())) + res.map(|_| Body::empty()) } else { res }; @@ -217,22 +218,19 @@ fn set_content_length(size_hint: http_body::SizeHint, headers: &mut HeaderMap) { pin_project! { /// A [`RouteFuture`] that always yields a [`Response`]. 
- pub struct InfallibleRouteFuture { + pub struct InfallibleRouteFuture { #[pin] - future: RouteFuture, + future: RouteFuture, } } -impl InfallibleRouteFuture { - pub(crate) fn new(future: RouteFuture) -> Self { +impl InfallibleRouteFuture { + pub(crate) fn new(future: RouteFuture) -> Self { Self { future } } } -impl Future for InfallibleRouteFuture -where - B: HttpBody, -{ +impl Future for InfallibleRouteFuture { type Output = Response; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/.cargo-vendor/axum/src/routing/strip_prefix.rs b/.cargo-vendor/axum/src/routing/strip_prefix.rs index 671c4de773..0b06db4d28 100644 --- a/.cargo-vendor/axum/src/routing/strip_prefix.rs +++ b/.cargo-vendor/axum/src/routing/strip_prefix.rs @@ -14,13 +14,6 @@ pub(super) struct StripPrefix { } impl StripPrefix { - pub(super) fn new(inner: S, prefix: &str) -> Self { - Self { - inner, - prefix: prefix.into(), - } - } - pub(super) fn layer(prefix: &str) -> impl Layer + Clone { let prefix = Arc::from(prefix); layer_fn(move |inner| Self { diff --git a/.cargo-vendor/axum/src/routing/tests/fallback.rs b/.cargo-vendor/axum/src/routing/tests/fallback.rs index 869b7329cf..ee116a419a 100644 --- a/.cargo-vendor/axum/src/routing/tests/fallback.rs +++ b/.cargo-vendor/axum/src/routing/tests/fallback.rs @@ -1,5 +1,3 @@ -use tower::ServiceExt; - use super::*; use crate::middleware::{map_request, map_response}; @@ -11,9 +9,9 @@ async fn basic() { let client = TestClient::new(app); - assert_eq!(client.get("/foo").send().await.status(), StatusCode::OK); + assert_eq!(client.get("/foo").await.status(), StatusCode::OK); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "fallback"); } @@ -26,9 +24,21 @@ async fn nest() { let client = TestClient::new(app); - assert_eq!(client.get("/foo/bar").send().await.status(), StatusCode::OK); + 
assert_eq!(client.get("/foo/bar").await.status(), StatusCode::OK); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.text().await, "fallback"); +} + +#[crate::test] +async fn two() { + let app = Router::new() + .route("/first", get(|| async {})) + .route("/second", get(|| async {})) + .fallback(get(|| async { "fallback" })); + let client = TestClient::new(app); + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "fallback"); } @@ -42,10 +52,10 @@ async fn or() { let client = TestClient::new(app); - assert_eq!(client.get("/one").send().await.status(), StatusCode::OK); - assert_eq!(client.get("/two").send().await.status(), StatusCode::OK); + assert_eq!(client.get("/one").await.status(), StatusCode::OK); + assert_eq!(client.get("/two").await.status(), StatusCode::OK); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "fallback"); } @@ -58,7 +68,7 @@ async fn fallback_accessing_state() { let client = TestClient::new(app); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "state"); } @@ -78,23 +88,23 @@ async fn nested_router_inherits_fallback() { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } #[crate::test] -async fn doesnt_inherit_fallback_if_overriden() { +async fn doesnt_inherit_fallback_if_overridden() { let inner = Router::new().fallback(inner_fallback); let app = Router::new().nest("/foo", inner).fallback(outer_fallback); let client = TestClient::new(app); - let res = 
client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "inner"); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -107,7 +117,7 @@ async fn deeply_nested_inherit_from_top() { let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -123,7 +133,7 @@ async fn deeply_nested_inherit_from_middle() { let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -139,7 +149,7 @@ async fn with_middleware_on_inner_fallback() { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -160,54 +170,12 @@ async fn also_inherits_default_layered_fallback() { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.headers()["x-from-fallback"], "1"); assert_eq!(res.text().await, "outer"); } -#[crate::test] -async fn fallback_inherited_into_nested_router_service() { - let inner = Router::new() - .route( - "/bar", - get(|State(state): State<&'static str>| async move { state }), - ) - .with_state("inner"); - - // with a different state - let app = Router::<()>::new() - .nest_service("/foo", inner) - .fallback(outer_fallback); - - let client = TestClient::new(app); - let res = client.get("/foo/not-found").send().await; - assert_eq!(res.status(), 
StatusCode::NOT_FOUND); - assert_eq!(res.text().await, "outer"); -} - -#[crate::test] -async fn fallback_inherited_into_nested_opaque_service() { - let inner = Router::new() - .route( - "/bar", - get(|State(state): State<&'static str>| async move { state }), - ) - .with_state("inner") - // even if the service is made more opaque it should still inherit the fallback - .boxed_clone(); - - // with a different state - let app = Router::<()>::new() - .nest_service("/foo", inner) - .fallback(outer_fallback); - - let client = TestClient::new(app); - let res = client.get("/foo/not-found").send().await; - assert_eq!(res.status(), StatusCode::NOT_FOUND); - assert_eq!(res.text().await, "outer"); -} - #[crate::test] async fn nest_fallback_on_inner() { let app = Router::new() @@ -221,7 +189,7 @@ async fn nest_fallback_on_inner() { let client = TestClient::new(app); - let res = client.get("/foo/not-found").send().await; + let res = client.get("/foo/not-found").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "inner fallback"); } @@ -238,7 +206,7 @@ async fn doesnt_panic_if_used_with_nested_router() { let client = TestClient::new(routes_all); - let res = client.get("/foobar").send().await; + let res = client.get("/foobar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -252,11 +220,11 @@ async fn issue_2072() { let client = TestClient::new(app); - let res = client.get("/nested/does-not-exist").send().await; + let res = client.get("/nested/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "inner"); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, ""); } @@ -272,11 +240,11 @@ async fn issue_2072_outer_fallback_before_merge() { let client = TestClient::new(app); - let res = client.get("/nested/does-not-exist").send().await; + let res = 
client.get("/nested/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "inner"); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -292,11 +260,11 @@ async fn issue_2072_outer_fallback_after_merge() { let client = TestClient::new(app); - let res = client.get("/nested/does-not-exist").send().await; + let res = client.get("/nested/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "inner"); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -311,11 +279,11 @@ async fn merge_router_with_fallback_into_nested_router_with_fallback() { let client = TestClient::new(app); - let res = client.get("/nested/does-not-exist").send().await; + let res = client.get("/nested/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "inner"); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -330,11 +298,11 @@ async fn merging_nested_router_with_fallback_into_router_with_fallback() { let client = TestClient::new(app); - let res = client.get("/nested/does-not-exist").send().await; + let res = client.get("/nested/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "inner"); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -345,7 +313,7 @@ async fn merge_empty_into_router_with_fallback() { let client = 
TestClient::new(app); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } @@ -356,7 +324,7 @@ async fn merge_router_with_fallback_into_empty() { let client = TestClient::new(app); - let res = client.get("/does-not-exist").send().await; + let res = client.get("/does-not-exist").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); assert_eq!(res.text().await, "outer"); } diff --git a/.cargo-vendor/axum/src/routing/tests/get_to_head.rs b/.cargo-vendor/axum/src/routing/tests/get_to_head.rs index b46114c60e..b20e8cd032 100644 --- a/.cargo-vendor/axum/src/routing/tests/get_to_head.rs +++ b/.cargo-vendor/axum/src/routing/tests/get_to_head.rs @@ -4,7 +4,6 @@ use tower::ServiceExt; mod for_handlers { use super::*; - use http::HeaderMap; #[crate::test] async fn get_handles_head() { @@ -32,20 +31,19 @@ mod for_handlers { assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.headers()["x-some-header"], "foobar"); - let body = hyper::body::to_bytes(res.into_body()).await.unwrap(); + let body = BodyExt::collect(res.into_body()).await.unwrap().to_bytes(); assert_eq!(body.len(), 0); } } mod for_services { use super::*; - use crate::routing::get_service; #[crate::test] async fn get_handles_head() { let app = Router::new().route( "/", - get_service(service_fn(|_req: Request| async move { + get_service(service_fn(|_req: Request| async move { Ok::<_, Infallible>( ([("x-some-header", "foobar")], "you shouldn't see this").into_response(), ) @@ -67,7 +65,7 @@ mod for_services { assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.headers()["x-some-header"], "foobar"); - let body = hyper::body::to_bytes(res.into_body()).await.unwrap(); + let body = BodyExt::collect(res.into_body()).await.unwrap().to_bytes(); assert_eq!(body.len(), 0); } } diff --git a/.cargo-vendor/axum/src/routing/tests/handle_error.rs 
b/.cargo-vendor/axum/src/routing/tests/handle_error.rs index 3781677d6b..e5d575e9dc 100644 --- a/.cargo-vendor/axum/src/routing/tests/handle_error.rs +++ b/.cargo-vendor/axum/src/routing/tests/handle_error.rs @@ -1,6 +1,6 @@ use super::*; -use std::future::{pending, ready}; -use tower::{timeout::TimeoutLayer, ServiceBuilder}; +use std::future::pending; +use tower::timeout::TimeoutLayer; async fn unit() {} @@ -33,18 +33,15 @@ impl Service for Svc { async fn handler() { let app = Router::new().route( "/", - get(forever.layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|_: BoxError| async { - StatusCode::REQUEST_TIMEOUT - })) - .layer(timeout()), - )), + get(forever.layer(( + HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT }), + timeout(), + ))), ); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); } @@ -52,19 +49,16 @@ async fn handler() { async fn handler_multiple_methods_first() { let app = Router::new().route( "/", - get(forever.layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|_: BoxError| async { - StatusCode::REQUEST_TIMEOUT - })) - .layer(timeout()), - )) + get(forever.layer(( + HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT }), + timeout(), + ))) .post(unit), ); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); } @@ -73,21 +67,16 @@ async fn handler_multiple_methods_middle() { let app = Router::new().route( "/", delete(unit) - .get( - forever.layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|_: BoxError| async { - StatusCode::REQUEST_TIMEOUT - })) - .layer(timeout()), - ), - ) + .get(forever.layer(( + HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT }), + timeout(), + ))) .post(unit), ); let client = TestClient::new(app); 
- let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); } @@ -95,19 +84,28 @@ async fn handler_multiple_methods_middle() { async fn handler_multiple_methods_last() { let app = Router::new().route( "/", - delete(unit).get( - forever.layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|_: BoxError| async { - StatusCode::REQUEST_TIMEOUT - })) - .layer(timeout()), - ), - ), + delete(unit).get(forever.layer(( + HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT }), + timeout(), + ))), ); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); } + +#[crate::test] +async fn handler_service_ext() { + let fallible_service = tower::service_fn(|_| async { Err::<(), ()>(()) }); + let handle_error_service = + fallible_service.handle_error(|_| async { StatusCode::INTERNAL_SERVER_ERROR }); + + let app = Router::new().route("/", get_service(handle_error_service)); + + let client = TestClient::new(app); + + let res = client.get("/").await; + assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); +} diff --git a/.cargo-vendor/axum/src/routing/tests/merge.rs b/.cargo-vendor/axum/src/routing/tests/merge.rs index 0344a87939..b760184f54 100644 --- a/.cargo-vendor/axum/src/routing/tests/merge.rs +++ b/.cargo-vendor/axum/src/routing/tests/merge.rs @@ -1,7 +1,7 @@ use super::*; -use crate::{error_handling::HandleErrorLayer, extract::OriginalUri, response::IntoResponse, Json}; +use crate::extract::OriginalUri; use serde_json::{json, Value}; -use tower::{limit::ConcurrencyLimitLayer, timeout::TimeoutLayer}; +use tower::limit::ConcurrencyLimitLayer; #[crate::test] async fn basic() { @@ -13,16 +13,16 @@ async fn basic() { let client = TestClient::new(app); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), 
StatusCode::OK); - let res = client.get("/bar").send().await; + let res = client.get("/bar").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/baz").send().await; + let res = client.get("/baz").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/qux").send().await; + let res = client.get("/qux").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); } @@ -64,7 +64,7 @@ async fn multiple_ors_balanced_differently() { for n in ["one", "two", "three", "four"].iter() { println!("running: {name} / {n}"); - let res = client.get(&format!("/{n}")).send().await; + let res = client.get(&format!("/{n}")).await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, *n); } @@ -79,12 +79,12 @@ async fn nested_or() { let bar_or_baz = bar.merge(baz); let client = TestClient::new(bar_or_baz.clone()); - assert_eq!(client.get("/bar").send().await.text().await, "bar"); - assert_eq!(client.get("/baz").send().await.text().await, "baz"); + assert_eq!(client.get("/bar").await.text().await, "bar"); + assert_eq!(client.get("/baz").await.text().await, "baz"); let client = TestClient::new(Router::new().nest("/foo", bar_or_baz)); - assert_eq!(client.get("/foo/bar").send().await.text().await, "bar"); - assert_eq!(client.get("/foo/baz").send().await.text().await, "baz"); + assert_eq!(client.get("/foo/bar").await.text().await, "bar"); + assert_eq!(client.get("/foo/baz").await.text().await, "baz"); } #[crate::test] @@ -95,13 +95,13 @@ async fn or_with_route_following() { let client = TestClient::new(app); - let res = client.get("/one").send().await; + let res = client.get("/one").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/two").send().await; + let res = client.get("/two").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/three").send().await; + let res = client.get("/three").await; assert_eq!(res.status(), StatusCode::OK); } @@ -115,10 +115,10 @@ async fn layer() { let client = 
TestClient::new(app); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/bar").send().await; + let res = client.get("/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -127,19 +127,15 @@ async fn layer_and_handle_error() { let one = Router::new().route("/foo", get(|| async {})); let two = Router::new() .route("/timeout", get(std::future::pending::<()>)) - .layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|_| async { - StatusCode::REQUEST_TIMEOUT - })) - .layer(TimeoutLayer::new(Duration::from_millis(10))), - ); + .layer(TimeoutLayer::new(Duration::from_millis(10))); let app = one.merge(two); let client = TestClient::new(app); - let res = client.get("/timeout").send().await; + let res = client.get("/timeout").await; assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); + let res = client.get("/foo").await; + assert_eq!(res.status(), StatusCode::OK); } #[crate::test] @@ -150,7 +146,7 @@ async fn nesting() { let client = TestClient::new(app); - let res = client.get("/bar/baz").send().await; + let res = client.get("/bar/baz").await; assert_eq!(res.status(), StatusCode::OK); } @@ -162,7 +158,7 @@ async fn boxed() { let client = TestClient::new(app); - let res = client.get("/bar").send().await; + let res = client.get("/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -180,11 +176,11 @@ async fn many_ors() { let client = TestClient::new(app); for n in 1..=7 { - let res = client.get(&format!("/r{n}")).send().await; + let res = client.get(&format!("/r{n}")).await; assert_eq!(res.status(), StatusCode::OK); } - let res = client.get("/r8").send().await; + let res = client.get("/r8").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); } @@ -195,30 +191,30 @@ async fn services() { let app = Router::new() .route( "/foo", - get_service(service_fn(|_: Request| async { + get_service(service_fn(|_: Request| async { Ok::<_, 
Infallible>(Response::new(Body::empty())) })), ) .merge(Router::new().route( "/bar", - get_service(service_fn(|_: Request| async { + get_service(service_fn(|_: Request| async { Ok::<_, Infallible>(Response::new(Body::empty())) })), )); let client = TestClient::new(app); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/bar").send().await; + let res = client.get("/bar").await; assert_eq!(res.status(), StatusCode::OK); } async fn all_the_uris( uri: Uri, OriginalUri(original_uri): OriginalUri, - req: Request, + req: Request, ) -> impl IntoResponse { Json(json!({ "uri": uri.to_string(), @@ -234,7 +230,7 @@ async fn nesting_and_seeing_the_right_uri() { let client = TestClient::new(one.merge(two)); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -245,7 +241,7 @@ async fn nesting_and_seeing_the_right_uri() { }) ); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -267,7 +263,7 @@ async fn nesting_and_seeing_the_right_uri_at_more_levels_of_nesting() { let client = TestClient::new(one.merge(two)); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -278,7 +274,7 @@ async fn nesting_and_seeing_the_right_uri_at_more_levels_of_nesting() { }) ); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -301,7 +297,7 @@ async fn nesting_and_seeing_the_right_uri_ors_with_nesting() { let client = TestClient::new(one.merge(two).merge(three)); - let res = client.get("/one/bar/baz").send().await; + let res = 
client.get("/one/bar/baz").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -312,7 +308,7 @@ async fn nesting_and_seeing_the_right_uri_ors_with_nesting() { }) ); - let res = client.get("/two/qux").send().await; + let res = client.get("/two/qux").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -323,7 +319,7 @@ async fn nesting_and_seeing_the_right_uri_ors_with_nesting() { }) ); - let res = client.get("/three").send().await; + let res = client.get("/three").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -345,7 +341,7 @@ async fn nesting_and_seeing_the_right_uri_ors_with_multi_segment_uris() { let client = TestClient::new(one.merge(two)); - let res = client.get("/one/foo/bar").send().await; + let res = client.get("/one/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -356,7 +352,7 @@ async fn nesting_and_seeing_the_right_uri_ors_with_multi_segment_uris() { }) ); - let res = client.get("/two/foo").send().await; + let res = client.get("/two/foo").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!( res.json::().await, @@ -378,22 +374,18 @@ async fn middleware_that_return_early() { let client = TestClient::new(private.merge(public)); - assert_eq!( - client.get("/").send().await.status(), - StatusCode::UNAUTHORIZED - ); + assert_eq!(client.get("/").await.status(), StatusCode::UNAUTHORIZED); assert_eq!( client .get("/") .header("authorization", "Bearer password") - .send() .await .status(), StatusCode::OK ); assert_eq!( - client.get("/doesnt-exist").send().await.status(), + client.get("/doesnt-exist").await.status(), StatusCode::NOT_FOUND ); - assert_eq!(client.get("/public").send().await.status(), StatusCode::OK); + assert_eq!(client.get("/public").await.status(), StatusCode::OK); } diff --git a/.cargo-vendor/axum/src/routing/tests/mod.rs b/.cargo-vendor/axum/src/routing/tests/mod.rs index f1a459d645..144c870dfa 
100644 --- a/.cargo-vendor/axum/src/routing/tests/mod.rs +++ b/.cargo-vendor/axum/src/routing/tests/mod.rs @@ -1,9 +1,9 @@ use crate::{ - body::{Bytes, Empty}, + body::{Body, Bytes}, error_handling::HandleErrorLayer, extract::{self, DefaultBodyLimit, FromRef, Path, State}, handler::{Handler, HandlerWithoutStateExt}, - response::IntoResponse, + response::{IntoResponse, Response}, routing::{ delete, get, get_service, on, on_service, patch, patch_service, path_router::path_for_nested_route, post, MethodFilter, @@ -12,28 +12,30 @@ use crate::{ tracing_helpers::{capture_tracing, TracingEvent}, *, }, - BoxError, Extension, Json, Router, + util::mutex_num_locked, + BoxError, Extension, Json, Router, ServiceExt, }; +use axum_core::extract::Request; use futures_util::stream::StreamExt; use http::{ - header::CONTENT_LENGTH, - header::{ALLOW, HOST}, - HeaderMap, Method, Request, Response, StatusCode, Uri, + header::{ALLOW, CONTENT_LENGTH, HOST}, + HeaderMap, Method, StatusCode, Uri, }; -use hyper::Body; +use http_body_util::BodyExt; use serde::Deserialize; use serde_json::json; use std::{ convert::Infallible, - future::{ready, Ready}, + future::{ready, IntoFuture, Ready}, sync::atomic::{AtomicBool, AtomicUsize, Ordering}, task::{Context, Poll}, time::Duration, }; -use tower::{ - service_fn, timeout::TimeoutLayer, util::MapResponseLayer, ServiceBuilder, ServiceExt, +use tower::{service_fn, util::MapResponseLayer, ServiceExt as TowerServiceExt}; +use tower_http::{ + limit::RequestBodyLimitLayer, timeout::TimeoutLayer, + validate_request::ValidateRequestHeaderLayer, }; -use tower_http::{limit::RequestBodyLimitLayer, validate_request::ValidateRequestHeaderLayer}; use tower_service::Service; mod fallback; @@ -44,15 +46,15 @@ mod nest; #[crate::test] async fn hello_world() { - async fn root(_: Request) -> &'static str { + async fn root(_: Request) -> &'static str { "Hello, World!" 
} - async fn foo(_: Request) -> &'static str { + async fn foo(_: Request) -> &'static str { "foo" } - async fn users_create(_: Request) -> &'static str { + async fn users_create(_: Request) -> &'static str { "users#create" } @@ -62,15 +64,15 @@ async fn hello_world() { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; let body = res.text().await; assert_eq!(body, "Hello, World!"); - let res = client.post("/").send().await; + let res = client.post("/").await; let body = res.text().await; assert_eq!(body, "foo"); - let res = client.post("/users").send().await; + let res = client.post("/users").await; let body = res.text().await; assert_eq!(body, "users#create"); } @@ -80,33 +82,32 @@ async fn routing() { let app = Router::new() .route( "/users", - get(|_: Request| async { "users#index" }) - .post(|_: Request| async { "users#create" }), + get(|_: Request| async { "users#index" }).post(|_: Request| async { "users#create" }), ) - .route("/users/:id", get(|_: Request| async { "users#show" })) + .route("/users/:id", get(|_: Request| async { "users#show" })) .route( "/users/:id/action", - get(|_: Request| async { "users#action" }), + get(|_: Request| async { "users#action" }), ); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/users").send().await; + let res = client.get("/users").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "users#index"); - let res = client.post("/users").send().await; + let res = client.post("/users").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "users#create"); - let res = client.get("/users/1").send().await; + let res = client.get("/users/1").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "users#show"); - let res = client.get("/users/1/action").send().await; + 
let res = client.get("/users/1/action").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "users#action"); } @@ -116,22 +117,18 @@ async fn router_type_doesnt_change() { let app: Router = Router::new() .route( "/", - on(MethodFilter::GET, |_: Request| async { - "hi from GET" - }) - .on(MethodFilter::POST, |_: Request| async { - "hi from POST" - }), + on(MethodFilter::GET, |_: Request| async { "hi from GET" }) + .on(MethodFilter::POST, |_: Request| async { "hi from POST" }), ) - .layer(tower_http::compression::CompressionLayer::new()); + .layer(tower_http::trace::TraceLayer::new_for_http()); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "hi from GET"); - let res = client.post("/").send().await; + let res = client.post("/").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "hi from POST"); } @@ -141,22 +138,22 @@ async fn routing_between_services() { use std::convert::Infallible; use tower::service_fn; - async fn handle(_: Request) -> &'static str { + async fn handle(_: Request) -> &'static str { "handler" } let app = Router::new() .route( "/one", - get_service(service_fn(|_: Request| async { + get_service(service_fn(|_: Request| async { Ok::<_, Infallible>(Response::new(Body::from("one get"))) })) - .post_service(service_fn(|_: Request| async { + .post_service(service_fn(|_: Request| async { Ok::<_, Infallible>(Response::new(Body::from("one post"))) })) .on_service( MethodFilter::PUT, - service_fn(|_: Request| async { + service_fn(|_: Request| async { Ok::<_, Infallible>(Response::new(Body::from("one put"))) }), ), @@ -165,45 +162,36 @@ async fn routing_between_services() { let client = TestClient::new(app); - let res = client.get("/one").send().await; + let res = client.get("/one").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "one get"); - let 
res = client.post("/one").send().await; + let res = client.post("/one").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "one post"); - let res = client.put("/one").send().await; + let res = client.put("/one").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "one put"); - let res = client.get("/two").send().await; + let res = client.get("/two").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "handler"); } #[crate::test] async fn middleware_on_single_route() { - use tower::ServiceBuilder; - use tower_http::{compression::CompressionLayer, trace::TraceLayer}; + use tower_http::trace::TraceLayer; - async fn handle(_: Request) -> &'static str { + async fn handle(_: Request) -> &'static str { "Hello, World!" } - let app = Router::new().route( - "/", - get(handle.layer( - ServiceBuilder::new() - .layer(TraceLayer::new_for_http()) - .layer(CompressionLayer::new()) - .into_inner(), - )), - ); + let app = Router::new().route("/", get(handle.layer(TraceLayer::new_for_http()))); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; let body = res.text().await; assert_eq!(body, "Hello, World!"); @@ -211,8 +199,8 @@ async fn middleware_on_single_route() { #[crate::test] async fn service_in_bottom() { - async fn handler(_req: Request) -> Result, Infallible> { - Ok(Response::new(hyper::Body::empty())) + async fn handler(_req: Request) -> Result, Infallible> { + Ok(Response::new(Body::empty())) } let app = Router::new().route("/", get_service(service_fn(handler))); @@ -228,18 +216,18 @@ async fn wrong_method_handler() { let client = TestClient::new(app); - let res = client.patch("/").send().await; + let res = client.patch("/").await; assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(res.headers()[ALLOW], "GET,HEAD,POST"); - let res = client.patch("/foo").send().await; + let res = client.patch("/foo").await; 
assert_eq!(res.status(), StatusCode::OK); - let res = client.post("/foo").send().await; + let res = client.post("/foo").await; assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(res.headers()[ALLOW], "PATCH"); - let res = client.get("/bar").send().await; + let res = client.get("/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); } @@ -249,7 +237,7 @@ async fn wrong_method_service() { struct Svc; impl Service for Svc { - type Response = Response>; + type Response = Response; type Error = Infallible; type Future = Ready>; @@ -258,7 +246,7 @@ async fn wrong_method_service() { } fn call(&mut self, _req: R) -> Self::Future { - ready(Ok(Response::new(Empty::new()))) + ready(Ok(().into_response())) } } @@ -268,35 +256,35 @@ async fn wrong_method_service() { let client = TestClient::new(app); - let res = client.patch("/").send().await; + let res = client.patch("/").await; assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(res.headers()[ALLOW], "GET,HEAD,POST"); - let res = client.patch("/foo").send().await; + let res = client.patch("/foo").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.post("/foo").send().await; + let res = client.post("/foo").await; assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(res.headers()[ALLOW], "PATCH"); - let res = client.get("/bar").send().await; + let res = client.get("/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); } #[crate::test] async fn multiple_methods_for_one_handler() { - async fn root(_: Request) -> &'static str { + async fn root(_: Request) -> &'static str { "Hello, World!" 
} - let app = Router::new().route("/", on(MethodFilter::GET | MethodFilter::POST, root)); + let app = Router::new().route("/", on(MethodFilter::GET.or(MethodFilter::POST), root)); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.post("/").send().await; + let res = client.post("/").await; assert_eq!(res.status(), StatusCode::OK); } @@ -306,7 +294,7 @@ async fn wildcard_sees_whole_url() { let client = TestClient::new(app); - let res = client.get("/api/foo/bar").send().await; + let res = client.get("/api/foo/bar").await; assert_eq!(res.text().await, "/api/foo/bar"); } @@ -314,21 +302,15 @@ async fn wildcard_sees_whole_url() { async fn middleware_applies_to_routes_above() { let app = Router::new() .route("/one", get(std::future::pending::<()>)) - .layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|_: BoxError| async move { - StatusCode::REQUEST_TIMEOUT - })) - .layer(TimeoutLayer::new(Duration::new(0, 0))), - ) + .layer(TimeoutLayer::new(Duration::ZERO)) .route("/two", get(|| async {})); let client = TestClient::new(app); - let res = client.get("/one").send().await; + let res = client.get("/one").await; assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); - let res = client.get("/two").send().await; + let res = client.get("/two").await; assert_eq!(res.status(), StatusCode::OK); } @@ -338,10 +320,10 @@ async fn not_found_for_extra_trailing_slash() { let client = TestClient::new(app); - let res = client.get("/foo/").send().await; + let res = client.get("/foo/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); } @@ -351,7 +333,7 @@ async fn not_found_for_missing_trailing_slash() { let client = TestClient::new(app); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; 
assert_eq!(res.status(), StatusCode::NOT_FOUND); } @@ -363,11 +345,11 @@ async fn with_and_without_trailing_slash() { let client = TestClient::new(app); - let res = client.get("/foo/").send().await; + let res = client.get("/foo/").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "with tsr"); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "without tsr"); } @@ -382,13 +364,13 @@ async fn wildcard_doesnt_match_just_trailing_slash() { let client = TestClient::new(app); - let res = client.get("/x").send().await; + let res = client.get("/x").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/x/").send().await; + let res = client.get("/x/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/x/foo/bar").send().await; + let res = client.get("/x/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "foo/bar"); } @@ -403,7 +385,7 @@ async fn what_matches_wildcard() { let client = TestClient::new(app); let get = |path| { - let f = client.get(path).send(); + let f = client.get(path); async move { f.await.text().await } }; @@ -432,10 +414,10 @@ async fn static_and_dynamic_paths() { let client = TestClient::new(app); - let res = client.get("/bar").send().await; + let res = client.get("/bar").await; assert_eq!(res.text().await, "dynamic: bar"); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.text().await, "static"); } @@ -479,10 +461,10 @@ async fn middleware_still_run_for_unmatched_requests() { assert_eq!(COUNT.load(Ordering::SeqCst), 0); - client.get("/").send().await; + client.get("/").await; assert_eq!(COUNT.load(Ordering::SeqCst), 1); - client.get("/not-found").send().await; + client.get("/not-found").await; assert_eq!(COUNT.load(Ordering::SeqCst), 2); } @@ -506,20 +488,19 @@ async fn route_layer() { 
let res = client .get("/foo") .header("authorization", "Bearer password") - .send() .await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::UNAUTHORIZED); - let res = client.get("/not-found").send().await; + let res = client.get("/not-found").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); // it would be nice if this would return `405 Method Not Allowed` // but that requires knowing more about which method route we're calling, which we - // don't know currently since its just a generic `Service` - let res = client.post("/foo").send().await; + // don't know currently since it's just a generic `Service` + let res = client.post("/foo").await; assert_eq!(res.status(), StatusCode::UNAUTHORIZED); } @@ -531,11 +512,11 @@ async fn different_methods_added_in_different_routes() { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; let body = res.text().await; assert_eq!(body, "GET"); - let res = client.post("/").send().await; + let res = client.post("/").await; let body = res.text().await; assert_eq!(body, "POST"); } @@ -573,11 +554,11 @@ async fn merging_routers_with_same_paths_but_different_methods() { let client = TestClient::new(one.merge(two)); - let res = client.get("/").send().await; + let res = client.get("/").await; let body = res.text().await; assert_eq!(body, "GET"); - let res = client.post("/").send().await; + let res = client.post("/").await; let body = res.text().await; assert_eq!(body, "POST"); } @@ -590,11 +571,11 @@ async fn head_content_length_through_hyper_server() { let client = TestClient::new(app); - let res = client.head("/").send().await; + let res = client.head("/").await; assert_eq!(res.headers()["content-length"], "3"); assert!(res.text().await.is_empty()); - let res = client.head("/json").send().await; + let res = client.head("/json").await; 
assert_eq!(res.headers()["content-length"], "9"); assert!(res.text().await.is_empty()); } @@ -605,7 +586,7 @@ async fn head_content_length_through_hyper_server_that_hits_fallback() { let client = TestClient::new(app); - let res = client.head("/").send().await; + let res = client.head("/").await; assert_eq!(res.headers()["content-length"], "3"); } @@ -623,21 +604,13 @@ async fn head_with_middleware_applied() { let client = TestClient::new(app); // send GET request - let res = client - .get("/") - .header("accept-encoding", "gzip") - .send() - .await; + let res = client.get("/").header("accept-encoding", "gzip").await; assert_eq!(res.headers()["transfer-encoding"], "chunked"); // cannot have `transfer-encoding: chunked` and `content-length` assert!(!res.headers().contains_key("content-length")); // send HEAD request - let res = client - .head("/") - .header("accept-encoding", "gzip") - .send() - .await; + let res = client.head("/").header("accept-encoding", "gzip").await; // no response body so no `transfer-encoding` assert!(!res.headers().contains_key("transfer-encoding")); // no content-length since we cannot know it since the response @@ -665,13 +638,13 @@ async fn body_limited_by_default() { println!("calling {uri}"); let stream = futures_util::stream::repeat("a".repeat(1000)).map(Ok::<_, hyper::Error>); - let body = Body::wrap_stream(stream); + let body = reqwest::Body::wrap_stream(stream); let res_future = client .post(uri) .header("content-type", "application/json") .body(body) - .send(); + .into_future(); let res = tokio::time::timeout(Duration::from_secs(3), res_future) .await .expect("never got response"); @@ -689,9 +662,9 @@ async fn disabling_the_default_limit() { let client = TestClient::new(app); // `DEFAULT_LIMIT` is 2mb so make a body larger than that - let body = Body::from("a".repeat(3_000_000)); + let body = reqwest::Body::from("a".repeat(3_000_000)); - let res = client.post("/").body(body).send().await; + let res = 
client.post("/").body(body).await; assert_eq!(res.status(), StatusCode::OK); } @@ -711,10 +684,10 @@ async fn limited_body_with_content_length() { let client = TestClient::new(app); - let res = client.post("/").body("a".repeat(LIMIT)).send().await; + let res = client.post("/").body("a".repeat(LIMIT)).await; assert_eq!(res.status(), StatusCode::OK); - let res = client.post("/").body("a".repeat(LIMIT * 2)).send().await; + let res = client.post("/").body("a".repeat(LIMIT * 2)).await; assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); } @@ -730,15 +703,75 @@ async fn changing_the_default_limit() { let res = client .post("/") - .body(Body::from("a".repeat(new_limit))) - .send() + .body(reqwest::Body::from("a".repeat(new_limit))) .await; assert_eq!(res.status(), StatusCode::OK); let res = client .post("/") - .body(Body::from("a".repeat(new_limit + 1))) - .send() + .body(reqwest::Body::from("a".repeat(new_limit + 1))) + .await; + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); +} + +#[crate::test] +async fn changing_the_default_limit_differently_on_different_routes() { + let limit1 = 2; + let limit2 = 10; + + let app = Router::new() + .route( + "/limit1", + post(|_: Bytes| async {}).layer(DefaultBodyLimit::max(limit1)), + ) + .route( + "/limit2", + post(|_: Bytes| async {}).layer(DefaultBodyLimit::max(limit2)), + ) + .route("/default", post(|_: Bytes| async {})); + + let client = TestClient::new(app); + + let res = client + .post("/limit1") + .body(reqwest::Body::from("a".repeat(limit1))) + .await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client + .post("/limit1") + .body(reqwest::Body::from("a".repeat(limit2))) + .await; + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); + + let res = client + .post("/limit2") + .body(reqwest::Body::from("a".repeat(limit1))) + .await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client + .post("/limit2") + .body(reqwest::Body::from("a".repeat(limit2))) + .await; + 
assert_eq!(res.status(), StatusCode::OK); + + let res = client + .post("/limit2") + .body(reqwest::Body::from("a".repeat(limit1 + limit2))) + .await; + assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); + + let res = client + .post("/default") + .body(reqwest::Body::from("a".repeat(limit1 + limit2))) + .await; + assert_eq!(res.status(), StatusCode::OK); + + let res = client + .post("/default") + // `DEFAULT_LIMIT` is 2mb so make a body larger than that + .body(reqwest::Body::from("a".repeat(3_000_000))) .await; assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); } @@ -761,16 +794,14 @@ async fn limited_body_with_streaming_body() { let stream = futures_util::stream::iter(vec![Ok::<_, hyper::Error>("a".repeat(LIMIT))]); let res = client .post("/") - .body(Body::wrap_stream(stream)) - .send() + .body(reqwest::Body::wrap_stream(stream)) .await; assert_eq!(res.status(), StatusCode::OK); let stream = futures_util::stream::iter(vec![Ok::<_, hyper::Error>("a".repeat(LIMIT * 2))]); let res = client .post("/") - .body(Body::wrap_stream(stream)) - .send() + .body(reqwest::Body::wrap_stream(stream)) .await; assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE); } @@ -807,7 +838,7 @@ async fn extract_state() { let app = Router::new().route("/", get(handler)).with_state(state); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::OK); } @@ -821,7 +852,7 @@ async fn explicitly_set_state() { .with_state("..."); let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.text().await, "foo"); } @@ -839,7 +870,7 @@ async fn layer_response_into_response() { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.headers()["x-foo"], "bar"); assert_eq!(res.status(), StatusCode::IM_A_TEAPOT); } @@ -881,7 +912,7 @@ async fn 
state_isnt_cloned_too_much() { impl Clone for AppState { fn clone(&self) -> Self { - #[rustversion::since(1.65)] + #[rustversion::since(1.66)] #[track_caller] fn count() { if SETUP_DONE.load(Ordering::SeqCst) { @@ -892,12 +923,12 @@ async fn state_isnt_cloned_too_much() { .filter(|line| line.contains("axum") || line.contains("./src")) .collect::>() .join("\n"); - println!("AppState::Clone:\n===============\n{}\n", bt); + println!("AppState::Clone:\n===============\n{bt}\n"); COUNT.fetch_add(1, Ordering::SeqCst); } } - #[rustversion::not(since(1.65))] + #[rustversion::not(since(1.66))] fn count() { if SETUP_DONE.load(Ordering::SeqCst) { COUNT.fetch_add(1, Ordering::SeqCst); @@ -919,7 +950,7 @@ async fn state_isnt_cloned_too_much() { // ignore clones made during setup SETUP_DONE.store(true, Ordering::SeqCst); - client.get("/").send().await; + client.get("/").await; assert_eq!(COUNT.load(Ordering::SeqCst), 4); } @@ -943,7 +974,7 @@ async fn logging_rejections() { let client = TestClient::new(app); assert_eq!( - client.get("/extension").send().await.status(), + client.get("/extension").await.status(), StatusCode::INTERNAL_SERVER_ERROR ); @@ -951,7 +982,6 @@ async fn logging_rejections() { client .post("/string") .body(Vec::from([0, 159, 146, 150])) - .send() .await .status(), StatusCode::BAD_REQUEST, @@ -1005,7 +1035,7 @@ async fn connect_going_to_custom_fallback() { let res = app.oneshot(req).await.unwrap(); assert_eq!(res.status(), StatusCode::NOT_FOUND); - let text = String::from_utf8(hyper::body::to_bytes(res).await.unwrap().to_vec()).unwrap(); + let text = String::from_utf8(res.collect().await.unwrap().to_bytes().to_vec()).unwrap(); assert_eq!(text, "custom fallback"); } @@ -1023,7 +1053,7 @@ async fn connect_going_to_default_fallback() { let res = app.oneshot(req).await.unwrap(); assert_eq!(res.status(), StatusCode::NOT_FOUND); - let body = hyper::body::to_bytes(res).await.unwrap(); + let body = res.collect().await.unwrap().to_bytes(); assert!(body.is_empty()); } 
@@ -1033,7 +1063,39 @@ async fn impl_handler_for_into_response() { let client = TestClient::new(app); - let res = client.post("/things").send().await; + let res = client.post("/things").await; assert_eq!(res.status(), StatusCode::CREATED); assert_eq!(res.text().await, "thing created"); } + +#[crate::test] +async fn locks_mutex_very_little() { + let (num, app) = mutex_num_locked(|| async { + Router::new() + .route("/a", get(|| async {})) + .route("/b", get(|| async {})) + .route("/c", get(|| async {})) + .with_state::<()>(()) + .into_service::() + }) + .await; + // once for `Router::new` for setting the default fallback and 3 times, once per route + assert_eq!(num, 4); + + for path in ["/a", "/b", "/c"] { + // calling the router should only lock the mutex once + let (num, _res) = mutex_num_locked(|| async { + // We cannot use `TestClient` because it uses `serve` which spawns a new task per + // connection and `mutex_num_locked` uses a task local to keep track of the number of + // locks. So spawning a new task would unset the task local set by `mutex_num_locked` + // + // So instead `call` the service directly without spawning new tasks. 
+ app.clone() + .oneshot(Request::builder().uri(path).body(Body::empty()).unwrap()) + .await + .unwrap() + }) + .await; + assert_eq!(num, 1); + } +} diff --git a/.cargo-vendor/axum/src/routing/tests/nest.rs b/.cargo-vendor/axum/src/routing/tests/nest.rs index 0544f8be59..40df1f1ad8 100644 --- a/.cargo-vendor/axum/src/routing/tests/nest.rs +++ b/.cargo-vendor/axum/src/routing/tests/nest.rs @@ -1,5 +1,4 @@ use super::*; -use crate::{body::boxed, extract::Extension}; use std::collections::HashMap; use tower_http::services::ServeDir; @@ -41,19 +40,19 @@ async fn nesting_apps() { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "hi"); - let res = client.get("/v0/api/users").send().await; + let res = client.get("/v0/api/users").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "users#index"); - let res = client.get("/v0/api/users/123").send().await; + let res = client.get("/v0/api/users/123").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "v0: users#show (123)"); - let res = client.get("/v0/api/games/123").send().await; + let res = client.get("/v0/api/games/123").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "v0: games#show (123)"); } @@ -65,14 +64,14 @@ async fn wrong_method_nest() { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.post("/").send().await; + let res = client.post("/").await; assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(res.headers()[ALLOW], "GET,HEAD"); - let res = client.patch("/foo").send().await; + let res = client.patch("/foo").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); } @@ -83,14 +82,14 @@ async fn nesting_router_at_root() { let client = TestClient::new(app); - let res 
= client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/foo"); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); } @@ -101,14 +100,14 @@ async fn nesting_router_at_empty_path() { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/foo"); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); } @@ -118,15 +117,15 @@ async fn nesting_handler_at_root() { let client = TestClient::new(app); - let res = client.get("/").send().await; + let res = client.get("/").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/"); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/foo"); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/foo/bar"); } @@ -141,18 +140,18 @@ async fn nested_url_extractor() { .route("/baz", get(|uri: Uri| async move { uri.to_string() })) .route( "/qux", - get(|req: Request| async move { req.uri().to_string() }), + get(|req: Request| async move { req.uri().to_string() }), ), ), ); let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.status(), StatusCode::OK); 
assert_eq!(res.text().await, "/baz"); - let res = client.get("/foo/bar/qux").send().await; + let res = client.get("/foo/bar/qux").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/qux"); } @@ -172,7 +171,7 @@ async fn nested_url_original_extractor() { let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/foo/bar/baz"); } @@ -185,8 +184,8 @@ async fn nested_service_sees_stripped_uri() { "/bar", Router::new().route_service( "/baz", - service_fn(|req: Request| async move { - let body = boxed(Body::from(req.uri().to_string())); + service_fn(|req: Request| async move { + let body = Body::from(req.uri().to_string()); Ok::<_, Infallible>(Response::new(body)) }), ), @@ -195,7 +194,7 @@ async fn nested_service_sees_stripped_uri() { let client = TestClient::new(app); - let res = client.get("/foo/bar/baz").send().await; + let res = client.get("/foo/bar/baz").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "/baz"); } @@ -206,7 +205,7 @@ async fn nest_static_file_server() { let client = TestClient::new(app); - let res = client.get("/static/README.md").send().await; + let res = client.get("/static/README.md").await; assert_eq!(res.status(), StatusCode::OK); } @@ -223,9 +222,9 @@ async fn nested_multiple_routes() { let client = TestClient::new(app); - assert_eq!(client.get("/").send().await.text().await, "root"); - assert_eq!(client.get("/api/users").send().await.text().await, "users"); - assert_eq!(client.get("/api/teams").send().await.text().await, "teams"); + assert_eq!(client.get("/").await.text().await, "root"); + assert_eq!(client.get("/api/users").await.text().await, "users"); + assert_eq!(client.get("/api/teams").await.text().await, "teams"); } #[test] @@ -257,14 +256,14 @@ async fn multiple_top_level_nests() { let client = TestClient::new(app); - 
assert_eq!(client.get("/one/route").send().await.text().await, "one"); - assert_eq!(client.get("/two/route").send().await.text().await, "two"); + assert_eq!(client.get("/one/route").await.text().await, "one"); + assert_eq!(client.get("/two/route").await.text().await, "two"); } #[crate::test] #[should_panic(expected = "Invalid route: nested routes cannot contain wildcards (*)")] async fn nest_cannot_contain_wildcards() { - _ = Router::<(), Body>::new().nest("/one/*rest", Router::new()); + _ = Router::<()>::new().nest("/one/*rest", Router::new()); } #[crate::test] @@ -308,14 +307,11 @@ async fn outer_middleware_still_see_whole_url() { let client = TestClient::new(app); - assert_eq!(client.get("/").send().await.text().await, "/"); - assert_eq!(client.get("/foo").send().await.text().await, "/foo"); - assert_eq!(client.get("/foo/bar").send().await.text().await, "/foo/bar"); - assert_eq!( - client.get("/not-found").send().await.text().await, - "/not-found" - ); - assert_eq!(client.get("/one/two").send().await.text().await, "/one/two"); + assert_eq!(client.get("/").await.text().await, "/"); + assert_eq!(client.get("/foo").await.text().await, "/foo"); + assert_eq!(client.get("/foo/bar").await.text().await, "/foo/bar"); + assert_eq!(client.get("/not-found").await.text().await, "/not-found"); + assert_eq!(client.get("/one/two").await.text().await, "/one/two"); } #[crate::test] @@ -329,7 +325,7 @@ async fn nest_at_capture() { let client = TestClient::new(app); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); assert_eq!(res.text().await, "a=foo b=bar"); } @@ -340,13 +336,13 @@ async fn nest_with_and_without_trailing() { let client = TestClient::new(app); - let res = client.get("/foo").send().await; + let res = client.get("/foo").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/foo/").send().await; + let res = client.get("/foo/").await; assert_eq!(res.status(), 
StatusCode::OK); - let res = client.get("/foo/bar").send().await; + let res = client.get("/foo/bar").await; assert_eq!(res.status(), StatusCode::OK); } @@ -361,28 +357,28 @@ async fn nesting_with_root_inner_router() { // `/service/` does match the `/service` prefix and the remaining path is technically // empty, which is the same as `/` which matches `.route("/", _)` - let res = client.get("/service").send().await; + let res = client.get("/service").await; assert_eq!(res.status(), StatusCode::OK); // `/service/` does match the `/service` prefix and the remaining path is `/` // which matches `.route("/", _)` // // this is perhaps a little surprising but don't think there is much we can do - let res = client.get("/service/").send().await; + let res = client.get("/service/").await; assert_eq!(res.status(), StatusCode::OK); // at least it does work like you'd expect when using `nest` - let res = client.get("/router").send().await; + let res = client.get("/router").await; assert_eq!(res.status(), StatusCode::OK); - let res = client.get("/router/").send().await; + let res = client.get("/router/").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/router-slash").send().await; + let res = client.get("/router-slash").await; assert_eq!(res.status(), StatusCode::NOT_FOUND); - let res = client.get("/router-slash/").send().await; + let res = client.get("/router-slash/").await; assert_eq!(res.status(), StatusCode::OK); } @@ -401,7 +397,7 @@ macro_rules! 
nested_route_test { let inner = Router::new().route($route_path, get(|| async {})); let app = Router::new().nest($nested_path, inner); let client = TestClient::new(app); - let res = client.get($expected_path).send().await; + let res = client.get($expected_path).await; let status = res.status(); assert_eq!(status, StatusCode::OK, "Router"); } diff --git a/.cargo-vendor/axum/src/routing/url_params.rs b/.cargo-vendor/axum/src/routing/url_params.rs index 6243d379c0..eb5a08a330 100644 --- a/.cargo-vendor/axum/src/routing/url_params.rs +++ b/.cargo-vendor/axum/src/routing/url_params.rs @@ -3,6 +3,7 @@ use http::Extensions; use matchit::Params; use std::sync::Arc; +#[derive(Clone)] pub(crate) enum UrlParams { Params(Vec<(Arc, PercentDecodedStr)>), InvalidUtf8InPathParam { key: Arc }, diff --git a/.cargo-vendor/axum/src/serve.rs b/.cargo-vendor/axum/src/serve.rs new file mode 100644 index 0000000000..a2df756b09 --- /dev/null +++ b/.cargo-vendor/axum/src/serve.rs @@ -0,0 +1,563 @@ +//! Serve services. + +use std::{ + convert::Infallible, + fmt::Debug, + future::{poll_fn, Future, IntoFuture}, + io, + marker::PhantomData, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; + +use axum_core::{body::Body, extract::Request, response::Response}; +use futures_util::{pin_mut, FutureExt}; +use hyper::body::Incoming; +use hyper_util::rt::{TokioExecutor, TokioIo}; +#[cfg(any(feature = "http1", feature = "http2"))] +use hyper_util::server::conn::auto::Builder; +use pin_project_lite::pin_project; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::watch, +}; +use tower::util::{Oneshot, ServiceExt}; +use tower_service::Service; + +/// Serve the service with the supplied listener. +/// +/// This method of running a service is intentionally simple and doesn't support any configuration. +/// Use hyper or hyper-util if you need configuration. +/// +/// It supports both HTTP/1 as well as HTTP/2. 
+/// +/// # Examples +/// +/// Serving a [`Router`]: +/// +/// ``` +/// use axum::{Router, routing::get}; +/// +/// # async { +/// let router = Router::new().route("/", get(|| async { "Hello, World!" })); +/// +/// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +/// axum::serve(listener, router).await.unwrap(); +/// # }; +/// ``` +/// +/// See also [`Router::into_make_service_with_connect_info`]. +/// +/// Serving a [`MethodRouter`]: +/// +/// ``` +/// use axum::routing::get; +/// +/// # async { +/// let router = get(|| async { "Hello, World!" }); +/// +/// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +/// axum::serve(listener, router).await.unwrap(); +/// # }; +/// ``` +/// +/// See also [`MethodRouter::into_make_service_with_connect_info`]. +/// +/// Serving a [`Handler`]: +/// +/// ``` +/// use axum::handler::HandlerWithoutStateExt; +/// +/// # async { +/// async fn handler() -> &'static str { +/// "Hello, World!" +/// } +/// +/// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); +/// axum::serve(listener, handler.into_make_service()).await.unwrap(); +/// # }; +/// ``` +/// +/// See also [`HandlerWithoutStateExt::into_make_service_with_connect_info`] and +/// [`HandlerService::into_make_service_with_connect_info`]. 
+/// +/// [`Router`]: crate::Router +/// [`Router::into_make_service_with_connect_info`]: crate::Router::into_make_service_with_connect_info +/// [`MethodRouter`]: crate::routing::MethodRouter +/// [`MethodRouter::into_make_service_with_connect_info`]: crate::routing::MethodRouter::into_make_service_with_connect_info +/// [`Handler`]: crate::handler::Handler +/// [`HandlerWithoutStateExt::into_make_service_with_connect_info`]: crate::handler::HandlerWithoutStateExt::into_make_service_with_connect_info +/// [`HandlerService::into_make_service_with_connect_info`]: crate::handler::HandlerService::into_make_service_with_connect_info +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +pub fn serve(tcp_listener: TcpListener, make_service: M) -> Serve +where + M: for<'a> Service, Error = Infallible, Response = S>, + S: Service + Clone + Send + 'static, + S::Future: Send, +{ + Serve { + tcp_listener, + make_service, + _marker: PhantomData, + } +} + +/// Future returned by [`serve`]. +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +#[must_use = "futures must be awaited or polled"] +pub struct Serve { + tcp_listener: TcpListener, + make_service: M, + _marker: PhantomData, +} + +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +impl Serve { + /// Prepares a server to handle graceful shutdown when the provided future completes. + /// + /// # Example + /// + /// ``` + /// use axum::{Router, routing::get}; + /// + /// # async { + /// let router = Router::new().route("/", get(|| async { "Hello, World!" })); + /// + /// let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(); + /// axum::serve(listener, router) + /// .with_graceful_shutdown(shutdown_signal()) + /// .await + /// .unwrap(); + /// # }; + /// + /// async fn shutdown_signal() { + /// // ... 
+ /// } + /// ``` + pub fn with_graceful_shutdown(self, signal: F) -> WithGracefulShutdown + where + F: Future + Send + 'static, + { + WithGracefulShutdown { + tcp_listener: self.tcp_listener, + make_service: self.make_service, + signal, + _marker: PhantomData, + } + } +} + +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +impl Debug for Serve +where + M: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Self { + tcp_listener, + make_service, + _marker: _, + } = self; + + f.debug_struct("Serve") + .field("tcp_listener", tcp_listener) + .field("make_service", make_service) + .finish() + } +} + +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +impl IntoFuture for Serve +where + M: for<'a> Service, Error = Infallible, Response = S> + Send + 'static, + for<'a> >>::Future: Send, + S: Service + Clone + Send + 'static, + S::Future: Send, +{ + type Output = io::Result<()>; + type IntoFuture = private::ServeFuture; + + fn into_future(self) -> Self::IntoFuture { + private::ServeFuture(Box::pin(async move { + let Self { + tcp_listener, + mut make_service, + _marker: _, + } = self; + + loop { + let (tcp_stream, remote_addr) = match tcp_accept(&tcp_listener).await { + Some(conn) => conn, + None => continue, + }; + let tcp_stream = TokioIo::new(tcp_stream); + + poll_fn(|cx| make_service.poll_ready(cx)) + .await + .unwrap_or_else(|err| match err {}); + + let tower_service = make_service + .call(IncomingStream { + tcp_stream: &tcp_stream, + remote_addr, + }) + .await + .unwrap_or_else(|err| match err {}); + + let hyper_service = TowerToHyperService { + service: tower_service, + }; + + tokio::spawn(async move { + match Builder::new(TokioExecutor::new()) + // upgrades needed for websockets + .serve_connection_with_upgrades(tcp_stream, hyper_service) + .await + { + Ok(()) => {} + Err(_err) => { + // This error only appears when the client doesn't send a request and + // terminate the connection. 
+ // + // If client sends one request then terminate connection whenever, it doesn't + // appear. + } + } + }); + } + })) + } +} + +/// Serve future with graceful shutdown enabled. +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +#[must_use = "futures must be awaited or polled"] +pub struct WithGracefulShutdown { + tcp_listener: TcpListener, + make_service: M, + signal: F, + _marker: PhantomData, +} + +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +impl Debug for WithGracefulShutdown +where + M: Debug, + S: Debug, + F: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Self { + tcp_listener, + make_service, + signal, + _marker: _, + } = self; + + f.debug_struct("WithGracefulShutdown") + .field("tcp_listener", tcp_listener) + .field("make_service", make_service) + .field("signal", signal) + .finish() + } +} + +#[cfg(all(feature = "tokio", any(feature = "http1", feature = "http2")))] +impl IntoFuture for WithGracefulShutdown +where + M: for<'a> Service, Error = Infallible, Response = S> + Send + 'static, + for<'a> >>::Future: Send, + S: Service + Clone + Send + 'static, + S::Future: Send, + F: Future + Send + 'static, +{ + type Output = io::Result<()>; + type IntoFuture = private::ServeFuture; + + fn into_future(self) -> Self::IntoFuture { + let Self { + tcp_listener, + mut make_service, + signal, + _marker: _, + } = self; + + let (signal_tx, signal_rx) = watch::channel(()); + let signal_tx = Arc::new(signal_tx); + tokio::spawn(async move { + signal.await; + trace!("received graceful shutdown signal. Telling tasks to shutdown"); + drop(signal_rx); + }); + + let (close_tx, close_rx) = watch::channel(()); + + private::ServeFuture(Box::pin(async move { + loop { + let (tcp_stream, remote_addr) = tokio::select! 
{ + conn = tcp_accept(&tcp_listener) => { + match conn { + Some(conn) => conn, + None => continue, + } + } + _ = signal_tx.closed() => { + trace!("signal received, not accepting new connections"); + break; + } + }; + let tcp_stream = TokioIo::new(tcp_stream); + + trace!("connection {remote_addr} accepted"); + + poll_fn(|cx| make_service.poll_ready(cx)) + .await + .unwrap_or_else(|err| match err {}); + + let tower_service = make_service + .call(IncomingStream { + tcp_stream: &tcp_stream, + remote_addr, + }) + .await + .unwrap_or_else(|err| match err {}); + + let hyper_service = TowerToHyperService { + service: tower_service, + }; + + let signal_tx = Arc::clone(&signal_tx); + + let close_rx = close_rx.clone(); + + tokio::spawn(async move { + let builder = Builder::new(TokioExecutor::new()); + let conn = builder.serve_connection_with_upgrades(tcp_stream, hyper_service); + pin_mut!(conn); + + let signal_closed = signal_tx.closed().fuse(); + pin_mut!(signal_closed); + + loop { + tokio::select! 
{ + result = conn.as_mut() => { + if let Err(_err) = result { + trace!("failed to serve connection: {_err:#}"); + } + break; + } + _ = &mut signal_closed => { + trace!("signal received in task, starting graceful shutdown"); + conn.as_mut().graceful_shutdown(); + } + } + } + + trace!("connection {remote_addr} closed"); + + drop(close_rx); + }); + } + + drop(close_rx); + drop(tcp_listener); + + trace!( + "waiting for {} task(s) to finish", + close_tx.receiver_count() + ); + close_tx.closed().await; + + Ok(()) + })) + } +} + +fn is_connection_error(e: &io::Error) -> bool { + matches!( + e.kind(), + io::ErrorKind::ConnectionRefused + | io::ErrorKind::ConnectionAborted + | io::ErrorKind::ConnectionReset + ) +} + +async fn tcp_accept(listener: &TcpListener) -> Option<(TcpStream, SocketAddr)> { + match listener.accept().await { + Ok(conn) => Some(conn), + Err(e) => { + if is_connection_error(&e) { + return None; + } + + // [From `hyper::Server` in 0.14](https://github.com/hyperium/hyper/blob/v0.14.27/src/server/tcp.rs#L186) + // + // > A possible scenario is that the process has hit the max open files + // > allowed, and so trying to accept a new connection will fail with + // > `EMFILE`. In some cases, it's preferable to just wait for some time, if + // > the application will likely close some files (or connections), and try + // > to accept the connection again. If this option is `true`, the error + // > will be logged at the `error` level, since it is still a big deal, + // > and then the listener will sleep for 1 second. + // + // hyper allowed customizing this but axum does not. 
+ error!("accept error: {e}"); + tokio::time::sleep(Duration::from_secs(1)).await; + None + } + } +} + +mod private { + use std::{ + future::Future, + io, + pin::Pin, + task::{Context, Poll}, + }; + + pub struct ServeFuture(pub(super) futures_util::future::BoxFuture<'static, io::Result<()>>); + + impl Future for ServeFuture { + type Output = io::Result<()>; + + #[inline] + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.0.as_mut().poll(cx) + } + } + + impl std::fmt::Debug for ServeFuture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ServeFuture").finish_non_exhaustive() + } + } +} + +#[derive(Debug, Copy, Clone)] +struct TowerToHyperService { + service: S, +} + +impl hyper::service::Service> for TowerToHyperService +where + S: tower_service::Service + Clone, +{ + type Response = S::Response; + type Error = S::Error; + type Future = TowerToHyperServiceFuture; + + fn call(&self, req: Request) -> Self::Future { + let req = req.map(Body::new); + TowerToHyperServiceFuture { + future: self.service.clone().oneshot(req), + } + } +} + +pin_project! { + struct TowerToHyperServiceFuture + where + S: tower_service::Service, + { + #[pin] + future: Oneshot, + } +} + +impl Future for TowerToHyperServiceFuture +where + S: tower_service::Service, +{ + type Output = Result; + + #[inline] + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.project().future.poll(cx) + } +} + +/// An incoming stream. +/// +/// Used with [`serve`] and [`IntoMakeServiceWithConnectInfo`]. +/// +/// [`IntoMakeServiceWithConnectInfo`]: crate::extract::connect_info::IntoMakeServiceWithConnectInfo +#[derive(Debug)] +pub struct IncomingStream<'a> { + tcp_stream: &'a TokioIo, + remote_addr: SocketAddr, +} + +impl IncomingStream<'_> { + /// Returns the local address that this stream is bound to. 
+ pub fn local_addr(&self) -> std::io::Result { + self.tcp_stream.inner().local_addr() + } + + /// Returns the remote address that this stream is bound to. + pub fn remote_addr(&self) -> SocketAddr { + self.remote_addr + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + handler::{Handler, HandlerWithoutStateExt}, + routing::get, + Router, + }; + + #[allow(dead_code, unused_must_use)] + async fn if_it_compiles_it_works() { + let router: Router = Router::new(); + + let addr = "0.0.0.0:0"; + + // router + serve(TcpListener::bind(addr).await.unwrap(), router.clone()); + serve( + TcpListener::bind(addr).await.unwrap(), + router.clone().into_make_service(), + ); + serve( + TcpListener::bind(addr).await.unwrap(), + router.into_make_service_with_connect_info::(), + ); + + // method router + serve(TcpListener::bind(addr).await.unwrap(), get(handler)); + serve( + TcpListener::bind(addr).await.unwrap(), + get(handler).into_make_service(), + ); + serve( + TcpListener::bind(addr).await.unwrap(), + get(handler).into_make_service_with_connect_info::(), + ); + + // handler + serve( + TcpListener::bind(addr).await.unwrap(), + handler.into_service(), + ); + serve( + TcpListener::bind(addr).await.unwrap(), + handler.with_state(()), + ); + serve( + TcpListener::bind(addr).await.unwrap(), + handler.into_make_service(), + ); + serve( + TcpListener::bind(addr).await.unwrap(), + handler.into_make_service_with_connect_info::(), + ); + } + + async fn handler() {} +} diff --git a/.cargo-vendor/axum/src/service_ext.rs b/.cargo-vendor/axum/src/service_ext.rs index e603d65f16..1b49f244b6 100644 --- a/.cargo-vendor/axum/src/service_ext.rs +++ b/.cargo-vendor/axum/src/service_ext.rs @@ -1,3 +1,4 @@ +use crate::error_handling::HandleError; #[cfg(feature = "tokio")] use crate::extract::connect_info::IntoMakeServiceWithConnectInfo; use crate::routing::IntoMakeService; @@ -30,6 +31,17 @@ pub trait ServiceExt: Service + Sized { /// [`ConnectInfo`]: 
crate::extract::connect_info::ConnectInfo #[cfg(feature = "tokio")] fn into_make_service_with_connect_info(self) -> IntoMakeServiceWithConnectInfo; + + /// Convert this service into a [`HandleError`], that will handle errors + /// by converting them into responses. + /// + /// See ["error handling model"] for more details. + /// + /// [`HandleError`]: crate::error_handling::HandleError + /// ["error handling model"]: crate::error_handling#axums-error-handling-model + fn handle_error(self, f: F) -> HandleError { + HandleError::new(self, f) + } } impl ServiceExt for S diff --git a/.cargo-vendor/axum/src/test_helpers/mod.rs b/.cargo-vendor/axum/src/test_helpers/mod.rs index de4554905e..c6ae1bff4e 100644 --- a/.cargo-vendor/axum/src/test_helpers/mod.rs +++ b/.cargo-vendor/axum/src/test_helpers/mod.rs @@ -1,6 +1,6 @@ #![allow(clippy::disallowed_names)] -use crate::{body::HttpBody, BoxError}; +use crate::{extract::Request, response::Response, serve}; mod test_client; pub(crate) use self::test_client::*; @@ -9,6 +9,6 @@ pub(crate) mod tracing_helpers; pub(crate) fn assert_send() {} pub(crate) fn assert_sync() {} -pub(crate) fn assert_unpin() {} +#[allow(dead_code)] pub(crate) struct NotSendSync(*const ()); diff --git a/.cargo-vendor/axum/src/test_helpers/test_client.rs b/.cargo-vendor/axum/src/test_helpers/test_client.rs index d1d73f6c1d..f6751fd2f4 100644 --- a/.cargo-vendor/axum/src/test_helpers/test_client.rs +++ b/.cargo-vendor/axum/src/test_helpers/test_client.rs @@ -1,37 +1,48 @@ -use super::{BoxError, HttpBody}; +use super::{serve, Request, Response}; use bytes::Bytes; +use futures_util::future::BoxFuture; use http::{ header::{HeaderName, HeaderValue}, - Request, StatusCode, + StatusCode, }; -use hyper::{Body, Server}; -use std::net::{SocketAddr, TcpListener}; +use std::{convert::Infallible, future::IntoFuture, net::SocketAddr, str::FromStr}; +use tokio::net::TcpListener; use tower::make::Shared; use tower_service::Service; +pub(crate) fn spawn_service(svc: S) -> 
SocketAddr +where + S: Service + Clone + Send + 'static, + S::Future: Send, +{ + let std_listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + std_listener.set_nonblocking(true).unwrap(); + let listener = TcpListener::from_std(std_listener).unwrap(); + + let addr = listener.local_addr().unwrap(); + println!("Listening on {addr}"); + + tokio::spawn(async move { + serve(listener, Shared::new(svc)) + .await + .expect("server error") + }); + + addr +} + pub(crate) struct TestClient { client: reqwest::Client, addr: SocketAddr, } impl TestClient { - pub(crate) fn new(svc: S) -> Self + pub(crate) fn new(svc: S) -> Self where - S: Service, Response = http::Response> + Clone + Send + 'static, - ResBody: HttpBody + Send + 'static, - ResBody::Data: Send, - ResBody::Error: Into, + S: Service + Clone + Send + 'static, S::Future: Send, - S::Error: Into, { - let listener = TcpListener::bind("127.0.0.1:0").expect("Could not bind ephemeral socket"); - let addr = listener.local_addr().unwrap(); - println!("Listening on {addr}"); - - tokio::spawn(async move { - let server = Server::from_tcp(listener).unwrap().serve(Shared::new(svc)); - server.await.expect("server error"); - }); + let addr = spawn_service(svc); let client = reqwest::Client::builder() .redirect(reqwest::redirect::Policy::none()) @@ -79,12 +90,6 @@ pub(crate) struct RequestBuilder { } impl RequestBuilder { - pub(crate) async fn send(self) -> TestResponse { - TestResponse { - response: self.builder.send().await.unwrap(), - } - } - pub(crate) fn body(mut self, body: impl Into) -> Self { self.builder = self.builder.body(body); self @@ -105,7 +110,15 @@ impl RequestBuilder { HeaderValue: TryFrom, >::Error: Into, { + // reqwest still uses http 0.2 + let key: HeaderName = key.try_into().map_err(Into::into).unwrap(); + let key = reqwest::header::HeaderName::from_bytes(key.as_ref()).unwrap(); + + let value: HeaderValue = value.try_into().map_err(Into::into).unwrap(); + let value = 
reqwest::header::HeaderValue::from_bytes(value.as_bytes()).unwrap(); + self.builder = self.builder.header(key, value); + self } @@ -116,6 +129,19 @@ impl RequestBuilder { } } +impl IntoFuture for RequestBuilder { + type Output = TestResponse; + type IntoFuture = BoxFuture<'static, Self::Output>; + + fn into_future(self) -> Self::IntoFuture { + Box::pin(async { + TestResponse { + response: self.builder.send().await.unwrap(), + } + }) + } +} + #[derive(Debug)] pub(crate) struct TestResponse { response: reqwest::Response, @@ -140,11 +166,18 @@ impl TestResponse { } pub(crate) fn status(&self) -> StatusCode { - self.response.status() + StatusCode::from_u16(self.response.status().as_u16()).unwrap() } - pub(crate) fn headers(&self) -> &http::HeaderMap { - self.response.headers() + pub(crate) fn headers(&self) -> http::HeaderMap { + // reqwest still uses http 0.2 so have to convert into http 1.0 + let mut headers = http::HeaderMap::new(); + for (key, value) in self.response.headers() { + let key = http::HeaderName::from_str(key.as_str()).unwrap(); + let value = http::HeaderValue::from_bytes(value.as_bytes()).unwrap(); + headers.insert(key, value); + } + headers } pub(crate) async fn chunk(&mut self) -> Option { diff --git a/.cargo-vendor/axum/src/test_helpers/tracing_helpers.rs b/.cargo-vendor/axum/src/test_helpers/tracing_helpers.rs index 3d5cf18149..2240717ee4 100644 --- a/.cargo-vendor/axum/src/test_helpers/tracing_helpers.rs +++ b/.cargo-vendor/axum/src/test_helpers/tracing_helpers.rs @@ -1,8 +1,5 @@ -use std::{ - future::Future, - io, - sync::{Arc, Mutex}, -}; +use crate::util::AxumMutex; +use std::{future::Future, io, sync::Arc}; use serde::{de::DeserializeOwned, Deserialize}; use tracing_subscriber::prelude::*; @@ -50,12 +47,12 @@ where } struct TestMakeWriter { - write: Arc>>>, + write: Arc>>>, } impl TestMakeWriter { fn new() -> (Self, Handle) { - let write = Arc::new(Mutex::new(Some(Vec::::new()))); + let write = Arc::new(AxumMutex::new(Some(Vec::::new()))); ( 
Self { @@ -97,7 +94,7 @@ impl<'a> io::Write for Writer<'a> { } struct Handle { - write: Arc>>>, + write: Arc>>>, } impl Handle { diff --git a/.cargo-vendor/axum/src/util.rs b/.cargo-vendor/axum/src/util.rs index f7fc6ae149..60abe38b91 100644 --- a/.cargo-vendor/axum/src/util.rs +++ b/.cargo-vendor/axum/src/util.rs @@ -1,6 +1,8 @@ use pin_project_lite::pin_project; use std::{ops::Deref, sync::Arc}; +pub(crate) use self::mutex::*; + #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub(crate) struct PercentDecodedStr(Arc); @@ -59,3 +61,64 @@ fn test_try_downcast() { assert_eq!(try_downcast::(5_u32), Err(5_u32)); assert_eq!(try_downcast::(5_i32), Ok(5_i32)); } + +// `AxumMutex` is a wrapper around `std::sync::Mutex` which, in test mode, tracks the number of +// times it's been locked on the current task. That way we can write a test to ensure we don't +// accidentally introduce more locking. +// +// When not in test mode, it is just a type alias for `std::sync::Mutex`. +#[cfg(not(test))] +mod mutex { + #[allow(clippy::disallowed_types)] + pub(crate) type AxumMutex = std::sync::Mutex; +} + +#[cfg(test)] +#[allow(clippy::disallowed_types)] +mod mutex { + use std::sync::{ + atomic::{AtomicUsize, Ordering}, + LockResult, Mutex, MutexGuard, + }; + + tokio::task_local! 
{ + pub(crate) static NUM_LOCKED: AtomicUsize; + } + + pub(crate) async fn mutex_num_locked(f: F) -> (usize, Fut::Output) + where + F: FnOnce() -> Fut, + Fut: std::future::IntoFuture, + { + NUM_LOCKED + .scope(AtomicUsize::new(0), async move { + let output = f().await; + let num = NUM_LOCKED.with(|num| num.load(Ordering::SeqCst)); + (num, output) + }) + .await + } + + pub(crate) struct AxumMutex(Mutex); + + impl AxumMutex { + pub(crate) fn new(value: T) -> Self { + Self(Mutex::new(value)) + } + + pub(crate) fn get_mut(&mut self) -> LockResult<&mut T> { + self.0.get_mut() + } + + pub(crate) fn into_inner(self) -> LockResult { + self.0.into_inner() + } + + pub(crate) fn lock(&self) -> LockResult> { + _ = NUM_LOCKED.try_with(|num| { + num.fetch_add(1, Ordering::SeqCst); + }); + self.0.lock() + } + } +} diff --git a/.cargo-vendor/concurrent-queue/.cargo-checksum.json b/.cargo-vendor/concurrent-queue/.cargo-checksum.json new file mode 100644 index 0000000000..a9187f00d7 --- /dev/null +++ b/.cargo-vendor/concurrent-queue/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"e9a4a11edce8b62146fdade24e1a74ee624601b2efcaa7035359c464a1ff7ff7","Cargo.toml":"d14f713829a83746178dd8a52732e1d106c895b3b4370bb9436fb190a2d763b2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"d7a326783ef72b063a5a237b8c64e209e80fe61b9dda20e4686b5d8b19b16fb1","benches/bench.rs":"6bac7fcdfbd1b1caa2b36089a347fb120091b95ca7bd399249a96f1271e1bf08","src/bounded.rs":"f161cc0e03f59cc764a44dc0782f7fcef7325fc328dfc8cb8c7fd608fc259cc8","src/lib.rs":"dc69f8a48cc28fe73ea1be88d77cd1aba98947d5a673019e61c630cc04c537ad","src/single.rs":"610671ffb6f3b3bc9d375b99f4e004c61eece74caa29c2a3af6977d4764185f4","src/sync.rs":"7dc9bba96eda875ee3a1e5b808e4e2317cdd03293a38492a214e26e538159eef","src/unbounded.rs":"e90ea841f3f1eac5503b1c3cd2949de64956fc6a164ca65150c6c2bba02d0e16","tests/bounded.rs":"07a357eae995a79c5b6ac586037a86ed49df754ef3893d16891dc3c686299c6b","tests/loom.rs":"63e40d2598f80c97cada351c8db9c8d5e79d97bae870bdf9fe510d2b21510616","tests/single.rs":"7866f94d1c350e9a860aab550165806a8422649845ac6e9c95045886ce3e7659","tests/unbounded.rs":"3f49e41c33c14ab7ac255ef48c0af4f0f1cfcc9352fc73f21918df3039ff10d9"},"package":"4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"} \ No newline at end of file diff --git a/.cargo-vendor/concurrent-queue/CHANGELOG.md b/.cargo-vendor/concurrent-queue/CHANGELOG.md new file mode 100644 index 0000000000..f4f8bfd27f --- /dev/null +++ b/.cargo-vendor/concurrent-queue/CHANGELOG.md @@ -0,0 +1,69 @@ +# Version 2.5.0 + +- Add a `force_push` method that can be used to add an element to the queue by displacing another. (#58) +- Make `ConcurrentQueue::unbounded()` into a `const` function. (#67) +- Fix a compilation error in the Loom implementation. (#65) + +# Version 2.4.0 + +- Remove unnecessary heap allocations from inside of the `ConcurrentQueue` type. 
(#53) + +# Version 2.3.0 + +- Implement `UnwindSafe` without libstd. (#49) +- Bump `fastrand` to `v2.0.0`. (#43) +- Use inline assembly in the `full_fence` funtion. (#47) + +# Version 2.2.0 + +- Add the try_iter method. (#36) + +# Version 2.1.0 + +- Update `portable-atomic` to 1.0. (#33) + +# Version 2.0.0 + +- Add support for the `portable-atomic` and `loom` crates. (#27) +- **Breaking:** Add an `std` feature that can be disabled to use this crate on `no_std` platforms. (#22) +- Replace usage of `cache-padded` with `crossbeam-utils`. (#26) + +# Version 1.2.4 + +- Fix fence on x86 and miri. (#18) +- Revert 1.2.3. (#18) + +# Version 1.2.3 + +**Note:** This release has been yanked, see #17 for details. + +- Fix fence on non-x86 architectures and miri. (#16) + +# Version 1.2.2 + +- Add a special, efficient `bounded(1)` implementation. + +# Version 1.2.1 + +- In the bounded queue, use boxed slice instead of raw pointers. + +# Version 1.2.0 + +- Update dependencies. +- Implement `UnwindSafe` and `RefUnwindSafe` for `ConcurrentQueue`. + +# Version 1.1.2 + +- Optimize `SeqCst` fences. + +# Version 1.1.1 + +- Clarify errors in docs. + +# Version 1.1.0 + +- Add extra methods to error types. + +# Version 1.0.0 + +- Initial version diff --git a/.cargo-vendor/concurrent-queue/Cargo.toml b/.cargo-vendor/concurrent-queue/Cargo.toml new file mode 100644 index 0000000000..cdce2b4b6b --- /dev/null +++ b/.cargo-vendor/concurrent-queue/Cargo.toml @@ -0,0 +1,72 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.60" +name = "concurrent-queue" +version = "2.5.0" +authors = [ + "Stjepan Glavina ", + "Taiki Endo ", + "John Nunley ", +] +exclude = ["/.*"] +description = "Concurrent multi-producer multi-consumer queue" +readme = "README.md" +keywords = [ + "channel", + "mpmc", + "spsc", + "spmc", + "mpsc", +] +categories = ["concurrency"] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/concurrent-queue" + +[lib] +bench = false + +[[bench]] +name = "bench" +harness = false + +[dependencies.crossbeam-utils] +version = "0.8.11" +default-features = false + +[dependencies.portable-atomic] +version = "1" +optional = true +default-features = false + +[dev-dependencies.criterion] +version = "0.5" +features = ["cargo_bench_support"] +default-features = false + +[dev-dependencies.easy-parallel] +version = "3.1.0" + +[dev-dependencies.fastrand] +version = "2.0.0" + +[features] +default = ["std"] +std = [] + +[target."cfg(loom)".dependencies.loom] +version = "0.7" +optional = true + +[target."cfg(target_family = \"wasm\")".dev-dependencies.wasm-bindgen-test] +version = "0.3" diff --git a/.cargo-vendor/concurrent-queue/LICENSE-APACHE b/.cargo-vendor/concurrent-queue/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/.cargo-vendor/concurrent-queue/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo-vendor/concurrent-queue/LICENSE-MIT b/.cargo-vendor/concurrent-queue/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/.cargo-vendor/concurrent-queue/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo-vendor/concurrent-queue/README.md b/.cargo-vendor/concurrent-queue/README.md new file mode 100644 index 0000000000..dfa9871d99 --- /dev/null +++ b/.cargo-vendor/concurrent-queue/README.md @@ -0,0 +1,51 @@ +# concurrent-queue + +[![Build](https://github.com/smol-rs/concurrent-queue/workflows/Build%20and%20test/badge.svg)]( +https://github.com/smol-rs/concurrent-queue/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/concurrent-queue) +[![Cargo](https://img.shields.io/crates/v/concurrent-queue.svg)]( +https://crates.io/crates/concurrent-queue) +[![Documentation](https://docs.rs/concurrent-queue/badge.svg)]( +https://docs.rs/concurrent-queue) + +A concurrent multi-producer multi-consumer queue. + +There are two kinds of queues: + +1. Bounded queue with limited capacity. +2. Unbounded queue with unlimited capacity. + +Queues also have the capability to get closed at any point. When closed, no more items can be +pushed into the queue, although the remaining items can still be popped. + +These features make it easy to build channels similar to `std::sync::mpsc` on top of this +crate. + +## Examples + +```rust +use concurrent_queue::ConcurrentQueue; + +let q = ConcurrentQueue::unbounded(); +q.push(1).unwrap(); +q.push(2).unwrap(); + +assert_eq!(q.pop(), Ok(1)); +assert_eq!(q.pop(), Ok(2)); +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/.cargo-vendor/concurrent-queue/benches/bench.rs b/.cargo-vendor/concurrent-queue/benches/bench.rs new file mode 100644 index 0000000000..6e82019dda --- /dev/null +++ b/.cargo-vendor/concurrent-queue/benches/bench.rs @@ -0,0 +1,93 @@ +use std::{any::type_name, fmt::Debug}; + +use concurrent_queue::{ConcurrentQueue, PopError}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use easy_parallel::Parallel; + +const COUNT: usize = 100_000; +const THREADS: usize = 7; + +fn spsc(recv: &ConcurrentQueue, send: &ConcurrentQueue) { + Parallel::new() + .add(|| loop { + match recv.pop() { + Ok(_) => (), + Err(PopError::Empty) => (), + Err(PopError::Closed) => break, + } + }) + .add(|| { + for _ in 0..COUNT { + send.push(T::default()).unwrap(); + } + send.close(); + }) + .run(); +} + +fn mpsc(recv: &ConcurrentQueue, send: &ConcurrentQueue) { + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + send.push(T::default()).unwrap(); + } + }) + .add(|| { + let mut recieved = 0; + while recieved < THREADS * COUNT { + match recv.pop() { + Ok(_) => recieved += 1, + Err(PopError::Empty) => (), + Err(PopError::Closed) => unreachable!(), + } + } + }) + .run(); +} + +fn single_thread( + recv: &ConcurrentQueue, + send: &ConcurrentQueue, +) { + for _ in 0..COUNT { + send.push(T::default()).unwrap(); + } + for _ in 0..COUNT { + recv.pop().unwrap(); + } +} + +// Because we can't pass generic functions as const parameters. +macro_rules! 
bench_all( + ($name:ident, $f:ident) => { + fn $name(c: &mut Criterion) { + fn helper(c: &mut Criterion) { + let name = format!("unbounded_{}_{}", stringify!($f), type_name::()); + + c.bench_function(&name, |b| b.iter(|| { + let q = ConcurrentQueue::unbounded(); + $f::(black_box(&q), black_box(&q)); + })); + + let name = format!("bounded_{}_{}", stringify!($f), type_name::()); + + c.bench_function(&name, |b| b.iter(|| { + let q = ConcurrentQueue::bounded(THREADS * COUNT); + $f::(black_box(&q), black_box(&q)); + })); + } + helper::(c); + helper::(c); + helper::(c); + helper::(c); + helper::(c); + } + } +); + +bench_all!(bench_spsc, spsc); +bench_all!(bench_mpsc, mpsc); +bench_all!(bench_single_thread, single_thread); + +criterion_group!(generic_group, bench_single_thread, bench_spsc, bench_mpsc); +criterion_main!(generic_group); diff --git a/.cargo-vendor/concurrent-queue/src/bounded.rs b/.cargo-vendor/concurrent-queue/src/bounded.rs new file mode 100644 index 0000000000..dab3a2953b --- /dev/null +++ b/.cargo-vendor/concurrent-queue/src/bounded.rs @@ -0,0 +1,408 @@ +use alloc::{boxed::Box, vec::Vec}; +use core::mem::MaybeUninit; + +use crossbeam_utils::CachePadded; + +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::cell::UnsafeCell; +#[allow(unused_imports)] +use crate::sync::prelude::*; +use crate::{busy_wait, ForcePushError, PopError, PushError}; + +/// A slot in a queue. +struct Slot { + /// The current stamp. + stamp: AtomicUsize, + + /// The value in this slot. + value: UnsafeCell>, +} + +/// A bounded queue. +pub struct Bounded { + /// The head of the queue. + /// + /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but + /// packed into a single `usize`. The lower bits represent the index, while the upper bits + /// represent the lap. The mark bit in the head is always zero. + /// + /// Values are popped from the head of the queue. + head: CachePadded, + + /// The tail of the queue. 
+ /// + /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but + /// packed into a single `usize`. The lower bits represent the index, while the upper bits + /// represent the lap. The mark bit indicates that the queue is closed. + /// + /// Values are pushed into the tail of the queue. + tail: CachePadded, + + /// The buffer holding slots. + buffer: Box<[Slot]>, + + /// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`. + one_lap: usize, + + /// If this bit is set in the tail, that means the queue is closed. + mark_bit: usize, +} + +impl Bounded { + /// Creates a new bounded queue. + pub fn new(cap: usize) -> Bounded { + assert!(cap > 0, "capacity must be positive"); + + // Head is initialized to `{ lap: 0, mark: 0, index: 0 }`. + let head = 0; + // Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`. + let tail = 0; + + // Allocate a buffer of `cap` slots initialized with stamps. + let mut buffer = Vec::with_capacity(cap); + for i in 0..cap { + // Set the stamp to `{ lap: 0, mark: 0, index: i }`. + buffer.push(Slot { + stamp: AtomicUsize::new(i), + value: UnsafeCell::new(MaybeUninit::uninit()), + }); + } + + // Compute constants `mark_bit` and `one_lap`. + let mark_bit = (cap + 1).next_power_of_two(); + let one_lap = mark_bit * 2; + + Bounded { + buffer: buffer.into(), + one_lap, + mark_bit, + head: CachePadded::new(AtomicUsize::new(head)), + tail: CachePadded::new(AtomicUsize::new(tail)), + } + } + + /// Attempts to push an item into the queue. + pub fn push(&self, value: T) -> Result<(), PushError> { + self.push_or_else(value, |value, tail, _, _| { + let head = self.head.load(Ordering::Relaxed); + + // If the head lags one lap behind the tail as well... + if head.wrapping_add(self.one_lap) == tail { + // ...then the queue is full. + Err(PushError::Full(value)) + } else { + Ok(value) + } + }) + } + + /// Pushes an item into the queue, displacing another item if needed. 
+ pub fn force_push(&self, value: T) -> Result, ForcePushError> { + let result = self.push_or_else(value, |value, tail, new_tail, slot| { + let head = tail.wrapping_sub(self.one_lap); + let new_head = new_tail.wrapping_sub(self.one_lap); + + // Try to move the head. + if self + .head + .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Relaxed) + .is_ok() + { + // Move the tail. + self.tail.store(new_tail, Ordering::SeqCst); + + // Swap out the old value. + // SAFETY: We know this is initialized, since it's covered by the current queue. + let old = unsafe { + slot.value + .with_mut(|slot| slot.replace(MaybeUninit::new(value)).assume_init()) + }; + + // Update the stamp. + slot.stamp.store(tail + 1, Ordering::Release); + + // Return a PushError. + Err(PushError::Full(old)) + } else { + Ok(value) + } + }); + + match result { + Ok(()) => Ok(None), + Err(PushError::Full(old_value)) => Ok(Some(old_value)), + Err(PushError::Closed(value)) => Err(ForcePushError(value)), + } + } + + /// Attempts to push an item into the queue, running a closure on failure. + /// + /// `fail` is run when there is no more room left in the tail of the queue. The parameters of + /// this function are as follows: + /// + /// - The item that failed to push. + /// - The value of `self.tail` before the new value would be inserted. + /// - The value of `self.tail` after the new value would be inserted. + /// - The slot that we attempted to push into. + /// + /// If `fail` returns `Ok(val)`, we will try pushing `val` to the head of the queue. Otherwise, + /// this function will return the error. + fn push_or_else(&self, mut value: T, mut fail: F) -> Result<(), PushError> + where + F: FnMut(T, usize, usize, &Slot) -> Result>, + { + let mut tail = self.tail.load(Ordering::Relaxed); + + loop { + // Check if the queue is closed. + if tail & self.mark_bit != 0 { + return Err(PushError::Closed(value)); + } + + // Deconstruct the tail. 
+ let index = tail & (self.mark_bit - 1); + let lap = tail & !(self.one_lap - 1); + + // Calculate the new location of the tail. + let new_tail = if index + 1 < self.buffer.len() { + // Same lap, incremented index. + // Set to `{ lap: lap, mark: 0, index: index + 1 }`. + tail + 1 + } else { + // One lap forward, index wraps around to zero. + // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. + lap.wrapping_add(self.one_lap) + }; + + // Inspect the corresponding slot. + let slot = &self.buffer[index]; + let stamp = slot.stamp.load(Ordering::Acquire); + + // If the tail and the stamp match, we may attempt to push. + if tail == stamp { + // Try moving the tail. + match self.tail.compare_exchange_weak( + tail, + new_tail, + Ordering::SeqCst, + Ordering::Relaxed, + ) { + Ok(_) => { + // Write the value into the slot and update the stamp. + slot.value.with_mut(|slot| unsafe { + slot.write(MaybeUninit::new(value)); + }); + slot.stamp.store(tail + 1, Ordering::Release); + return Ok(()); + } + Err(t) => { + tail = t; + } + } + } else if stamp.wrapping_add(self.one_lap) == tail + 1 { + crate::full_fence(); + + // We've failed to push; run our failure closure. + value = fail(value, tail, new_tail, slot)?; + + // Loom complains if there isn't an explicit busy wait here. + #[cfg(loom)] + busy_wait(); + + tail = self.tail.load(Ordering::Relaxed); + } else { + // Yield because we need to wait for the stamp to get updated. + busy_wait(); + tail = self.tail.load(Ordering::Relaxed); + } + } + } + + /// Attempts to pop an item from the queue. + pub fn pop(&self) -> Result { + let mut head = self.head.load(Ordering::Relaxed); + + loop { + // Deconstruct the head. + let index = head & (self.mark_bit - 1); + let lap = head & !(self.one_lap - 1); + + // Inspect the corresponding slot. + let slot = &self.buffer[index]; + let stamp = slot.stamp.load(Ordering::Acquire); + + // If the the stamp is ahead of the head by 1, we may attempt to pop. 
+ if head + 1 == stamp { + let new = if index + 1 < self.buffer.len() { + // Same lap, incremented index. + // Set to `{ lap: lap, mark: 0, index: index + 1 }`. + head + 1 + } else { + // One lap forward, index wraps around to zero. + // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. + lap.wrapping_add(self.one_lap) + }; + + // Try moving the head. + match self.head.compare_exchange_weak( + head, + new, + Ordering::SeqCst, + Ordering::Relaxed, + ) { + Ok(_) => { + // Read the value from the slot and update the stamp. + let value = slot + .value + .with_mut(|slot| unsafe { slot.read().assume_init() }); + slot.stamp + .store(head.wrapping_add(self.one_lap), Ordering::Release); + return Ok(value); + } + Err(h) => { + head = h; + } + } + } else if stamp == head { + crate::full_fence(); + let tail = self.tail.load(Ordering::Relaxed); + + // If the tail equals the head, that means the queue is empty. + if (tail & !self.mark_bit) == head { + // Check if the queue is closed. + if tail & self.mark_bit != 0 { + return Err(PopError::Closed); + } else { + return Err(PopError::Empty); + } + } + + // Loom complains if there isn't a busy-wait here. + #[cfg(loom)] + busy_wait(); + + head = self.head.load(Ordering::Relaxed); + } else { + // Yield because we need to wait for the stamp to get updated. + busy_wait(); + head = self.head.load(Ordering::Relaxed); + } + } + } + + /// Returns the number of items in the queue. + pub fn len(&self) -> usize { + loop { + // Load the tail, then load the head. + let tail = self.tail.load(Ordering::SeqCst); + let head = self.head.load(Ordering::SeqCst); + + // If the tail didn't change, we've got consistent values to work with. 
+ if self.tail.load(Ordering::SeqCst) == tail { + let hix = head & (self.mark_bit - 1); + let tix = tail & (self.mark_bit - 1); + + return if hix < tix { + tix - hix + } else if hix > tix { + self.buffer.len() - hix + tix + } else if (tail & !self.mark_bit) == head { + 0 + } else { + self.buffer.len() + }; + } + } + } + + /// Returns `true` if the queue is empty. + pub fn is_empty(&self) -> bool { + let head = self.head.load(Ordering::SeqCst); + let tail = self.tail.load(Ordering::SeqCst); + + // Is the tail equal to the head? + // + // Note: If the head changes just before we load the tail, that means there was a moment + // when the queue was not empty, so it is safe to just return `false`. + (tail & !self.mark_bit) == head + } + + /// Returns `true` if the queue is full. + pub fn is_full(&self) -> bool { + let tail = self.tail.load(Ordering::SeqCst); + let head = self.head.load(Ordering::SeqCst); + + // Is the head lagging one lap behind tail? + // + // Note: If the tail changes just before we load the head, that means there was a moment + // when the queue was not full, so it is safe to just return `false`. + head.wrapping_add(self.one_lap) == tail & !self.mark_bit + } + + /// Returns the capacity of the queue. + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue. + pub fn close(&self) -> bool { + let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst); + tail & self.mark_bit == 0 + } + + /// Returns `true` if the queue is closed. + pub fn is_closed(&self) -> bool { + self.tail.load(Ordering::SeqCst) & self.mark_bit != 0 + } +} + +impl Drop for Bounded { + fn drop(&mut self) { + // Get the index of the head. + let Self { + head, + tail, + buffer, + mark_bit, + .. 
+ } = self; + + let mark_bit = *mark_bit; + + head.with_mut(|&mut head| { + tail.with_mut(|&mut tail| { + let hix = head & (mark_bit - 1); + let tix = tail & (mark_bit - 1); + + let len = if hix < tix { + tix - hix + } else if hix > tix { + buffer.len() - hix + tix + } else if (tail & !mark_bit) == head { + 0 + } else { + buffer.len() + }; + + // Loop over all slots that hold a value and drop them. + for i in 0..len { + // Compute the index of the next slot holding a value. + let index = if hix + i < buffer.len() { + hix + i + } else { + hix + i - buffer.len() + }; + + // Drop the value in the slot. + let slot = &buffer[index]; + slot.value.with_mut(|slot| unsafe { + let value = &mut *slot; + value.as_mut_ptr().drop_in_place(); + }); + } + }); + }); + } +} diff --git a/.cargo-vendor/concurrent-queue/src/lib.rs b/.cargo-vendor/concurrent-queue/src/lib.rs new file mode 100644 index 0000000000..a4d26b501f --- /dev/null +++ b/.cargo-vendor/concurrent-queue/src/lib.rs @@ -0,0 +1,660 @@ +//! A concurrent multi-producer multi-consumer queue. +//! +//! There are two kinds of queues: +//! +//! 1. [Bounded] queue with limited capacity. +//! 2. [Unbounded] queue with unlimited capacity. +//! +//! Queues also have the capability to get [closed] at any point. When closed, no more items can be +//! pushed into the queue, although the remaining items can still be popped. +//! +//! These features make it easy to build channels similar to [`std::sync::mpsc`] on top of this +//! crate. +//! +//! # Examples +//! +//! ``` +//! use concurrent_queue::ConcurrentQueue; +//! +//! let q = ConcurrentQueue::unbounded(); +//! q.push(1).unwrap(); +//! q.push(2).unwrap(); +//! +//! assert_eq!(q.pop(), Ok(1)); +//! assert_eq!(q.pop(), Ok(2)); +//! ``` +//! +//! # Features +//! +//! `concurrent-queue` uses an `std` default feature. With this feature enabled, this crate will +//! use [`std::thread::yield_now`] to avoid busy waiting in tight loops. However, with this +//! 
feature disabled, [`core::hint::spin_loop`] will be used instead. Disabling `std` will allow +//! this crate to be used on `no_std` platforms at the potential expense of more busy waiting. +//! +//! There is also a `portable-atomic` feature, which uses a polyfill from the +//! [`portable-atomic`] crate to provide atomic operations on platforms that do not support them. +//! See the [`README`] for the [`portable-atomic`] crate for more information on how to use it. +//! Note that even with this feature enabled, `concurrent-queue` still requires a global allocator +//! to be available. See the documentation for the [`std::alloc::GlobalAlloc`] trait for more +//! information. +//! +//! [Bounded]: `ConcurrentQueue::bounded()` +//! [Unbounded]: `ConcurrentQueue::unbounded()` +//! [closed]: `ConcurrentQueue::close()` +//! [`portable-atomic`]: https://crates.io/crates/portable-atomic +//! [`README`]: https://github.com/taiki-e/portable-atomic/blob/main/README.md#optional-cfg + +#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![no_std] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; + +use core::fmt; +use core::panic::{RefUnwindSafe, UnwindSafe}; +use sync::atomic::{self, Ordering}; + +#[cfg(feature = "std")] +use std::error; + +use crate::bounded::Bounded; +use crate::single::Single; +use crate::sync::busy_wait; +use crate::unbounded::Unbounded; + +mod bounded; +mod single; +mod unbounded; + +mod sync; + +/// Make the given function const if the given condition is true. +macro_rules! 
const_fn { + ( + const_if: #[cfg($($cfg:tt)+)]; + $(#[$($attr:tt)*])* + $vis:vis const fn $($rest:tt)* + ) => { + #[cfg($($cfg)+)] + $(#[$($attr)*])* + $vis const fn $($rest)* + #[cfg(not($($cfg)+))] + $(#[$($attr)*])* + $vis fn $($rest)* + }; +} + +pub(crate) use const_fn; + +/// A concurrent queue. +/// +/// # Examples +/// +/// ``` +/// use concurrent_queue::{ConcurrentQueue, PopError, PushError}; +/// +/// let q = ConcurrentQueue::bounded(2); +/// +/// assert_eq!(q.push('a'), Ok(())); +/// assert_eq!(q.push('b'), Ok(())); +/// assert_eq!(q.push('c'), Err(PushError::Full('c'))); +/// +/// assert_eq!(q.pop(), Ok('a')); +/// assert_eq!(q.pop(), Ok('b')); +/// assert_eq!(q.pop(), Err(PopError::Empty)); +/// ``` +pub struct ConcurrentQueue(Inner); + +unsafe impl Send for ConcurrentQueue {} +unsafe impl Sync for ConcurrentQueue {} + +impl UnwindSafe for ConcurrentQueue {} +impl RefUnwindSafe for ConcurrentQueue {} + +#[allow(clippy::large_enum_variant)] +enum Inner { + Single(Single), + Bounded(Bounded), + Unbounded(Unbounded), +} + +impl ConcurrentQueue { + /// Creates a new bounded queue. + /// + /// The queue allocates enough space for `cap` items. + /// + /// # Panics + /// + /// If the capacity is zero, this constructor will panic. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::bounded(100); + /// ``` + pub fn bounded(cap: usize) -> ConcurrentQueue { + if cap == 1 { + ConcurrentQueue(Inner::Single(Single::new())) + } else { + ConcurrentQueue(Inner::Bounded(Bounded::new(cap))) + } + } + + const_fn!( + const_if: #[cfg(not(loom))]; + /// Creates a new unbounded queue. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::unbounded(); + /// ``` + pub const fn unbounded() -> ConcurrentQueue { + ConcurrentQueue(Inner::Unbounded(Unbounded::new())) + } + ); + + /// Attempts to push an item into the queue. 
+ /// + /// If the queue is full or closed, the item is returned back as an error. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, PushError}; + /// + /// let q = ConcurrentQueue::bounded(1); + /// + /// // Push succeeds because there is space in the queue. + /// assert_eq!(q.push(10), Ok(())); + /// + /// // Push errors because the queue is now full. + /// assert_eq!(q.push(20), Err(PushError::Full(20))); + /// + /// // Close the queue, which will prevent further pushes. + /// q.close(); + /// + /// // Pushing now errors indicating the queue is closed. + /// assert_eq!(q.push(20), Err(PushError::Closed(20))); + /// + /// // Pop the single item in the queue. + /// assert_eq!(q.pop(), Ok(10)); + /// + /// // Even though there is space, no more items can be pushed. + /// assert_eq!(q.push(20), Err(PushError::Closed(20))); + /// ``` + pub fn push(&self, value: T) -> Result<(), PushError> { + match &self.0 { + Inner::Single(q) => q.push(value), + Inner::Bounded(q) => q.push(value), + Inner::Unbounded(q) => q.push(value), + } + } + + /// Push an element into the queue, potentially displacing another element. + /// + /// Attempts to push an element into the queue. If the queue is full, one item from the + /// queue is replaced with the provided item. The displaced item is returned as `Some(T)`. + /// If the queue is closed, an error is returned. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, ForcePushError, PushError}; + /// + /// let q = ConcurrentQueue::bounded(3); + /// + /// // We can push to the queue. + /// for i in 1..=3 { + /// assert_eq!(q.force_push(i), Ok(None)); + /// } + /// + /// // Push errors because the queue is now full. + /// assert_eq!(q.push(4), Err(PushError::Full(4))); + /// + /// // Pushing a new value replaces the old ones. + /// assert_eq!(q.force_push(5), Ok(Some(1))); + /// assert_eq!(q.force_push(6), Ok(Some(2))); + /// + /// // Close the queue to stop further pushes. 
+ /// q.close(); + /// + /// // Pushing will return an error. + /// assert_eq!(q.force_push(7), Err(ForcePushError(7))); + /// + /// // Popping items will return the force-pushed ones. + /// assert_eq!(q.pop(), Ok(3)); + /// assert_eq!(q.pop(), Ok(5)); + /// assert_eq!(q.pop(), Ok(6)); + /// ``` + pub fn force_push(&self, value: T) -> Result, ForcePushError> { + match &self.0 { + Inner::Single(q) => q.force_push(value), + Inner::Bounded(q) => q.force_push(value), + Inner::Unbounded(q) => match q.push(value) { + Ok(()) => Ok(None), + Err(PushError::Closed(value)) => Err(ForcePushError(value)), + Err(PushError::Full(_)) => unreachable!(), + }, + } + } + + /// Attempts to pop an item from the queue. + /// + /// If the queue is empty, an error is returned. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, PopError}; + /// + /// let q = ConcurrentQueue::bounded(1); + /// + /// // Pop errors when the queue is empty. + /// assert_eq!(q.pop(), Err(PopError::Empty)); + /// + /// // Push one item and close the queue. + /// assert_eq!(q.push(10), Ok(())); + /// q.close(); + /// + /// // Remaining items can be popped. + /// assert_eq!(q.pop(), Ok(10)); + /// + /// // Again, pop errors when the queue is empty, + /// // but now also indicates that the queue is closed. + /// assert_eq!(q.pop(), Err(PopError::Closed)); + /// ``` + pub fn pop(&self) -> Result { + match &self.0 { + Inner::Single(q) => q.pop(), + Inner::Bounded(q) => q.pop(), + Inner::Unbounded(q) => q.pop(), + } + } + + /// Get an iterator over the items in the queue. + /// + /// The iterator will continue until the queue is empty or closed. It will never block; + /// if the queue is empty, the iterator will return `None`. If new items are pushed into + /// the queue, the iterator may return `Some` in the future after returning `None`. 
+ /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::bounded(5); + /// q.push(1).unwrap(); + /// q.push(2).unwrap(); + /// q.push(3).unwrap(); + /// + /// let mut iter = q.try_iter(); + /// assert_eq!(iter.by_ref().sum::(), 6); + /// assert_eq!(iter.next(), None); + /// + /// // Pushing more items will make them available to the iterator. + /// q.push(4).unwrap(); + /// assert_eq!(iter.next(), Some(4)); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn try_iter(&self) -> TryIter<'_, T> { + TryIter { queue: self } + } + + /// Returns `true` if the queue is empty. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::unbounded(); + /// + /// assert!(q.is_empty()); + /// q.push(1).unwrap(); + /// assert!(!q.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.is_empty(), + Inner::Bounded(q) => q.is_empty(), + Inner::Unbounded(q) => q.is_empty(), + } + } + + /// Returns `true` if the queue is full. + /// + /// An unbounded queue is never full. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::bounded(1); + /// + /// assert!(!q.is_full()); + /// q.push(1).unwrap(); + /// assert!(q.is_full()); + /// ``` + pub fn is_full(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.is_full(), + Inner::Bounded(q) => q.is_full(), + Inner::Unbounded(q) => q.is_full(), + } + } + + /// Returns the number of items in the queue. 
+ /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::unbounded(); + /// assert_eq!(q.len(), 0); + /// + /// assert_eq!(q.push(10), Ok(())); + /// assert_eq!(q.len(), 1); + /// + /// assert_eq!(q.push(20), Ok(())); + /// assert_eq!(q.len(), 2); + /// ``` + pub fn len(&self) -> usize { + match &self.0 { + Inner::Single(q) => q.len(), + Inner::Bounded(q) => q.len(), + Inner::Unbounded(q) => q.len(), + } + } + + /// Returns the capacity of the queue. + /// + /// Unbounded queues have infinite capacity, represented as [`None`]. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::bounded(7); + /// assert_eq!(q.capacity(), Some(7)); + /// + /// let q = ConcurrentQueue::::unbounded(); + /// assert_eq!(q.capacity(), None); + /// ``` + pub fn capacity(&self) -> Option { + match &self.0 { + Inner::Single(_) => Some(1), + Inner::Bounded(q) => Some(q.capacity()), + Inner::Unbounded(_) => None, + } + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue, or `false` if it was already closed. + /// + /// When a queue is closed, no more items can be pushed but the remaining items can still be + /// popped. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::{ConcurrentQueue, PopError, PushError}; + /// + /// let q = ConcurrentQueue::unbounded(); + /// assert_eq!(q.push(10), Ok(())); + /// + /// assert!(q.close()); // `true` because this call closes the queue. + /// assert!(!q.close()); // `false` because the queue is already closed. + /// + /// // Cannot push any more items when closed. + /// assert_eq!(q.push(20), Err(PushError::Closed(20))); + /// + /// // Remaining items can still be popped. + /// assert_eq!(q.pop(), Ok(10)); + /// + /// // When no more items are present, the error is `Closed`. 
+ /// assert_eq!(q.pop(), Err(PopError::Closed)); + /// ``` + pub fn close(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.close(), + Inner::Bounded(q) => q.close(), + Inner::Unbounded(q) => q.close(), + } + } + + /// Returns `true` if the queue is closed. + /// + /// # Examples + /// + /// ``` + /// use concurrent_queue::ConcurrentQueue; + /// + /// let q = ConcurrentQueue::::unbounded(); + /// + /// assert!(!q.is_closed()); + /// q.close(); + /// assert!(q.is_closed()); + /// ``` + pub fn is_closed(&self) -> bool { + match &self.0 { + Inner::Single(q) => q.is_closed(), + Inner::Bounded(q) => q.is_closed(), + Inner::Unbounded(q) => q.is_closed(), + } + } +} + +impl fmt::Debug for ConcurrentQueue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ConcurrentQueue") + .field("len", &self.len()) + .field("capacity", &self.capacity()) + .field("is_closed", &self.is_closed()) + .finish() + } +} + +/// An iterator that pops items from a [`ConcurrentQueue`]. +/// +/// This iterator will never block; it will return `None` once the queue has +/// been exhausted. Calling `next` after `None` may yield `Some(item)` if more items +/// are pushed to the queue. +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[derive(Clone)] +pub struct TryIter<'a, T> { + queue: &'a ConcurrentQueue, +} + +impl fmt::Debug for TryIter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Iter").field(&self.queue).finish() + } +} + +impl Iterator for TryIter<'_, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.queue.pop().ok() + } +} + +/// Error which occurs when popping from an empty queue. +#[derive(Clone, Copy, Eq, PartialEq)] +pub enum PopError { + /// The queue is empty but not closed. + Empty, + + /// The queue is empty and closed. + Closed, +} + +impl PopError { + /// Returns `true` if the queue is empty but not closed. 
+ pub fn is_empty(&self) -> bool { + match self { + PopError::Empty => true, + PopError::Closed => false, + } + } + + /// Returns `true` if the queue is empty and closed. + pub fn is_closed(&self) -> bool { + match self { + PopError::Empty => false, + PopError::Closed => true, + } + } +} + +#[cfg(feature = "std")] +impl error::Error for PopError {} + +impl fmt::Debug for PopError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PopError::Empty => write!(f, "Empty"), + PopError::Closed => write!(f, "Closed"), + } + } +} + +impl fmt::Display for PopError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PopError::Empty => write!(f, "Empty"), + PopError::Closed => write!(f, "Closed"), + } + } +} + +/// Error which occurs when pushing into a full or closed queue. +#[derive(Clone, Copy, Eq, PartialEq)] +pub enum PushError { + /// The queue is full but not closed. + Full(T), + + /// The queue is closed. + Closed(T), +} + +impl PushError { + /// Unwraps the item that couldn't be pushed. + pub fn into_inner(self) -> T { + match self { + PushError::Full(t) => t, + PushError::Closed(t) => t, + } + } + + /// Returns `true` if the queue is full but not closed. + pub fn is_full(&self) -> bool { + match self { + PushError::Full(_) => true, + PushError::Closed(_) => false, + } + } + + /// Returns `true` if the queue is closed. 
+ pub fn is_closed(&self) -> bool { + match self { + PushError::Full(_) => false, + PushError::Closed(_) => true, + } + } +} + +#[cfg(feature = "std")] +impl error::Error for PushError {} + +impl fmt::Debug for PushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PushError::Full(t) => f.debug_tuple("Full").field(t).finish(), + PushError::Closed(t) => f.debug_tuple("Closed").field(t).finish(), + } + } +} + +impl fmt::Display for PushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PushError::Full(_) => write!(f, "Full"), + PushError::Closed(_) => write!(f, "Closed"), + } + } +} + +/// Error that occurs when force-pushing into a full queue. +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct ForcePushError(pub T); + +impl ForcePushError { + /// Return the inner value that failed to be force-pushed. + pub fn into_inner(self) -> T { + self.0 + } +} + +impl fmt::Debug for ForcePushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("ForcePushError").field(&self.0).finish() + } +} + +impl fmt::Display for ForcePushError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Closed") + } +} + +#[cfg(feature = "std")] +impl error::Error for ForcePushError {} + +/// Equivalent to `atomic::fence(Ordering::SeqCst)`, but in some cases faster. +#[inline] +fn full_fence() { + #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri), not(loom)))] + { + use core::{arch::asm, cell::UnsafeCell}; + // HACK(stjepang): On x86 architectures there are two different ways of executing + // a `SeqCst` fence. + // + // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. + // 2. A `lock ` instruction. + // + // Both instructions have the effect of a full barrier, but empirical benchmarks have shown + // that the second one is sometimes a bit faster. 
+ let a = UnsafeCell::new(0_usize); + // It is common to use `lock or` here, but when using a local variable, `lock not`, which + // does not change the flag, should be slightly more efficient. + // Refs: https://www.felixcloutier.com/x86/not + unsafe { + #[cfg(target_pointer_width = "64")] + asm!("lock not qword ptr [{0}]", in(reg) a.get(), options(nostack, preserves_flags)); + #[cfg(target_pointer_width = "32")] + asm!("lock not dword ptr [{0:e}]", in(reg) a.get(), options(nostack, preserves_flags)); + } + return; + } + #[allow(unreachable_code)] + { + atomic::fence(Ordering::SeqCst); + } +} diff --git a/.cargo-vendor/concurrent-queue/src/single.rs b/.cargo-vendor/concurrent-queue/src/single.rs new file mode 100644 index 0000000000..f88c4783a0 --- /dev/null +++ b/.cargo-vendor/concurrent-queue/src/single.rs @@ -0,0 +1,187 @@ +use core::mem::MaybeUninit; +use core::ptr; + +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::cell::UnsafeCell; +#[allow(unused_imports)] +use crate::sync::prelude::*; +use crate::{busy_wait, ForcePushError, PopError, PushError}; + +const LOCKED: usize = 1 << 0; +const PUSHED: usize = 1 << 1; +const CLOSED: usize = 1 << 2; + +/// A single-element queue. +pub struct Single { + state: AtomicUsize, + slot: UnsafeCell>, +} + +impl Single { + /// Creates a new single-element queue. + pub fn new() -> Single { + Single { + state: AtomicUsize::new(0), + slot: UnsafeCell::new(MaybeUninit::uninit()), + } + } + + /// Attempts to push an item into the queue. + pub fn push(&self, value: T) -> Result<(), PushError> { + // Lock and fill the slot. + let state = self + .state + .compare_exchange(0, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst) + .unwrap_or_else(|x| x); + + if state == 0 { + // Write the value and unlock. 
+ self.slot.with_mut(|slot| unsafe { + slot.write(MaybeUninit::new(value)); + }); + self.state.fetch_and(!LOCKED, Ordering::Release); + Ok(()) + } else if state & CLOSED != 0 { + Err(PushError::Closed(value)) + } else { + Err(PushError::Full(value)) + } + } + + /// Attempts to push an item into the queue, displacing another if necessary. + pub fn force_push(&self, value: T) -> Result, ForcePushError> { + // Attempt to lock the slot. + let mut state = 0; + + loop { + // Lock the slot. + let prev = self + .state + .compare_exchange(state, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst) + .unwrap_or_else(|x| x); + + if prev & CLOSED != 0 { + return Err(ForcePushError(value)); + } + + if prev == state { + // If the value was pushed, swap out the value. + let prev_value = if prev & PUSHED == 0 { + // SAFETY: write is safe because we have locked the state. + self.slot.with_mut(|slot| unsafe { + slot.write(MaybeUninit::new(value)); + }); + None + } else { + // SAFETY: replace is safe because we have locked the state, and + // assume_init is safe because we have checked that the value was pushed. + let prev_value = unsafe { + self.slot.with_mut(move |slot| { + ptr::replace(slot, MaybeUninit::new(value)).assume_init() + }) + }; + Some(prev_value) + }; + + // We can unlock the slot now. + self.state.fetch_and(!LOCKED, Ordering::Release); + + // Return the old value. + return Ok(prev_value); + } + + // Try to go for the current (pushed) state. + if prev & LOCKED == 0 { + state = prev; + } else { + // State is locked. + busy_wait(); + state = prev & !LOCKED; + } + } + } + + /// Attempts to pop an item from the queue. + pub fn pop(&self) -> Result { + let mut state = PUSHED; + loop { + // Lock and empty the slot. + let prev = self + .state + .compare_exchange( + state, + (state | LOCKED) & !PUSHED, + Ordering::SeqCst, + Ordering::SeqCst, + ) + .unwrap_or_else(|x| x); + + if prev == state { + // Read the value and unlock. 
+ let value = self + .slot + .with_mut(|slot| unsafe { slot.read().assume_init() }); + self.state.fetch_and(!LOCKED, Ordering::Release); + return Ok(value); + } + + if prev & PUSHED == 0 { + if prev & CLOSED == 0 { + return Err(PopError::Empty); + } else { + return Err(PopError::Closed); + } + } + + if prev & LOCKED == 0 { + state = prev; + } else { + busy_wait(); + state = prev & !LOCKED; + } + } + } + + /// Returns the number of items in the queue. + pub fn len(&self) -> usize { + usize::from(self.state.load(Ordering::SeqCst) & PUSHED != 0) + } + + /// Returns `true` if the queue is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns `true` if the queue is full. + pub fn is_full(&self) -> bool { + self.len() == 1 + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue. + pub fn close(&self) -> bool { + let state = self.state.fetch_or(CLOSED, Ordering::SeqCst); + state & CLOSED == 0 + } + + /// Returns `true` if the queue is closed. + pub fn is_closed(&self) -> bool { + self.state.load(Ordering::SeqCst) & CLOSED != 0 + } +} + +impl Drop for Single { + fn drop(&mut self) { + // Drop the value in the slot. + let Self { state, slot } = self; + state.with_mut(|state| { + if *state & PUSHED != 0 { + slot.with_mut(|slot| unsafe { + let value = &mut *slot; + value.as_mut_ptr().drop_in_place(); + }); + } + }); + } +} diff --git a/.cargo-vendor/concurrent-queue/src/sync.rs b/.cargo-vendor/concurrent-queue/src/sync.rs new file mode 100644 index 0000000000..d1b0a89a1b --- /dev/null +++ b/.cargo-vendor/concurrent-queue/src/sync.rs @@ -0,0 +1,114 @@ +//! Synchronization facade to choose between `core` primitives and `loom` primitives. 
+ +#[cfg(all(feature = "portable-atomic", not(loom)))] +mod sync_impl { + pub(crate) use core::cell; + pub(crate) use portable_atomic as atomic; + + #[cfg(not(feature = "std"))] + pub(crate) use atomic::hint::spin_loop; + + #[cfg(feature = "std")] + pub(crate) use std::thread::yield_now; +} + +#[cfg(all(not(feature = "portable-atomic"), not(loom)))] +mod sync_impl { + pub(crate) use core::cell; + pub(crate) use core::sync::atomic; + + #[cfg(not(feature = "std"))] + #[inline] + pub(crate) fn spin_loop() { + #[allow(deprecated)] + atomic::spin_loop_hint(); + } + + #[cfg(feature = "std")] + pub(crate) use std::thread::yield_now; +} + +#[cfg(loom)] +mod sync_impl { + pub(crate) use loom::cell; + + pub(crate) mod atomic { + pub(crate) use loom::sync::atomic::*; + } + + #[cfg(not(feature = "std"))] + pub(crate) use loom::hint::spin_loop; + #[cfg(feature = "std")] + pub(crate) use loom::thread::yield_now; +} + +pub(crate) use sync_impl::*; + +/// Notify the CPU that we are currently busy-waiting. +#[inline] +pub(crate) fn busy_wait() { + #[cfg(feature = "std")] + yield_now(); + + #[cfg(not(feature = "std"))] + spin_loop(); +} + +#[cfg(loom)] +pub(crate) mod prelude {} + +#[cfg(not(loom))] +pub(crate) mod prelude { + use super::{atomic, cell}; + + /// Emulate `loom::UnsafeCell`'s API. + pub(crate) trait UnsafeCellExt { + type Value; + + fn with_mut(&self, f: F) -> R + where + F: FnOnce(*mut Self::Value) -> R; + } + + impl UnsafeCellExt for cell::UnsafeCell { + type Value = T; + + fn with_mut(&self, f: F) -> R + where + F: FnOnce(*mut Self::Value) -> R, + { + f(self.get()) + } + } + + /// Emulate `loom::Atomic*`'s API. 
+ pub(crate) trait AtomicExt { + type Value; + + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Value) -> R; + } + + impl AtomicExt for atomic::AtomicUsize { + type Value = usize; + + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Value) -> R, + { + f(self.get_mut()) + } + } + + impl AtomicExt for atomic::AtomicPtr { + type Value = *mut T; + + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Value) -> R, + { + f(self.get_mut()) + } + } +} diff --git a/.cargo-vendor/concurrent-queue/src/unbounded.rs b/.cargo-vendor/concurrent-queue/src/unbounded.rs new file mode 100644 index 0000000000..8e1c40d192 --- /dev/null +++ b/.cargo-vendor/concurrent-queue/src/unbounded.rs @@ -0,0 +1,452 @@ +use alloc::boxed::Box; +use core::mem::MaybeUninit; +use core::ptr; + +use crossbeam_utils::CachePadded; + +use crate::const_fn; +use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::sync::cell::UnsafeCell; +#[allow(unused_imports)] +use crate::sync::prelude::*; +use crate::{busy_wait, PopError, PushError}; + +// Bits indicating the state of a slot: +// * If a value has been written into the slot, `WRITE` is set. +// * If a value has been read from the slot, `READ` is set. +// * If the block is being destroyed, `DESTROY` is set. +const WRITE: usize = 1; +const READ: usize = 2; +const DESTROY: usize = 4; + +// Each block covers one "lap" of indices. +const LAP: usize = 32; +// The maximum number of items a block can hold. +const BLOCK_CAP: usize = LAP - 1; +// How many lower bits are reserved for metadata. +const SHIFT: usize = 1; +// Has two different purposes: +// * If set in head, indicates that the block is not the last one. +// * If set in tail, indicates that the queue is closed. +const MARK_BIT: usize = 1; + +/// A slot in a block. +struct Slot { + /// The value. + value: UnsafeCell>, + + /// The state of the slot. 
+ state: AtomicUsize, +} + +impl Slot { + #[cfg(not(loom))] + const UNINIT: Slot = Slot { + value: UnsafeCell::new(MaybeUninit::uninit()), + state: AtomicUsize::new(0), + }; + + #[cfg(not(loom))] + fn uninit_block() -> [Slot; BLOCK_CAP] { + [Self::UNINIT; BLOCK_CAP] + } + + #[cfg(loom)] + fn uninit_block() -> [Slot; BLOCK_CAP] { + // Repeat this expression 31 times. + // Update if we change BLOCK_CAP + macro_rules! repeat_31 { + ($e: expr) => { + [ + $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, + $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, + ] + }; + } + + repeat_31!(Slot { + value: UnsafeCell::new(MaybeUninit::uninit()), + state: AtomicUsize::new(0), + }) + } + + /// Waits until a value is written into the slot. + fn wait_write(&self) { + while self.state.load(Ordering::Acquire) & WRITE == 0 { + busy_wait(); + } + } +} + +/// A block in a linked list. +/// +/// Each block in the list can hold up to `BLOCK_CAP` values. +struct Block { + /// The next block in the linked list. + next: AtomicPtr>, + + /// Slots for values. + slots: [Slot; BLOCK_CAP], +} + +impl Block { + /// Creates an empty block. + fn new() -> Block { + Block { + next: AtomicPtr::new(ptr::null_mut()), + slots: Slot::uninit_block(), + } + } + + /// Waits until the next pointer is set. + fn wait_next(&self) -> *mut Block { + loop { + let next = self.next.load(Ordering::Acquire); + if !next.is_null() { + return next; + } + busy_wait(); + } + } + + /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. + unsafe fn destroy(this: *mut Block, start: usize) { + // It is not necessary to set the `DESTROY` bit in the last slot because that slot has + // begun destruction of the block. + for i in start..BLOCK_CAP - 1 { + let slot = (*this).slots.get_unchecked(i); + + // Mark the `DESTROY` bit if a thread is still using the slot. 
+ if slot.state.load(Ordering::Acquire) & READ == 0 + && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 + { + // If a thread is still using the slot, it will continue destruction of the block. + return; + } + } + + // No thread is using the block, now it is safe to destroy it. + drop(Box::from_raw(this)); + } +} + +/// A position in a queue. +struct Position { + /// The index in the queue. + index: AtomicUsize, + + /// The block in the linked list. + block: AtomicPtr>, +} + +/// An unbounded queue. +pub struct Unbounded { + /// The head of the queue. + head: CachePadded>, + + /// The tail of the queue. + tail: CachePadded>, +} + +impl Unbounded { + const_fn!( + const_if: #[cfg(not(loom))]; + /// Creates a new unbounded queue. + pub const fn new() -> Unbounded { + Unbounded { + head: CachePadded::new(Position { + block: AtomicPtr::new(ptr::null_mut()), + index: AtomicUsize::new(0), + }), + tail: CachePadded::new(Position { + block: AtomicPtr::new(ptr::null_mut()), + index: AtomicUsize::new(0), + }), + } + } + ); + + /// Pushes an item into the queue. + pub fn push(&self, value: T) -> Result<(), PushError> { + let mut tail = self.tail.index.load(Ordering::Acquire); + let mut block = self.tail.block.load(Ordering::Acquire); + let mut next_block = None; + + loop { + // Check if the queue is closed. + if tail & MARK_BIT != 0 { + return Err(PushError::Closed(value)); + } + + // Calculate the offset of the index into the block. + let offset = (tail >> SHIFT) % LAP; + + // If we reached the end of the block, wait until the next one is installed. + if offset == BLOCK_CAP { + busy_wait(); + tail = self.tail.index.load(Ordering::Acquire); + block = self.tail.block.load(Ordering::Acquire); + continue; + } + + // If we're going to have to install the next block, allocate it in advance in order to + // make the wait for other threads as short as possible. 
+ if offset + 1 == BLOCK_CAP && next_block.is_none() { + next_block = Some(Box::new(Block::::new())); + } + + // If this is the first value to be pushed into the queue, we need to allocate the + // first block and install it. + if block.is_null() { + let new = Box::into_raw(Box::new(Block::::new())); + + if self + .tail + .block + .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed) + .is_ok() + { + self.head.block.store(new, Ordering::Release); + block = new; + } else { + next_block = unsafe { Some(Box::from_raw(new)) }; + tail = self.tail.index.load(Ordering::Acquire); + block = self.tail.block.load(Ordering::Acquire); + continue; + } + } + + let new_tail = tail + (1 << SHIFT); + + // Try advancing the tail forward. + match self.tail.index.compare_exchange_weak( + tail, + new_tail, + Ordering::SeqCst, + Ordering::Acquire, + ) { + Ok(_) => unsafe { + // If we've reached the end of the block, install the next one. + if offset + 1 == BLOCK_CAP { + let next_block = Box::into_raw(next_block.unwrap()); + self.tail.block.store(next_block, Ordering::Release); + self.tail.index.fetch_add(1 << SHIFT, Ordering::Release); + (*block).next.store(next_block, Ordering::Release); + } + + // Write the value into the slot. + let slot = (*block).slots.get_unchecked(offset); + slot.value.with_mut(|slot| { + slot.write(MaybeUninit::new(value)); + }); + slot.state.fetch_or(WRITE, Ordering::Release); + return Ok(()); + }, + Err(t) => { + tail = t; + block = self.tail.block.load(Ordering::Acquire); + } + } + } + } + + /// Pops an item from the queue. + pub fn pop(&self) -> Result { + let mut head = self.head.index.load(Ordering::Acquire); + let mut block = self.head.block.load(Ordering::Acquire); + + loop { + // Calculate the offset of the index into the block. + let offset = (head >> SHIFT) % LAP; + + // If we reached the end of the block, wait until the next one is installed. 
+ if offset == BLOCK_CAP { + busy_wait(); + head = self.head.index.load(Ordering::Acquire); + block = self.head.block.load(Ordering::Acquire); + continue; + } + + let mut new_head = head + (1 << SHIFT); + + if new_head & MARK_BIT == 0 { + crate::full_fence(); + let tail = self.tail.index.load(Ordering::Relaxed); + + // If the tail equals the head, that means the queue is empty. + if head >> SHIFT == tail >> SHIFT { + // Check if the queue is closed. + if tail & MARK_BIT != 0 { + return Err(PopError::Closed); + } else { + return Err(PopError::Empty); + } + } + + // If head and tail are not in the same block, set `MARK_BIT` in head. + if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { + new_head |= MARK_BIT; + } + } + + // The block can be null here only if the first push operation is in progress. + if block.is_null() { + busy_wait(); + head = self.head.index.load(Ordering::Acquire); + block = self.head.block.load(Ordering::Acquire); + continue; + } + + // Try moving the head index forward. + match self.head.index.compare_exchange_weak( + head, + new_head, + Ordering::SeqCst, + Ordering::Acquire, + ) { + Ok(_) => unsafe { + // If we've reached the end of the block, move to the next one. + if offset + 1 == BLOCK_CAP { + let next = (*block).wait_next(); + let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT); + if !(*next).next.load(Ordering::Relaxed).is_null() { + next_index |= MARK_BIT; + } + + self.head.block.store(next, Ordering::Release); + self.head.index.store(next_index, Ordering::Release); + } + + // Read the value. + let slot = (*block).slots.get_unchecked(offset); + slot.wait_write(); + let value = slot.value.with_mut(|slot| slot.read().assume_init()); + + // Destroy the block if we've reached the end, or if another thread wanted to + // destroy but couldn't because we were busy reading from the slot. 
+ if offset + 1 == BLOCK_CAP { + Block::destroy(block, 0); + } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { + Block::destroy(block, offset + 1); + } + + return Ok(value); + }, + Err(h) => { + head = h; + block = self.head.block.load(Ordering::Acquire); + } + } + } + } + + /// Returns the number of items in the queue. + pub fn len(&self) -> usize { + loop { + // Load the tail index, then load the head index. + let mut tail = self.tail.index.load(Ordering::SeqCst); + let mut head = self.head.index.load(Ordering::SeqCst); + + // If the tail index didn't change, we've got consistent indices to work with. + if self.tail.index.load(Ordering::SeqCst) == tail { + // Erase the lower bits. + tail &= !((1 << SHIFT) - 1); + head &= !((1 << SHIFT) - 1); + + // Fix up indices if they fall onto block ends. + if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { + tail = tail.wrapping_add(1 << SHIFT); + } + if (head >> SHIFT) & (LAP - 1) == LAP - 1 { + head = head.wrapping_add(1 << SHIFT); + } + + // Rotate indices so that head falls into the first block. + let lap = (head >> SHIFT) / LAP; + tail = tail.wrapping_sub((lap * LAP) << SHIFT); + head = head.wrapping_sub((lap * LAP) << SHIFT); + + // Remove the lower bits. + tail >>= SHIFT; + head >>= SHIFT; + + // Return the difference minus the number of blocks between tail and head. + return tail - head - tail / LAP; + } + } + } + + /// Returns `true` if the queue is empty. + pub fn is_empty(&self) -> bool { + let head = self.head.index.load(Ordering::SeqCst); + let tail = self.tail.index.load(Ordering::SeqCst); + head >> SHIFT == tail >> SHIFT + } + + /// Returns `true` if the queue is full. + pub fn is_full(&self) -> bool { + false + } + + /// Closes the queue. + /// + /// Returns `true` if this call closed the queue. + pub fn close(&self) -> bool { + let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst); + tail & MARK_BIT == 0 + } + + /// Returns `true` if the queue is closed. 
+ pub fn is_closed(&self) -> bool { + self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0 + } +} + +impl Drop for Unbounded { + fn drop(&mut self) { + let Self { head, tail } = self; + let Position { index: head, block } = &mut **head; + + head.with_mut(|&mut mut head| { + tail.index.with_mut(|&mut mut tail| { + // Erase the lower bits. + head &= !((1 << SHIFT) - 1); + tail &= !((1 << SHIFT) - 1); + + unsafe { + // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. + while head != tail { + let offset = (head >> SHIFT) % LAP; + + if offset < BLOCK_CAP { + // Drop the value in the slot. + block.with_mut(|block| { + let slot = (**block).slots.get_unchecked(offset); + slot.value.with_mut(|slot| { + let value = &mut *slot; + value.as_mut_ptr().drop_in_place(); + }); + }); + } else { + // Deallocate the block and move to the next one. + block.with_mut(|block| { + let next_block = (**block).next.with_mut(|next| *next); + drop(Box::from_raw(*block)); + *block = next_block; + }); + } + + head = head.wrapping_add(1 << SHIFT); + } + + // Deallocate the last remaining block. 
+ block.with_mut(|block| { + if !block.is_null() { + drop(Box::from_raw(*block)); + } + }); + } + }); + }); + } +} diff --git a/.cargo-vendor/concurrent-queue/tests/bounded.rs b/.cargo-vendor/concurrent-queue/tests/bounded.rs new file mode 100644 index 0000000000..6f402b7f8b --- /dev/null +++ b/.cargo-vendor/concurrent-queue/tests/bounded.rs @@ -0,0 +1,371 @@ +#![allow(clippy::bool_assert_comparison)] + +use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError}; + +#[cfg(not(target_family = "wasm"))] +use easy_parallel::Parallel; +#[cfg(not(target_family = "wasm"))] +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn smoke() { + let q = ConcurrentQueue::bounded(2); + + q.push(7).unwrap(); + assert_eq!(q.pop(), Ok(7)); + + q.push(8).unwrap(); + assert_eq!(q.pop(), Ok(8)); + assert!(q.pop().is_err()); +} + +#[test] +fn capacity() { + for i in 1..10 { + let q = ConcurrentQueue::::bounded(i); + assert_eq!(q.capacity(), Some(i)); + } +} + +#[test] +#[should_panic(expected = "capacity must be positive")] +fn zero_capacity() { + let _ = ConcurrentQueue::::bounded(0); +} + +#[test] +fn len_empty_full() { + let q = ConcurrentQueue::bounded(2); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + assert_eq!(q.is_full(), false); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), false); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 2); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), true); + + q.pop().unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), false); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn len() { + const COUNT: usize = if cfg!(miri) { 50 } else { 25_000 }; + const CAP: usize = if cfg!(miri) { 50 } else { 1000 }; + + let q = ConcurrentQueue::bounded(CAP); + assert_eq!(q.len(), 0); + + for _ in 0..CAP / 
10 { + for i in 0..50 { + q.push(i).unwrap(); + assert_eq!(q.len(), i + 1); + } + + for i in 0..50 { + q.pop().unwrap(); + assert_eq!(q.len(), 50 - i - 1); + } + } + assert_eq!(q.len(), 0); + + for i in 0..CAP { + q.push(i).unwrap(); + assert_eq!(q.len(), i + 1); + } + + for _ in 0..CAP { + q.pop().unwrap(); + } + assert_eq!(q.len(), 0); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + assert_eq!(x, i); + break; + } + } + let len = q.len(); + assert!(len <= CAP); + } + }) + .add(|| { + for i in 0..COUNT { + while q.push(i).is_err() {} + let len = q.len(); + assert!(len <= CAP); + } + }) + .run(); + + assert_eq!(q.len(), 0); +} + +#[test] +fn close() { + let q = ConcurrentQueue::bounded(2); + assert_eq!(q.push(10), Ok(())); + + assert!(!q.is_closed()); + assert!(q.close()); + + assert!(q.is_closed()); + assert!(!q.close()); + + assert_eq!(q.push(20), Err(PushError::Closed(20))); + assert_eq!(q.pop(), Ok(10)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[test] +fn force_push() { + let q = ConcurrentQueue::::bounded(5); + + for i in 1..=5 { + assert_eq!(q.force_push(i), Ok(None)); + } + + assert!(!q.is_closed()); + for i in 6..=10 { + assert_eq!(q.force_push(i), Ok(Some(i - 5))); + } + assert_eq!(q.pop(), Ok(6)); + assert_eq!(q.force_push(11), Ok(None)); + for i in 12..=15 { + assert_eq!(q.force_push(i), Ok(Some(i - 5))); + } + + assert!(q.close()); + assert_eq!(q.force_push(40), Err(ForcePushError(40))); + for i in 11..=15 { + assert_eq!(q.pop(), Ok(i)); + } + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 }; + + let q = ConcurrentQueue::bounded(3); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + assert_eq!(x, i); + break; + } + } + } + assert!(q.pop().is_err()); + }) + .add(|| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); +} + 
+#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::::bounded(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = loop { + if let Ok(x) = q.pop() { + break x; + } + }; + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn drops() { + const RUNS: usize = if cfg!(miri) { 10 } else { 100 }; + const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 }; + + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + for _ in 0..RUNS { + let steps = fastrand::usize(..STEPS); + let additional = fastrand::usize(..50); + + DROPS.store(0, Ordering::SeqCst); + let q = ConcurrentQueue::bounded(50); + + Parallel::new() + .add(|| { + for _ in 0..steps { + while q.pop().is_err() {} + } + }) + .add(|| { + for _ in 0..steps { + while q.push(DropCounter).is_err() { + DROPS.fetch_sub(1, Ordering::SeqCst); + } + } + }) + .run(); + + for _ in 0..additional { + q.push(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(q); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn linearizable() { + const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::bounded(THREADS); + + Parallel::new() + .each(0..THREADS / 2, |_| { + for _ in 0..COUNT { + while q.push(0).is_err() {} + q.pop().unwrap(); + } + }) + .each(0..THREADS / 2, |_| { + for _ in 0..COUNT { + 
if q.force_push(0).unwrap().is_none() { + q.pop().unwrap(); + } + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 }; + + let t = AtomicUsize::new(1); + let q = ConcurrentQueue::::bounded(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .add(|| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .add(|| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), 1); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 4; + + let t = AtomicUsize::new(THREADS); + let q = ConcurrentQueue::::bounded(3); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} diff --git a/.cargo-vendor/concurrent-queue/tests/loom.rs b/.cargo-vendor/concurrent-queue/tests/loom.rs new file mode 100644 index 0000000000..77f99d4945 --- /dev/null +++ b/.cargo-vendor/concurrent-queue/tests/loom.rs @@ -0,0 +1,307 @@ +#![cfg(loom)] + +use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError}; +use loom::sync::atomic::{AtomicUsize, Ordering}; +use loom::sync::{Arc, Condvar, 
Mutex}; +use loom::thread; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +/// A basic MPMC channel based on a ConcurrentQueue and loom primitives. +struct Channel { + /// The queue used to contain items. + queue: ConcurrentQueue, + + /// The number of senders. + senders: AtomicUsize, + + /// The number of receivers. + receivers: AtomicUsize, + + /// The event that is signaled when a new item is pushed. + push_event: Event, + + /// The event that is signaled when a new item is popped. + pop_event: Event, +} + +/// The sending side of a channel. +struct Sender { + /// The channel. + channel: Arc>, +} + +/// The receiving side of a channel. +struct Receiver { + /// The channel. + channel: Arc>, +} + +/// Create a new pair of senders/receivers based on a queue. +fn pair(queue: ConcurrentQueue) -> (Sender, Receiver) { + let channel = Arc::new(Channel { + queue, + senders: AtomicUsize::new(1), + receivers: AtomicUsize::new(1), + push_event: Event::new(), + pop_event: Event::new(), + }); + + ( + Sender { + channel: channel.clone(), + }, + Receiver { channel }, + ) +} + +impl Clone for Sender { + fn clone(&self) -> Self { + self.channel.senders.fetch_add(1, Ordering::SeqCst); + Sender { + channel: self.channel.clone(), + } + } +} + +impl Drop for Sender { + fn drop(&mut self) { + if self.channel.senders.fetch_sub(1, Ordering::SeqCst) == 1 { + // Close the channel and notify the receivers. + self.channel.queue.close(); + self.channel.push_event.signal_all(); + } + } +} + +impl Clone for Receiver { + fn clone(&self) -> Self { + self.channel.receivers.fetch_add(1, Ordering::SeqCst); + Receiver { + channel: self.channel.clone(), + } + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + if self.channel.receivers.fetch_sub(1, Ordering::SeqCst) == 1 { + // Close the channel and notify the senders. + self.channel.queue.close(); + self.channel.pop_event.signal_all(); + } + } +} + +impl Sender { + /// Send a value. 
+ /// + /// Returns an error with the value if the channel is closed. + fn send(&self, mut value: T) -> Result<(), T> { + loop { + match self.channel.queue.push(value) { + Ok(()) => { + // Notify a single receiver. + self.channel.push_event.signal(); + return Ok(()); + } + Err(PushError::Closed(val)) => return Err(val), + Err(PushError::Full(val)) => { + // Wait for a receiver to pop an item. + value = val; + self.channel.pop_event.wait(); + } + } + } + } + + /// Send a value forcefully. + fn force_send(&self, value: T) -> Result, T> { + match self.channel.queue.force_push(value) { + Ok(bumped) => { + self.channel.push_event.signal(); + Ok(bumped) + } + + Err(ForcePushError(val)) => Err(val), + } + } +} + +impl Receiver { + /// Channel capacity. + fn capacity(&self) -> Option { + self.channel.queue.capacity() + } + + /// Receive a value. + /// + /// Returns an error if the channel is closed. + fn recv(&self) -> Result { + loop { + match self.channel.queue.pop() { + Ok(value) => { + // Notify a single sender. + self.channel.pop_event.signal(); + return Ok(value); + } + Err(PopError::Closed) => return Err(()), + Err(PopError::Empty) => { + // Wait for a sender to push an item. + self.channel.push_event.wait(); + } + } + } + } +} + +/// An event that can be waited on and then signaled. +struct Event { + /// The condition variable used to wait on the event. + condvar: Condvar, + + /// The mutex used to protect the event. + /// + /// Inside is the event's state. The first bit is used to indicate if the + /// notify_one method was called. The second bit is used to indicate if the + /// notify_all method was called. + mutex: Mutex, +} + +impl Event { + /// Create a new event. + fn new() -> Self { + Self { + condvar: Condvar::new(), + mutex: Mutex::new(0), + } + } + + /// Wait for the event to be signaled. + fn wait(&self) { + let mut state = self.mutex.lock().unwrap(); + + loop { + if *state & 0b11 != 0 { + // The event was signaled. 
+ *state &= !0b01; + return; + } + + // Wait for the event to be signaled. + state = self.condvar.wait(state).unwrap(); + } + } + + /// Signal the event. + fn signal(&self) { + let mut state = self.mutex.lock().unwrap(); + *state |= 1; + drop(state); + + self.condvar.notify_one(); + } + + /// Signal the event, but notify all waiters. + fn signal_all(&self) { + let mut state = self.mutex.lock().unwrap(); + *state |= 3; + drop(state); + + self.condvar.notify_all(); + } +} + +/// Wrapper to run tests on all three queues. +fn run_test, usize) + Send + Sync + Clone + 'static>(f: F) { + // The length of a loom test seems to increase exponentially the higher this number is. + const LIMIT: usize = 4; + + let fc = f.clone(); + loom::model(move || { + fc(ConcurrentQueue::bounded(1), LIMIT); + }); + + let fc = f.clone(); + loom::model(move || { + fc(ConcurrentQueue::bounded(LIMIT / 2), LIMIT); + }); + + loom::model(move || { + f(ConcurrentQueue::unbounded(), LIMIT); + }); +} + +#[test] +fn spsc() { + run_test(|q, limit| { + // Create a new pair of senders/receivers. + let (tx, rx) = pair(q); + + // Push each onto a thread and run them. + let handle = thread::spawn(move || { + for i in 0..limit { + if tx.send(i).is_err() { + break; + } + } + }); + + let mut recv_values = vec![]; + + loop { + match rx.recv() { + Ok(value) => recv_values.push(value), + Err(()) => break, + } + } + + // Values may not be in order. + recv_values.sort_unstable(); + assert_eq!(recv_values, (0..limit).collect::>()); + + // Join the handle before we exit. + handle.join().unwrap(); + }); +} + +#[test] +fn spsc_force() { + run_test(|q, limit| { + // Create a new pair of senders/receivers. + let (tx, rx) = pair(q); + + // Push each onto a thread and run them. 
+ let handle = thread::spawn(move || { + for i in 0..limit { + if tx.force_send(i).is_err() { + break; + } + } + }); + + let mut recv_values = vec![]; + + loop { + match rx.recv() { + Ok(value) => recv_values.push(value), + Err(()) => break, + } + } + + // Values may not be in order. + recv_values.sort_unstable(); + let cap = rx.capacity().unwrap_or(usize::MAX); + for (left, right) in (0..limit) + .rev() + .take(cap) + .zip(recv_values.into_iter().rev()) + { + assert_eq!(left, right); + } + + // Join the handle before we exit. + handle.join().unwrap(); + }); +} diff --git a/.cargo-vendor/concurrent-queue/tests/single.rs b/.cargo-vendor/concurrent-queue/tests/single.rs new file mode 100644 index 0000000000..ec4b912c94 --- /dev/null +++ b/.cargo-vendor/concurrent-queue/tests/single.rs @@ -0,0 +1,289 @@ +#![allow(clippy::bool_assert_comparison)] + +use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError}; + +#[cfg(not(target_family = "wasm"))] +use easy_parallel::Parallel; +#[cfg(not(target_family = "wasm"))] +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn smoke() { + let q = ConcurrentQueue::bounded(1); + + q.push(7).unwrap(); + assert_eq!(q.pop(), Ok(7)); + + q.push(8).unwrap(); + assert_eq!(q.pop(), Ok(8)); + assert!(q.pop().is_err()); +} + +#[test] +fn capacity() { + let q = ConcurrentQueue::::bounded(1); + assert_eq!(q.capacity(), Some(1)); +} + +#[test] +fn len_empty_full() { + let q = ConcurrentQueue::bounded(1); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + assert_eq!(q.is_full(), false); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + assert_eq!(q.is_full(), true); + + q.pop().unwrap(); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + assert_eq!(q.is_full(), false); +} + +#[test] +fn close() { + let q = ConcurrentQueue::::bounded(1); + assert_eq!(q.push(10), Ok(())); 
+ + assert!(!q.is_closed()); + assert!(q.close()); + + assert!(q.is_closed()); + assert!(!q.close()); + + assert_eq!(q.push(20), Err(PushError::Closed(20))); + assert_eq!(q.pop(), Ok(10)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[test] +fn force_push() { + let q = ConcurrentQueue::::bounded(1); + assert_eq!(q.force_push(10), Ok(None)); + + assert!(!q.is_closed()); + assert_eq!(q.force_push(20), Ok(Some(10))); + assert_eq!(q.force_push(30), Ok(Some(20))); + + assert!(q.close()); + assert_eq!(q.force_push(40), Err(ForcePushError(40))); + assert_eq!(q.pop(), Ok(30)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 }; + + let q = ConcurrentQueue::bounded(1); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + assert_eq!(x, i); + break; + } + } + } + assert!(q.pop().is_err()); + }) + .add(|| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 1; + + let q = ConcurrentQueue::::bounded(THREADS); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = loop { + if let Ok(x) = q.pop() { + break x; + } + }; + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + while q.push(i).is_err() {} + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn drops() { + const RUNS: usize = if cfg!(miri) { 20 } else { 100 }; + const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 }; + + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut 
self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + for _ in 0..RUNS { + let steps = fastrand::usize(..STEPS); + let additional = fastrand::usize(0..=1); + + DROPS.store(0, Ordering::SeqCst); + let q = ConcurrentQueue::bounded(1); + + Parallel::new() + .add(|| { + for _ in 0..steps { + while q.pop().is_err() {} + } + }) + .add(|| { + for _ in 0..steps { + while q.push(DropCounter).is_err() { + DROPS.fetch_sub(1, Ordering::SeqCst); + } + } + }) + .run(); + + for _ in 0..additional { + q.push(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(q); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn linearizable() { + const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::bounded(1); + + Parallel::new() + .each(0..THREADS / 2, |_| { + for _ in 0..COUNT { + while q.push(0).is_err() {} + q.pop().unwrap(); + } + }) + .each(0..THREADS / 2, |_| { + for _ in 0..COUNT { + if q.force_push(0).unwrap().is_none() { + q.pop().unwrap(); + } + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 }; + + let t = AtomicUsize::new(1); + let q = ConcurrentQueue::::bounded(1); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .add(|| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .add(|| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), 1); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc_ring_buffer() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const 
THREADS: usize = 4; + + let t = AtomicUsize::new(THREADS); + let q = ConcurrentQueue::::bounded(1); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| loop { + match t.load(Ordering::SeqCst) { + 0 if q.is_empty() => break, + + _ => { + while let Ok(n) = q.pop() { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + if let Ok(Some(n)) = q.force_push(i) { + v[n].fetch_add(1, Ordering::SeqCst); + } + } + + t.fetch_sub(1, Ordering::SeqCst); + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} diff --git a/.cargo-vendor/concurrent-queue/tests/unbounded.rs b/.cargo-vendor/concurrent-queue/tests/unbounded.rs new file mode 100644 index 0000000000..e95dc8c725 --- /dev/null +++ b/.cargo-vendor/concurrent-queue/tests/unbounded.rs @@ -0,0 +1,181 @@ +#![allow(clippy::bool_assert_comparison)] + +use concurrent_queue::{ConcurrentQueue, PopError, PushError}; + +#[cfg(not(target_family = "wasm"))] +use easy_parallel::Parallel; +#[cfg(not(target_family = "wasm"))] +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn smoke() { + let q = ConcurrentQueue::unbounded(); + q.push(7).unwrap(); + assert_eq!(q.pop(), Ok(7)); + + q.push(8).unwrap(); + assert_eq!(q.pop(), Ok(8)); + assert!(q.pop().is_err()); +} + +#[test] +fn len_empty_full() { + let q = ConcurrentQueue::unbounded(); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); + + q.push(()).unwrap(); + + assert_eq!(q.len(), 1); + assert_eq!(q.is_empty(), false); + + q.pop().unwrap(); + + assert_eq!(q.len(), 0); + assert_eq!(q.is_empty(), true); +} + +#[test] +fn len() { + let q = ConcurrentQueue::unbounded(); + + assert_eq!(q.len(), 0); + + for i in 0..50 { + q.push(i).unwrap(); + assert_eq!(q.len(), i + 1); + } + + for i in 0..50 { + q.pop().unwrap(); + assert_eq!(q.len(), 50 - i - 1); + 
} + + assert_eq!(q.len(), 0); +} + +#[test] +fn close() { + let q = ConcurrentQueue::unbounded(); + assert_eq!(q.push(10), Ok(())); + + assert!(!q.is_closed()); + assert!(q.close()); + + assert!(q.is_closed()); + assert!(!q.close()); + + assert_eq!(q.push(20), Err(PushError::Closed(20))); + assert_eq!(q.pop(), Ok(10)); + assert_eq!(q.pop(), Err(PopError::Closed)); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn spsc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 }; + + let q = ConcurrentQueue::unbounded(); + + Parallel::new() + .add(|| { + for i in 0..COUNT { + loop { + if let Ok(x) = q.pop() { + assert_eq!(x, i); + break; + } + } + } + assert!(q.pop().is_err()); + }) + .add(|| { + for i in 0..COUNT { + q.push(i).unwrap(); + } + }) + .run(); +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn mpmc() { + const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 }; + const THREADS: usize = 4; + + let q = ConcurrentQueue::::unbounded(); + let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); + + Parallel::new() + .each(0..THREADS, |_| { + for _ in 0..COUNT { + let n = loop { + if let Ok(x) = q.pop() { + break x; + } + }; + v[n].fetch_add(1, Ordering::SeqCst); + } + }) + .each(0..THREADS, |_| { + for i in 0..COUNT { + q.push(i).unwrap(); + } + }) + .run(); + + for c in v { + assert_eq!(c.load(Ordering::SeqCst), THREADS); + } +} + +#[cfg(not(target_family = "wasm"))] +#[test] +fn drops() { + const RUNS: usize = if cfg!(miri) { 20 } else { 100 }; + const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 }; + + static DROPS: AtomicUsize = AtomicUsize::new(0); + + #[derive(Debug, PartialEq)] + struct DropCounter; + + impl Drop for DropCounter { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::SeqCst); + } + } + + for _ in 0..RUNS { + let steps = fastrand::usize(0..STEPS); + let additional = fastrand::usize(0..1000); + + DROPS.store(0, Ordering::SeqCst); + let q = ConcurrentQueue::unbounded(); + + Parallel::new() + .add(|| { + for _ 
in 0..steps { + while q.pop().is_err() {} + } + }) + .add(|| { + for _ in 0..steps { + q.push(DropCounter).unwrap(); + } + }) + .run(); + + for _ in 0..additional { + q.push(DropCounter).unwrap(); + } + + assert_eq!(DROPS.load(Ordering::SeqCst), steps); + drop(q); + assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); + } +} diff --git a/.cargo-vendor/data-encoding/.cargo-checksum.json b/.cargo-vendor/data-encoding/.cargo-checksum.json new file mode 100644 index 0000000000..cb74973562 --- /dev/null +++ b/.cargo-vendor/data-encoding/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"0ec3d9c4bbe44a03ebe4ebfc5d212db66fef007490f82d98d08016aaa5058d40","LICENSE":"b68ad1a3367b825447089e1f8d6829b97f47a89eb78d2f4ebaef4672f5606186","README.md":"3e363840aec7b9ac3718330a61601bf2f49623e0749d923c866daf3bdd2ed87f","src/lib.rs":"864640728624d2c5001f1f8ea1b957447d7376817fb41e7e99abb07a3324e799"},"package":"e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"} \ No newline at end of file diff --git a/.cargo-vendor/data-encoding/Cargo.toml b/.cargo-vendor/data-encoding/Cargo.toml new file mode 100644 index 0000000000..0d224286b9 --- /dev/null +++ b/.cargo-vendor/data-encoding/Cargo.toml @@ -0,0 +1,46 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.48" +name = "data-encoding" +version = "2.6.0" +authors = ["Julien Cretin "] +include = [ + "Cargo.toml", + "LICENSE", + "README.md", + "src/lib.rs", +] +description = "Efficient and customizable data-encoding functions like base64, base32, and hex" +documentation = "https://docs.rs/data-encoding" +readme = "README.md" +keywords = [ + "no_std", + "base64", + "base32", + "hex", +] +categories = [ + "encoding", + "no-std", +] +license = "MIT" +repository = "https://github.com/ia0/data-encoding" + +[package.metadata.docs.rs] +rustdoc-args = ["--cfg=docsrs"] + +[features] +alloc = [] +default = ["std"] +std = ["alloc"] diff --git a/.cargo-vendor/data-encoding/LICENSE b/.cargo-vendor/data-encoding/LICENSE new file mode 100644 index 0000000000..9a750833a0 --- /dev/null +++ b/.cargo-vendor/data-encoding/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2020 Julien Cretin +Copyright (c) 2017-2020 Google Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.cargo-vendor/data-encoding/README.md b/.cargo-vendor/data-encoding/README.md new file mode 100644 index 0000000000..f2d892e425 --- /dev/null +++ b/.cargo-vendor/data-encoding/README.md @@ -0,0 +1,35 @@ +[![CI Status][ci_badge]][ci] +[![Coverage Status][coveralls_badge]][coveralls] + +This library provides the following common encodings: + +| Name | Description | +|--------------------------|---------------------------------------------------| +| `HEXLOWER` | lowercase hexadecimal | +| `HEXLOWER_PERMISSIVE` | lowercase hexadecimal (case-insensitive decoding) | +| `HEXUPPER` | uppercase hexadecimal | +| `HEXUPPER_PERMISSIVE` | uppercase hexadecimal (case-insensitive decoding) | +| `BASE32` | RFC4648 base32 | +| `BASE32_NOPAD` | RFC4648 base32 (no padding) | +| `BASE32_DNSSEC` | RFC5155 base32 | +| `BASE32_DNSCURVE` | DNSCurve base32 | +| `BASE32HEX` | RFC4648 base32hex | +| `BASE32HEX_NOPAD` | RFC4648 base32hex (no padding) | +| `BASE64` | RFC4648 base64 | +| `BASE64_NOPAD` | RFC4648 base64 (no padding) | +| `BASE64_MIME` | RFC2045-like base64 | +| `BASE64_MIME_PERMISSIVE` | RFC2045-like base64 (ignoring trailing bits) | +| `BASE64URL` | RFC4648 base64url | +| `BASE64URL_NOPAD` | RFC4648 base64url (no padding) | + +It also provides the possibility to define custom little-endian ASCII +base-conversion encodings for bases of size 2, 4, 8, 16, 32, and 64 (for which +all above use-cases are particular instances). + +See the [documentation] for more details. 
+ +[ci]: https://github.com/ia0/data-encoding/actions/workflows/ci.yml +[ci_badge]: https://github.com/ia0/data-encoding/actions/workflows/ci.yml/badge.svg +[coveralls]: https://coveralls.io/github/ia0/data-encoding?branch=main +[coveralls_badge]: https://coveralls.io/repos/github/ia0/data-encoding/badge.svg?branch=main +[documentation]: https://docs.rs/data-encoding diff --git a/.cargo-vendor/data-encoding/src/lib.rs b/.cargo-vendor/data-encoding/src/lib.rs new file mode 100644 index 0000000000..ed8b194e36 --- /dev/null +++ b/.cargo-vendor/data-encoding/src/lib.rs @@ -0,0 +1,2561 @@ +//! Efficient and customizable data-encoding functions like base64, base32, and hex +//! +//! This [crate] provides little-endian ASCII base-conversion encodings for +//! bases of size 2, 4, 8, 16, 32, and 64. It supports: +//! +//! - [padding] for streaming +//! - canonical encodings (e.g. [trailing bits] are checked) +//! - in-place [encoding] and [decoding] functions +//! - partial [decoding] functions (e.g. for error recovery) +//! - character [translation] (e.g. for case-insensitivity) +//! - most and least significant [bit-order] +//! - [ignoring] characters when decoding (e.g. for skipping newlines) +//! - [wrapping] the output when encoding +//! - no-std environments with `default-features = false, features = ["alloc"]` +//! - no-alloc environments with `default-features = false` +//! +//! You may use the [binary] or the [website] to play around. +//! +//! # Examples +//! +//! This crate provides predefined encodings as [constants]. These constants are of type +//! [`Encoding`]. This type provides encoding and decoding functions with in-place or allocating +//! variants. Here is an example using the allocating encoding function of [`BASE64`]: +//! +//! ```rust +//! use data_encoding::BASE64; +//! assert_eq!(BASE64.encode(b"Hello world"), "SGVsbG8gd29ybGQ="); +//! ``` +//! +//! Here is an example using the in-place decoding function of [`BASE32`]: +//! +//! ```rust +//! 
use data_encoding::BASE32; +//! let input = b"JBSWY3DPEB3W64TMMQ======"; +//! let mut output = vec![0; BASE32.decode_len(input.len()).unwrap()]; +//! let len = BASE32.decode_mut(input, &mut output).unwrap(); +//! assert_eq!(&output[0 .. len], b"Hello world"); +//! ``` +//! +//! You are not limited to the predefined encodings. You may define your own encodings (with the +//! same correctness and performance properties as the predefined ones) using the [`Specification`] +//! type: +//! +//! ```rust +//! use data_encoding::Specification; +//! let hex = { +//! let mut spec = Specification::new(); +//! spec.symbols.push_str("0123456789abcdef"); +//! spec.encoding().unwrap() +//! }; +//! assert_eq!(hex.encode(b"hello"), "68656c6c6f"); +//! ``` +//! +//! You may use the [macro] library to define a compile-time custom encoding: +//! +//! ```rust,ignore +//! use data_encoding::Encoding; +//! use data_encoding_macro::new_encoding; +//! const HEX: Encoding = new_encoding!{ +//! symbols: "0123456789abcdef", +//! translate_from: "ABCDEF", +//! translate_to: "abcdef", +//! }; +//! const BASE64: Encoding = new_encoding!{ +//! symbols: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", +//! padding: '=', +//! }; +//! ``` +//! +//! # Properties +//! +//! The [`HEXUPPER`], [`BASE32`], [`BASE32HEX`], [`BASE64`], and [`BASE64URL`] predefined encodings +//! conform to [RFC4648]. +//! +//! In general, the encoding and decoding functions satisfy the following properties: +//! +//! - They are deterministic: their output only depends on their input +//! - They have no side-effects: they do not modify any hidden mutable state +//! - They are correct: encoding followed by decoding gives the initial data +//! - They are canonical (unless [`is_canonical`] returns false): decoding followed by encoding +//! gives the initial data +//! +//! This last property is usually not satisfied by base64 implementations. This is a matter of +//! 
choice and this crate has made the choice to let the user choose. Support for canonical encoding +//! as described by the [RFC][canonical] is provided. But it is also possible to disable checking +//! trailing bits, to add characters translation, to decode concatenated padded inputs, and to +//! ignore some characters. Note that non-canonical encodings may be an attack vector as described +//! in [Base64 Malleability in Practice](https://eprint.iacr.org/2022/361.pdf). +//! +//! Since the RFC specifies the encoding function on all inputs and the decoding function on all +//! possible encoded outputs, the differences between implementations come from the decoding +//! function which may be more or less permissive. In this crate, the decoding function of canonical +//! encodings rejects all inputs that are not a possible output of the encoding function. Here are +//! some concrete examples of decoding differences between this crate, the `base64` crate, and the +//! `base64` GNU program: +//! +//! | Input | `data-encoding` | `base64` | GNU `base64` | +//! | ---------- | --------------- | --------- | ------------- | +//! | `AAB=` | `Trailing(2)` | `Last(2)` | `\x00\x00` | +//! | `AA\nB=` | `Length(4)` | `Byte(2)` | `\x00\x00` | +//! | `AAB` | `Length(0)` | `Padding` | Invalid input | +//! | `AAA` | `Length(0)` | `Padding` | Invalid input | +//! | `A\rA\nB=` | `Length(4)` | `Byte(1)` | Invalid input | +//! | `-_\r\n` | `Symbol(0)` | `Byte(0)` | Invalid input | +//! | `AA==AA==` | `[0, 0]` | `Byte(2)` | `\x00\x00` | +//! +//! We can summarize these discrepancies as follows: +//! +//! | Discrepancy | `data-encoding` | `base64` | GNU `base64` | +//! | -------------------------- | --------------- | -------- | ------------ | +//! | Check trailing bits | Yes | Yes | No | +//! | Ignored characters | None | None | `\n` | +//! | Translated characters | None | None | None | +//! | Check padding | Yes | No | Yes | +//! | Support concatenated input | Yes | No | Yes | +//! +//! 
This crate permits to disable checking trailing bits. It permits to ignore some characters. It +//! permits to translate characters. It permits to use unpadded encodings. However, for padded +//! encodings, support for concatenated inputs cannot be disabled. This is simply because it doesn't +//! make sense to use padding if it is not to support concatenated inputs. +//! +//! [RFC4648]: https://tools.ietf.org/html/rfc4648 +//! [`BASE32HEX`]: constant.BASE32HEX.html +//! [`BASE32`]: constant.BASE32.html +//! [`BASE64URL`]: constant.BASE64URL.html +//! [`BASE64`]: constant.BASE64.html +//! [`Encoding`]: struct.Encoding.html +//! [`HEXUPPER`]: constant.HEXUPPER.html +//! [`Specification`]: struct.Specification.html +//! [`is_canonical`]: struct.Encoding.html#method.is_canonical +//! [binary]: https://crates.io/crates/data-encoding-bin +//! [bit-order]: struct.Specification.html#structfield.bit_order +//! [canonical]: https://tools.ietf.org/html/rfc4648#section-3.5 +//! [constants]: index.html#constants +//! [crate]: https://crates.io/crates/data-encoding +//! [decoding]: struct.Encoding.html#method.decode_mut +//! [encoding]: struct.Encoding.html#method.encode_mut +//! [ignoring]: struct.Specification.html#structfield.ignore +//! [macro]: https://crates.io/crates/data-encoding-macro +//! [padding]: struct.Specification.html#structfield.padding +//! [trailing bits]: struct.Specification.html#structfield.check_trailing_bits +//! [translation]: struct.Specification.html#structfield.translate +//! [website]: https://data-encoding.rs +//! [wrapping]: struct.Specification.html#structfield.wrap + +#![no_std] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +// TODO: This list up to warn(clippy::pedantic) should ideally use a lint group. 
+#![warn(elided_lifetimes_in_paths)] +// TODO(msrv): #![warn(let_underscore_drop)] +#![warn(missing_debug_implementations)] +#![warn(missing_docs)] +#![warn(unreachable_pub)] +// TODO(msrv): #![warn(unsafe_op_in_unsafe_fn)] +#![warn(unused_results)] +#![allow(unused_unsafe)] // TODO(msrv) +#![warn(clippy::pedantic)] +#![allow(clippy::assigning_clones)] // TODO(msrv) +#![allow(clippy::doc_markdown)] +#![allow(clippy::enum_glob_use)] +#![allow(clippy::similar_names)] +#![allow(clippy::uninlined_format_args)] // TODO(msrv) + +#[cfg(feature = "alloc")] +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; + +#[cfg(feature = "alloc")] +use alloc::borrow::{Cow, ToOwned}; +#[cfg(feature = "alloc")] +use alloc::string::String; +#[cfg(feature = "alloc")] +use alloc::vec; +#[cfg(feature = "alloc")] +use alloc::vec::Vec; +use core::convert::TryInto; + +macro_rules! check { + ($e: expr, $c: expr) => { + if !$c { + return Err($e); + } + }; +} + +trait Static: Copy { + fn val(self) -> T; +} + +macro_rules! define { + ($name: ident: $type: ty = $val: expr) => { + #[derive(Copy, Clone)] + struct $name; + impl Static<$type> for $name { + fn val(self) -> $type { + $val + } + } + }; +} + +define!(Bf: bool = false); +define!(Bt: bool = true); +define!(N1: usize = 1); +define!(N2: usize = 2); +define!(N3: usize = 3); +define!(N4: usize = 4); +define!(N5: usize = 5); +define!(N6: usize = 6); + +#[derive(Copy, Clone)] +struct On; + +impl Static> for On { + fn val(self) -> Option { + None + } +} + +#[derive(Copy, Clone)] +struct Os(T); + +impl Static> for Os { + fn val(self) -> Option { + Some(self.0) + } +} + +macro_rules! 
dispatch { + (let $var: ident: bool = $val: expr; $($body: tt)*) => { + if $val { + let $var = Bt; dispatch!($($body)*) + } else { + let $var = Bf; dispatch!($($body)*) + } + }; + (let $var: ident: usize = $val: expr; $($body: tt)*) => { + match $val { + 1 => { let $var = N1; dispatch!($($body)*) }, + 2 => { let $var = N2; dispatch!($($body)*) }, + 3 => { let $var = N3; dispatch!($($body)*) }, + 4 => { let $var = N4; dispatch!($($body)*) }, + 5 => { let $var = N5; dispatch!($($body)*) }, + 6 => { let $var = N6; dispatch!($($body)*) }, + _ => panic!(), + } + }; + (let $var: ident: Option<$type: ty> = $val: expr; $($body: tt)*) => { + match $val { + None => { let $var = On; dispatch!($($body)*) }, + Some(x) => { let $var = Os(x); dispatch!($($body)*) }, + } + }; + ($body: expr) => { $body }; +} + +unsafe fn chunk_unchecked(x: &[u8], n: usize, i: usize) -> &[u8] { + debug_assert!((i + 1) * n <= x.len()); + unsafe { core::slice::from_raw_parts(x.as_ptr().add(n * i), n) } +} + +unsafe fn chunk_mut_unchecked(x: &mut [u8], n: usize, i: usize) -> &mut [u8] { + debug_assert!((i + 1) * n <= x.len()); + unsafe { core::slice::from_raw_parts_mut(x.as_mut_ptr().add(n * i), n) } +} + +fn div_ceil(x: usize, m: usize) -> usize { + (x + m - 1) / m +} + +fn floor(x: usize, m: usize) -> usize { + x / m * m +} + +fn vectorize(n: usize, bs: usize, mut f: F) { + for k in 0 .. n / bs { + for i in k * bs .. (k + 1) * bs { + f(i); + } + } + for i in floor(n, bs) .. 
n { + f(i); + } +} + +/// Decoding error kind +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum DecodeKind { + /// Invalid length + Length, + + /// Invalid symbol + Symbol, + + /// Non-zero trailing bits + Trailing, + + /// Invalid padding length + Padding, +} + +impl core::fmt::Display for DecodeKind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let description = match self { + DecodeKind::Length => "invalid length", + DecodeKind::Symbol => "invalid symbol", + DecodeKind::Trailing => "non-zero trailing bits", + DecodeKind::Padding => "invalid padding length", + }; + write!(f, "{}", description) + } +} + +/// Decoding error +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct DecodeError { + /// Error position + /// + /// This position is always a valid input position and represents the first encountered error. + pub position: usize, + + /// Error kind + pub kind: DecodeKind, +} + +#[cfg(feature = "std")] +impl std::error::Error for DecodeError {} + +impl core::fmt::Display for DecodeError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{} at {}", self.kind, self.position) + } +} + +/// Decoding error with partial result +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct DecodePartial { + /// Number of bytes read from input + /// + /// This number does not exceed the error position: `read <= error.position`. + pub read: usize, + + /// Number of bytes written to output + /// + /// This number does not exceed the decoded length: `written <= decode_len(read)`. 
+ pub written: usize, + + /// Decoding error + pub error: DecodeError, +} + +const INVALID: u8 = 128; +const IGNORE: u8 = 129; +const PADDING: u8 = 130; + +fn order(msb: bool, n: usize, i: usize) -> usize { + if msb { + n - 1 - i + } else { + i + } +} + +fn enc(bit: usize) -> usize { + match bit { + 1 | 2 | 4 => 1, + 3 | 6 => 3, + 5 => 5, + _ => unreachable!(), + } +} + +fn dec(bit: usize) -> usize { + enc(bit) * 8 / bit +} + +fn encode_len>(bit: B, len: usize) -> usize { + div_ceil(8 * len, bit.val()) +} + +fn encode_block, M: Static>( + bit: B, msb: M, symbols: &[u8; 256], input: &[u8], output: &mut [u8], +) { + debug_assert!(input.len() <= enc(bit.val())); + debug_assert_eq!(output.len(), encode_len(bit, input.len())); + let bit = bit.val(); + let msb = msb.val(); + let mut x = 0u64; + for (i, input) in input.iter().enumerate() { + x |= u64::from(*input) << (8 * order(msb, enc(bit), i)); + } + for (i, output) in output.iter_mut().enumerate() { + let y = x >> (bit * order(msb, dec(bit), i)); + *output = symbols[(y & 0xff) as usize]; + } +} + +fn encode_mut, M: Static>( + bit: B, msb: M, symbols: &[u8; 256], input: &[u8], output: &mut [u8], +) { + debug_assert_eq!(output.len(), encode_len(bit, input.len())); + let enc = enc(bit.val()); + let dec = dec(bit.val()); + let n = input.len() / enc; + let bs = match bit.val() { + 5 => 2, + 6 => 4, + _ => 1, + }; + vectorize(n, bs, |i| { + let input = unsafe { chunk_unchecked(input, enc, i) }; + let output = unsafe { chunk_mut_unchecked(output, dec, i) }; + encode_block(bit, msb, symbols, input, output); + }); + encode_block(bit, msb, symbols, &input[enc * n ..], &mut output[dec * n ..]); +} + +// Fails if an input character does not translate to a symbol. The error is the +// lowest index of such character. The output is not written to. 
+fn decode_block, M: Static>( + bit: B, msb: M, values: &[u8; 256], input: &[u8], output: &mut [u8], +) -> Result<(), usize> { + debug_assert!(output.len() <= enc(bit.val())); + debug_assert_eq!(input.len(), encode_len(bit, output.len())); + let bit = bit.val(); + let msb = msb.val(); + let mut x = 0u64; + for j in 0 .. input.len() { + let y = values[input[j] as usize]; + check!(j, y < 1 << bit); + x |= u64::from(y) << (bit * order(msb, dec(bit), j)); + } + for (j, output) in output.iter_mut().enumerate() { + *output = (x >> (8 * order(msb, enc(bit), j)) & 0xff) as u8; + } + Ok(()) +} + +// Fails if an input character does not translate to a symbol. The error `pos` +// is the lowest index of such character. The output is valid up to `pos / dec * +// enc` excluded. +fn decode_mut, M: Static>( + bit: B, msb: M, values: &[u8; 256], input: &[u8], output: &mut [u8], +) -> Result<(), usize> { + debug_assert_eq!(input.len(), encode_len(bit, output.len())); + let enc = enc(bit.val()); + let dec = dec(bit.val()); + let n = input.len() / dec; + for i in 0 .. n { + let input = unsafe { chunk_unchecked(input, dec, i) }; + let output = unsafe { chunk_mut_unchecked(output, enc, i) }; + decode_block(bit, msb, values, input, output).map_err(|e| dec * i + e)?; + } + decode_block(bit, msb, values, &input[dec * n ..], &mut output[enc * n ..]) + .map_err(|e| dec * n + e) +} + +// Fails if there are non-zero trailing bits. +fn check_trail, M: Static>( + bit: B, msb: M, ctb: bool, values: &[u8; 256], input: &[u8], +) -> Result<(), ()> { + if 8 % bit.val() == 0 || !ctb { + return Ok(()); + } + let trail = bit.val() * input.len() % 8; + if trail == 0 { + return Ok(()); + } + let mut mask = (1 << trail) - 1; + if !msb.val() { + mask <<= bit.val() - trail; + } + check!((), values[input[input.len() - 1] as usize] & mask == 0); + Ok(()) +} + +// Fails if the padding length is invalid. The error is the index of the first +// padding character. 
+fn check_pad>(bit: B, values: &[u8; 256], input: &[u8]) -> Result { + let bit = bit.val(); + debug_assert_eq!(input.len(), dec(bit)); + let is_pad = |x: &&u8| values[**x as usize] == PADDING; + let count = input.iter().rev().take_while(is_pad).count(); + let len = input.len() - count; + check!(len, len > 0 && bit * len % 8 < bit); + Ok(len) +} + +fn encode_base_len>(bit: B, len: usize) -> usize { + encode_len(bit, len) +} + +fn encode_base, M: Static>( + bit: B, msb: M, symbols: &[u8; 256], input: &[u8], output: &mut [u8], +) { + debug_assert_eq!(output.len(), encode_base_len(bit, input.len())); + encode_mut(bit, msb, symbols, input, output); +} + +fn encode_pad_len, P: Static>>(bit: B, pad: P, len: usize) -> usize { + match pad.val() { + None => encode_base_len(bit, len), + Some(_) => div_ceil(len, enc(bit.val())) * dec(bit.val()), + } +} + +fn encode_pad, M: Static, P: Static>>( + bit: B, msb: M, symbols: &[u8; 256], spad: P, input: &[u8], output: &mut [u8], +) { + let pad = match spad.val() { + None => return encode_base(bit, msb, symbols, input, output), + Some(pad) => pad, + }; + debug_assert_eq!(output.len(), encode_pad_len(bit, spad, input.len())); + let olen = encode_base_len(bit, input.len()); + encode_base(bit, msb, symbols, input, &mut output[.. 
olen]); + for output in output.iter_mut().skip(olen) { + *output = pad; + } +} + +fn encode_wrap_len< + 'a, + B: Static, + P: Static>, + W: Static>, +>( + bit: B, pad: P, wrap: W, ilen: usize, +) -> usize { + let olen = encode_pad_len(bit, pad, ilen); + match wrap.val() { + None => olen, + Some((col, end)) => olen + end.len() * div_ceil(olen, col), + } +} + +fn encode_wrap_mut< + 'a, + B: Static, + M: Static, + P: Static>, + W: Static>, +>( + bit: B, msb: M, symbols: &[u8; 256], pad: P, wrap: W, input: &[u8], output: &mut [u8], +) { + let (col, end) = match wrap.val() { + None => return encode_pad(bit, msb, symbols, pad, input, output), + Some((col, end)) => (col, end), + }; + debug_assert_eq!(output.len(), encode_wrap_len(bit, pad, wrap, input.len())); + debug_assert_eq!(col % dec(bit.val()), 0); + let col = col / dec(bit.val()); + let enc = col * enc(bit.val()); + let dec = col * dec(bit.val()) + end.len(); + let olen = dec - end.len(); + let n = input.len() / enc; + for i in 0 .. n { + let input = unsafe { chunk_unchecked(input, enc, i) }; + let output = unsafe { chunk_mut_unchecked(output, dec, i) }; + encode_base(bit, msb, symbols, input, &mut output[.. olen]); + output[olen ..].copy_from_slice(end); + } + if input.len() > enc * n { + let olen = dec * n + encode_pad_len(bit, pad, input.len() - enc * n); + encode_pad(bit, msb, symbols, pad, &input[enc * n ..], &mut output[dec * n .. olen]); + output[olen ..].copy_from_slice(end); + } +} + +// Returns the longest valid input length and associated output length. +fn decode_wrap_len, P: Static>( + bit: B, pad: P, len: usize, +) -> (usize, usize) { + let bit = bit.val(); + if pad.val() { + (floor(len, dec(bit)), len / dec(bit) * enc(bit)) + } else { + let trail = bit * len % 8; + (len - trail / bit, bit * len / 8) + } +} + +// Fails with Length if length is invalid. The error is the largest valid +// length. 
+fn decode_pad_len, P: Static>( + bit: B, pad: P, len: usize, +) -> Result { + let (ilen, olen) = decode_wrap_len(bit, pad, len); + check!(DecodeError { position: ilen, kind: DecodeKind::Length }, ilen == len); + Ok(olen) +} + +// Fails with Length if length is invalid. The error is the largest valid +// length. +fn decode_base_len>(bit: B, len: usize) -> Result { + decode_pad_len(bit, Bf, len) +} + +// Fails with Symbol if an input character does not translate to a symbol. The +// error is the lowest index of such character. +// Fails with Trailing if there are non-zero trailing bits. +fn decode_base_mut, M: Static>( + bit: B, msb: M, ctb: bool, values: &[u8; 256], input: &[u8], output: &mut [u8], +) -> Result { + debug_assert_eq!(Ok(output.len()), decode_base_len(bit, input.len())); + let fail = |pos, kind| DecodePartial { + read: pos / dec(bit.val()) * dec(bit.val()), + written: pos / dec(bit.val()) * enc(bit.val()), + error: DecodeError { position: pos, kind }, + }; + decode_mut(bit, msb, values, input, output).map_err(|pos| fail(pos, DecodeKind::Symbol))?; + check_trail(bit, msb, ctb, values, input) + .map_err(|()| fail(input.len() - 1, DecodeKind::Trailing))?; + Ok(output.len()) +} + +// Fails with Symbol if an input character does not translate to a symbol. The +// error is the lowest index of such character. +// Fails with Padding if some padding length is invalid. The error is the index +// of the first padding character of the invalid padding. +// Fails with Trailing if there are non-zero trailing bits. 
+fn decode_pad_mut, M: Static, P: Static>( + bit: B, msb: M, ctb: bool, values: &[u8; 256], pad: P, input: &[u8], output: &mut [u8], +) -> Result { + if !pad.val() { + return decode_base_mut(bit, msb, ctb, values, input, output); + } + debug_assert_eq!(Ok(output.len()), decode_pad_len(bit, pad, input.len())); + let enc = enc(bit.val()); + let dec = dec(bit.val()); + let mut inpos = 0; + let mut outpos = 0; + let mut outend = output.len(); + while inpos < input.len() { + match decode_base_mut( + bit, + msb, + ctb, + values, + &input[inpos ..], + &mut output[outpos .. outend], + ) { + Ok(written) => { + if cfg!(debug_assertions) { + inpos = input.len(); + } + outpos += written; + break; + } + Err(partial) => { + inpos += partial.read; + outpos += partial.written; + } + } + let inlen = + check_pad(bit, values, &input[inpos .. inpos + dec]).map_err(|pos| DecodePartial { + read: inpos, + written: outpos, + error: DecodeError { position: inpos + pos, kind: DecodeKind::Padding }, + })?; + let outlen = decode_base_len(bit, inlen).unwrap(); + let written = decode_base_mut( + bit, + msb, + ctb, + values, + &input[inpos .. inpos + inlen], + &mut output[outpos .. outpos + outlen], + ) + .map_err(|partial| { + debug_assert_eq!(partial.read, 0); + debug_assert_eq!(partial.written, 0); + DecodePartial { + read: inpos, + written: outpos, + error: DecodeError { + position: inpos + partial.error.position, + kind: partial.error.kind, + }, + } + })?; + debug_assert_eq!(written, outlen); + inpos += dec; + outpos += outlen; + outend -= enc - outlen; + } + debug_assert_eq!(inpos, input.len()); + debug_assert_eq!(outpos, outend); + Ok(outend) +} + +fn skip_ignore(values: &[u8; 256], input: &[u8], mut inpos: usize) -> usize { + while inpos < input.len() && values[input[inpos] as usize] == IGNORE { + inpos += 1; + } + inpos +} + +// Returns next input and output position. +// Fails with Symbol if an input character does not translate to a symbol. 
The +// error is the lowest index of such character. +// Fails with Padding if some padding length is invalid. The error is the index +// of the first padding character of the invalid padding. +// Fails with Trailing if there are non-zero trailing bits. +fn decode_wrap_block, M: Static, P: Static>( + bit: B, msb: M, ctb: bool, values: &[u8; 256], pad: P, input: &[u8], output: &mut [u8], +) -> Result<(usize, usize), DecodeError> { + let dec = dec(bit.val()); + let mut buf = [0u8; 8]; + let mut shift = [0usize; 8]; + let mut bufpos = 0; + let mut inpos = 0; + while bufpos < dec { + inpos = skip_ignore(values, input, inpos); + if inpos == input.len() { + break; + } + shift[bufpos] = inpos; + buf[bufpos] = input[inpos]; + bufpos += 1; + inpos += 1; + } + let olen = decode_pad_len(bit, pad, bufpos).map_err(|mut e| { + e.position = shift[e.position]; + e + })?; + let written = decode_pad_mut(bit, msb, ctb, values, pad, &buf[.. bufpos], &mut output[.. olen]) + .map_err(|partial| { + debug_assert_eq!(partial.read, 0); + debug_assert_eq!(partial.written, 0); + DecodeError { position: shift[partial.error.position], kind: partial.error.kind } + })?; + Ok((inpos, written)) +} + +// Fails with Symbol if an input character does not translate to a symbol. The +// error is the lowest index of such character. +// Fails with Padding if some padding length is invalid. The error is the index +// of the first padding character of the invalid padding. +// Fails with Trailing if there are non-zero trailing bits. +// Fails with Length if input length (without ignored characters) is invalid. 
+#[allow(clippy::too_many_arguments)] +fn decode_wrap_mut, M: Static, P: Static, I: Static>( + bit: B, msb: M, ctb: bool, values: &[u8; 256], pad: P, has_ignore: I, input: &[u8], + output: &mut [u8], +) -> Result { + if !has_ignore.val() { + return decode_pad_mut(bit, msb, ctb, values, pad, input, output); + } + debug_assert_eq!(output.len(), decode_wrap_len(bit, pad, input.len()).1); + let mut inpos = 0; + let mut outpos = 0; + while inpos < input.len() { + let (inlen, outlen) = decode_wrap_len(bit, pad, input.len() - inpos); + match decode_pad_mut( + bit, + msb, + ctb, + values, + pad, + &input[inpos .. inpos + inlen], + &mut output[outpos .. outpos + outlen], + ) { + Ok(written) => { + inpos += inlen; + outpos += written; + break; + } + Err(partial) => { + inpos += partial.read; + outpos += partial.written; + } + } + let (ipos, opos) = + decode_wrap_block(bit, msb, ctb, values, pad, &input[inpos ..], &mut output[outpos ..]) + .map_err(|mut error| { + error.position += inpos; + DecodePartial { read: inpos, written: outpos, error } + })?; + inpos += ipos; + outpos += opos; + } + let inpos = skip_ignore(values, input, inpos); + if inpos == input.len() { + Ok(outpos) + } else { + Err(DecodePartial { + read: inpos, + written: outpos, + error: DecodeError { position: inpos, kind: DecodeKind::Length }, + }) + } +} + +/// Order in which bits are read from a byte +/// +/// The base-conversion encoding is always little-endian. This means that the least significant +/// **byte** is always first. However, we can still choose whether, within a byte, this is the most +/// significant or the least significant **bit** that is first. If the terminology is confusing, +/// testing on an asymmetrical example should be enough to choose the correct value. +/// +/// # Examples +/// +/// In the following example, we can see that a base with the `MostSignificantFirst` bit-order has +/// the most significant bit first in the encoded output. 
In particular, the output is in the same +/// order as the bits in the byte. The opposite happens with the `LeastSignificantFirst` bit-order. +/// The least significant bit is first and the output is in the reverse order. +/// +/// ```rust +/// use data_encoding::{BitOrder, Specification}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("01"); +/// spec.bit_order = BitOrder::MostSignificantFirst; // default +/// let msb = spec.encoding().unwrap(); +/// spec.bit_order = BitOrder::LeastSignificantFirst; +/// let lsb = spec.encoding().unwrap(); +/// assert_eq!(msb.encode(&[0b01010011]), "01010011"); +/// assert_eq!(lsb.encode(&[0b01010011]), "11001010"); +/// ``` +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg(feature = "alloc")] +pub enum BitOrder { + /// Most significant bit first + /// + /// This is the most common and most intuitive bit-order. In particular, this is the bit-order + /// used by [RFC4648] and thus the usual hexadecimal, base64, base32, base64url, and base32hex + /// encodings. This is the default bit-order when [specifying](struct.Specification.html) a + /// base. 
+ /// + /// [RFC4648]: https://tools.ietf.org/html/rfc4648 + MostSignificantFirst, + + /// Least significant bit first + /// + /// # Examples + /// + /// DNSCurve [base32] uses least significant bit first: + /// + /// ```rust + /// use data_encoding::BASE32_DNSCURVE; + /// assert_eq!(BASE32_DNSCURVE.encode(&[0x64, 0x88]), "4321"); + /// assert_eq!(BASE32_DNSCURVE.decode(b"4321").unwrap(), vec![0x64, 0x88]); + /// ``` + /// + /// [base32]: constant.BASE32_DNSCURVE.html + LeastSignificantFirst, +} +#[cfg(feature = "alloc")] +use crate::BitOrder::*; + +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub type InternalEncoding = Cow<'static, [u8]>; + +#[doc(hidden)] +#[cfg(not(feature = "alloc"))] +pub type InternalEncoding = &'static [u8]; + +/// Base-conversion encoding +/// +/// See [Specification](struct.Specification.html) for technical details or how to define a new one. +// Required fields: +// 0 - 256 (256) symbols +// 256 - 512 (256) values +// 512 - 513 ( 1) padding +// 513 - 514 ( 1) reserved(3),ctb(1),msb(1),bit(3) +// Optional fields: +// 514 - 515 ( 1) width +// 515 - * ( N) separator +// Invariants: +// - symbols is 2^bit unique characters repeated 2^(8-bit) times +// - values[128 ..] are INVALID +// - values[0 .. 128] are either INVALID, IGNORE, PADDING, or < 2^bit +// - padding is either < 128 or INVALID +// - values[padding] is PADDING if padding < 128 +// - values and symbols are inverse +// - ctb is true if 8 % bit == 0 +// - width is present if there is x such that values[x] is IGNORE +// - width % dec(bit) == 0 +// - for all x in separator values[x] is IGNORE +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Encoding(#[doc(hidden)] pub InternalEncoding); + +/// How to translate characters when decoding +/// +/// The order matters. The first character of the `from` field is translated to the first character +/// of the `to` field. The second to the second. Etc. +/// +/// See [Specification](struct.Specification.html) for more information. 
+#[derive(Debug, Clone)] +#[cfg(feature = "alloc")] +pub struct Translate { + /// Characters to translate from + pub from: String, + + /// Characters to translate to + pub to: String, +} + +/// How to wrap the output when encoding +/// +/// See [Specification](struct.Specification.html) for more information. +#[derive(Debug, Clone)] +#[cfg(feature = "alloc")] +pub struct Wrap { + /// Wrapping width + /// + /// Must be a multiple of: + /// + /// - 8 for a bit-width of 1 (binary), 3 (octal), and 5 (base32) + /// - 4 for a bit-width of 2 (base4) and 6 (base64) + /// - 2 for a bit-width of 4 (hexadecimal) + /// + /// Wrapping is disabled if null. + pub width: usize, + + /// Wrapping characters + /// + /// Wrapping is disabled if empty. + pub separator: String, +} + +/// Base-conversion specification +/// +/// It is possible to define custom encodings given a specification. To do so, it is important to +/// understand the theory first. +/// +/// # Theory +/// +/// Each subsection has an equivalent subsection in the [Practice](#practice) section. +/// +/// ## Basics +/// +/// The main idea of a [base-conversion] encoding is to see `[u8]` as numbers written in +/// little-endian base256 and convert them in another little-endian base. For performance reasons, +/// this crate restricts this other base to be of size 2 (binary), 4 (base4), 8 (octal), 16 +/// (hexadecimal), 32 (base32), or 64 (base64). The converted number is written as `[u8]` although +/// it doesn't use all the 256 possible values of `u8`. This crate encodes to ASCII, so only values +/// smaller than 128 are allowed. 
+/// +/// More precisely, we need the following elements: +/// +/// - The bit-width N: 1 for binary, 2 for base4, 3 for octal, 4 for hexadecimal, 5 for base32, and +/// 6 for base64 +/// - The [bit-order](enum.BitOrder.html): most or least significant bit first +/// - The symbols function S from [0, 2N) (called values and written `uN`) to symbols +/// (represented as `u8` although only ASCII symbols are allowed, i.e. smaller than 128) +/// - The values partial function V from ASCII to [0, 2N), i.e. from `u8` to `uN` +/// - Whether trailing bits are checked: trailing bits are leading zeros in theory, but since +/// numbers are little-endian they come last +/// +/// For the encoding to be correct (i.e. encoding then decoding gives back the initial input), +/// V(S(i)) must be defined and equal to i for all i in [0, 2N). For the encoding to be +/// [canonical][canonical] (i.e. different inputs decode to different outputs, or equivalently, +/// decoding then encoding gives back the initial input), trailing bits must be checked and if V(i) +/// is defined then S(V(i)) is equal to i for all i. +/// +/// Encoding and decoding are given by the following pipeline: +/// +/// ```text +/// [u8] <--1--> [[bit; 8]] <--2--> [[bit; N]] <--3--> [uN] <--4--> [u8] +/// 1: Map bit-order between each u8 and [bit; 8] +/// 2: Base conversion between base 2^8 and base 2^N (check trailing bits) +/// 3: Map bit-order between each [bit; N] and uN +/// 4: Map symbols/values between each uN and u8 (values must be defined) +/// ``` +/// +/// ## Extensions +/// +/// All these extensions make the encoding not canonical. +/// +/// ### Padding +/// +/// Padding is useful if the following conditions are met: +/// +/// - the bit-width is 3 (octal), 5 (base32), or 6 (base64) +/// - the length of the data to encode is not known in advance +/// - the data must be sent without buffering +/// +/// Bases for which the bit-width N does not divide 8 may not concatenate encoded data. 
This comes +/// from the fact that it is not possible to make the difference between trailing bits and encoding +/// bits. Padding solves this issue by adding a new character to discriminate between trailing bits +/// and encoding bits. The idea is to work by blocks of lcm(8, N) bits, where lcm(8, N) is the least +/// common multiple of 8 and N. When such block is not complete, it is padded. +/// +/// To preserve correctness, the padding character must not be a symbol. +/// +/// ### Ignore characters when decoding +/// +/// Ignoring characters when decoding is useful if after encoding some characters are added for +/// convenience or any other reason (like wrapping). In that case we want to first ignore those +/// characters before decoding. +/// +/// To preserve correctness, ignored characters must not contain symbols or the padding character. +/// +/// ### Wrap output when encoding +/// +/// Wrapping output when encoding is useful if the output is meant to be printed in a document where +/// width is limited (typically 80-columns documents). In that case, the wrapping width and the +/// wrapping separator have to be defined. +/// +/// To preserve correctness, the wrapping separator characters must be ignored (see previous +/// subsection). As such, wrapping separator characters must also not contain symbols or the padding +/// character. +/// +/// ### Translate characters when decoding +/// +/// Translating characters when decoding is useful when encoded data may be copied by a humain +/// instead of a machine. Humans tend to confuse some characters for others. In that case we want to +/// translate those characters before decoding. +/// +/// To preserve correctness, the characters we translate _from_ must not contain symbols or the +/// padding character, and the characters we translate _to_ must only contain symbols or the padding +/// character. 
+/// +/// # Practice +/// +/// ## Basics +/// +/// ```rust +/// use data_encoding::{Encoding, Specification}; +/// fn make_encoding(symbols: &str) -> Encoding { +/// let mut spec = Specification::new(); +/// spec.symbols.push_str(symbols); +/// spec.encoding().unwrap() +/// } +/// let binary = make_encoding("01"); +/// let octal = make_encoding("01234567"); +/// let hexadecimal = make_encoding("0123456789abcdef"); +/// assert_eq!(binary.encode(b"Bit"), "010000100110100101110100"); +/// assert_eq!(octal.encode(b"Bit"), "20464564"); +/// assert_eq!(hexadecimal.encode(b"Bit"), "426974"); +/// ``` +/// +/// The `binary` base has 2 symbols `0` and `1` with value 0 and 1 respectively. The `octal` base +/// has 8 symbols `0` to `7` with value 0 to 7. The `hexadecimal` base has 16 symbols `0` to `9` and +/// `a` to `f` with value 0 to 15. The following diagram gives the idea of how encoding works in the +/// previous example (note that we can actually write such diagram only because the bit-order is +/// most significant first): +/// +/// ```text +/// [ octal] | 2 : 0 : 4 : 6 : 4 : 5 : 6 : 4 | +/// [ binary] |0 1 0 0 0 0 1 0|0 1 1 0 1 0 0 1|0 1 1 1 0 1 0 0| +/// [hexadecimal] | 4 : 2 | 6 : 9 | 7 : 4 | +/// ^-- LSB ^-- MSB +/// ``` +/// +/// Note that in theory, these little-endian numbers are read from right to left (the most +/// significant bit is at the right). Since leading zeros are meaningless (in our usual decimal +/// notation 0123 is the same as 123), it explains why trailing bits must be zero. Trailing bits may +/// occur when the bit-width of a base does not divide 8. Only binary, base4, and hexadecimal don't +/// have trailing bits issues. 
So let's consider octal and base64, which have trailing bits in +/// similar circumstances: +/// +/// ```rust +/// use data_encoding::{Specification, BASE64_NOPAD}; +/// let octal = { +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("01234567"); +/// spec.encoding().unwrap() +/// }; +/// assert_eq!(BASE64_NOPAD.encode(b"B"), "Qg"); +/// assert_eq!(octal.encode(b"B"), "204"); +/// ``` +/// +/// We have the following diagram, where the base64 values are written between parentheses: +/// +/// ```text +/// [base64] | Q(16) : g(32) : [has 4 zero trailing bits] +/// [ octal] | 2 : 0 : 4 : [has 1 zero trailing bit ] +/// |0 1 0 0 0 0 1 0|0 0 0 0 +/// [ ascii] | B | +/// ^-^-^-^-- leading zeros / trailing bits +/// ``` +/// +/// ## Extensions +/// +/// ### Padding +/// +/// For octal and base64, lcm(8, 3) == lcm(8, 6) == 24 bits or 3 bytes. For base32, lcm(8, 5) is 40 +/// bits or 5 bytes. Let's consider octal and base64: +/// +/// ```rust +/// use data_encoding::{Specification, BASE64}; +/// let octal = { +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("01234567"); +/// spec.padding = Some('='); +/// spec.encoding().unwrap() +/// }; +/// // We start encoding but we only have "B" for now. +/// assert_eq!(BASE64.encode(b"B"), "Qg=="); +/// assert_eq!(octal.encode(b"B"), "204====="); +/// // Now we have "it". +/// assert_eq!(BASE64.encode(b"it"), "aXQ="); +/// assert_eq!(octal.encode(b"it"), "322720=="); +/// // By concatenating everything, we may decode the original data. +/// assert_eq!(BASE64.decode(b"Qg==aXQ=").unwrap(), b"Bit"); +/// assert_eq!(octal.decode(b"204=====322720==").unwrap(), b"Bit"); +/// ``` +/// +/// We have the following diagrams: +/// +/// ```text +/// [base64] | Q(16) : g(32) : = : = | +/// [ octal] | 2 : 0 : 4 : = : = : = : = : = | +/// |0 1 0 0 0 0 1 0|. . . . . . . .|. . . . . . . 
.| +/// [ ascii] | B | end of block aligned --^ +/// ^-- beginning of block aligned +/// +/// [base64] | a(26) : X(23) : Q(16) : = | +/// [ octal] | 3 : 2 : 2 : 7 : 2 : 0 : = : = | +/// |0 1 1 0 1 0 0 1|0 1 1 1 0 1 0 0|. . . . . . . .| +/// [ ascii] | i | t | +/// ``` +/// +/// ### Ignore characters when decoding +/// +/// The typical use-case is to ignore newlines (`\r` and `\n`). But to keep the example small, we +/// will ignore spaces. +/// +/// ```rust +/// let mut spec = data_encoding::HEXLOWER.specification(); +/// spec.ignore.push_str(" \t"); +/// let base = spec.encoding().unwrap(); +/// assert_eq!(base.decode(b"42 69 74"), base.decode(b"426974")); +/// ``` +/// +/// ### Wrap output when encoding +/// +/// The typical use-case is to wrap after 64 or 76 characters with a newline (`\r\n` or `\n`). But +/// to keep the example small, we will wrap after 8 characters with a space. +/// +/// ```rust +/// let mut spec = data_encoding::BASE64.specification(); +/// spec.wrap.width = 8; +/// spec.wrap.separator.push_str(" "); +/// let base64 = spec.encoding().unwrap(); +/// assert_eq!(base64.encode(b"Hey you"), "SGV5IHlv dQ== "); +/// ``` +/// +/// Note that the output always ends with the separator. +/// +/// ### Translate characters when decoding +/// +/// The typical use-case is to translate lowercase to uppercase or reciprocally, but it is also used +/// for letters that look alike, like `O0` or `Il1`. Let's illustrate both examples. 
+/// +/// ```rust +/// let mut spec = data_encoding::HEXLOWER.specification(); +/// spec.translate.from.push_str("ABCDEFOIl"); +/// spec.translate.to.push_str("abcdef011"); +/// let base = spec.encoding().unwrap(); +/// assert_eq!(base.decode(b"BOIl"), base.decode(b"b011")); +/// ``` +/// +/// [base-conversion]: https://en.wikipedia.org/wiki/Positional_notation#Base_conversion +/// [canonical]: https://tools.ietf.org/html/rfc4648#section-3.5 +#[derive(Debug, Clone)] +#[cfg(feature = "alloc")] +pub struct Specification { + /// Symbols + /// + /// The number of symbols must be 2, 4, 8, 16, 32, or 64. Symbols must be ASCII characters + /// (smaller than 128) and they must be unique. + pub symbols: String, + + /// Bit-order + /// + /// The default is to use most significant bit first since it is the most common. + pub bit_order: BitOrder, + + /// Check trailing bits + /// + /// The default is to check trailing bits. This field is ignored when unnecessary (i.e. for + /// base2, base4, and base16). + pub check_trailing_bits: bool, + + /// Padding + /// + /// The default is to not use padding. The padding character must be ASCII and must not be a + /// symbol. + pub padding: Option, + + /// Characters to ignore when decoding + /// + /// The default is to not ignore characters when decoding. The characters to ignore must be + /// ASCII and must not be symbols or the padding character. + pub ignore: String, + + /// How to wrap the output when encoding + /// + /// The default is to not wrap the output when encoding. The wrapping characters must be ASCII + /// and must not be symbols or the padding character. + pub wrap: Wrap, + + /// How to translate characters when decoding + /// + /// The default is to not translate characters when decoding. The characters to translate from + /// must be ASCII and must not have already been assigned a semantics. 
The characters to + /// translate to must be ASCII and must have been assigned a semantics (symbol, padding + /// character, or ignored character). + pub translate: Translate, +} + +#[cfg(feature = "alloc")] +impl Default for Specification { + fn default() -> Self { + Self::new() + } +} + +impl Encoding { + fn sym(&self) -> &[u8; 256] { + self.0[0 .. 256].try_into().unwrap() + } + + fn val(&self) -> &[u8; 256] { + self.0[256 .. 512].try_into().unwrap() + } + + fn pad(&self) -> Option { + if self.0[512] < 128 { + Some(self.0[512]) + } else { + None + } + } + + fn ctb(&self) -> bool { + self.0[513] & 0x10 != 0 + } + + fn msb(&self) -> bool { + self.0[513] & 0x8 != 0 + } + + fn bit(&self) -> usize { + (self.0[513] & 0x7) as usize + } + + /// Minimum number of input and output blocks when encoding + fn block_len(&self) -> (usize, usize) { + let bit = self.bit(); + match self.wrap() { + Some((col, end)) => (col / dec(bit) * enc(bit), col + end.len()), + None => (enc(bit), dec(bit)), + } + } + + fn wrap(&self) -> Option<(usize, &[u8])> { + if self.0.len() <= 515 { + return None; + } + Some((self.0[514] as usize, &self.0[515 ..])) + } + + fn has_ignore(&self) -> bool { + self.0.len() >= 515 + } + + /// Returns the encoded length of an input of length `len` + /// + /// See [`encode_mut`] for when to use it. + /// + /// [`encode_mut`]: struct.Encoding.html#method.encode_mut + #[must_use] + pub fn encode_len(&self, len: usize) -> usize { + dispatch! { + let bit: usize = self.bit(); + let pad: Option = self.pad(); + let wrap: Option<(usize, &[u8])> = self.wrap(); + encode_wrap_len(bit, pad, wrap, len) + } + } + + /// Encodes `input` in `output` + /// + /// # Panics + /// + /// Panics if the `output` length does not match the result of [`encode_len`] for the `input` + /// length. + /// + /// # Examples + /// + /// ```rust + /// use data_encoding::BASE64; + /// # let mut buffer = vec![0; 100]; + /// let input = b"Hello world"; + /// let output = &mut buffer[0 .. 
BASE64.encode_len(input.len())]; + /// BASE64.encode_mut(input, output); + /// assert_eq!(output, b"SGVsbG8gd29ybGQ="); + /// ``` + /// + /// [`encode_len`]: struct.Encoding.html#method.encode_len + #[allow(clippy::cognitive_complexity)] + pub fn encode_mut(&self, input: &[u8], output: &mut [u8]) { + assert_eq!(output.len(), self.encode_len(input.len())); + dispatch! { + let bit: usize = self.bit(); + let msb: bool = self.msb(); + let pad: Option = self.pad(); + let wrap: Option<(usize, &[u8])> = self.wrap(); + encode_wrap_mut(bit, msb, self.sym(), pad, wrap, input, output) + } + } + + /// Appends the encoding of `input` to `output` + /// + /// # Examples + /// + /// ```rust + /// use data_encoding::BASE64; + /// # let mut buffer = vec![0; 100]; + /// let input = b"Hello world"; + /// let mut output = "Result: ".to_string(); + /// BASE64.encode_append(input, &mut output); + /// assert_eq!(output, "Result: SGVsbG8gd29ybGQ="); + /// ``` + #[cfg(feature = "alloc")] + pub fn encode_append(&self, input: &[u8], output: &mut String) { + let output = unsafe { output.as_mut_vec() }; + let output_len = output.len(); + output.resize(output_len + self.encode_len(input.len()), 0u8); + self.encode_mut(input, &mut output[output_len ..]); + } + + /// Returns an object to encode a fragmented input and append it to `output` + /// + /// See the documentation of [`Encoder`] for more details and examples. + #[cfg(feature = "alloc")] + pub fn new_encoder<'a>(&'a self, output: &'a mut String) -> Encoder<'a> { + Encoder::new(self, output) + } + + /// Writes the encoding of `input` to `output` + /// + /// This allocates a buffer of 1024 bytes on the stack. If you want to control the buffer size + /// and location, use [`Encoding::encode_write_buffer()`] instead. + /// + /// # Errors + /// + /// Returns an error when writing to the output fails. 
+ pub fn encode_write( + &self, input: &[u8], output: &mut impl core::fmt::Write, + ) -> core::fmt::Result { + self.encode_write_buffer(input, output, &mut [0; 1024]) + } + + /// Writes the encoding of `input` to `output` using a temporary `buffer` + /// + /// # Panics + /// + /// Panics if the buffer is shorter than 510 bytes. + /// + /// # Errors + /// + /// Returns an error when writing to the output fails. + pub fn encode_write_buffer( + &self, input: &[u8], output: &mut impl core::fmt::Write, buffer: &mut [u8], + ) -> core::fmt::Result { + assert!(510 <= buffer.len()); + let (enc, dec) = self.block_len(); + for input in input.chunks(buffer.len() / dec * enc) { + let buffer = &mut buffer[.. self.encode_len(input.len())]; + self.encode_mut(input, buffer); + output.write_str(unsafe { core::str::from_utf8_unchecked(buffer) })?; + } + Ok(()) + } + + /// Returns encoded `input` + /// + /// # Examples + /// + /// ```rust + /// use data_encoding::BASE64; + /// assert_eq!(BASE64.encode(b"Hello world"), "SGVsbG8gd29ybGQ="); + /// ``` + #[cfg(feature = "alloc")] + #[must_use] + pub fn encode(&self, input: &[u8]) -> String { + let mut output = vec![0u8; self.encode_len(input.len())]; + self.encode_mut(input, &mut output); + unsafe { String::from_utf8_unchecked(output) } + } + + /// Returns the decoded length of an input of length `len` + /// + /// See [`decode_mut`] for when to use it. + /// + /// # Errors + /// + /// Returns an error if `len` is invalid. The error kind is [`Length`] and the [position] is the + /// greatest valid input length. + /// + /// [`decode_mut`]: struct.Encoding.html#method.decode_mut + /// [`Length`]: enum.DecodeKind.html#variant.Length + /// [position]: struct.DecodeError.html#structfield.position + pub fn decode_len(&self, len: usize) -> Result { + let (ilen, olen) = dispatch! 
{ + let bit: usize = self.bit(); + let pad: bool = self.pad().is_some(); + decode_wrap_len(bit, pad, len) + }; + check!( + DecodeError { position: ilen, kind: DecodeKind::Length }, + self.has_ignore() || len == ilen + ); + Ok(olen) + } + + /// Decodes `input` in `output` + /// + /// Returns the length of the decoded output. This length may be smaller than the output length + /// if the input contained padding or ignored characters. The output bytes after the returned + /// length are not initialized and should not be read. + /// + /// # Panics + /// + /// Panics if the `output` length does not match the result of [`decode_len`] for the `input` + /// length. Also panics if `decode_len` fails for the `input` length. + /// + /// # Errors + /// + /// Returns an error if `input` is invalid. See [`decode`] for more details. The are two + /// differences though: + /// + /// - [`Length`] may be returned only if the encoding allows ignored characters, because + /// otherwise this is already checked by [`decode_len`]. + /// - The [`read`] first bytes of the input have been successfully decoded to the [`written`] + /// first bytes of the output. + /// + /// # Examples + /// + /// ```rust + /// use data_encoding::BASE64; + /// # let mut buffer = vec![0; 100]; + /// let input = b"SGVsbA==byB3b3JsZA=="; + /// let output = &mut buffer[0 .. BASE64.decode_len(input.len()).unwrap()]; + /// let len = BASE64.decode_mut(input, output).unwrap(); + /// assert_eq!(&output[0 .. len], b"Hello world"); + /// ``` + /// + /// [`decode_len`]: struct.Encoding.html#method.decode_len + /// [`decode`]: struct.Encoding.html#method.decode + /// [`Length`]: enum.DecodeKind.html#variant.Length + /// [`read`]: struct.DecodePartial.html#structfield.read + /// [`written`]: struct.DecodePartial.html#structfield.written + #[allow(clippy::cognitive_complexity)] + pub fn decode_mut(&self, input: &[u8], output: &mut [u8]) -> Result { + assert_eq!(Ok(output.len()), self.decode_len(input.len())); + dispatch! 
{ + let bit: usize = self.bit(); + let msb: bool = self.msb(); + let pad: bool = self.pad().is_some(); + let has_ignore: bool = self.has_ignore(); + decode_wrap_mut(bit, msb, self.ctb(), self.val(), pad, has_ignore, + input, output) + } + } + + /// Returns decoded `input` + /// + /// # Errors + /// + /// Returns an error if `input` is invalid. The error kind can be: + /// + /// - [`Length`] if the input length is invalid. The [position] is the greatest valid input + /// length. + /// - [`Symbol`] if the input contains an invalid character. The [position] is the first invalid + /// character. + /// - [`Trailing`] if the input has non-zero trailing bits. This is only possible if the + /// encoding checks trailing bits. The [position] is the first character containing non-zero + /// trailing bits. + /// - [`Padding`] if the input has an invalid padding length. This is only possible if the + /// encoding uses padding. The [position] is the first padding character of the first padding + /// of invalid length. 
+ /// + /// # Examples + /// + /// ```rust + /// use data_encoding::BASE64; + /// assert_eq!(BASE64.decode(b"SGVsbA==byB3b3JsZA==").unwrap(), b"Hello world"); + /// ``` + /// + /// [`Length`]: enum.DecodeKind.html#variant.Length + /// [`Symbol`]: enum.DecodeKind.html#variant.Symbol + /// [`Trailing`]: enum.DecodeKind.html#variant.Trailing + /// [`Padding`]: enum.DecodeKind.html#variant.Padding + /// [position]: struct.DecodeError.html#structfield.position + #[cfg(feature = "alloc")] + pub fn decode(&self, input: &[u8]) -> Result, DecodeError> { + let mut output = vec![0u8; self.decode_len(input.len())?]; + let len = self.decode_mut(input, &mut output).map_err(|partial| partial.error)?; + output.truncate(len); + Ok(output) + } + + /// Returns the bit-width + #[must_use] + pub fn bit_width(&self) -> usize { + self.bit() + } + + /// Returns whether the encoding is canonical + /// + /// An encoding is not canonical if one of the following conditions holds: + /// + /// - trailing bits are not checked + /// - padding is used + /// - characters are ignored + /// - characters are translated + #[must_use] + pub fn is_canonical(&self) -> bool { + if !self.ctb() { + return false; + } + let bit = self.bit(); + let sym = self.sym(); + let val = self.val(); + for i in 0 .. 256 { + if val[i] == INVALID { + continue; + } + if val[i] >= 1 << bit { + return false; + } + if sym[val[i] as usize] as usize != i { + return false; + } + } + true + } + + /// Returns the encoding specification + #[allow(clippy::missing_panics_doc)] // no panic + #[cfg(feature = "alloc")] + #[must_use] + pub fn specification(&self) -> Specification { + let mut specification = Specification::new(); + specification + .symbols + .push_str(core::str::from_utf8(&self.sym()[0 .. 
1 << self.bit()]).unwrap()); + specification.bit_order = + if self.msb() { MostSignificantFirst } else { LeastSignificantFirst }; + specification.check_trailing_bits = self.ctb(); + if let Some(pad) = self.pad() { + specification.padding = Some(pad as char); + } + for i in 0 .. 128u8 { + if self.val()[i as usize] != IGNORE { + continue; + } + specification.ignore.push(i as char); + } + if let Some((col, end)) = self.wrap() { + specification.wrap.width = col; + specification.wrap.separator = core::str::from_utf8(end).unwrap().to_owned(); + } + for i in 0 .. 128u8 { + let canonical = if self.val()[i as usize] < 1 << self.bit() { + self.sym()[self.val()[i as usize] as usize] + } else if self.val()[i as usize] == PADDING { + self.pad().unwrap() + } else { + continue; + }; + if i == canonical { + continue; + } + specification.translate.from.push(i as char); + specification.translate.to.push(canonical as char); + } + specification + } + + #[doc(hidden)] + #[must_use] + pub const fn internal_new(implementation: &'static [u8]) -> Encoding { + #[cfg(feature = "alloc")] + let encoding = Encoding(Cow::Borrowed(implementation)); + #[cfg(not(feature = "alloc"))] + let encoding = Encoding(implementation); + encoding + } + + #[doc(hidden)] + #[must_use] + pub fn internal_implementation(&self) -> &[u8] { + &self.0 + } +} + +/// Encodes fragmented input to an output +/// +/// It is equivalent to use an [`Encoder`] with multiple calls to [`Encoder::append()`] than to +/// first concatenate all the input and then use [`Encoding::encode_append()`]. In particular, this +/// function will not introduce padding or wrapping between inputs. +/// +/// # Examples +/// +/// ```rust +/// // This is a bit inconvenient but we can't take a long-term reference to data_encoding::BASE64 +/// // because it's a constant. We need to use a static which has an address instead. This will be +/// // fixed in version 3 of the library. 
+/// static BASE64: data_encoding::Encoding = data_encoding::BASE64; +/// let mut output = String::new(); +/// let mut encoder = BASE64.new_encoder(&mut output); +/// encoder.append(b"hello"); +/// encoder.append(b"world"); +/// encoder.finalize(); +/// assert_eq!(output, BASE64.encode(b"helloworld")); +/// ``` +#[derive(Debug)] +#[cfg(feature = "alloc")] +pub struct Encoder<'a> { + encoding: &'a Encoding, + output: &'a mut String, + buffer: [u8; 255], + length: u8, +} + +#[cfg(feature = "alloc")] +impl<'a> Drop for Encoder<'a> { + fn drop(&mut self) { + self.encoding.encode_append(&self.buffer[.. self.length as usize], self.output); + } +} + +#[cfg(feature = "alloc")] +impl<'a> Encoder<'a> { + fn new(encoding: &'a Encoding, output: &'a mut String) -> Self { + Encoder { encoding, output, buffer: [0; 255], length: 0 } + } + + /// Encodes the provided input fragment and appends the result to the output + pub fn append(&mut self, mut input: &[u8]) { + #[allow(clippy::cast_possible_truncation)] // no truncation + let max = self.encoding.block_len().0 as u8; + if self.length != 0 { + let len = self.length; + #[allow(clippy::cast_possible_truncation)] // no truncation + let add = core::cmp::min((max - len) as usize, input.len()) as u8; + self.buffer[len as usize ..][.. add as usize].copy_from_slice(&input[.. add as usize]); + self.length += add; + input = &input[add as usize ..]; + if self.length != max { + debug_assert!(self.length < max); + debug_assert!(input.is_empty()); + return; + } + self.encoding.encode_append(&self.buffer[.. max as usize], self.output); + self.length = 0; + } + let len = floor(input.len(), max as usize); + self.encoding.encode_append(&input[.. len], self.output); + input = &input[len ..]; + #[allow(clippy::cast_possible_truncation)] // no truncation + let len = input.len() as u8; + self.buffer[.. 
len as usize].copy_from_slice(input); + self.length = len; + } + + /// Makes sure all inputs have been encoded and appended to the output + /// + /// This is equivalent to dropping the encoder and required for correctness, otherwise some + /// encoded data may be missing at the end. + pub fn finalize(self) {} +} + +#[derive(Debug, Copy, Clone)] +#[cfg(feature = "alloc")] +enum SpecificationErrorImpl { + BadSize, + NotAscii, + Duplicate(u8), + ExtraPadding, + WrapLength, + WrapWidth(u8), + FromTo, + Undefined(u8), +} +#[cfg(feature = "alloc")] +use crate::SpecificationErrorImpl::*; + +/// Specification error +#[derive(Debug, Copy, Clone)] +#[cfg(feature = "alloc")] +pub struct SpecificationError(SpecificationErrorImpl); + +#[cfg(feature = "alloc")] +impl core::fmt::Display for SpecificationError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self.0 { + BadSize => write!(f, "invalid number of symbols"), + NotAscii => write!(f, "non-ascii character"), + Duplicate(c) => write!(f, "{:?} has conflicting definitions", c as char), + ExtraPadding => write!(f, "unnecessary padding"), + WrapLength => write!(f, "invalid wrap width or separator length"), + WrapWidth(x) => write!(f, "wrap width not a multiple of {}", x), + FromTo => write!(f, "translate from/to length mismatch"), + Undefined(c) => write!(f, "{:?} is undefined", c as char), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for SpecificationError { + fn description(&self) -> &str { + match self.0 { + BadSize => "invalid number of symbols", + NotAscii => "non-ascii character", + Duplicate(_) => "conflicting definitions", + ExtraPadding => "unnecessary padding", + WrapLength => "invalid wrap width or separator length", + WrapWidth(_) => "wrap width not a multiple", + FromTo => "translate from/to length mismatch", + Undefined(_) => "undefined character", + } + } +} + +#[cfg(feature = "alloc")] +impl Specification { + /// Returns a default specification + #[must_use] + 
pub fn new() -> Specification { + Specification { + symbols: String::new(), + bit_order: MostSignificantFirst, + check_trailing_bits: true, + padding: None, + ignore: String::new(), + wrap: Wrap { width: 0, separator: String::new() }, + translate: Translate { from: String::new(), to: String::new() }, + } + } + + /// Returns the specified encoding + /// + /// # Errors + /// + /// Returns an error if the specification is invalid. + pub fn encoding(&self) -> Result { + let symbols = self.symbols.as_bytes(); + let bit: u8 = match symbols.len() { + 2 => 1, + 4 => 2, + 8 => 3, + 16 => 4, + 32 => 5, + 64 => 6, + _ => return Err(SpecificationError(BadSize)), + }; + let mut values = [INVALID; 128]; + let set = |v: &mut [u8; 128], i: u8, x: u8| { + check!(SpecificationError(NotAscii), i < 128); + if v[i as usize] == x { + return Ok(()); + } + check!(SpecificationError(Duplicate(i)), v[i as usize] == INVALID); + v[i as usize] = x; + Ok(()) + }; + for (v, symbols) in symbols.iter().enumerate() { + #[allow(clippy::cast_possible_truncation)] // no truncation + set(&mut values, *symbols, v as u8)?; + } + let msb = self.bit_order == MostSignificantFirst; + let ctb = self.check_trailing_bits || 8 % bit == 0; + let pad = match self.padding { + None => None, + Some(pad) => { + check!(SpecificationError(ExtraPadding), 8 % bit != 0); + check!(SpecificationError(NotAscii), pad.len_utf8() == 1); + set(&mut values, pad as u8, PADDING)?; + Some(pad as u8) + } + }; + for i in self.ignore.bytes() { + set(&mut values, i, IGNORE)?; + } + let wrap = if self.wrap.separator.is_empty() || self.wrap.width == 0 { + None + } else { + let col = self.wrap.width; + let end = self.wrap.separator.as_bytes(); + check!(SpecificationError(WrapLength), col < 256 && end.len() < 256); + #[allow(clippy::cast_possible_truncation)] // no truncation + let col = col as u8; + #[allow(clippy::cast_possible_truncation)] // no truncation + let dec = dec(bit as usize) as u8; + check!(SpecificationError(WrapWidth(dec)), 
col % dec == 0); + for &i in end { + set(&mut values, i, IGNORE)?; + } + Some((col, end)) + }; + let from = self.translate.from.as_bytes(); + let to = self.translate.to.as_bytes(); + check!(SpecificationError(FromTo), from.len() == to.len()); + for i in 0 .. from.len() { + check!(SpecificationError(NotAscii), to[i] < 128); + let v = values[to[i] as usize]; + check!(SpecificationError(Undefined(to[i])), v != INVALID); + set(&mut values, from[i], v)?; + } + let mut encoding = Vec::new(); + for _ in 0 .. 256 / symbols.len() { + encoding.extend_from_slice(symbols); + } + encoding.extend_from_slice(&values); + encoding.extend_from_slice(&[INVALID; 128]); + match pad { + None => encoding.push(INVALID), + Some(pad) => encoding.push(pad), + } + encoding.push(bit); + if msb { + encoding[513] |= 0x08; + } + if ctb { + encoding[513] |= 0x10; + } + if let Some((col, end)) = wrap { + encoding.push(col); + encoding.extend_from_slice(end); + } else if values.contains(&IGNORE) { + encoding.push(0); + } + Ok(Encoding(Cow::Owned(encoding))) + } +} + +/// Lowercase hexadecimal encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, HEXLOWER}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789abcdef"); +/// assert_eq!(HEXLOWER, spec.encoding().unwrap()); +/// ``` +/// +/// # Examples +/// +/// ```rust +/// use data_encoding::HEXLOWER; +/// let deadbeef = vec![0xde, 0xad, 0xbe, 0xef]; +/// assert_eq!(HEXLOWER.decode(b"deadbeef").unwrap(), deadbeef); +/// assert_eq!(HEXLOWER.encode(&deadbeef), "deadbeef"); +/// ``` +pub const HEXLOWER: Encoding = Encoding::internal_new(HEXLOWER_IMPL); +const HEXLOWER_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, + 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 
51, + 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, + 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, + 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, + 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 97, 98, 99, 100, 101, 102, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 1, 2, + 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 10, 11, 12, 13, 14, 15, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 28, +]; + +/// Lowercase hexadecimal encoding with case-insensitive decoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, HEXLOWER_PERMISSIVE}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789abcdef"); +/// spec.translate.from.push_str("ABCDEF"); +/// spec.translate.to.push_str("abcdef"); +/// assert_eq!(HEXLOWER_PERMISSIVE, spec.encoding().unwrap()); +/// ``` +/// +/// # Examples +/// +/// ```rust +/// use data_encoding::HEXLOWER_PERMISSIVE; +/// let deadbeef = vec![0xde, 0xad, 0xbe, 0xef]; +/// assert_eq!(HEXLOWER_PERMISSIVE.decode(b"DeadBeef").unwrap(), deadbeef); +/// assert_eq!(HEXLOWER_PERMISSIVE.encode(&deadbeef), "deadbeef"); +/// ``` +/// +/// You can also define a shorter name: +/// +/// ```rust +/// use data_encoding::{Encoding, HEXLOWER_PERMISSIVE}; +/// const HEX: Encoding = HEXLOWER_PERMISSIVE; +/// ``` +pub const HEXLOWER_PERMISSIVE: Encoding = Encoding::internal_new(HEXLOWER_PERMISSIVE_IMPL); +const HEXLOWER_PERMISSIVE_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, + 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, + 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, + 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 
53, 54, 55, 56, 57, 97, + 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 97, 98, 99, 100, 101, 102, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 1, 2, + 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 10, 11, 12, 13, 14, 15, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 10, 11, 12, 13, 14, 15, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 28, +]; + +/// Uppercase hexadecimal encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, HEXUPPER}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789ABCDEF"); +/// assert_eq!(HEXUPPER, spec.encoding().unwrap()); +/// ``` +/// +/// It is compliant with [RFC4648] and known as "base16" or "hex". 
+/// +/// # Examples +/// +/// ```rust +/// use data_encoding::HEXUPPER; +/// let deadbeef = vec![0xde, 0xad, 0xbe, 0xef]; +/// assert_eq!(HEXUPPER.decode(b"DEADBEEF").unwrap(), deadbeef); +/// assert_eq!(HEXUPPER.encode(&deadbeef), "DEADBEEF"); +/// ``` +/// +/// [RFC4648]: https://tools.ietf.org/html/rfc4648#section-8 +pub const HEXUPPER: Encoding = Encoding::internal_new(HEXUPPER_IMPL); +const HEXUPPER_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 10, 11, + 12, 13, 14, 15, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 28, +]; + +/// Uppercase hexadecimal encoding with case-insensitive decoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, HEXUPPER_PERMISSIVE}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789ABCDEF"); +/// spec.translate.from.push_str("abcdef"); +/// spec.translate.to.push_str("ABCDEF"); +/// assert_eq!(HEXUPPER_PERMISSIVE, spec.encoding().unwrap()); +/// ``` +/// +/// # Examples +/// +/// ```rust +/// use data_encoding::HEXUPPER_PERMISSIVE; +/// let deadbeef = vec![0xde, 0xad, 0xbe, 0xef]; +/// assert_eq!(HEXUPPER_PERMISSIVE.decode(b"DeadBeef").unwrap(), deadbeef); +/// assert_eq!(HEXUPPER_PERMISSIVE.encode(&deadbeef), "DEADBEEF"); +/// ``` +pub const HEXUPPER_PERMISSIVE: Encoding = Encoding::internal_new(HEXUPPER_PERMISSIVE_IMPL); +const HEXUPPER_PERMISSIVE_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 
55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 10, 11, + 12, 13, 14, 15, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 10, 11, 12, 13, 14, 15, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 28, +]; + +/// Padded base32 
encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE32}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"); +/// spec.padding = Some('='); +/// assert_eq!(BASE32, spec.encoding().unwrap()); +/// ``` +/// +/// It conforms to [RFC4648]. +/// +/// [RFC4648]: https://tools.ietf.org/html/rfc4648#section-6 +pub const BASE32: Encoding = Encoding::internal_new(BASE32_IMPL); +const BASE32_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 26, 27, 28, 29, 30, 31, 128, 128, 128, 128, 128, 130, 128, 128, + 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 128, 
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 61, 29, +]; + +/// Unpadded base32 encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE32_NOPAD}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"); +/// assert_eq!(BASE32_NOPAD, spec.encoding().unwrap()); +/// ``` +pub const BASE32_NOPAD: Encoding = Encoding::internal_new(BASE32_NOPAD_IMPL); +const BASE32_NOPAD_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 
78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 50, 51, 52, 53, 54, 55, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 50, 51, 52, 53, 54, 55, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 26, 27, 28, 29, 30, 31, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 29, +]; + +/// Padded base32hex encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE32HEX}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789ABCDEFGHIJKLMNOPQRSTUV"); +/// spec.padding = Some('='); +/// assert_eq!(BASE32HEX, spec.encoding().unwrap()); +/// ``` +/// +/// It conforms to 
[RFC4648]. +/// +/// [RFC4648]: https://tools.ietf.org/html/rfc4648#section-7 +pub const BASE32HEX: Encoding = Encoding::internal_new(BASE32HEX_IMPL); +const BASE32HEX_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 130, 128, 128, 128, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 61, 29, +]; + +/// Unpadded base32hex encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE32HEX_NOPAD}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789ABCDEFGHIJKLMNOPQRSTUV"); +/// assert_eq!(BASE32HEX_NOPAD, spec.encoding().unwrap()); +/// ``` +pub const BASE32HEX_NOPAD: Encoding = Encoding::internal_new(BASE32HEX_NOPAD_IMPL); +const BASE32HEX_NOPAD_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 
83, 84, 85, 86, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 29, +]; + +/// DNSSEC base32 encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE32_DNSSEC}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789abcdefghijklmnopqrstuv"); +/// spec.translate.from.push_str("ABCDEFGHIJKLMNOPQRSTUV"); +/// spec.translate.to.push_str("abcdefghijklmnopqrstuv"); +/// assert_eq!(BASE32_DNSSEC, spec.encoding().unwrap()); +/// ``` +/// +/// It conforms to [RFC5155]: +/// +/// - It uses a base32 extended hex alphabet. +/// - It is case-insensitive when decoding and uses lowercase when encoding. +/// - It does not use padding. 
+/// +/// [RFC5155]: https://tools.ietf.org/html/rfc5155 +pub const BASE32_DNSSEC: Encoding = Encoding::internal_new(BASE32_DNSSEC_IMPL); +const BASE32_DNSSEC_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 29, +]; + +#[allow(clippy::doc_markdown)] +/// DNSCurve base32 encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{BitOrder, Specification, BASE32_DNSCURVE}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("0123456789bcdfghjklmnpqrstuvwxyz"); +/// spec.bit_order = BitOrder::LeastSignificantFirst; +/// spec.translate.from.push_str("BCDFGHJKLMNPQRSTUVWXYZ"); +/// spec.translate.to.push_str("bcdfghjklmnpqrstuvwxyz"); +/// assert_eq!(BASE32_DNSCURVE, spec.encoding().unwrap()); +/// ``` +/// +/// It conforms to [DNSCurve]. 
+/// +/// [DNSCurve]: https://dnscurve.org/in-implement.html +pub const BASE32_DNSCURVE: Encoding = Encoding::internal_new(BASE32_DNSCURVE_IMPL); +const BASE32_DNSCURVE_IMPL: &[u8] = &[ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 98, 99, 100, 102, 103, 104, 106, 107, 108, 109, 110, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 98, 99, 100, 102, 103, 104, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 98, 99, 100, 102, 103, 104, 106, 107, + 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 98, 99, 100, 102, 103, 104, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 98, 99, 100, 102, 103, + 104, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 98, 99, 100, 102, 103, 104, 106, 107, 108, 109, 110, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 98, 99, + 100, 102, 103, 104, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 98, 99, 100, 102, 103, 104, 106, 107, 108, 109, + 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 128, 10, 11, + 12, 128, 13, 14, 15, 128, 16, 17, 18, 19, 20, 128, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 128, 128, 128, 128, 128, 128, 128, 10, 11, 12, 128, 13, 14, 15, 128, 16, 17, 18, 19, 20, 128, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 21, +]; + +/// Padded base64 encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE64}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); +/// spec.padding = Some('='); +/// assert_eq!(BASE64, spec.encoding().unwrap()); +/// ``` +/// +/// It conforms to [RFC4648]. 
+/// +/// [RFC4648]: https://tools.ietf.org/html/rfc4648#section-4 +pub const BASE64: Encoding = Encoding::internal_new(BASE64_IMPL); +const BASE64_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 62, 128, 128, 128, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 128, 128, 128, 130, 128, + 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 128, 128, 128, 128, 128, 128, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 61, 30, +]; + +/// Unpadded base64 encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE64_NOPAD}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); +/// assert_eq!(BASE64_NOPAD, spec.encoding().unwrap()); +/// ``` +pub const BASE64_NOPAD: Encoding = Encoding::internal_new(BASE64_NOPAD_IMPL); +const BASE64_NOPAD_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, + 99, 
100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 62, 128, 128, 128, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 128, 128, 128, 128, 128, + 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 128, 128, 128, 128, 128, 128, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 30, +]; + +/// MIME base64 encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE64_MIME}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); +/// spec.padding = Some('='); +/// spec.wrap.width = 76; +/// spec.wrap.separator.push_str("\r\n"); +/// assert_eq!(BASE64_MIME, spec.encoding().unwrap()); +/// ``` +/// +/// It does not exactly conform to [RFC2045] because it does not print the header +/// and does not ignore all characters. 
+/// +/// [RFC2045]: https://tools.ietf.org/html/rfc2045 +pub const BASE64_MIME: Encoding = Encoding::internal_new(BASE64_MIME_IMPL); +const BASE64_MIME_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 129, 128, 128, 129, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 62, 128, 128, 128, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 128, 128, 128, 130, 128, + 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 128, 128, 128, 128, 128, 128, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 61, 30, 76, 13, 10, +]; + +/// MIME base64 encoding without trailing bits check +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE64_MIME_PERMISSIVE}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); +/// spec.padding = Some('='); +/// spec.wrap.width = 76; +/// spec.wrap.separator.push_str("\r\n"); +/// spec.check_trailing_bits = false; +/// assert_eq!(BASE64_MIME_PERMISSIVE, spec.encoding().unwrap()); +/// ``` +/// +/// It does not exactly conform to [RFC2045] because it does not print the header +/// and does not ignore all characters. 
+/// +/// [RFC2045]: https://tools.ietf.org/html/rfc2045 +pub const BASE64_MIME_PERMISSIVE: Encoding = Encoding::internal_new(BASE64_MIME_PERMISSIVE_IMPL); +const BASE64_MIME_PERMISSIVE_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 43, 47, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 129, 128, 128, 129, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 62, 128, 128, 128, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 128, 128, 128, 130, 128, + 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 128, 128, 128, 128, 128, 128, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 61, 14, 76, 13, 10, +]; + +/// Padded base64url encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE64URL}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"); +/// spec.padding = Some('='); +/// assert_eq!(BASE64URL, spec.encoding().unwrap()); +/// ``` +/// +/// It conforms to [RFC4648]. 
+/// +/// [RFC4648]: https://tools.ietf.org/html/rfc4648#section-5 +pub const BASE64URL: Encoding = Encoding::internal_new(BASE64URL_IMPL); +const BASE64URL_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 62, 128, 128, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 128, 128, 128, 130, 128, + 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 128, 128, 128, 128, 63, 128, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 61, 30, +]; + +/// Unpadded base64url encoding +/// +/// This encoding is a static version of: +/// +/// ```rust +/// # use data_encoding::{Specification, BASE64URL_NOPAD}; +/// let mut spec = Specification::new(); +/// spec.symbols.push_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"); +/// assert_eq!(BASE64URL_NOPAD, spec.encoding().unwrap()); +/// ``` +pub const BASE64URL_NOPAD: Encoding = Encoding::internal_new(BASE64URL_NOPAD_IMPL); +const BASE64URL_NOPAD_IMPL: &[u8] = &[ + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 
87, 88, 89, 90, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 45, 95, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 62, 128, 128, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 128, 128, 128, 128, 128, + 128, 128, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 128, 128, 128, 128, 63, 128, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 30, +]; diff --git a/.cargo-vendor/event-listener-strategy/.cargo-checksum.json b/.cargo-vendor/event-listener-strategy/.cargo-checksum.json new file mode 100644 index 0000000000..9cf10fcb20 --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"e357c996f2c3e270813a90e0b49f3f6846cb54a8f2cbc80bc00e39548fc77274","Cargo.toml":"4520087001d3f66c104087efbd70f54c17a3105c4a38d61051cc0e6e056490a0","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"df6555ffb5946bbaf931b71be241ec638b54a614ffd808185407b5071f348b28","src/lib.rs":"698068abda112385a3af26c84db48bc6bdbfd0cfed273c9b68dc37738d5fbd75","tests/easy_wrapper.rs":"065dd007b309b8663ea836308998ac578eb3e828ec8bdcee669dd162d50d78e7"},"package":"0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1"} \ No newline at end of file diff --git a/.cargo-vendor/event-listener-strategy/CHANGELOG.md b/.cargo-vendor/event-listener-strategy/CHANGELOG.md new file mode 100644 index 0000000000..8cc9012db6 --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/CHANGELOG.md @@ -0,0 +1,30 @@ +# Version 0.5.2 + +- Re-export the `event-listener` crate. (#20) + +# Version 0.5.1 + +- Fix the `repository` field in `Cargo.toml` to point to the correct repository. (#17) + +# Version 0.5.0 + +- **Breaking:** Bump `event-listener` to v5.0.0. (#12) +- Bump MSRV to 1.60. (#14) +- Make `NonBlocking` `Send` and `Sync`. (#15) + +# Version 0.4.0 + +- **Breaking:** Bump `event-listener` to v4.0.0. (#10) + +# Version 0.3.0 + +- **Breaking:** Remove an unneeded lifetime from the public API. (#6) + +# Version 0.2.0 + +- **Breaking:** Add support for WASM targets by disabling `wait()` on them. 
(#3) + +# Version 0.1.0 + +- Initial version + diff --git a/.cargo-vendor/event-listener-strategy/Cargo.toml b/.cargo-vendor/event-listener-strategy/Cargo.toml new file mode 100644 index 0000000000..aa9f2bef9d --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/Cargo.toml @@ -0,0 +1,57 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "event-listener-strategy" +version = "0.5.2" +authors = ["John Nunley "] +exclude = ["/.*"] +description = "Block or poll on event_listener easily" +readme = "README.md" +keywords = [ + "condvar", + "envcount", + "wake", + "blocking", + "park", +] +categories = [ + "asynchronous", + "concurrency", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/event-listener-strategy" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[dependencies.event-listener] +version = "5.0.0" +default-features = false + +[dependencies.pin-project-lite] +version = "0.2.12" + +[dev-dependencies.futures-lite] +version = "2.0.0" + +[features] +default = ["std"] +std = ["event-listener/std"] + +[target."cfg(target_family = \"wasm\")".dev-dependencies.wasm-bindgen-test] +version = "0.3.37" diff --git a/.cargo-vendor/event-listener-strategy/LICENSE-APACHE b/.cargo-vendor/event-listener-strategy/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + 
http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo-vendor/event-listener-strategy/LICENSE-MIT b/.cargo-vendor/event-listener-strategy/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo-vendor/event-listener-strategy/README.md b/.cargo-vendor/event-listener-strategy/README.md new file mode 100644 index 0000000000..ab0ae05042 --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/README.md @@ -0,0 +1,84 @@ +# event-listener-strategy + +[![Build](https://github.com/smol-rs/event-listener-strategy/workflows/CI/badge.svg)]( +https://github.com/smol-rs/event-listener-strategy/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/event-listener-strategy) +[![Cargo](https://img.shields.io/crates/v/event-listener-strategy.svg)]( +https://crates.io/crates/event-listener-strategy) +[![Documentation](https://docs.rs/event-listener-strategy/badge.svg)]( +https://docs.rs/event-listener-strategy) + +A strategy for using the [`event-listener`] crate in both blocking and non-blocking contexts. + +One of the stand-out features of the [`event-listener`] crate is the ability to use it in both +asynchronous and synchronous contexts. However, sometimes using it like this causes a lot of +boilerplate to be duplicated. This crate aims to reduce that boilerplate by providing an `EventListenerFuture` trait that implements both blocking and non-blocking functionality. 
+ +[`event-listener`]: https://docs.rs/event-listener + +# Examples + +``` +use event_listener::{Event, EventListener}; +use event_listener_strategy::{EventListenerFuture, FutureWrapper, Strategy}; + +use std::pin::Pin; +use std::task::Poll; +use std::thread; +use std::sync::Arc; + +// A future that waits three seconds for an event to be fired. +fn wait_three_seconds() -> WaitThreeSeconds { + let event = Event::new(); + let listener = event.listen(); + + thread::spawn(move || { + thread::sleep(std::time::Duration::from_secs(3)); + event.notify(1); + }); + + WaitThreeSeconds { listener } +} + +struct WaitThreeSeconds { + listener: Pin>, +} + +impl EventListenerFuture for WaitThreeSeconds { + type Output = (); + + fn poll_with_strategy<'a, S: Strategy<'a>>( + mut self: Pin<&'a mut Self>, + strategy: &mut S, + context: &mut S::Context, + ) -> Poll { + strategy.poll(self.listener.as_mut(), context) + } +} + +// Use the future in a blocking context. +let future = wait_three_seconds(); +future.wait(); + +// Use the future in a non-blocking context. +futures_lite::future::block_on(async { + let future = FutureWrapper::new(wait_three_seconds()); + future.await; +}); +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/.cargo-vendor/event-listener-strategy/src/lib.rs b/.cargo-vendor/event-listener-strategy/src/lib.rs new file mode 100644 index 0000000000..e36210a205 --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/src/lib.rs @@ -0,0 +1,549 @@ +// SPDX-Licenser-Identifier: MIT OR Apache-2.0 +//! A strategy for using the [`event-listener`] crate in both blocking and non-blocking contexts. +//! +//! One of the stand-out features of the [`event-listener`] crate is the ability to use it in both +//! asynchronous and synchronous contexts. However, sometimes using it like this causes a lot of +//! boilerplate to be duplicated. This crate aims to reduce that boilerplate by providing an +//! [`EventListenerFuture`] trait that implements both blocking and non-blocking functionality. +//! +//! # Examples +//! +//! ``` +//! use event_listener_strategy::{ +//! event_listener::{Event, EventListener}, +//! EventListenerFuture, FutureWrapper, Strategy +//! }; +//! +//! use std::pin::Pin; +//! use std::task::Poll; +//! use std::thread; +//! use std::sync::Arc; +//! +//! // A future that waits three seconds for an event to be fired. +//! fn wait_three_seconds() -> WaitThreeSeconds { +//! let event = Event::new(); +//! let listener = event.listen(); +//! +//! thread::spawn(move || { +//! thread::sleep(std::time::Duration::from_secs(3)); +//! event.notify(1); +//! }); +//! +//! WaitThreeSeconds { listener: Some(listener) } +//! } +//! +//! struct WaitThreeSeconds { +//! listener: Option, +//! } +//! +//! impl EventListenerFuture for WaitThreeSeconds { +//! type Output = (); +//! +//! fn poll_with_strategy<'a, S: Strategy<'a>>( +//! mut self: Pin<&mut Self>, +//! strategy: &mut S, +//! context: &mut S::Context, +//! ) -> Poll { +//! strategy.poll(&mut self.listener, context) +//! } +//! } +//! +//! // Use the future in a blocking context. +//! let future = wait_three_seconds(); +//! future.wait(); +//! +//! // Use the future in a non-blocking context. +//! 
futures_lite::future::block_on(async { +//! let future = FutureWrapper::new(wait_three_seconds()); +//! future.await; +//! }); +//! ``` + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![forbid(future_incompatible, missing_docs)] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +use core::future::Future; +use core::marker::PhantomData; +use core::pin::Pin; +use core::task::{Context, Poll}; + +use event_listener::{EventListener, Listener}; + +#[doc(hidden)] +pub use pin_project_lite::pin_project; + +#[doc(no_inline)] +pub use event_listener; + +/// A wrapper around an [`EventListenerFuture`] that can be easily exported for use. +/// +/// This type implements [`Future`], has a `_new()` constructor, and a `wait()` method +/// that uses the [`Blocking`] strategy to poll the future until it is ready. +/// +/// # Examples +/// +/// ``` +/// mod my_future { +/// use event_listener_strategy::{easy_wrapper, EventListenerFuture, Strategy}; +/// use std::pin::Pin; +/// use std::task::Poll; +/// +/// struct MyFuture; +/// +/// impl EventListenerFuture for MyFuture { +/// type Output = (); +/// +/// fn poll_with_strategy<'a, S: Strategy<'a>>( +/// self: Pin<&mut Self>, +/// strategy: &mut S, +/// context: &mut S::Context, +/// ) -> Poll { +/// /* ... */ +/// # Poll::Ready(()) +/// } +/// } +/// +/// easy_wrapper! { +/// /// A future that does something. +/// pub struct MyFutureWrapper(MyFuture => ()); +/// /// Wait for it. +/// pub wait(); +/// } +/// +/// impl MyFutureWrapper { +/// /// Create a new instance of the future. +/// pub fn new() -> Self { +/// Self::_new(MyFuture) +/// } +/// } +/// } +/// +/// use my_future::MyFutureWrapper; +/// +/// // Use the future in a blocking context. 
+/// let future = MyFutureWrapper::new(); +/// future.wait(); +/// +/// // Use the future in a non-blocking context. +/// futures_lite::future::block_on(async { +/// let future = MyFutureWrapper::new(); +/// future.await; +/// }); +/// ``` +#[macro_export] +macro_rules! easy_wrapper { + ( + $(#[$meta:meta])* + $vis:vis struct $name:ident + + $(< + $( $lifetime:lifetime $(: $lifetime_bound:lifetime)? ),* $(,)? + $( $generics:ident + $(: $generics_bound:path)? + $(: ?$generics_unsized_bound:path)? + $(: $generics_lifetime_bound:lifetime)? + $(= $generics_default:ty)? + ),* $(,)? + >)? + + ($inner:ty => $output:ty) + + $(where + $( $where_clause_ty:ty + $(: $where_clause_bound:path)? + $(: ?$where_clause_unsized_bound:path)? + $(: $where_clause_lifetime_bound:lifetime)? + ),* $(,)? + )? + + ; + + $(#[$wait_meta:meta])* + $wait_vis: vis wait(); + ) => { + $crate::pin_project! { + $(#[$meta])* + $vis struct $name $(< + $( $lifetime $(: $lifetime_bound)? ),* + $( $generics + $(: $generics_bound)? + $(: ?$generics_unsized_bound)? + $(: $generics_lifetime_bound)? + $(= $generics_default)? + ),* + >)? $( + where + $( $where_clause_ty + $(: $where_clause_bound)? + $(: ?$where_clause_unsized_bound)? + $(: $where_clause_lifetime_bound)? + ),* + )? { + #[pin] + _inner: $crate::FutureWrapper<$inner> + } + } + + impl $(< + $( $lifetime $(: $lifetime_bound)? ,)* + $( $generics + $(: $generics_bound)? + $(: ?$generics_unsized_bound)? + $(: $generics_lifetime_bound)? + $(= $generics_default)? + ),* + >)? $name $(< + $( $lifetime ,)* + $( $generics ),* + >)? $( + where + $( $where_clause_ty + $(: $where_clause_bound)? + $(: ?$where_clause_unsized_bound)? + $(: $where_clause_lifetime_bound)? + ),* + )? 
{ + #[inline] + fn _new(inner: $inner) -> Self { + Self { + _inner: $crate::FutureWrapper::new(inner) + } + } + + $(#[$wait_meta])* + #[inline] + $wait_vis fn wait(self) -> $output { + use $crate::EventListenerFuture; + self._inner.into_inner().wait() + } + + pub(crate) fn poll_with_strategy<'__strategy, __S: $crate::Strategy<'__strategy>>( + self: ::core::pin::Pin<&mut Self>, + strategy: &mut __S, + context: &mut __S::Context, + ) -> ::core::task::Poll<$output> { + self.project()._inner.get_pin_mut().poll_with_strategy(strategy, context) + } + } + + impl $(< + $( $lifetime $(: $lifetime_bound)? ,)* + $( $generics + $(: $generics_bound)? + $(: ?$generics_unsized_bound)? + $(: $generics_lifetime_bound)? + $(= $generics_default)? + ),* + >)? ::core::future::Future for $name $( + < + $( $lifetime ,)* + $( $generics ),* + > + )? $( + where + $( $where_clause_ty + $(: $where_clause_bound)? + $(: ?$where_clause_unsized_bound)? + $(: $where_clause_lifetime_bound)? + ),* + )? { + type Output = $output; + + #[inline] + fn poll( + self: ::core::pin::Pin<&mut Self>, + context: &mut ::core::task::Context<'_> + ) -> ::core::task::Poll { + self.project()._inner.poll(context) + } + } + }; +} + +/// A future that runs using the [`event-listener`] crate. +/// +/// This is similar to the [`Future`] trait from libstd, with one notable difference: it takes +/// a strategy that tells it whether to operate in a blocking or non-blocking context. The +/// `poll_with_strategy` method is the equivalent of the `poll` method in this regard; it uses +/// the [`Strategy`] trait to determine how to poll the future. +/// +/// From here, there are two additional things one can do with this trait: +/// +/// - The `wait` method, which uses the [`Blocking`] strategy to poll the future until it is +/// ready, blocking the current thread until it is. +/// - The [`FutureWrapper`] type, which implements [`Future`] and uses the [`NonBlocking`] +/// strategy to poll the future. 
+pub trait EventListenerFuture { + /// The type of value produced on completion. + type Output; + + /// Poll the future using the provided strategy. + /// + /// This function should use the `Strategy::poll` method to poll the future, and proceed + /// based on the result. + fn poll_with_strategy<'a, S: Strategy<'a>>( + self: Pin<&mut Self>, + strategy: &mut S, + context: &mut S::Context, + ) -> Poll; + + /// Wait for the future to complete, blocking the current thread. + /// + /// This function uses the [`Blocking`] strategy to poll the future until it is ready. + /// + /// The future should only return `Pending` if `Strategy::poll` returns error. Otherwise, + /// this function polls the future in a hot loop. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + #[cfg_attr(docsrs, doc(all(feature = "std", not(target_family = "wasm"))))] + fn wait(mut self) -> Self::Output + where + Self: Sized, + { + // SAFETY: `self`/`this` is not moved out after this. + let mut this = unsafe { Pin::new_unchecked(&mut self) }; + + loop { + if let Poll::Ready(res) = this + .as_mut() + .poll_with_strategy(&mut Blocking::default(), &mut ()) + { + return res; + } + } + } +} + +pin_project_lite::pin_project! { + /// A wrapper around an [`EventListenerFuture`] that implements [`Future`]. + /// + /// [`Future`]: core::future::Future + #[derive(Debug, Clone)] + pub struct FutureWrapper { + #[pin] + inner: F, + } +} + +impl FutureWrapper { + /// Create a new `FutureWrapper` from the provided future. + #[inline] + pub fn new(inner: F) -> Self { + Self { inner } + } + + /// Consume the `FutureWrapper`, returning the inner future. + #[inline] + pub fn into_inner(self) -> F { + self.inner + } +} + +impl FutureWrapper { + /// Get a reference to the inner future. + #[inline] + pub fn get_ref(&self) -> &F { + &self.inner + } + + /// Get a mutable reference to the inner future. 
+ #[inline] + pub fn get_mut(&mut self) -> &mut F { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner future. + #[inline] + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut F> { + self.project().inner + } + + /// Get a pinned reference to the inner future. + #[inline] + pub fn get_pin_ref(self: Pin<&Self>) -> Pin<&F> { + self.project_ref().inner + } +} + +impl From for FutureWrapper { + #[inline] + fn from(inner: F) -> Self { + Self { inner } + } +} + +impl Future for FutureWrapper { + type Output = F::Output; + + #[inline] + fn poll(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll { + self.project() + .inner + .poll_with_strategy(&mut NonBlocking::default(), context) + } +} + +/// A strategy for polling an [`EventListenerFuture`] or an [`EventListener`]. +/// +/// This trait is used by the [`EventListenerFuture::poll_with_strategy`] method to determine +/// how to poll the future. It can also be used standalone, by calling the [`Strategy::wait`] +/// method. +/// +/// [`EventListenerFuture::poll_with_strategy`]: EventListenerFuture::poll_with_strategy +/// [`EventListener`]: event_listener::EventListener +/// +/// # Examples +/// +/// ``` +/// use event_listener_strategy::{ +/// event_listener::{Event, EventListener}, +/// EventListenerFuture, Strategy, Blocking, NonBlocking +/// }; +/// use std::pin::Pin; +/// +/// async fn wait_on<'a, S: Strategy<'a>>(evl: EventListener, strategy: &mut S) { +/// strategy.wait(evl).await; +/// } +/// +/// # futures_lite::future::block_on(async { +/// // Block on the future. +/// let ev = Event::new(); +/// let listener = ev.listen(); +/// ev.notify(1); +/// +/// wait_on(listener, &mut Blocking::default()).await; +/// +/// // Poll the future. +/// let listener = ev.listen(); +/// ev.notify(1); +/// +/// wait_on(listener, &mut NonBlocking::default()).await; +/// # }); +/// ``` +pub trait Strategy<'a> { + /// The context needed to poll the future. 
+ type Context: ?Sized; + + /// The future returned by the [`Strategy::wait`] method. + type Future: Future + 'a; + + /// Poll the event listener until it is ready. + fn poll + Unpin>( + &mut self, + event_listener: &mut Option, + context: &mut Self::Context, + ) -> Poll; + + /// Wait for the event listener to become ready. + fn wait(&mut self, evl: EventListener) -> Self::Future; +} + +/// A strategy that uses polling to efficiently wait for an event. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +pub struct NonBlocking<'a> { + /// The type `&'a mut &'a T` is invariant over `'a`, like `Context` is. + /// + /// We used to just use `Context` here, but then `Context` became `!Send` + /// and `!Sync`, making all of the futures that use this type `!Send` and + /// `!Sync` as well. So we just take the lifetime invariance and none of + /// the downsides. + _marker: PhantomData<&'a mut &'a ()>, +} + +impl<'a, 'evl> Strategy<'evl> for NonBlocking<'a> { + type Context = Context<'a>; + type Future = EventListener; + + #[inline] + fn wait(&mut self, evl: EventListener) -> Self::Future { + evl + } + + #[inline] + fn poll + Unpin>( + &mut self, + event_listener: &mut Option, + context: &mut Self::Context, + ) -> Poll { + let poll = Pin::new( + event_listener + .as_mut() + .expect("`event_listener` should never be `None`"), + ) + .poll(context); + if poll.is_ready() { + *event_listener = None; + } + poll + } +} + +/// A strategy that blocks the current thread until the event is signalled. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[cfg(all(feature = "std", not(target_family = "wasm")))] +pub struct Blocking { + _private: (), +} + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +impl<'evl> Strategy<'evl> for Blocking { + type Context = (); + type Future = Ready; + + #[inline] + fn wait(&mut self, evl: EventListener) -> Self::Future { + evl.wait(); + Ready { _private: () } + } + + #[inline] + fn poll + Unpin>( + &mut self, + event_listener: &mut Option, + _context: &mut Self::Context, + ) -> Poll { + let result = event_listener + .take() + .expect("`event_listener` should never be `None`") + .wait(); + Poll::Ready(result) + } +} + +/// A future that is always ready. +#[cfg(feature = "std")] +#[doc(hidden)] +#[derive(Debug, Clone)] +pub struct Ready { + _private: (), +} + +#[cfg(feature = "std")] +impl Future for Ready { + type Output = (); + + #[inline] + fn poll(self: Pin<&mut Self>, _context: &mut Context<'_>) -> Poll { + Poll::Ready(()) + } +} + +#[test] +fn send_and_sync() { + fn assert_send_and_sync() {} + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + { + assert_send_and_sync::(); + assert_send_and_sync::(); + } + + assert_send_and_sync::>(); + assert_send_and_sync::>(); +} diff --git a/.cargo-vendor/event-listener-strategy/tests/easy_wrapper.rs b/.cargo-vendor/event-listener-strategy/tests/easy_wrapper.rs new file mode 100644 index 0000000000..4ec48ec7cb --- /dev/null +++ b/.cargo-vendor/event-listener-strategy/tests/easy_wrapper.rs @@ -0,0 +1,109 @@ +//! Testing of the `easy_wrapper!` macro. + +use event_listener_strategy::{easy_wrapper, EventListenerFuture, Strategy}; +use std::{marker::PhantomData, pin::Pin, task::Poll}; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn easy_wrapper_generics() { + // Easy case. 
+ struct MyStrategy; + + impl EventListenerFuture for MyStrategy { + type Output = (); + + fn poll_with_strategy<'a, S: Strategy<'a>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll { + Poll::Ready(()) + } + } + + easy_wrapper! { + struct MyEasyWrapper(MyStrategy => ()); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + MyEasyWrapper::_new(MyStrategy).wait(); + + // Medium case with generics. + struct MyStrategy2 { + _marker: PhantomData, + } + + impl EventListenerFuture for MyStrategy2 { + type Output = T; + + fn poll_with_strategy<'a, S: Strategy<'a>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll { + unreachable!() + } + } + + easy_wrapper! { + struct MyEasyWrapper2(MyStrategy2 => T); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } + + // Medium mode with lifetime. + struct MyStrategylt<'a> { + _marker: PhantomData<&'a ()>, + } + + impl<'a> EventListenerFuture for MyStrategylt<'a> { + type Output = &'a (); + + fn poll_with_strategy<'b, S: Strategy<'b>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll { + unreachable!() + } + } + + easy_wrapper! { + struct MyEasyWrapperlt<'a>(MyStrategylt<'a> => &'a ()); + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } + + // Hard mode with generic bounds. + struct MyStrategy3<'a, T: ?Sized> + where + T: 'a, + { + _marker: PhantomData<&'a T>, + } + + impl<'a, T: ?Sized> EventListenerFuture for MyStrategy3<'a, T> + where + T: 'a, + { + type Output = &'a T; + + fn poll_with_strategy<'b, S: Strategy<'b>>( + self: Pin<&mut Self>, + _strategy: &mut S, + _context: &mut S::Context, + ) -> Poll { + unreachable!() + } + } + + easy_wrapper! 
{ + struct MyEasyWrapper3<'a, T: ?Sized>(MyStrategy3<'a, T> => &'a T) where T: 'a; + #[cfg(all(feature = "std", not(target_family = "wasm")))] + wait(); + } +} diff --git a/.cargo-vendor/event-listener/.cargo-checksum.json b/.cargo-vendor/event-listener/.cargo-checksum.json new file mode 100644 index 0000000000..b5e8dbde0a --- /dev/null +++ b/.cargo-vendor/event-listener/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"fae3ff9e1d850358c66dfc3cd4f95e1418db491917ef6f354ddddbb462d59502","Cargo.lock":"822d6efbde49f627134cdb27e3861fec3d887e57bdea6566dbd9751d3205cdb2","Cargo.toml":"86153ab189b266b0b906d691c71ea6e028e8d65d2a64456d73b49e5db930c00c","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b914d686589b9a168e199e2b44c545fffbb29c686bac5104c1e3ff306f60009b","benches/bench.rs":"a534ba022682b1979d00c6a9b815bd6433a6c950027f17f4d68733f893f7ecff","examples/mutex.rs":"42f615435b9ae02b7fc2dbf7e855cfe53a08b67b4a691c77e8983e0107b9b6a3","src/lib.rs":"b019317f17eb655b12c3cbf30711a44cc0ea692b423be44a7e8114e45b2e468d","src/no_std.rs":"4a11940ff5a7b06cd416d54f5032748c4738fc22cba99e7034aca045f09ff971","src/no_std/node.rs":"7e60763ac1b06601706f59765ca2a02303c14e7a54030bd2ee9c1a7a828d937c","src/notify.rs":"1355eb4678e3555f286bf5f23aebed9d63c1d1e46c5c430a6867eff715c2a471","src/std.rs":"c6233821bc881e978ace0fc5dbd457060f151d0caf775f8d14eb7fc9eb91b6a7","tests/loom.rs":"774b4a8935bd5aebc7b294e5260da5ca543871af3b32e297ea4e73520e2b515a","tests/notify.rs":"5782d6732d7abd7035f93ea9c9f877fc884269052df1845621cdbead37511b7d"},"package":"6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba"} \ No newline at end of file diff --git a/.cargo-vendor/event-listener/CHANGELOG.md b/.cargo-vendor/event-listener/CHANGELOG.md new file mode 100644 index 0000000000..d9f9965922 --- /dev/null +++ b/.cargo-vendor/event-listener/CHANGELOG.md @@ -0,0 +1,152 @@ +# 
Version 5.3.1 + +- Disable some optimizations that, in rare conditions, can cause race conditions + causing notifications to be dropped. (#139) +- Ensure the portable-atomic feature is set properly. (#134) +- Update `portable-atomic-util` to v0.2.0. (#132) +- Document the std feature. (#134) + +# Version 5.3.0 + +- Add a `loom` implementation. This feature is unstable and is not semver-supported. (#126) +- Make the panic message for polling the `EventListener` after it has completed more clear. (#125) + +# Version 5.2.0 + +- Make `StackSlot` `Sync`. (#121) + +# Version 5.1.0 + +- Make `StackSlot` `Send`. (#119) + +# Version 5.0.0 + +- **Breaking:** Rework the API to afford better usage. (#105) + - The heap-based API of the v2.x line is back. + - However, there is a stack-based API as an alternative. +- Add a way to get the total number of listeners. (#114) + +# Version 4.0.3 + +- Relax MSRV to 1.60. (#110) + +# Version 4.0.2 + +- Avoid spinning in `wait_deadline`. (#107) + +# Version 4.0.1 + +- Fix a use-after-move error after an `EventListener` is assigned to listen to + another `Event`. (#101) + +# Version 4.0.0 + +- **Breaking:** Fix a footgun in the `EventListener` type. `EventListener::new()` + now no longer takes an `&Event` as an argument, and `EventListener::listen()` + takes the `&Event` as an argument. Hopefully this should prevent `.await`ing + on a listener without making sure it's listening first. (#94) + +# Version 3.1.0 + +- Implement `UnwindSafe` and `RefUnwindSafe` for `EventListener`. This was unintentionally removed in version 3 (#96). + +# Version 3.0.1 + +- Emphasize that `listen()` must be called on `EventListener` in documentation. (#90) +- Write useful output in `fmt::Debug` implementations. (#86) + +# Version 3.0.0 + +- Use the `parking` crate instead of threading APIs (#27) +- Bump MSRV to 1.59 (#71) +- **Breaking:** Make this crate `no_std`-compatible on `default-features = false`. 
(#34) +- Create a new `event-listener-strategy` crate for abstracting over blocking/non-blocking operations. (#49) +- **Breaking:** Change the `EventListener` API to be `!Unpin`. (#51) +- Enable a feature for the `portable-atomic` crate. (#53) +- **Breaking:** Add a `Notification` trait which is used to enable tagged events. (#52) +- Add an `is_notified()` method to `Event`. (#48) +- **Breaking:** Make it so `notify()` returns the number of listeners notified. (#57) + +# Version 2.5.3 + +- Fix fence on x86 and miri. + +# Version 2.5.2 + +- Fix stacked borrows violation when `-Zmiri-tag-raw-pointers` is enabled. (#24) + +# Version 2.5.1 + +- Replace spinlock with a mutex. + +# Version 2.5.0 + +- Add `EventListener::discard()`. + +# Version 2.4.0 + +- `Event::new()` is now a const fn. + +# Version 2.3.3 + +- Fix a bug in `List::insert()` that was causing deadlocks. + +# Version 2.3.2 + +- Optimization: use a simple spinlock and cache an `Entry` for less allocation. + +# Version 2.3.1 + +- Optimization: don't initialize `Inner` when notifying `Event`. + +# Version 2.3.0 + +- Implement `UnwindSafe`/`RefUnwindSafe` for `Event`/`EventListener`. + +# Version 2.2.1 + +- Always keep the last waker in `EventListener::poll()`. + +# Version 2.2.0 + +- Add `EventListener::same_event()`. + +# Version 2.1.0 + +- Add `EventListener::listens_to()`. + +# Version 2.0.1 + +- Replace `usize::MAX` with `std::usize::MAX`. + +# Version 2.0.0 + +- Remove `Event::notify_one()` and `Event::notify_all()`. +- Add `Event::notify_relaxed()` and `Event::notify_additional_relaxed()`. +- Dropped notified `EventListener` now notifies one *or* one additional listener. + +# Version 1.2.0 + +- Add `Event::notify_additional()`. + +# Version 1.1.2 + +- Change a `Relaxed` load to `Acquire` load. + +# Version 1.1.1 + +- Fix a bug in `EventListener::wait_timeout()`. + +# Version 1.1.0 + +- Add `EventListener::notify()`. 
+ +# Version 1.0.1 + +- Reduce the complexity of `notify_all()` from O(n) to amortized O(1). +- Fix a bug where entries were notified in wrong order. +- Add tests. + +# Version 1.0.0 + +- Initial version. diff --git a/.cargo-vendor/event-listener/Cargo.lock b/.cargo-vendor/event-listener/Cargo.lock new file mode 100644 index 0000000000..d1e3d9d10e --- /dev/null +++ b/.cargo-vendor/event-listener/Cargo.lock @@ -0,0 +1,880 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bumpalo" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.0.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" + +[[package]] +name = 
"cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" + +[[package]] +name = "ciborium-ll" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", + "loom", + "portable-atomic", +] + +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if", + "wasm-bindgen", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" + +[[package]] +name = "event-listener" +version = "5.3.1" +dependencies = [ + "concurrent-queue", + "criterion", + "futures-lite", + "loom", + "parking", + "pin-project-lite", + "portable-atomic", + "portable-atomic-util", + "try-lock", + "waker-fn", + "wasm-bindgen-test", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "futures-core" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" + +[[package]] +name = "futures-io" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "generator" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb" +dependencies = [ + "cc", + "cfg-if", + "libc", + "log", + "rustversion", + "windows", +] + +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" + +[[package]] 
+name = "js-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.154" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +dependencies = [ + "loom", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + +[[package]] +name = "portable-atomic-util" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b18c297861eb391769372886a70b9449d51d73a21a1c8a2527652ab1b6453cb5" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "proc-macro2" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "rustversion" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0" + +[[package]] +name = "ryu" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", 
+] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "serde" +version = "1.0.189" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.189" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.107" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "syn" +version = "2.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + 
+[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + +[[package]] +name = "walkdir" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" 
+version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e6e302a7ea94f83a6d09e78e7dc7d9ca7b186bc2829c24a22d0753efd680671" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "web-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" +dependencies = [ + "windows-core", + "windows-targets", +] + +[[package]] +name = "windows-core" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" +dependencies = [ + "windows-result", + "windows-targets", +] + +[[package]] +name = "windows-result" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = 
"windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" diff --git a/.cargo-vendor/event-listener/Cargo.toml b/.cargo-vendor/event-listener/Cargo.toml new file mode 100644 index 0000000000..538c386eed --- /dev/null +++ b/.cargo-vendor/event-listener/Cargo.toml @@ -0,0 +1,108 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.60" +name = "event-listener" +version = "5.3.1" +authors = [ + "Stjepan Glavina ", + "John Nunley ", +] +exclude = ["/.*"] +description = "Notify async tasks or threads" +readme = "README.md" +keywords = [ + "condvar", + "eventcount", + "wake", + "blocking", + "park", +] +categories = [ + "asynchronous", + "concurrency", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/smol-rs/event-listener" + +[lib] +bench = false + +[[bench]] +name = "bench" +harness = false + +[dependencies.concurrent-queue] +version = "2.4.0" +default-features = false + +[dependencies.pin-project-lite] +version = "0.2.12" + +[dependencies.portable-atomic-util] +version = "0.2.0" +features = ["alloc"] +optional = true +default-features = false + +[dependencies.portable_atomic_crate] +version = "1.2.0" +optional = true +default-features = false +package = "portable-atomic" + +[dev-dependencies.criterion] +version = "0.5" +features = ["cargo_bench_support"] +default-features = false + +[dev-dependencies.futures-lite] +version = "2.0.0" + +[dev-dependencies.try-lock] +version = "0.2.5" + +[dev-dependencies.waker-fn] +version = "1" + +[features] +default = ["std"] +loom = [ + "concurrent-queue/loom", + "parking?/loom", + "dep:loom", +] +portable-atomic = [ + "portable-atomic-util", + "portable_atomic_crate", + "concurrent-queue/portable-atomic", +] +std = [ + "concurrent-queue/std", + "parking", +] + +[target."cfg(loom)".dependencies.loom] +version = "0.7" +optional = true + +[target."cfg(not(target_family = \"wasm\"))".dependencies.parking] +version = "2.0.0" +optional = true + +[target."cfg(target_family = \"wasm\")".dev-dependencies.wasm-bindgen-test] +version = "0.3" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 diff --git a/.cargo-vendor/event-listener/LICENSE-APACHE b/.cargo-vendor/event-listener/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ 
b/.cargo-vendor/event-listener/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, 
in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo-vendor/event-listener/LICENSE-MIT b/.cargo-vendor/event-listener/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/.cargo-vendor/event-listener/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo-vendor/event-listener/README.md b/.cargo-vendor/event-listener/README.md new file mode 100644 index 0000000000..80aaef98dc --- /dev/null +++ b/.cargo-vendor/event-listener/README.md @@ -0,0 +1,86 @@ +# event-listener + +[![Build](https://github.com/smol-rs/event-listener/workflows/CI/badge.svg)]( +https://github.com/smol-rs/event-listener/actions) +[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( +https://github.com/smol-rs/event-listener) +[![Cargo](https://img.shields.io/crates/v/event-listener.svg)]( +https://crates.io/crates/event-listener) +[![Documentation](https://docs.rs/event-listener/badge.svg)]( +https://docs.rs/event-listener) + +Notify async tasks or threads. + +This is a synchronization primitive similar to [eventcounts] invented by Dmitry Vyukov. + +You can use this crate to turn non-blocking data structures into async or blocking data +structures. See a [simple mutex] implementation that exposes an async and a blocking interface +for acquiring locks. + +[eventcounts]: https://www.1024cores.net/home/lock-free-algorithms/eventcounts +[simple mutex]: ./examples/mutex.rs + +## Examples + +Wait until another thread sets a boolean flag: + +```rust +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::thread; +use std::time::Duration; +use event_listener::Event; + +let flag = Arc::new(AtomicBool::new(false)); +let event = Arc::new(Event::new()); + +// Spawn a thread that will set the flag after 1 second. 
+thread::spawn({ + let flag = flag.clone(); + let event = event.clone(); + move || { + // Wait for a second. + thread::sleep(Duration::from_secs(1)); + + // Set the flag. + flag.store(true, Ordering::SeqCst); + + // Notify all listeners that the flag has been set. + event.notify(usize::MAX); + } +}); + +// Wait until the flag is set. +loop { + // Check the flag. + if flag.load(Ordering::SeqCst) { + break; + } + + // Start listening for events. + let listener = event.listen(); + + // Check the flag again after creating the listener. + if flag.load(Ordering::SeqCst) { + break; + } + + // Wait for a notification and continue the loop. + listener.wait(); +} +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/.cargo-vendor/event-listener/benches/bench.rs b/.cargo-vendor/event-listener/benches/bench.rs new file mode 100644 index 0000000000..d9e0db16d1 --- /dev/null +++ b/.cargo-vendor/event-listener/benches/bench.rs @@ -0,0 +1,26 @@ +use std::iter; + +use criterion::{criterion_group, criterion_main, Criterion}; +use event_listener::{Event, Listener}; + +const COUNT: usize = 8000; + +fn bench_events(c: &mut Criterion) { + c.bench_function("notify_and_wait", |b| { + let ev = Event::new(); + let mut handles = Vec::with_capacity(COUNT); + + b.iter(|| { + handles.extend(iter::repeat_with(|| ev.listen()).take(COUNT)); + + ev.notify(COUNT); + + for handle in handles.drain(..) 
{ + handle.wait(); + } + }); + }); +} + +criterion_group!(benches, bench_events); +criterion_main!(benches); diff --git a/.cargo-vendor/event-listener/examples/mutex.rs b/.cargo-vendor/event-listener/examples/mutex.rs new file mode 100644 index 0000000000..30fbe66c9f --- /dev/null +++ b/.cargo-vendor/event-listener/examples/mutex.rs @@ -0,0 +1,170 @@ +//! A simple mutex implementation. +//! +//! This mutex exposes both blocking and async methods for acquiring a lock. + +#[cfg(not(target_family = "wasm"))] +mod example { + #![allow(dead_code)] + + use std::ops::{Deref, DerefMut}; + use std::sync::{mpsc, Arc}; + use std::thread; + use std::time::{Duration, Instant}; + + use event_listener::{listener, Event, Listener}; + use try_lock::{Locked, TryLock}; + + /// A simple mutex. + struct Mutex { + /// Blocked lock operations. + lock_ops: Event, + + /// The inner non-blocking mutex. + data: TryLock, + } + + unsafe impl Send for Mutex {} + unsafe impl Sync for Mutex {} + + impl Mutex { + /// Creates a mutex. + fn new(t: T) -> Mutex { + Mutex { + lock_ops: Event::new(), + data: TryLock::new(t), + } + } + + /// Attempts to acquire a lock. + fn try_lock(&self) -> Option> { + self.data.try_lock().map(MutexGuard) + } + + /// Blocks until a lock is acquired. + fn lock(&self) -> MutexGuard<'_, T> { + loop { + // Attempt grabbing a lock. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Set up an event listener. + listener!(self.lock_ops => listener); + + // Try again. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Wait for a notification. + listener.wait(); + } + } + + /// Blocks until a lock is acquired or the timeout is reached. + fn lock_timeout(&self, timeout: Duration) -> Option> { + let deadline = Instant::now() + timeout; + + loop { + // Attempt grabbing a lock. + if let Some(guard) = self.try_lock() { + return Some(guard); + } + + // Set up an event listener. + listener!(self.lock_ops => listener); + + // Try again. 
+ if let Some(guard) = self.try_lock() { + return Some(guard); + } + + // Wait until a notification is received. + listener.wait_deadline(deadline)?; + } + } + + /// Acquires a lock asynchronously. + async fn lock_async(&self) -> MutexGuard<'_, T> { + loop { + // Attempt grabbing a lock. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Set up an event listener. + listener!(self.lock_ops => listener); + + // Try again. + if let Some(guard) = self.try_lock() { + return guard; + } + + // Wait until a notification is received. + listener.await; + } + } + } + + /// A guard holding a lock. + struct MutexGuard<'a, T>(Locked<'a, T>); + + impl Deref for MutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.0 + } + } + + impl DerefMut for MutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + &mut self.0 + } + } + + pub(super) fn entry() { + const N: usize = 10; + + // A shared counter. + let counter = Arc::new(Mutex::new(0)); + + // A channel that signals when all threads are done. + let (tx, rx) = mpsc::channel(); + + // Spawn a bunch of threads incrementing the counter. + for _ in 0..N { + let counter = counter.clone(); + let tx = tx.clone(); + + thread::spawn(move || { + let mut counter = counter.lock(); + *counter += 1; + + // If this is the last increment, signal that we're done. + if *counter == N { + tx.send(()).unwrap(); + } + }); + } + + // Wait until the last thread increments the counter. + rx.recv().unwrap(); + + // The counter must equal the number of threads. 
+ assert_eq!(*counter.lock(), N); + + println!("Done!"); + } +} + +#[cfg(target_family = "wasm")] +mod example { + pub(super) fn entry() { + println!("This example is not supported on wasm yet."); + } +} + +fn main() { + example::entry(); +} diff --git a/.cargo-vendor/event-listener/src/lib.rs b/.cargo-vendor/event-listener/src/lib.rs new file mode 100644 index 0000000000..a6818b0797 --- /dev/null +++ b/.cargo-vendor/event-listener/src/lib.rs @@ -0,0 +1,1544 @@ +//! Notify async tasks or threads. +//! +//! This is a synchronization primitive similar to [eventcounts] invented by Dmitry Vyukov. +//! +//! You can use this crate to turn non-blocking data structures into async or blocking data +//! structures. See a [simple mutex] implementation that exposes an async and a blocking interface +//! for acquiring locks. +//! +//! [eventcounts]: https://www.1024cores.net/home/lock-free-algorithms/eventcounts +//! [simple mutex]: https://github.com/smol-rs/event-listener/blob/master/examples/mutex.rs +//! +//! # Examples +//! +//! Wait until another thread sets a boolean flag: +//! +//! ``` +//! use std::sync::atomic::{AtomicBool, Ordering}; +//! use std::sync::Arc; +//! use std::thread; +//! use std::time::Duration; +//! use std::usize; +//! use event_listener::{Event, Listener}; +//! +//! let flag = Arc::new(AtomicBool::new(false)); +//! let event = Arc::new(Event::new()); +//! +//! // Spawn a thread that will set the flag after 1 second. +//! thread::spawn({ +//! let flag = flag.clone(); +//! let event = event.clone(); +//! move || { +//! // Wait for a second. +//! thread::sleep(Duration::from_secs(1)); +//! +//! // Set the flag. +//! flag.store(true, Ordering::SeqCst); +//! +//! // Notify all listeners that the flag has been set. +//! event.notify(usize::MAX); +//! } +//! }); +//! +//! // Wait until the flag is set. +//! loop { +//! // Check the flag. +//! if flag.load(Ordering::SeqCst) { +//! break; +//! } +//! +//! // Start listening for events. +//! 
let mut listener = event.listen(); +//! +//! // Check the flag again after creating the listener. +//! if flag.load(Ordering::SeqCst) { +//! break; +//! } +//! +//! // Wait for a notification and continue the loop. +//! listener.wait(); +//! } +//! ``` +//! +//! # Features +//! +//! - The `std` feature (enabled by default) enables the use of the Rust standard library. Disable it for `no_std` +//! support +//! +//! - The `portable-atomic` feature enables the use of the [`portable-atomic`] crate to provide +//! atomic operations on platforms that don't support them. +//! +//! [`portable-atomic`]: https://crates.io/crates/portable-atomic + +#![cfg_attr(not(feature = "std"), no_std)] +#![allow(clippy::multiple_bound_locations)] // This is a WONTFIX issue with pin-project-lite +#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" +)] + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(feature = "std")] +extern crate std as alloc; + +#[cfg_attr(feature = "std", path = "std.rs")] +#[cfg_attr(not(feature = "std"), path = "no_std.rs")] +mod sys; + +mod notify; + +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; + +use core::borrow::Borrow; +use core::fmt; +use core::future::Future; +use core::mem::ManuallyDrop; +use core::pin::Pin; +use core::ptr; +use core::task::{Context, Poll, Waker}; + +#[cfg(all(feature = "std", not(target_family = "wasm")))] +use { + parking::{Parker, Unparker}, + std::time::{Duration, Instant}, +}; + +use sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use sync::Arc; + +#[cfg(not(loom))] +use sync::WithMut; + +use notify::NotificationPrivate; +pub use notify::{IntoNotification, Notification}; + +/// Inner state of [`Event`]. 
+struct Inner { + /// The number of notified entries, or `usize::MAX` if all of them have been notified. + /// + /// If there are no entries, this value is set to `usize::MAX`. + notified: AtomicUsize, + + /// Inner queue of event listeners. + /// + /// On `std` platforms, this is an intrusive linked list. On `no_std` platforms, this is a + /// more traditional `Vec` of listeners, with an atomic queue used as a backup for high + /// contention. + list: sys::List, +} + +impl Inner { + fn new() -> Self { + Self { + notified: AtomicUsize::new(usize::MAX), + list: sys::List::new(), + } + } +} + +/// A synchronization primitive for notifying async tasks and threads. +/// +/// Listeners can be registered using [`Event::listen()`]. There are two ways to notify listeners: +/// +/// 1. [`Event::notify()`] notifies a number of listeners. +/// 2. [`Event::notify_additional()`] notifies a number of previously unnotified listeners. +/// +/// If there are no active listeners at the time a notification is sent, it simply gets lost. +/// +/// There are two ways for a listener to wait for a notification: +/// +/// 1. In an asynchronous manner using `.await`. +/// 2. In a blocking manner by calling [`EventListener::wait()`] on it. +/// +/// If a notified listener is dropped without receiving a notification, dropping will notify +/// another active listener. Whether one *additional* listener will be notified depends on what +/// kind of notification was delivered. +/// +/// Listeners are registered and notified in the first-in first-out fashion, ensuring fairness. +pub struct Event { + /// A pointer to heap-allocated inner state. + /// + /// This pointer is initially null and gets lazily initialized on first use. Semantically, it + /// is an `Arc` so it's important to keep in mind that it contributes to the [`Arc`]'s + /// reference count. 
+ inner: AtomicPtr>, +} + +unsafe impl Send for Event {} +unsafe impl Sync for Event {} + +impl core::panic::UnwindSafe for Event {} +impl core::panic::RefUnwindSafe for Event {} + +impl fmt::Debug for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.try_inner() { + Some(inner) => { + let notified_count = inner.notified.load(Ordering::Relaxed); + let total_count = match inner.list.try_total_listeners() { + Some(total_count) => total_count, + None => { + return f + .debug_tuple("Event") + .field(&format_args!("")) + .finish() + } + }; + + f.debug_struct("Event") + .field("listeners_notified", ¬ified_count) + .field("listeners_total", &total_count) + .finish() + } + None => f + .debug_tuple("Event") + .field(&format_args!("")) + .finish(), + } + } +} + +impl Default for Event { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl Event { + /// Creates a new `Event` with a tag type. + /// + /// Tagging cannot be implemented efficiently on `no_std`, so this is only available when the + /// `std` feature is enabled. + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::::with_tag(); + /// ``` + #[cfg(all(feature = "std", not(loom)))] + #[inline] + pub const fn with_tag() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + #[cfg(all(feature = "std", loom))] + #[inline] + pub fn with_tag() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + + /// Tell whether any listeners are currently notified. 
+ /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let listener = event.listen(); + /// assert!(!event.is_notified()); + /// + /// event.notify(1); + /// assert!(event.is_notified()); + /// ``` + #[inline] + pub fn is_notified(&self) -> bool { + self.try_inner() + .map_or(false, |inner| inner.notified.load(Ordering::Acquire) > 0) + } + + /// Returns a guard listening for a notification. + /// + /// This method emits a `SeqCst` fence after registering a listener. For now, this method + /// is an alias for calling [`EventListener::new()`], pinning it to the heap, and then + /// inserting it into a list. + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// let listener = event.listen(); + /// ``` + /// + /// # Caveats + /// + /// The above example is equivalent to this code: + /// + /// ```no_compile + /// use event_listener::{Event, EventListener}; + /// + /// let event = Event::new(); + /// let mut listener = Box::pin(EventListener::new()); + /// listener.listen(&event); + /// ``` + /// + /// It creates a new listener, pins it to the heap, and inserts it into the linked list + /// of listeners. While this type of usage is simple, it may be desired to eliminate this + /// heap allocation. In this case, consider using the [`EventListener::new`] constructor + /// directly, which allows for greater control over where the [`EventListener`] is + /// allocated. However, users of this `new` method must be careful to ensure that the + /// [`EventListener`] is `listen`ing before waiting on it; panics may occur otherwise. + #[cold] + pub fn listen(&self) -> EventListener { + let inner = ManuallyDrop::new(unsafe { Arc::from_raw(self.inner()) }); + + // Allocate the listener on the heap and insert it. 
+ let mut listener = Box::pin(InnerListener { + event: Arc::clone(&inner), + listener: None, + }); + listener.as_mut().listen(); + + // Return the listener. + EventListener { listener } + } + + /// Notifies a number of active listeners. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// The [`Notification`] trait is used to define what kind of notification is delivered. + /// The default implementation (implemented on `usize`) is a notification that only notifies + /// *at least* the specified number of listeners. + /// + /// In certain cases, this function emits a `SeqCst` fence before notifying listeners. + /// + /// This function returns the number of [`EventListener`]s that were notified by this call. + /// + /// # Caveats + /// + /// If the `std` feature is disabled, the notification will be delayed under high contention, + /// such as when another thread is taking a while to `notify` the event. In this circumstance, + /// this function will return `0` instead of the number of listeners actually notified. Therefore + /// if the `std` feature is disabled the return value of this function should not be relied upon + /// for soundness and should be used only as a hint. + /// + /// If the `std` feature is enabled, no spurious returns are possible, since the `std` + /// implementation uses system locking primitives to ensure there is no unavoidable + /// contention. + /// + /// # Examples + /// + /// Use the default notification strategy: + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // Notifies two listeners. 
+ /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(2); + /// ``` + /// + /// Notify without emitting a `SeqCst` fence. This uses the [`relaxed`] notification strategy. + /// This is equivalent to calling [`Event::notify_relaxed()`]. + /// + /// [`relaxed`]: IntoNotification::relaxed + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1.relaxed()); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(2.relaxed()); + /// ``` + /// + /// Notify additional listeners. In contrast to [`Event::notify()`], this method will notify `n` + /// *additional* listeners that were previously unnotified. This uses the [`additional`] + /// notification strategy. This is equivalent to calling [`Event::notify_additional()`]. + /// + /// [`additional`]: IntoNotification::additional + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1.additional()); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // Notifies two listeners. 
+ /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(1.additional()); + /// event.notify(1.additional()); + /// ``` + /// + /// Notifies with the [`additional`] and [`relaxed`] strategies at the same time. This is + /// equivalent to calling [`Event::notify_additional_relaxed()`]. + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1.additional().relaxed()); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(1.additional().relaxed()); + /// event.notify(1.additional().relaxed()); + /// ``` + #[inline] + pub fn notify(&self, notify: impl IntoNotification) -> usize { + let notify = notify.into_notification(); + + // Make sure the notification comes after whatever triggered it. + notify.fence(notify::Internal::new()); + + let inner = unsafe { &*self.inner() }; + inner.notify(notify) + } + + /// Return a reference to the inner state if it has been initialized. + #[inline] + fn try_inner(&self) -> Option<&Inner> { + let inner = self.inner.load(Ordering::Acquire); + unsafe { inner.as_ref() } + } + + /// Returns a raw, initialized pointer to the inner state. + /// + /// This returns a raw pointer instead of reference because `from_raw` + /// requires raw/mut provenance: . 
+ fn inner(&self) -> *const Inner { + let mut inner = self.inner.load(Ordering::Acquire); + + // If this is the first use, initialize the state. + if inner.is_null() { + // Allocate the state on the heap. + let new = Arc::new(Inner::::new()); + + // Convert the state to a raw pointer. + let new = Arc::into_raw(new) as *mut Inner; + + // Replace the null pointer with the new state pointer. + inner = self + .inner + .compare_exchange(inner, new, Ordering::AcqRel, Ordering::Acquire) + .unwrap_or_else(|x| x); + + // Check if the old pointer value was indeed null. + if inner.is_null() { + // If yes, then use the new state pointer. + inner = new; + } else { + // If not, that means a concurrent operation has initialized the state. + // In that case, use the old pointer and deallocate the new one. + unsafe { + drop(Arc::from_raw(new)); + } + } + } + + inner + } + + /// Get the number of listeners currently listening to this [`Event`]. + /// + /// This call returns the number of [`EventListener`]s that are currently listening to + /// this event. It does this by acquiring the internal event lock and reading the listener + /// count. Therefore it is only available for `std`-enabled platforms. + /// + /// # Caveats + /// + /// This function returns just a snapshot of the number of listeners at this point in time. + /// Due to the nature of multi-threaded CPUs, it is possible that this number will be + /// inaccurate by the time that this function returns. + /// + /// It is possible for the actual number to change at any point. Therefore, the number should + /// only ever be used as a hint. 
+ /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// + /// assert_eq!(event.total_listeners(), 0); + /// + /// let listener1 = event.listen(); + /// assert_eq!(event.total_listeners(), 1); + /// + /// let listener2 = event.listen(); + /// assert_eq!(event.total_listeners(), 2); + /// + /// drop(listener1); + /// drop(listener2); + /// assert_eq!(event.total_listeners(), 0); + /// ``` + #[cfg(feature = "std")] + #[inline] + pub fn total_listeners(&self) -> usize { + if let Some(inner) = self.try_inner() { + inner.list.total_listeners() + } else { + 0 + } + } +} + +impl Event<()> { + /// Creates a new [`Event`]. + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// ``` + #[inline] + #[cfg(not(loom))] + pub const fn new() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + + #[inline] + #[cfg(loom)] + pub fn new() -> Self { + Self { + inner: AtomicPtr::new(ptr::null_mut()), + } + } + + /// Notifies a number of active listeners without emitting a `SeqCst` fence. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// In contrast to [`Event::notify_additional()`], this method only makes sure *at least* `n` + /// listeners among the active ones are notified. + /// + /// Unlike [`Event::notify()`], this method does not emit a `SeqCst` fence. + /// + /// This method only works for untagged events. 
In other cases, it is recommended to instead + /// use [`Event::notify()`] like so: + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// let event = Event::new(); + /// + /// // Old way: + /// event.notify_relaxed(1); + /// + /// // New way: + /// event.notify(1.relaxed()); + /// ``` + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, IntoNotification}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify_relaxed(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify_relaxed(2); + /// ``` + #[inline] + pub fn notify_relaxed(&self, n: usize) -> usize { + self.notify(n.relaxed()) + } + + /// Notifies a number of active and still unnotified listeners. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// In contrast to [`Event::notify()`], this method will notify `n` *additional* listeners that + /// were previously unnotified. + /// + /// This method emits a `SeqCst` fence before notifying listeners. + /// + /// This method only works for untagged events. 
In other cases, it is recommended to instead + /// use [`Event::notify()`] like so: + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// let event = Event::new(); + /// + /// // Old way: + /// event.notify_additional(1); + /// + /// // New way: + /// event.notify(1.additional()); + /// ``` + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify_additional(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify_additional(1); + /// event.notify_additional(1); + /// ``` + #[inline] + pub fn notify_additional(&self, n: usize) -> usize { + self.notify(n.additional()) + } + + /// Notifies a number of active and still unnotified listeners without emitting a `SeqCst` + /// fence. + /// + /// The number is allowed to be zero or exceed the current number of listeners. + /// + /// In contrast to [`Event::notify()`], this method will notify `n` *additional* listeners that + /// were previously unnotified. + /// + /// Unlike [`Event::notify_additional()`], this method does not emit a `SeqCst` fence. + /// + /// This method only works for untagged events. 
In other cases, it is recommended to instead + /// use [`Event::notify()`] like so: + /// + /// ``` + /// use event_listener::{IntoNotification, Event}; + /// let event = Event::new(); + /// + /// // Old way: + /// event.notify_additional_relaxed(1); + /// + /// // New way: + /// event.notify(1.additional().relaxed()); + /// ``` + /// + /// # Examples + /// + /// ``` + /// use event_listener::Event; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// // This notification gets lost because there are no listeners. + /// event.notify(1); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify_additional_relaxed(1); + /// event.notify_additional_relaxed(1); + /// ``` + #[inline] + pub fn notify_additional_relaxed(&self, n: usize) -> usize { + self.notify(n.additional().relaxed()) + } +} + +impl Drop for Event { + #[inline] + fn drop(&mut self) { + self.inner.with_mut(|&mut inner| { + // If the state pointer has been initialized, drop it. + if !inner.is_null() { + unsafe { + drop(Arc::from_raw(inner)); + } + } + }) + } +} + +/// A handle that is listening to an [`Event`]. +/// +/// This trait represents a type waiting for a notification from an [`Event`]. See the +/// [`EventListener`] type for more documentation on this trait's usage. +pub trait Listener: Future + __sealed::Sealed { + /// Blocks until a notification is received. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener = event.listen(); + /// + /// // Notify `listener`. 
+ /// event.notify(1); + /// + /// // Receive the notification. + /// listener.wait(); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait(self) -> T; + + /// Blocks until a notification is received or a timeout is reached. + /// + /// Returns `true` if a notification was received. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener = event.listen(); + /// + /// // There are no notification so this times out. + /// assert!(listener.wait_timeout(Duration::from_secs(1)).is_none()); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_timeout(self, timeout: Duration) -> Option; + + /// Blocks until a notification is received or a deadline is reached. + /// + /// Returns `true` if a notification was received. + /// + /// # Examples + /// + /// ``` + /// use std::time::{Duration, Instant}; + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener = event.listen(); + /// + /// // There are no notification so this times out. + /// assert!(listener.wait_deadline(Instant::now() + Duration::from_secs(1)).is_none()); + /// ``` + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_deadline(self, deadline: Instant) -> Option; + + /// Drops this listener and discards its notification (if any) without notifying another + /// active listener. + /// + /// Returns `true` if a notification was discarded. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let mut listener1 = event.listen(); + /// let mut listener2 = event.listen(); + /// + /// event.notify(1); + /// + /// assert!(listener1.discard()); + /// assert!(!listener2.discard()); + /// ``` + fn discard(self) -> bool; + + /// Returns `true` if this listener listens to the given `Event`. 
+ /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let listener = event.listen(); + /// + /// assert!(listener.listens_to(&event)); + /// ``` + fn listens_to(&self, event: &Event) -> bool; + + /// Returns `true` if both listeners listen to the same `Event`. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, Listener}; + /// + /// let event = Event::new(); + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// + /// assert!(listener1.same_event(&listener2)); + /// ``` + fn same_event(&self, other: &Self) -> bool; +} + +/// Implement the `Listener` trait using the underlying `InnerListener`. +macro_rules! forward_impl_to_listener { + ($gen:ident => $ty:ty) => { + impl<$gen> crate::Listener<$gen> for $ty { + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait(mut self) -> $gen { + self.listener_mut().wait_internal(None).unwrap() + } + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_timeout(mut self, timeout: std::time::Duration) -> Option<$gen> { + self.listener_mut() + .wait_internal(std::time::Instant::now().checked_add(timeout)) + } + + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_deadline(mut self, deadline: std::time::Instant) -> Option<$gen> { + self.listener_mut().wait_internal(Some(deadline)) + } + + fn discard(mut self) -> bool { + self.listener_mut().discard() + } + + #[inline] + fn listens_to(&self, event: &Event<$gen>) -> bool { + core::ptr::eq::>( + &*self.listener().event, + event.inner.load(core::sync::atomic::Ordering::Acquire), + ) + } + + #[inline] + fn same_event(&self, other: &$ty) -> bool { + core::ptr::eq::>(&*self.listener().event, &*other.listener().event) + } + } + + impl<$gen> Future for $ty { + type Output = $gen; + + #[inline] + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<$gen> { + self.listener_mut().poll_internal(cx) + } + } + }; +} + 
+/// A guard waiting for a notification from an [`Event`]. +/// +/// There are two ways for a listener to wait for a notification: +/// +/// 1. In an asynchronous manner using `.await`. +/// 2. In a blocking manner by calling [`EventListener::wait()`] on it. +/// +/// If a notified listener is dropped without receiving a notification, dropping will notify +/// another active listener. Whether one *additional* listener will be notified depends on what +/// kind of notification was delivered. +/// +/// See the [`Listener`] trait for the functionality exposed by this type. +/// +/// This structure allocates the listener on the heap. +pub struct EventListener { + listener: Pin>>>>, +} + +unsafe impl Send for EventListener {} +unsafe impl Sync for EventListener {} + +impl core::panic::UnwindSafe for EventListener {} +impl core::panic::RefUnwindSafe for EventListener {} +impl Unpin for EventListener {} + +impl fmt::Debug for EventListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("EventListener").finish_non_exhaustive() + } +} + +impl EventListener { + #[inline] + fn listener(&self) -> &InnerListener>> { + &self.listener + } + + #[inline] + fn listener_mut(&mut self) -> Pin<&mut InnerListener>>> { + self.listener.as_mut() + } +} + +forward_impl_to_listener! { T => EventListener } + +/// Create a stack-based event listener for an [`Event`]. +/// +/// [`EventListener`] allocates the listener on the heap. While this works for most use cases, in +/// practice this heap allocation can be expensive for repeated uses. This method allows for +/// allocating the listener on the stack instead. +/// +/// There are limitations to using this macro instead of the [`EventListener`] type, however. +/// Firstly, it is significantly less flexible. The listener is locked to the current stack +/// frame, meaning that it can't be returned or put into a place where it would go out of +/// scope. 
For instance, this will not work: +/// +/// ```compile_fail +/// use event_listener::{Event, Listener, listener}; +/// +/// fn get_listener(event: &Event) -> impl Listener { +/// listener!(event => cant_return_this); +/// cant_return_this +/// } +/// ``` +/// +/// In addition, the types involved in creating this listener are not able to be named. Therefore +/// it cannot be used in hand-rolled futures or similar structures. +/// +/// The type created by this macro implements [`Listener`], allowing it to be used in cases where +/// [`EventListener`] would normally be used. +/// +/// ## Example +/// +/// To use this macro, replace cases where you would normally use this... +/// +/// ```no_compile +/// let listener = event.listen(); +/// ``` +/// +/// ...with this: +/// +/// ```no_compile +/// listener!(event => listener); +/// ``` +/// +/// Here is the top level example from this crate's documentation, but using [`listener`] instead +/// of [`EventListener`]. +/// +/// ``` +/// use std::sync::atomic::{AtomicBool, Ordering}; +/// use std::sync::Arc; +/// use std::thread; +/// use std::time::Duration; +/// use std::usize; +/// use event_listener::{Event, listener, IntoNotification, Listener}; +/// +/// let flag = Arc::new(AtomicBool::new(false)); +/// let event = Arc::new(Event::new()); +/// +/// // Spawn a thread that will set the flag after 1 second. +/// thread::spawn({ +/// let flag = flag.clone(); +/// let event = event.clone(); +/// move || { +/// // Wait for a second. +/// thread::sleep(Duration::from_secs(1)); +/// +/// // Set the flag. +/// flag.store(true, Ordering::SeqCst); +/// +/// // Notify all listeners that the flag has been set. +/// event.notify(usize::MAX); +/// } +/// }); +/// +/// // Wait until the flag is set. +/// loop { +/// // Check the flag. +/// if flag.load(Ordering::SeqCst) { +/// break; +/// } +/// +/// // Start listening for events. +/// // NEW: Changed to a stack-based listener. 
+/// listener!(event => listener); +/// +/// // Check the flag again after creating the listener. +/// if flag.load(Ordering::SeqCst) { +/// break; +/// } +/// +/// // Wait for a notification and continue the loop. +/// listener.wait(); +/// } +/// ``` +#[macro_export] +macro_rules! listener { + ($event:expr => $listener:ident) => { + let mut $listener = $crate::__private::StackSlot::new(&$event); + // SAFETY: We shadow $listener so it can't be moved after. + let mut $listener = unsafe { $crate::__private::Pin::new_unchecked(&mut $listener) }; + #[allow(unused_mut)] + let mut $listener = $listener.listen(); + }; +} + +pin_project_lite::pin_project! { + #[project(!Unpin)] + #[project = ListenerProject] + struct InnerListener>> + where + B: Unpin, + { + // The reference to the original event. + event: B, + + // The inner state of the listener. + // + // This is only ever `None` during initialization. After `listen()` has completed, this + // should be `Some`. + #[pin] + listener: Option>, + } + + impl>> PinnedDrop for InnerListener + where + B: Unpin, + { + fn drop(mut this: Pin<&mut Self>) { + // If we're being dropped, we need to remove ourself from the list. + let this = this.project(); + (*this.event).borrow().remove(this.listener, true); + } + } +} + +unsafe impl> + Unpin + Send> Send for InnerListener {} +unsafe impl> + Unpin + Sync> Sync for InnerListener {} + +impl> + Unpin> InnerListener { + /// Insert this listener into the linked list. + #[inline] + fn listen(self: Pin<&mut Self>) { + let this = self.project(); + (*this.event).borrow().insert(this.listener); + } + + /// Wait until the provided deadline. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_internal(mut self: Pin<&mut Self>, deadline: Option) -> Option { + fn parker_and_task() -> (Parker, Task) { + let parker = Parker::new(); + let unparker = parker.unparker(); + (parker, Task::Unparker(unparker)) + } + + crate::sync::thread_local! 
{ + /// Cached thread-local parker/unparker pair. + static PARKER: (Parker, Task) = parker_and_task(); + } + + // Try to borrow the thread-local parker/unparker pair. + PARKER + .try_with({ + let this = self.as_mut(); + |(parker, unparker)| this.wait_with_parker(deadline, parker, unparker.as_task_ref()) + }) + .unwrap_or_else(|_| { + // If the pair isn't accessible, we may be being called in a destructor. + // Just create a new pair. + let (parker, unparker) = parking::pair(); + self.as_mut() + .wait_with_parker(deadline, &parker, TaskRef::Unparker(&unparker)) + }) + } + + /// Wait until the provided deadline using the specified parker/unparker pair. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + fn wait_with_parker( + self: Pin<&mut Self>, + deadline: Option, + parker: &Parker, + unparker: TaskRef<'_>, + ) -> Option { + let mut this = self.project(); + let inner = (*this.event).borrow(); + + // Set the listener's state to `Task`. + if let Some(tag) = inner.register(this.listener.as_mut(), unparker).notified() { + // We were already notified, so we don't need to park. + return Some(tag); + } + + // Wait until a notification is received or the timeout is reached. + loop { + match deadline { + None => parker.park(), + + #[cfg(loom)] + Some(_deadline) => { + panic!("parking does not support timeouts under loom"); + } + + #[cfg(not(loom))] + Some(deadline) => { + // Make sure we're not timed out already. + let now = Instant::now(); + if now >= deadline { + // Remove our entry and check if we were notified. + return inner + .remove(this.listener.as_mut(), false) + .expect("We never removed ourself from the list") + .notified(); + } + parker.park_deadline(deadline); + } + } + + // See if we were notified. + if let Some(tag) = inner.register(this.listener.as_mut(), unparker).notified() { + return Some(tag); + } + } + } + + /// Drops this listener and discards its notification (if any) without notifying another + /// active listener. 
+ fn discard(self: Pin<&mut Self>) -> bool { + let this = self.project(); + (*this.event) + .borrow() + .remove(this.listener, false) + .map_or(false, |state| state.is_notified()) + } + + /// Poll this listener for a notification. + fn poll_internal(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let inner = (*this.event).borrow(); + + // Try to register the listener. + match inner + .register(this.listener, TaskRef::Waker(cx.waker())) + .notified() + { + Some(tag) => { + // We were already notified, so we don't need to park. + Poll::Ready(tag) + } + + None => { + // We're now waiting for a notification. + Poll::Pending + } + } + } +} + +/// The state of a listener. +#[derive(PartialEq)] +enum State { + /// The listener was just created. + Created, + + /// The listener has received a notification. + /// + /// The `bool` is `true` if this was an "additional" notification. + Notified { + /// Whether or not this is an "additional" notification. + additional: bool, + + /// The tag associated with the notification. + tag: T, + }, + + /// A task is waiting for a notification. + Task(Task), + + /// Empty hole used to replace a notified listener. + NotifiedTaken, +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Created => f.write_str("Created"), + Self::Notified { additional, .. } => f + .debug_struct("Notified") + .field("additional", additional) + .finish(), + Self::Task(_) => f.write_str("Task(_)"), + Self::NotifiedTaken => f.write_str("NotifiedTaken"), + } + } +} + +impl State { + fn is_notified(&self) -> bool { + matches!(self, Self::Notified { .. } | Self::NotifiedTaken) + } + + /// If this state was notified, return the tag associated with the notification. + #[allow(unused)] + fn notified(self) -> Option { + match self { + Self::Notified { tag, .. 
} => Some(tag), + Self::NotifiedTaken => panic!("listener was already notified but taken"), + _ => None, + } + } +} + +/// The result of registering a listener. +#[derive(Debug, PartialEq)] +enum RegisterResult { + /// The listener was already notified. + Notified(T), + + /// The listener has been registered. + Registered, + + /// The listener was never inserted into the list. + NeverInserted, +} + +impl RegisterResult { + /// Whether or not the listener was notified. + /// + /// Panics if the listener was never inserted into the list. + fn notified(self) -> Option { + match self { + Self::Notified(tag) => Some(tag), + Self::Registered => None, + Self::NeverInserted => panic!("{}", NEVER_INSERTED_PANIC), + } + } +} + +/// A task that can be woken up. +#[derive(Debug, Clone)] +enum Task { + /// A waker that wakes up a future. + Waker(Waker), + + /// An unparker that wakes up a thread. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + Unparker(Unparker), +} + +impl Task { + fn as_task_ref(&self) -> TaskRef<'_> { + match self { + Self::Waker(waker) => TaskRef::Waker(waker), + #[cfg(all(feature = "std", not(target_family = "wasm")))] + Self::Unparker(unparker) => TaskRef::Unparker(unparker), + } + } + + fn wake(self) { + match self { + Self::Waker(waker) => waker.wake(), + #[cfg(all(feature = "std", not(target_family = "wasm")))] + Self::Unparker(unparker) => { + unparker.unpark(); + } + } + } +} + +impl PartialEq for Task { + fn eq(&self, other: &Self) -> bool { + self.as_task_ref().will_wake(other.as_task_ref()) + } +} + +/// A reference to a task. +#[derive(Clone, Copy)] +enum TaskRef<'a> { + /// A waker that wakes up a future. + Waker(&'a Waker), + + /// An unparker that wakes up a thread. + #[cfg(all(feature = "std", not(target_family = "wasm")))] + Unparker(&'a Unparker), +} + +impl TaskRef<'_> { + /// Tells if this task will wake up the other task. 
+ #[allow(unreachable_patterns)] + fn will_wake(self, other: Self) -> bool { + match (self, other) { + (Self::Waker(a), Self::Waker(b)) => a.will_wake(b), + #[cfg(all(feature = "std", not(target_family = "wasm")))] + (Self::Unparker(_), Self::Unparker(_)) => { + // TODO: Use unreleased will_unpark API. + false + } + _ => false, + } + } + + /// Converts this task reference to a task by cloning. + fn into_task(self) -> Task { + match self { + Self::Waker(waker) => Task::Waker(waker.clone()), + #[cfg(all(feature = "std", not(target_family = "wasm")))] + Self::Unparker(unparker) => Task::Unparker(unparker.clone()), + } + } +} + +const NEVER_INSERTED_PANIC: &str = "\ +EventListener was not inserted into the linked list, make sure you're not polling \ +EventListener/listener! after it has finished"; + +#[cfg(not(loom))] +/// Synchronization primitive implementation. +mod sync { + #[cfg(not(feature = "portable-atomic"))] + pub(super) use alloc::sync::Arc; + #[cfg(not(feature = "portable-atomic"))] + pub(super) use core::sync::atomic; + + #[cfg(feature = "portable-atomic")] + pub(super) use portable_atomic_crate as atomic; + #[cfg(feature = "portable-atomic")] + pub(super) use portable_atomic_util::Arc; + + #[cfg(all(feature = "std", not(loom)))] + pub(super) use std::sync::{Mutex, MutexGuard}; + #[cfg(all(feature = "std", not(target_family = "wasm"), not(loom)))] + pub(super) use std::thread_local; + + pub(super) trait WithMut { + type Output; + + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Output) -> R; + } + + impl WithMut for atomic::AtomicPtr { + type Output = *mut T; + + #[inline] + fn with_mut(&mut self, f: F) -> R + where + F: FnOnce(&mut Self::Output) -> R, + { + f(self.get_mut()) + } + } + + pub(crate) mod cell { + pub(crate) use core::cell::Cell; + + /// This newtype around *mut T exists for interoperability with loom::cell::ConstPtr, + /// which works as a guard and performs additional logic to track access scope. 
+ pub(crate) struct ConstPtr(*mut T); + impl ConstPtr { + pub(crate) unsafe fn deref(&self) -> &T { + &*self.0 + } + + #[allow(unused)] // std code does not need this + pub(crate) unsafe fn deref_mut(&mut self) -> &mut T { + &mut *self.0 + } + } + + /// This UnsafeCell wrapper exists for interoperability with loom::cell::UnsafeCell, and + /// only contains the interface that is needed for this crate. + #[derive(Debug, Default)] + pub(crate) struct UnsafeCell(core::cell::UnsafeCell); + + impl UnsafeCell { + pub(crate) fn new(data: T) -> UnsafeCell { + UnsafeCell(core::cell::UnsafeCell::new(data)) + } + + pub(crate) fn get(&self) -> ConstPtr { + ConstPtr(self.0.get()) + } + + #[allow(dead_code)] // no_std does not need this + pub(crate) fn into_inner(self) -> T { + self.0.into_inner() + } + } + } +} + +#[cfg(loom)] +/// Synchronization primitive implementation. +mod sync { + pub(super) use loom::sync::{atomic, Arc, Mutex, MutexGuard}; + pub(super) use loom::{cell, thread_local}; +} + +fn __test_send_and_sync() { + fn _assert_send() {} + fn _assert_sync() {} + + _assert_send::>(); + _assert_sync::>(); + _assert_send::>(); + _assert_sync::>(); + _assert_send::>(); + _assert_sync::>(); + _assert_send::>(); + _assert_sync::>(); +} + +#[doc(hidden)] +mod __sealed { + use super::{EventListener, __private::StackListener}; + + pub trait Sealed {} + impl Sealed for EventListener {} + impl Sealed for StackListener<'_, '_, T> {} +} + +/// Semver exempt module. +#[doc(hidden)] +pub mod __private { + pub use core::pin::Pin; + + use super::{Event, Inner, InnerListener}; + use core::fmt; + use core::future::Future; + use core::task::{Context, Poll}; + + pin_project_lite::pin_project! { + /// Space on the stack where a stack-based listener can be allocated. 
+ #[doc(hidden)] + #[project(!Unpin)] + pub struct StackSlot<'ev, T> { + #[pin] + listener: InnerListener> + } + } + + impl fmt::Debug for StackSlot<'_, T> { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("StackSlot").finish_non_exhaustive() + } + } + + impl core::panic::UnwindSafe for StackSlot<'_, T> {} + impl core::panic::RefUnwindSafe for StackSlot<'_, T> {} + unsafe impl Send for StackSlot<'_, T> {} + unsafe impl Sync for StackSlot<'_, T> {} + + impl<'ev, T> StackSlot<'ev, T> { + /// Create a new `StackSlot` on the stack. + #[inline] + #[doc(hidden)] + pub fn new(event: &'ev Event) -> Self { + let inner = unsafe { &*event.inner() }; + Self { + listener: InnerListener { + event: inner, + listener: None, + }, + } + } + + /// Start listening on this `StackSlot`. + #[inline] + #[doc(hidden)] + pub fn listen(mut self: Pin<&mut Self>) -> StackListener<'ev, '_, T> { + // Insert ourselves into the list. + self.as_mut().project().listener.listen(); + + // We are now listening. + StackListener { slot: self } + } + } + + /// A stack-based `EventListener`. + #[doc(hidden)] + pub struct StackListener<'ev, 'stack, T> { + slot: Pin<&'stack mut StackSlot<'ev, T>>, + } + + impl core::panic::UnwindSafe for StackListener<'_, '_, T> {} + impl core::panic::RefUnwindSafe for StackListener<'_, '_, T> {} + impl Unpin for StackListener<'_, '_, T> {} + + impl fmt::Debug for StackListener<'_, '_, T> { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("StackListener").finish_non_exhaustive() + } + } + + impl<'ev, T> StackListener<'ev, '_, T> { + #[inline] + fn listener(&self) -> &InnerListener> { + &self.slot.listener + } + + #[inline] + fn listener_mut(&mut self) -> Pin<&mut InnerListener>> { + self.slot.as_mut().project().listener + } + } + + forward_impl_to_listener! 
{ T => StackListener<'_, '_, T> } +} diff --git a/.cargo-vendor/event-listener/src/no_std.rs b/.cargo-vendor/event-listener/src/no_std.rs new file mode 100644 index 0000000000..a6a2f25e6e --- /dev/null +++ b/.cargo-vendor/event-listener/src/no_std.rs @@ -0,0 +1,1429 @@ +//! Implementation of `event-listener` built exclusively on atomics. +//! +//! On `no_std`, we don't have access to `Mutex`, so we can't use intrusive linked lists like the `std` +//! implementation. Normally, we would use a concurrent atomic queue to store listeners, but benchmarks +//! show that using queues in this way is very slow, especially for the single threaded use-case. +//! +//! We've found that it's easier to assume that the `Event` won't be under high contention in most use +//! cases. Therefore, we use a spinlock that protects a linked list of listeners, and fall back to an +//! atomic queue if the lock is contended. Benchmarks show that this is about 20% slower than the std +//! implementation, but still much faster than using a queue. + +#[path = "no_std/node.rs"] +mod node; + +use node::{Node, NothingProducer, TaskWaiting}; + +use crate::notify::{GenericNotify, Internal, Notification}; +use crate::sync::atomic::{AtomicBool, Ordering}; +use crate::sync::cell::{Cell, ConstPtr, UnsafeCell}; +use crate::sync::Arc; +use crate::{RegisterResult, State, Task, TaskRef}; + +use core::fmt; +use core::marker::PhantomData; +use core::mem; +use core::num::NonZeroUsize; +use core::ops; +use core::pin::Pin; + +use alloc::vec::Vec; + +impl crate::Inner { + /// Locks the list. + fn try_lock(&self) -> Option> { + self.list.inner.try_lock().map(|guard| ListGuard { + inner: self, + guard: Some(guard), + tasks: alloc::vec![], + }) + } + + /// Force a queue update. + fn queue_update(&self) { + // Locking and unlocking the mutex will drain the queue if there is no contention. + drop(self.try_lock()); + } + + /// Add a new listener to the list. + /// + /// Does nothing if the list is already registered. 
+ pub(crate) fn insert(&self, mut listener: Pin<&mut Option>>) { + if listener.as_ref().as_pin_ref().is_some() { + // Already inserted. + return; + } + + match self.try_lock() { + Some(mut lock) => { + let key = lock.insert(State::Created); + *listener = Some(Listener::HasNode(key)); + } + + None => { + // Push it to the queue. + let (node, task_waiting) = Node::listener(); + self.list.queue.push(node).unwrap(); + *listener = Some(Listener::Queued(task_waiting)); + + // Force a queue update. + self.queue_update(); + } + } + } + + /// Remove a listener from the list. + pub(crate) fn remove( + &self, + mut listener: Pin<&mut Option>>, + propagate: bool, + ) -> Option> { + loop { + let state = match listener.as_mut().take() { + Some(Listener::HasNode(key)) => { + match self.try_lock() { + Some(mut list) => { + // Fast path removal. + list.remove(key, propagate) + } + + None => { + // Slow path removal. + // This is why intrusive lists don't work on no_std. + let node = Node::RemoveListener { + listener: key, + propagate, + }; + + self.list.queue.push(node).unwrap(); + + // Force a queue update. + self.queue_update(); + + None + } + } + } + + Some(Listener::Queued(tw)) => { + // Make sure it's not added after the queue is drained. + if let Some(key) = tw.cancel() { + // If it was already added, set up our listener and try again. + *listener = Some(Listener::HasNode(key)); + continue; + } + + None + } + + None => None, + + _ => unreachable!(), + }; + + return state; + } + } + + /// Notifies a number of entries. + #[cold] + pub(crate) fn notify(&self, notify: impl Notification) -> usize { + match self.try_lock() { + Some(mut guard) => { + // Notify the listeners. + guard.notify(notify) + } + + None => { + // Push it to the queue. + let node = Node::Notify(GenericNotify::new( + notify.count(Internal::new()), + notify.is_additional(Internal::new()), + NothingProducer::default(), + )); + + self.list.queue.push(node).unwrap(); + + // Force a queue update. 
+ self.queue_update(); + + // We haven't notified anyone yet. + 0 + } + } + } + + /// Register a task to be notified when the event is triggered. + /// + /// Returns `true` if the listener was already notified, and `false` otherwise. If the listener + /// isn't inserted, returns `None`. + pub(crate) fn register( + &self, + mut listener: Pin<&mut Option>>, + task: TaskRef<'_>, + ) -> RegisterResult { + loop { + match listener.as_mut().take() { + Some(Listener::HasNode(key)) => { + *listener = Some(Listener::HasNode(key)); + match self.try_lock() { + Some(mut guard) => { + // Fast path registration. + return guard.register(listener, task); + } + + None => { + // Wait for the lock. + let node = Node::Waiting(task.into_task()); + self.list.queue.push(node).unwrap(); + + // Force a queue update. + self.queue_update(); + + return RegisterResult::Registered; + } + } + } + + Some(Listener::Queued(task_waiting)) => { + // Force a queue update. + self.queue_update(); + + // Are we done yet? + match task_waiting.status() { + Some(key) => { + assert!(key.get() != usize::MAX); + + // We're inserted now, adjust state. + *listener = Some(Listener::HasNode(key)); + } + + None => { + // We're still queued, so register the task. + task_waiting.register(task.into_task()); + *listener = Some(Listener::Queued(task_waiting)); + + // Force a queue update. + self.queue_update(); + + return RegisterResult::Registered; + } + } + } + + None => return RegisterResult::NeverInserted, + + _ => unreachable!(), + } + } + } +} + +#[derive(Debug)] +pub(crate) struct List { + /// The inner list. + inner: Mutex>, + + /// The queue of pending operations. + queue: concurrent_queue::ConcurrentQueue>, +} + +impl List { + pub(super) fn new() -> List { + List { + inner: Mutex::new(ListenerSlab::new()), + queue: concurrent_queue::ConcurrentQueue::unbounded(), + } + } + + /// Try to get the total number of listeners without blocking. 
+ pub(super) fn try_total_listeners(&self) -> Option { + self.inner.try_lock().map(|lock| lock.listeners.len()) + } +} + +/// The guard returned by [`Inner::lock`]. +pub(crate) struct ListGuard<'a, T> { + /// Reference to the inner state. + pub(crate) inner: &'a crate::Inner, + + /// The locked list. + pub(crate) guard: Option>>, + + /// Tasks to wake up once this guard is dropped. + tasks: Vec, +} + +impl ListGuard<'_, T> { + #[cold] + fn process_nodes_slow(&mut self, start_node: Node) { + let guard = self.guard.as_mut().unwrap(); + + // Process the start node. + self.tasks.extend(start_node.apply(guard)); + + // Process all remaining nodes. + while let Ok(node) = self.inner.list.queue.pop() { + self.tasks.extend(node.apply(guard)); + } + } + + #[inline] + fn process_nodes(&mut self) { + // Process every node left in the queue. + if let Ok(start_node) = self.inner.list.queue.pop() { + self.process_nodes_slow(start_node); + } + } +} + +impl ops::Deref for ListGuard<'_, T> { + type Target = ListenerSlab; + + fn deref(&self) -> &Self::Target { + self.guard.as_ref().unwrap() + } +} + +impl ops::DerefMut for ListGuard<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.guard.as_mut().unwrap() + } +} + +impl Drop for ListGuard<'_, T> { + fn drop(&mut self) { + while self.guard.is_some() { + // Process every node left in the queue. + self.process_nodes(); + + // Update the atomic `notified` counter. + let list = self.guard.take().unwrap(); + let notified = if list.notified < list.len { + list.notified + } else { + usize::MAX + }; + + self.inner.notified.store(notified, Ordering::Release); + + // Drop the actual lock. + drop(list); + + // Wakeup all tasks. + for task in self.tasks.drain(..) { + task.wake(); + } + + // There is a deadlock where a node is pushed to the end of the queue after we've finished + // process_nodes() but before we've finished dropping the lock. 
This can lead to some + // notifications not being properly delivered, or listeners not being added to the list. + // Therefore check before we finish dropping if there is anything left in the queue, and + // if so, lock it again and force a queue update. + if !self.inner.list.queue.is_empty() { + self.guard = self.inner.list.inner.try_lock(); + } + } + } +} + +/// An entry representing a registered listener. +enum Entry { + /// Contains the listener state. + Listener { + /// The state of the listener. + state: Cell>, + + /// The previous listener in the list. + prev: Cell>, + + /// The next listener in the list. + next: Cell>, + }, + + /// An empty slot that contains the index of the next empty slot. + Empty(NonZeroUsize), + + /// Sentinel value. + Sentinel, +} + +struct TakenState<'a, T> { + slot: &'a Cell>, + state: State, +} + +impl Drop for TakenState<'_, T> { + fn drop(&mut self) { + self.slot + .set(mem::replace(&mut self.state, State::NotifiedTaken)); + } +} + +impl fmt::Debug for TakenState<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.state, f) + } +} + +impl PartialEq for TakenState<'_, T> { + fn eq(&self, other: &Self) -> bool { + self.state == other.state + } +} + +impl<'a, T> TakenState<'a, T> { + fn new(slot: &'a Cell>) -> Self { + let state = slot.replace(State::NotifiedTaken); + Self { slot, state } + } +} + +impl fmt::Debug for Entry { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Entry::Listener { state, next, prev } => f + .debug_struct("Listener") + .field("state", &TakenState::new(state)) + .field("prev", prev) + .field("next", next) + .finish(), + Entry::Empty(next) => f.debug_tuple("Empty").field(next).finish(), + Entry::Sentinel => f.debug_tuple("Sentinel").finish(), + } + } +} + +impl PartialEq for Entry { + fn eq(&self, other: &Entry) -> bool { + match (self, other) { + ( + Self::Listener { + state: state1, + prev: prev1, + next: next1, + }, + Self::Listener { 
+ state: state2, + prev: prev2, + next: next2, + }, + ) => { + if TakenState::new(state1) != TakenState::new(state2) { + return false; + } + + prev1.get() == prev2.get() && next1.get() == next2.get() + } + (Self::Empty(next1), Self::Empty(next2)) => next1 == next2, + (Self::Sentinel, Self::Sentinel) => true, + _ => false, + } + } +} + +impl Entry { + fn state(&self) -> &Cell> { + match self { + Entry::Listener { state, .. } => state, + _ => unreachable!(), + } + } + + fn prev(&self) -> &Cell> { + match self { + Entry::Listener { prev, .. } => prev, + _ => unreachable!(), + } + } + + fn next(&self) -> &Cell> { + match self { + Entry::Listener { next, .. } => next, + _ => unreachable!(), + } + } +} + +/// A linked list of entries. +pub(crate) struct ListenerSlab { + /// The raw list of entries. + listeners: Vec>, + + /// First entry in the list. + head: Option, + + /// Last entry in the list. + tail: Option, + + /// The first unnotified entry in the list. + start: Option, + + /// The number of notified entries in the list. + notified: usize, + + /// The total number of listeners. + len: usize, + + /// The index of the first `Empty` entry, or the length of the list plus one if there + /// are no empty entries. + first_empty: NonZeroUsize, +} + +impl fmt::Debug for ListenerSlab { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ListenerSlab") + .field("listeners", &self.listeners) + .field("head", &self.head) + .field("tail", &self.tail) + .field("start", &self.start) + .field("notified", &self.notified) + .field("len", &self.len) + .field("first_empty", &self.first_empty) + .finish() + } +} + +impl ListenerSlab { + /// Create a new, empty list. + pub(crate) fn new() -> Self { + Self { + listeners: alloc::vec![Entry::Sentinel], + head: None, + tail: None, + start: None, + notified: 0, + len: 0, + first_empty: unsafe { NonZeroUsize::new_unchecked(1) }, + } + } + + /// Inserts a new entry into the list. 
+ pub(crate) fn insert(&mut self, state: State) -> NonZeroUsize { + // Add the new entry into the list. + let key = { + let entry = Entry::Listener { + state: Cell::new(state), + prev: Cell::new(self.tail), + next: Cell::new(None), + }; + + let key = self.first_empty; + if self.first_empty.get() == self.listeners.len() { + // No empty entries, so add a new entry. + self.listeners.push(entry); + + // SAFETY: Guaranteed to not overflow, since the Vec would have panicked already. + self.first_empty = unsafe { NonZeroUsize::new_unchecked(self.listeners.len()) }; + } else { + // There is an empty entry, so replace it. + let slot = &mut self.listeners[key.get()]; + let next = match mem::replace(slot, entry) { + Entry::Empty(next) => next, + _ => unreachable!(), + }; + + self.first_empty = next; + } + + key + }; + + // Replace the tail with the new entry. + match mem::replace(&mut self.tail, Some(key)) { + None => self.head = Some(key), + Some(tail) => { + let tail = &self.listeners[tail.get()]; + tail.next().set(Some(key)); + } + } + + // If there are no listeners that have been notified, then the new listener is the next + // listener to be notified. + if self.start.is_none() { + self.start = Some(key); + } + + // Increment the length. + self.len += 1; + + key + } + + /// Removes an entry from the list and returns its state. + pub(crate) fn remove(&mut self, key: NonZeroUsize, propagate: bool) -> Option> { + let entry = &self.listeners[key.get()]; + let prev = entry.prev().get(); + let next = entry.next().get(); + + // Unlink from the previous entry. + match prev { + None => self.head = next, + Some(p) => self.listeners[p.get()].next().set(next), + } + + // Unlink from the next entry. + match next { + None => self.tail = prev, + Some(n) => self.listeners[n.get()].prev().set(prev), + } + + // If this was the first unnotified entry, move the pointer to the next one. + if self.start == Some(key) { + self.start = next; + } + + // Extract the state. 
+ let entry = mem::replace( + &mut self.listeners[key.get()], + Entry::Empty(self.first_empty), + ); + self.first_empty = key; + + let mut state = match entry { + Entry::Listener { state, .. } => state.into_inner(), + _ => unreachable!(), + }; + + // Update the counters. + if state.is_notified() { + self.notified = self.notified.saturating_sub(1); + + if propagate { + // Propagate the notification to the next entry. + let state = mem::replace(&mut state, State::NotifiedTaken); + if let State::Notified { tag, additional } = state { + let tags = { + let mut tag = Some(tag); + move || tag.take().expect("called more than once") + }; + + self.notify(GenericNotify::new(1, additional, tags)); + } + } + } + self.len -= 1; + + Some(state) + } + + /// Notifies a number of listeners. + #[cold] + pub(crate) fn notify(&mut self, mut notify: impl Notification) -> usize { + let mut n = notify.count(Internal::new()); + let is_additional = notify.is_additional(Internal::new()); + if !is_additional { + // Make sure we're not notifying more than we have. + if n <= self.notified { + return 0; + } + n -= self.notified; + } + + let original_count = n; + while n > 0 { + n -= 1; + + // Notify the next entry. + match self.start { + None => return original_count - n - 1, + + Some(e) => { + // Get the entry and move the pointer forwards. + let entry = &self.listeners[e.get()]; + self.start = entry.next().get(); + + // Set the state to `Notified` and notify. + let tag = notify.next_tag(Internal::new()); + if let State::Task(task) = entry.state().replace(State::Notified { + tag, + additional: is_additional, + }) { + task.wake(); + } + + // Bump the notified count. + self.notified += 1; + } + } + } + + original_count - n + } + + /// Register a task to be notified when the event is triggered. + /// + /// Returns `true` if the listener was already notified, and `false` otherwise. If the listener + /// isn't inserted, returns `None`. 
+ pub(crate) fn register( + &mut self, + mut listener: Pin<&mut Option>>, + task: TaskRef<'_>, + ) -> RegisterResult { + let key = match *listener { + Some(Listener::HasNode(key)) => key, + _ => return RegisterResult::NeverInserted, + }; + + let entry = &self.listeners[key.get()]; + + // Take the state out and check it. + match entry.state().replace(State::NotifiedTaken) { + State::Notified { tag, .. } => { + // The listener was already notified, so we don't need to do anything. + self.remove(key, false); + *listener = None; + RegisterResult::Notified(tag) + } + + State::Task(other_task) => { + // Only replace the task if it's not the same as the one we're registering. + if task.will_wake(other_task.as_task_ref()) { + entry.state().set(State::Task(other_task)); + } else { + entry.state().set(State::Task(task.into_task())); + } + + RegisterResult::Registered + } + + _ => { + // Register the task. + entry.state().set(State::Task(task.into_task())); + RegisterResult::Registered + } + } + } +} + +pub(crate) enum Listener { + /// The listener has a node inside of the linked list. + HasNode(NonZeroUsize), + + /// The listener has an entry in the queue that may or may not have a task waiting. + Queued(Arc), + + /// Eat the generic type for consistency. + _EatGenericType(PhantomData), +} + +impl fmt::Debug for Listener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::HasNode(key) => f.debug_tuple("HasNode").field(key).finish(), + Self::Queued(tw) => f.debug_tuple("Queued").field(tw).finish(), + Self::_EatGenericType(_) => unreachable!(), + } + } +} + +impl Unpin for Listener {} + +impl PartialEq for Listener { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::HasNode(a), Self::HasNode(b)) => a == b, + (Self::Queued(a), Self::Queued(b)) => Arc::ptr_eq(a, b), + _ => false, + } + } +} + +/// A simple mutex type that optimistically assumes that the lock is uncontended. +pub(crate) struct Mutex { + /// The inner value. 
+ value: UnsafeCell, + + /// Whether the mutex is locked. + locked: AtomicBool, +} + +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(lock) = self.try_lock() { + f.debug_tuple("Mutex").field(&*lock).finish() + } else { + f.write_str("Mutex { }") + } + } +} + +impl Mutex { + /// Create a new mutex. + pub(crate) fn new(value: T) -> Self { + Self { + value: UnsafeCell::new(value), + locked: AtomicBool::new(false), + } + } + + /// Lock the mutex. + pub(crate) fn try_lock(&self) -> Option> { + // Try to lock the mutex. + if self + .locked + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + // We have successfully locked the mutex. + Some(MutexGuard { + mutex: self, + guard: self.value.get(), + }) + } else { + self.try_lock_slow() + } + } + + #[cold] + fn try_lock_slow(&self) -> Option> { + // Assume that the contention is short-term. + // Spin for a while to see if the mutex becomes unlocked. + let mut spins = 100u32; + + loop { + if self + .locked + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + // We have successfully locked the mutex. + return Some(MutexGuard { + mutex: self, + guard: self.value.get(), + }); + } + + // Use atomic loads instead of compare-exchange. + while self.locked.load(Ordering::Relaxed) { + // Return None once we've exhausted the number of spins. 
+ spins = spins.checked_sub(1)?; + } + } + } +} + +pub(crate) struct MutexGuard<'a, T> { + mutex: &'a Mutex, + guard: ConstPtr, +} + +impl<'a, T> Drop for MutexGuard<'a, T> { + fn drop(&mut self) { + self.mutex.locked.store(false, Ordering::Release); + } +} + +impl<'a, T> ops::Deref for MutexGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { self.guard.deref() } + } +} + +impl<'a, T> ops::DerefMut for MutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { self.guard.deref_mut() } + } +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(target_family = "wasm")] + use wasm_bindgen_test::wasm_bindgen_test as test; + + #[test] + fn smoke_mutex() { + let mutex = Mutex::new(0); + + { + let mut guard = mutex.try_lock().unwrap(); + *guard += 1; + } + + { + let mut guard = mutex.try_lock().unwrap(); + *guard += 1; + } + + let guard = mutex.try_lock().unwrap(); + assert_eq!(*guard, 2); + } + + #[test] + fn smoke_listener_slab() { + let mut listeners = ListenerSlab::<()>::new(); + + // Insert a few listeners. 
+ let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove one. + assert_eq!(listeners.remove(key2, false), Some(State::Created)); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(2).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key1)), + next: Cell::new(None), + } + ); + } + + #[test] + fn listener_slab_notify() { + let mut listeners = ListenerSlab::new(); + + // Insert a few listeners. 
+ let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + // Notify one. + listeners.notify(GenericNotify::new(1, true, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: true, + tag: () + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove the notified listener. 
+ assert_eq!( + listeners.remove(key1, false), + Some(State::Notified { + additional: true, + tag: () + }) + ); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key2)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(1).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + } + + #[test] + fn listener_slab_register() { + let woken = Arc::new(AtomicBool::new(false)); + let waker = waker_fn::waker_fn({ + let woken = woken.clone(); + move || woken.store(true, Ordering::SeqCst) + }); + + let mut listeners = ListenerSlab::new(); + + // Insert a few listeners. + let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + // Register one. 
+ assert_eq!( + listeners.register( + Pin::new(&mut Some(Listener::HasNode(key2))), + TaskRef::Waker(&waker) + ), + RegisterResult::Registered + ); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Notify the listener. 
+ listeners.notify(GenericNotify::new(2, false, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 2); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key3)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + assert!(woken.load(Ordering::SeqCst)); + assert_eq!( + listeners.register( + Pin::new(&mut Some(Listener::HasNode(key2))), + TaskRef::Waker(&waker) + ), + RegisterResult::Notified(()) + ); + } + + #[test] + fn listener_slab_notify_prop() { + let woken = Arc::new(AtomicBool::new(false)); + let waker = waker_fn::waker_fn({ + let woken = woken.clone(); + move || woken.store(true, Ordering::SeqCst) + }); + + let mut listeners = ListenerSlab::new(); + + // Insert a few listeners. + let key1 = listeners.insert(State::Created); + let key2 = listeners.insert(State::Created); + let key3 = listeners.insert(State::Created); + + // Register one. 
+ assert_eq!( + listeners.register( + Pin::new(&mut Some(Listener::HasNode(key2))), + TaskRef::Waker(&waker) + ), + RegisterResult::Registered + ); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key1)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Notify the first listener. 
+ listeners.notify(GenericNotify::new(1, false, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Calling notify again should not change anything. + listeners.notify(GenericNotify::new(1, false, || ())); + + assert_eq!(listeners.len, 3); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key1)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(4).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key2)), + } + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Task(Task::Waker(waker.clone()))), + prev: Cell::new(Some(key1)), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove the first listener. 
+ assert_eq!( + listeners.remove(key1, false), + Some(State::Notified { + additional: false, + tag: () + }) + ); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 0); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key2)); + assert_eq!(listeners.start, Some(key2)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(1).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!(*listeners.listeners[2].prev(), Cell::new(None)); + assert_eq!(*listeners.listeners[2].next(), Cell::new(Some(key3))); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Notify the second listener. + listeners.notify(GenericNotify::new(1, false, || ())); + assert!(woken.load(Ordering::SeqCst)); + + assert_eq!(listeners.len, 2); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key2)); + assert_eq!(listeners.start, Some(key3)); + assert_eq!(listeners.first_empty, NonZeroUsize::new(1).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[2], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(Some(key3)), + } + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Created), + prev: Cell::new(Some(key2)), + next: Cell::new(None), + } + ); + + // Remove and propagate the second listener. + assert_eq!(listeners.remove(key2, true), Some(State::NotifiedTaken)); + + // The third listener should be notified. 
+ assert_eq!(listeners.len, 1); + assert_eq!(listeners.notified, 1); + assert_eq!(listeners.tail, Some(key3)); + assert_eq!(listeners.head, Some(key3)); + assert_eq!(listeners.start, None); + assert_eq!(listeners.first_empty, NonZeroUsize::new(2).unwrap()); + assert_eq!(listeners.listeners[0], Entry::Sentinel); + assert_eq!( + listeners.listeners[1], + Entry::Empty(NonZeroUsize::new(4).unwrap()) + ); + assert_eq!( + listeners.listeners[2], + Entry::Empty(NonZeroUsize::new(1).unwrap()) + ); + assert_eq!( + listeners.listeners[3], + Entry::Listener { + state: Cell::new(State::Notified { + additional: false, + tag: (), + }), + prev: Cell::new(None), + next: Cell::new(None), + } + ); + + // Remove the third listener. + assert_eq!( + listeners.remove(key3, false), + Some(State::Notified { + additional: false, + tag: () + }) + ); + } + + #[test] + fn uncontended_inner() { + let inner = crate::Inner::new(); + + // Register two listeners. + let (mut listener1, mut listener2, mut listener3) = (None, None, None); + inner.insert(Pin::new(&mut listener1)); + inner.insert(Pin::new(&mut listener2)); + inner.insert(Pin::new(&mut listener3)); + + assert_eq!( + listener1, + Some(Listener::HasNode(NonZeroUsize::new(1).unwrap())) + ); + assert_eq!( + listener2, + Some(Listener::HasNode(NonZeroUsize::new(2).unwrap())) + ); + + // Register a waker in the second listener. + let woken = Arc::new(AtomicBool::new(false)); + let waker = waker_fn::waker_fn({ + let woken = woken.clone(); + move || woken.store(true, Ordering::SeqCst) + }); + assert_eq!( + inner.register(Pin::new(&mut listener2), TaskRef::Waker(&waker)), + RegisterResult::Registered + ); + + // Notify the first listener. + inner.notify(GenericNotify::new(1, false, || ())); + assert!(!woken.load(Ordering::SeqCst)); + + // Another notify should do nothing. + inner.notify(GenericNotify::new(1, false, || ())); + assert!(!woken.load(Ordering::SeqCst)); + + // Receive the notification. 
+ assert_eq!( + inner.register(Pin::new(&mut listener1), TaskRef::Waker(&waker)), + RegisterResult::Notified(()) + ); + + // First listener is already removed. + assert!(listener1.is_none()); + + // Notify the second listener. + inner.notify(GenericNotify::new(1, false, || ())); + assert!(woken.load(Ordering::SeqCst)); + + // Remove the second listener and propagate the notification. + assert_eq!( + inner.remove(Pin::new(&mut listener2), true), + Some(State::NotifiedTaken) + ); + + // Second listener is already removed. + assert!(listener2.is_none()); + + // Third listener should be notified. + assert_eq!( + inner.register(Pin::new(&mut listener3), TaskRef::Waker(&waker)), + RegisterResult::Notified(()) + ); + } +} diff --git a/.cargo-vendor/event-listener/src/no_std/node.rs b/.cargo-vendor/event-listener/src/no_std/node.rs new file mode 100644 index 0000000000..8901eb27e6 --- /dev/null +++ b/.cargo-vendor/event-listener/src/no_std/node.rs @@ -0,0 +1,249 @@ +//! An operation that can be delayed. + +//! The node that makes up queues. + +use crate::notify::{GenericNotify, Internal, NotificationPrivate, TagProducer}; +use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::sync::Arc; +use crate::sys::ListenerSlab; +use crate::{State, Task}; + +use alloc::boxed::Box; + +use core::fmt; +use core::marker::PhantomData; +use core::mem; +use core::num::NonZeroUsize; +use core::ptr; + +pub(crate) struct NothingProducer(PhantomData); + +impl Default for NothingProducer { + fn default() -> Self { + Self(PhantomData) + } +} + +impl fmt::Debug for NothingProducer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NothingProducer").finish() + } +} + +impl TagProducer for NothingProducer { + type Tag = T; + + fn next_tag(&mut self) -> Self::Tag { + // This has to be a zero-sized type with no drop handler. 
+ assert_eq!(mem::size_of::(), 0); + assert!(!mem::needs_drop::()); + + // SAFETY: As this is a ZST without a drop handler, zero is valid. + unsafe { mem::zeroed() } + } +} + +/// A node in the backup queue. +pub(crate) enum Node { + /// This node is requesting to add a listener. + // For some reason, the MSRV build says this variant is never constructed. + #[allow(dead_code)] + AddListener { + /// The state of the listener that wants to be added. + task_waiting: Arc, + }, + + /// This node is notifying a listener. + Notify(GenericNotify>), + + /// This node is removing a listener. + RemoveListener { + /// The ID of the listener to remove. + listener: NonZeroUsize, + + /// Whether to propagate notifications to the next listener. + propagate: bool, + }, + + /// We are waiting for the mutex to lock, so they can manipulate it. + Waiting(Task), +} + +impl fmt::Debug for Node { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::AddListener { .. } => f.write_str("AddListener"), + Self::Notify(notify) => f + .debug_struct("Notify") + .field("count", ¬ify.count(Internal::new())) + .field("is_additional", ¬ify.is_additional(Internal::new())) + .finish(), + Self::RemoveListener { + listener, + propagate, + } => f + .debug_struct("RemoveListener") + .field("listener", listener) + .field("propagate", propagate) + .finish(), + Self::Waiting(_) => f.write_str("Waiting"), + } + } +} + +#[derive(Debug)] +pub(crate) struct TaskWaiting { + /// The task that is being waited on. + task: AtomicCell, + + /// The ID of the new entry. + /// + /// This is set to zero when the task is still queued, or usize::MAX when the node should not + /// be added at all. + entry_id: AtomicUsize, +} + +impl Node { + pub(crate) fn listener() -> (Self, Arc) { + // Create a new `TaskWaiting` structure. 
+ let task_waiting = Arc::new(TaskWaiting { + task: AtomicCell::new(), + entry_id: AtomicUsize::new(0), + }); + + ( + Self::AddListener { + task_waiting: task_waiting.clone(), + }, + task_waiting, + ) + } + + /// Apply the node to the list. + pub(super) fn apply(self, list: &mut ListenerSlab) -> Option { + match self { + Node::AddListener { task_waiting } => { + // If we're cancelled, do nothing. + if task_waiting.entry_id.load(Ordering::Relaxed) == usize::MAX { + return task_waiting.task.take().map(|t| *t); + } + + // Add a new entry to the list. + let key = list.insert(State::Created); + assert!(key.get() != usize::MAX); + + // Send the new key to the listener and wake it if necessary. + let old_value = task_waiting.entry_id.swap(key.get(), Ordering::Release); + + // If we're cancelled, remove ourselves from the list. + if old_value == usize::MAX { + list.remove(key, false); + } + + return task_waiting.task.take().map(|t| *t); + } + Node::Notify(notify) => { + // Notify the next `count` listeners. + list.notify(notify); + } + Node::RemoveListener { + listener, + propagate, + } => { + // Remove the listener from the list. + list.remove(listener, propagate); + } + Node::Waiting(task) => { + return Some(task); + } + } + + None + } +} + +impl TaskWaiting { + /// Determine if we are still queued. + /// + /// Returns `Some` with the entry ID if we are no longer queued. + pub(crate) fn status(&self) -> Option { + NonZeroUsize::new(self.entry_id.load(Ordering::Acquire)) + } + + /// Register a listener. + pub(crate) fn register(&self, task: Task) { + // Set the task. + if let Some(task) = self.task.replace(Some(Box::new(task))) { + task.wake(); + } + + // If the entry ID is non-zero, then we are no longer queued. + if self.status().is_some() { + // Wake the task. + if let Some(task) = self.task.take() { + task.wake(); + } + } + } + + /// Mark this listener as cancelled, indicating that it should not be inserted into the list. 
+ /// + /// If this listener was already inserted into the list, returns the entry ID. Otherwise returns + /// `None`. + pub(crate) fn cancel(&self) -> Option { + // Set the entry ID to usize::MAX. + let id = self.entry_id.swap(usize::MAX, Ordering::Release); + + // Wake the task. + if let Some(task) = self.task.take() { + task.wake(); + } + + // Return the entry ID if we were queued. + NonZeroUsize::new(id) + } +} + +/// A shared pointer to a value. +/// +/// The inner value is a `Box`. +#[derive(Debug)] +struct AtomicCell(AtomicPtr); + +impl AtomicCell { + /// Create a new `AtomicCell`. + fn new() -> Self { + Self(AtomicPtr::new(ptr::null_mut())) + } + + /// Swap the value out. + fn replace(&self, value: Option>) -> Option> { + let old_value = match value { + Some(value) => self.0.swap(Box::into_raw(value), Ordering::AcqRel), + // Acquire is needed to synchronize with the store of a non-null ptr, but since a null ptr + // will never be dereferenced, there is no need to synchronize the store of a null ptr. + None => self.0.swap(ptr::null_mut(), Ordering::Acquire), + }; + + if old_value.is_null() { + None + } else { + // SAFETY: + // - AcqRel/Acquire ensures that it does not read a pointer to potentially invalid memory. + // - We've checked that old_value is not null. + // - We do not store invalid pointers other than null in self.0. + Some(unsafe { Box::from_raw(old_value) }) + } + } + + /// Take the value out. + fn take(&self) -> Option> { + self.replace(None) + } +} + +impl Drop for AtomicCell { + fn drop(&mut self) { + self.take(); + } +} diff --git a/.cargo-vendor/event-listener/src/notify.rs b/.cargo-vendor/event-listener/src/notify.rs new file mode 100644 index 0000000000..61a9b59682 --- /dev/null +++ b/.cargo-vendor/event-listener/src/notify.rs @@ -0,0 +1,622 @@ +//! The `Notification` trait for specifying notification. 
+ +use crate::sync::atomic::{self, Ordering}; +#[cfg(feature = "std")] +use core::fmt; + +pub(crate) use __private::Internal; + +/// The type of notification to use with an [`Event`]. +/// +/// This is hidden and sealed to prevent changes to this trait from being breaking. +/// +/// [`Event`]: crate::Event +#[doc(hidden)] +pub trait NotificationPrivate { + /// The tag data associated with a notification. + type Tag; + + /// Emit a fence to ensure that the notification is visible to the listeners. + fn fence(&self, internal: Internal); + + /// Whether or not the number of currently waiting listeners should be subtracted from `count()`. + fn is_additional(&self, internal: Internal) -> bool; + + /// Get the number of listeners to wake. + fn count(&self, internal: Internal) -> usize; + + /// Get a tag to be associated with a notification. + /// + /// This method is expected to be called `count()` times. + fn next_tag(&mut self, internal: Internal) -> Self::Tag; +} + +/// A notification that can be used to notify an [`Event`]. +/// +/// This type is used by the [`Event::notify()`] function to determine how many listeners to wake up, whether +/// or not to subtract additional listeners, and other properties. The actual internal data is hidden in a +/// private trait and is intentionally not exposed. This means that users cannot manually implement the +/// [`Notification`] trait. However, it also means that changing the underlying trait is not a semver breaking +/// change. +/// +/// Users can create types that implement notifications using the combinators on the [`IntoNotification`] type. +/// Typical construction of a [`Notification`] starts with a numeric literal (like `3usize`) and then optionally +/// adding combinators. 
+/// +/// # Example +/// +/// ``` +/// use event_listener::{Event, IntoNotification, Notification}; +/// +/// fn notify(ev: &Event, notify: impl Notification) { +/// ev.notify(notify); +/// } +/// +/// notify(&Event::new(), 1.additional()); +/// ``` +/// +/// [`Event`]: crate::Event +pub trait Notification: NotificationPrivate {} +impl Notification for N {} + +/// Notify a given number of unnotifed listeners. +#[derive(Debug, Clone)] +#[doc(hidden)] +pub struct Notify(usize); + +impl Notify { + /// Create a new `Notify` with the given number of listeners to notify. + fn new(count: usize) -> Self { + Self(count) + } +} + +impl NotificationPrivate for Notify { + type Tag = (); + + fn is_additional(&self, _: Internal) -> bool { + false + } + + fn fence(&self, _: Internal) { + full_fence(); + } + + fn count(&self, _: Internal) -> usize { + self.0 + } + + fn next_tag(&mut self, _: Internal) -> Self::Tag {} +} + +/// Make the underlying notification additional. +#[derive(Debug, Clone)] +#[doc(hidden)] +pub struct Additional(N); + +impl Additional { + /// Create a new `Additional` with the given notification. + fn new(inner: N) -> Self { + Self(inner) + } +} + +impl NotificationPrivate for Additional +where + N: Notification + ?Sized, +{ + type Tag = N::Tag; + + fn is_additional(&self, _: Internal) -> bool { + true + } + + fn fence(&self, i: Internal) { + self.0.fence(i); + } + + fn count(&self, i: Internal) -> usize { + self.0.count(i) + } + + fn next_tag(&mut self, i: Internal) -> Self::Tag { + self.0.next_tag(i) + } +} + +/// Don't emit a fence for this notification. +#[derive(Debug, Clone)] +#[doc(hidden)] +pub struct Relaxed(N); + +impl Relaxed { + /// Create a new `Relaxed` with the given notification. 
+ fn new(inner: N) -> Self { + Self(inner) + } +} + +impl NotificationPrivate for Relaxed +where + N: Notification + ?Sized, +{ + type Tag = N::Tag; + + fn is_additional(&self, i: Internal) -> bool { + self.0.is_additional(i) + } + + fn fence(&self, _: Internal) { + // Don't emit a fence. + } + + fn count(&self, i: Internal) -> usize { + self.0.count(i) + } + + fn next_tag(&mut self, i: Internal) -> Self::Tag { + self.0.next_tag(i) + } +} + +/// Use a tag to notify listeners. +#[cfg(feature = "std")] +#[derive(Debug, Clone)] +#[doc(hidden)] +pub struct Tag { + tag: T, + inner: N, +} + +#[cfg(feature = "std")] +impl Tag { + /// Create a new `Tag` with the given tag and notification. + fn new(tag: T, inner: N) -> Self + where + N: Sized, + { + Self { tag, inner } + } +} + +#[cfg(feature = "std")] +impl NotificationPrivate for Tag +where + N: Notification + ?Sized, + T: Clone, +{ + type Tag = T; + + fn is_additional(&self, i: Internal) -> bool { + self.inner.is_additional(i) + } + + fn fence(&self, i: Internal) { + self.inner.fence(i); + } + + fn count(&self, i: Internal) -> usize { + self.inner.count(i) + } + + fn next_tag(&mut self, _: Internal) -> Self::Tag { + self.tag.clone() + } +} + +/// Use a function to generate a tag to notify listeners. +#[cfg(feature = "std")] +#[doc(hidden)] +pub struct TagWith { + tag: F, + inner: N, +} + +#[cfg(feature = "std")] +impl fmt::Debug for TagWith { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + struct Ellipses; + + impl fmt::Debug for Ellipses { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("..") + } + } + + f.debug_struct("TagWith") + .field("tag", &Ellipses) + .field("inner", &self.inner) + .finish() + } +} + +#[cfg(feature = "std")] +impl TagWith { + /// Create a new `TagFn` with the given tag function and notification. 
+ fn new(tag: F, inner: N) -> Self { + Self { tag, inner } + } +} + +#[cfg(feature = "std")] +impl NotificationPrivate for TagWith +where + N: Notification + ?Sized, + F: FnMut() -> T, +{ + type Tag = T; + + fn is_additional(&self, i: Internal) -> bool { + self.inner.is_additional(i) + } + + fn fence(&self, i: Internal) { + self.inner.fence(i); + } + + fn count(&self, i: Internal) -> usize { + self.inner.count(i) + } + + fn next_tag(&mut self, _: Internal) -> Self::Tag { + (self.tag)() + } +} + +/// A generic notification. +#[derive(Debug)] +pub(crate) struct GenericNotify { + /// Number of listeners to notify. + count: usize, + + /// Whether this notification is additional. + additional: bool, + + /// Generate tags. + tags: F, +} + +impl> GenericNotify { + pub(crate) fn new(count: usize, additional: bool, tags: F) -> Self { + Self { + count, + additional, + tags, + } + } +} + +impl> NotificationPrivate for GenericNotify { + type Tag = T; + + fn is_additional(&self, _: Internal) -> bool { + self.additional + } + + fn fence(&self, _: Internal) { + // Don't emit a fence. + } + + fn count(&self, _: Internal) -> usize { + self.count + } + + fn next_tag(&mut self, _: Internal) -> Self::Tag { + self.tags.next_tag() + } +} + +/// The producer for a generic notification. +pub(crate) trait TagProducer { + type Tag; + + /// Get the next tag. + fn next_tag(&mut self) -> Self::Tag; +} + +impl T> TagProducer for F { + type Tag = T; + + fn next_tag(&mut self) -> T { + (self)() + } +} + +/// A value that can be converted into a [`Notification`]. +/// +/// This trait adds onto the [`Notification`] trait by providing combinators that can be applied to all +/// notification types as well as numeric literals. This transforms what would normally be: +/// +/// ``` +/// use event_listener::Event; +/// +/// let event = Event::new(); +/// +/// // Note that each use case needs its own function, leading to bloat. 
+/// event.notify(1); +/// event.notify_additional(3); +/// event.notify_relaxed(5); +/// event.notify_additional_relaxed(2); +/// ``` +/// +/// into this: +/// +/// ``` +/// use event_listener::{Event, IntoNotification, Listener}; +/// +/// let event = Event::new(); +/// +/// event.notify(1); +/// event.notify(3.additional()); +/// event.notify(5.relaxed()); +/// event.notify(2.additional().relaxed()); +/// ``` +/// +/// This trait is implemented for all types that implement [`Notification`], as well as for non-floating-point +/// numeric literals (`usize`, `i32`, etc). +/// +/// This function can be thought of as being analogous to [`std::iter::IntoIterator`], but for [`Notification`]. +pub trait IntoNotification: __private::Sealed { + /// The tag data associated with a notification. + /// + /// By default, most [`Event`]s will use the unit type, `()`. However, this can be used to pass data along to + /// the listener. + type Tag; + + /// The notification type. + /// + /// Tells what kind of underlying type that the [`Notification`] is. You probably don't need to worry about + /// this. + type Notify: Notification; + + /// Convert this value into a notification. + /// + /// This allows the user to convert an [`IntoNotification`] into a [`Notification`]. + /// + /// # Panics + /// + /// This function panics if the value represents a negative number of notifications. + /// + /// # Examples + /// + /// ``` + /// use event_listener::IntoNotification; + /// + /// let _ = 3.into_notification(); + /// ``` + fn into_notification(self) -> Self::Notify; + + /// Convert this value into an additional notification. + /// + /// By default, notifications ignore listeners that are already notified. Generally, this happens when there + /// is an [`EventListener`] that has been woken up, but hasn't been polled to completion or waited on yet. + /// For instance, if you have three notified listeners and you call `event.notify(5)`, only two listeners + /// will be woken up. 
+ /// + /// This default behavior is generally desired. For instance, if you are writing a `Mutex` implementation + /// powered by an [`Event`], you usually only want one consumer to be notified at a time. If you notified + /// a listener when another listener is already notified, you would have unnecessary contention for your + /// lock, as both listeners fight over the lock. Therefore, you would call `event.notify(1)` to make sure + /// *at least* one listener is awake. + /// + /// Sometimes, this behavior is not desired. For instance, if you are writing an MPMC channel, it is desirable + /// for multiple listeners to be reading from the underlying queue at once. In this case, you would instead + /// call `event.notify(1.additional())`. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, IntoNotification, Listener}; + /// + /// let event = Event::new(); + /// + /// let mut l1 = event.listen(); + /// let mut l2 = event.listen(); + /// + /// // This will only wake up the first listener, as the second call observes that there is already a + /// // notified listener. + /// event.notify(1); + /// event.notify(1); + /// + /// // This call wakes up the other listener. + /// event.notify(1.additional()); + /// ``` + fn additional(self) -> Additional + where + Self: Sized, + { + Additional::new(self.into_notification()) + } + + /// Don't emit a fence for this notification. + /// + /// Usually, notifications emit a `SeqCst` atomic fence before any listeners are woken up. This ensures + /// that notification state isn't inconsistent before any wakers are woken up. However, it may be + /// desirable to omit this fence in certain cases. + /// + /// - You are running the [`Event`] on a single thread, where no synchronization needs to occur. + /// - You are emitting the `SeqCst` fence yourself. + /// + /// In these cases, `relaxed()` can be used to avoid emitting the `SeqCst` fence. 
+ /// + /// # Examples + /// + /// ``` + /// use event_listener::{Event, IntoNotification, Listener}; + /// use std::sync::atomic::{self, Ordering}; + /// + /// let event = Event::new(); + /// + /// let listener1 = event.listen(); + /// let listener2 = event.listen(); + /// let listener3 = event.listen(); + /// + /// // We should emit a fence manually when using relaxed notifications. + /// atomic::fence(Ordering::SeqCst); + /// + /// // Notifies two listeners. + /// // + /// // Listener queueing is fair, which means `listener1` and `listener2` + /// // get notified here since they start listening before `listener3`. + /// event.notify(1.relaxed()); + /// event.notify(1.relaxed()); + /// ``` + fn relaxed(self) -> Relaxed + where + Self: Sized, + { + Relaxed::new(self.into_notification()) + } + + /// Use a tag with this notification. + /// + /// In many cases, it is desired to send additional information to the listener of the [`Event`]. For instance, + /// it is possible to optimize a `Mutex` implementation by locking directly on the next listener, without + /// needing to ever unlock the mutex at all. + /// + /// The tag provided is cloned to provide the tag for all listeners. In cases where this is not flexible + /// enough, use [`IntoNotification::with_tag()`] instead. + /// + /// Tagging functions cannot be implemented efficiently for `no_std`, so this is only available + /// when the `std` feature is enabled. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{IntoNotification, Listener, Event}; + /// + /// let event = Event::::with_tag(); + /// + /// let mut listener1 = event.listen(); + /// let mut listener2 = event.listen(); + /// + /// // Notify with `true` then `false`. 
+ /// event.notify(1.additional().tag(true)); + /// event.notify(1.additional().tag(false)); + /// + /// assert_eq!(listener1.wait(), true); + /// assert_eq!(listener2.wait(), false); + /// ``` + #[cfg(feature = "std")] + fn tag(self, tag: T) -> Tag + where + Self: Sized + IntoNotification, + { + Tag::new(tag, self.into_notification()) + } + + /// Use a function to generate a tag with this notification. + /// + /// In many cases, it is desired to send additional information to the listener of the [`Event`]. For instance, + /// it is possible to optimize a `Mutex` implementation by locking directly on the next listener, without + /// needing to ever unlock the mutex at all. + /// + /// Tagging functions cannot be implemented efficiently for `no_std`, so this is only available + /// when the `std` feature is enabled. + /// + /// # Examples + /// + /// ``` + /// use event_listener::{IntoNotification, Listener, Event}; + /// + /// let event = Event::::with_tag(); + /// + /// let mut listener1 = event.listen(); + /// let mut listener2 = event.listen(); + /// + /// // Notify with `true` then `false`. + /// event.notify(1.additional().tag_with(|| true)); + /// event.notify(1.additional().tag_with(|| false)); + /// + /// assert_eq!(listener1.wait(), true); + /// assert_eq!(listener2.wait(), false); + /// ``` + #[cfg(feature = "std")] + fn tag_with(self, tag: F) -> TagWith + where + Self: Sized + IntoNotification, + F: FnMut() -> T, + { + TagWith::new(tag, self.into_notification()) + } +} + +impl IntoNotification for N { + type Tag = N::Tag; + type Notify = N; + + fn into_notification(self) -> Self::Notify { + self + } +} + +macro_rules! 
impl_for_numeric_types { + ($($ty:ty)*) => {$( + impl IntoNotification for $ty { + type Tag = (); + type Notify = Notify; + + #[allow(unused_comparisons)] + fn into_notification(self) -> Self::Notify { + if self < 0 { + panic!("negative notification count"); + } + + Notify::new(self.try_into().expect("overflow")) + } + } + + impl __private::Sealed for $ty {} + )*}; +} + +impl_for_numeric_types! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } + +/// Equivalent to `atomic::fence(Ordering::SeqCst)`, but in some cases faster. +#[inline] +pub(super) fn full_fence() { + #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri), not(loom)))] + { + use core::{arch::asm, cell::UnsafeCell}; + // HACK(stjepang): On x86 architectures there are two different ways of executing + // a `SeqCst` fence. + // + // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. + // 2. A `lock ` instruction. + // + // Both instructions have the effect of a full barrier, but empirical benchmarks have shown + // that the second one is sometimes a bit faster. + let a = UnsafeCell::new(0_usize); + // It is common to use `lock or` here, but when using a local variable, `lock not`, which + // does not change the flag, should be slightly more efficient. + // Refs: https://www.felixcloutier.com/x86/not + unsafe { + #[cfg(target_pointer_width = "64")] + asm!("lock not qword ptr [{0}]", in(reg) a.get(), options(nostack, preserves_flags)); + #[cfg(target_pointer_width = "32")] + asm!("lock not dword ptr [{0:e}]", in(reg) a.get(), options(nostack, preserves_flags)); + } + return; + } + #[allow(unreachable_code)] + { + atomic::fence(Ordering::SeqCst); + } +} + +mod __private { + /// Make sure the NotificationPrivate trait can't be implemented outside of this crate. 
+ #[doc(hidden)] + #[derive(Debug)] + pub struct Internal(()); + + impl Internal { + pub(crate) fn new() -> Self { + Self(()) + } + } + + #[doc(hidden)] + pub trait Sealed {} + impl Sealed for N {} +} diff --git a/.cargo-vendor/event-listener/src/std.rs b/.cargo-vendor/event-listener/src/std.rs new file mode 100644 index 0000000000..c509ebc0b0 --- /dev/null +++ b/.cargo-vendor/event-listener/src/std.rs @@ -0,0 +1,392 @@ +//! libstd-based implementation of `event-listener`. +//! +//! This implementation crates an intrusive linked list of listeners. + +use crate::notify::{GenericNotify, Internal, Notification}; +use crate::sync::atomic::Ordering; +use crate::sync::cell::{Cell, UnsafeCell}; +use crate::sync::{Mutex, MutexGuard}; +use crate::{RegisterResult, State, TaskRef}; + +use core::marker::PhantomPinned; +use core::mem; +use core::ops::{Deref, DerefMut}; +use core::pin::Pin; +use core::ptr::NonNull; + +pub(super) struct List(Mutex>); + +struct Inner { + /// The head of the linked list. + head: Option>>, + + /// The tail of the linked list. + tail: Option>>, + + /// The first unnotified listener. + next: Option>>, + + /// Total number of listeners. + len: usize, + + /// The number of notified listeners. + notified: usize, +} + +impl List { + /// Create a new, empty event listener list. + pub(super) fn new() -> Self { + Self(Mutex::new(Inner { + head: None, + tail: None, + next: None, + len: 0, + notified: 0, + })) + } + + /// Get the total number of listeners without blocking. + pub(crate) fn try_total_listeners(&self) -> Option { + self.0.try_lock().ok().map(|list| list.len) + } + + /// Get the total number of listeners with blocking. + pub(crate) fn total_listeners(&self) -> usize { + self.0.lock().unwrap_or_else(|e| e.into_inner()).len + } +} + +impl crate::Inner { + fn lock(&self) -> ListLock<'_, '_, T> { + ListLock { + inner: self, + lock: self.list.0.lock().unwrap_or_else(|e| e.into_inner()), + } + } + + /// Add a new listener to the list. 
+ pub(crate) fn insert(&self, mut listener: Pin<&mut Option>>) { + let mut inner = self.lock(); + + listener.as_mut().set(Some(Listener { + link: UnsafeCell::new(Link { + state: Cell::new(State::Created), + prev: Cell::new(inner.tail), + next: Cell::new(None), + }), + _pin: PhantomPinned, + })); + let listener = listener.as_pin_mut().unwrap(); + + { + let entry_guard = listener.link.get(); + // SAFETY: We are locked, so we can access the inner `link`. + let entry = unsafe { entry_guard.deref() }; + + // Replace the tail with the new entry. + match mem::replace(&mut inner.tail, Some(entry.into())) { + None => inner.head = Some(entry.into()), + Some(t) => unsafe { t.as_ref().next.set(Some(entry.into())) }, + }; + } + + // If there are no unnotified entries, this is the first one. + if inner.next.is_none() { + inner.next = inner.tail; + } + + // Bump the entry count. + inner.len += 1; + } + + /// Remove a listener from the list. + pub(crate) fn remove( + &self, + listener: Pin<&mut Option>>, + propagate: bool, + ) -> Option> { + self.lock().remove(listener, propagate) + } + + /// Notifies a number of entries. + #[cold] + pub(crate) fn notify(&self, notify: impl Notification) -> usize { + self.lock().notify(notify) + } + + /// Register a task to be notified when the event is triggered. + /// + /// Returns `true` if the listener was already notified, and `false` otherwise. If the listener + /// isn't inserted, returns `None`. + pub(crate) fn register( + &self, + mut listener: Pin<&mut Option>>, + task: TaskRef<'_>, + ) -> RegisterResult { + let mut inner = self.lock(); + let entry_guard = match listener.as_mut().as_pin_mut() { + Some(listener) => listener.link.get(), + None => return RegisterResult::NeverInserted, + }; + // SAFETY: We are locked, so we can access the inner `link`. + let entry = unsafe { entry_guard.deref() }; + + // Take out the state and check it. + match entry.state.replace(State::NotifiedTaken) { + State::Notified { tag, .. 
} => { + // We have been notified, remove the listener. + inner.remove(listener, false); + RegisterResult::Notified(tag) + } + + State::Task(other_task) => { + // Only replace the task if it's different. + entry.state.set(State::Task({ + if !task.will_wake(other_task.as_task_ref()) { + task.into_task() + } else { + other_task + } + })); + + RegisterResult::Registered + } + + _ => { + // We have not been notified, register the task. + entry.state.set(State::Task(task.into_task())); + RegisterResult::Registered + } + } + } +} + +impl Inner { + fn remove( + &mut self, + mut listener: Pin<&mut Option>>, + propagate: bool, + ) -> Option> { + let entry_guard = listener.as_mut().as_pin_mut()?.link.get(); + let entry = unsafe { entry_guard.deref() }; + + let prev = entry.prev.get(); + let next = entry.next.get(); + + // Unlink from the previous entry. + match prev { + None => self.head = next, + Some(p) => unsafe { + p.as_ref().next.set(next); + }, + } + + // Unlink from the next entry. + match next { + None => self.tail = prev, + Some(n) => unsafe { + n.as_ref().prev.set(prev); + }, + } + + // If this was the first unnotified entry, update the next pointer. + if self.next == Some(entry.into()) { + self.next = next; + } + + // The entry is now fully unlinked, so we can now take it out safely. + let entry = unsafe { + listener + .get_unchecked_mut() + .take() + .unwrap() + .link + .into_inner() + }; + + // This State::Created is immediately dropped and exists as a workaround for the absence of + // loom::cell::Cell::into_inner. The intent is `let mut state = entry.state.into_inner();` + // + // refs: https://github.com/tokio-rs/loom/pull/341 + let mut state = entry.state.replace(State::Created); + + // Update the notified count. 
+ if state.is_notified() { + self.notified -= 1; + + if propagate { + let state = mem::replace(&mut state, State::NotifiedTaken); + if let State::Notified { additional, tag } = state { + let tags = { + let mut tag = Some(tag); + move || tag.take().expect("tag already taken") + }; + self.notify(GenericNotify::new(1, additional, tags)); + } + } + } + self.len -= 1; + + Some(state) + } + + #[cold] + fn notify(&mut self, mut notify: impl Notification) -> usize { + let mut n = notify.count(Internal::new()); + let is_additional = notify.is_additional(Internal::new()); + + if !is_additional { + if n < self.notified { + return 0; + } + n -= self.notified; + } + + let original_count = n; + while n > 0 { + n -= 1; + + // Notify the next entry. + match self.next { + None => return original_count - n - 1, + + Some(e) => { + // Get the entry and move the pointer forwards. + let entry = unsafe { e.as_ref() }; + self.next = entry.next.get(); + + // Set the state to `Notified` and notify. + let tag = notify.next_tag(Internal::new()); + if let State::Task(task) = entry.state.replace(State::Notified { + additional: is_additional, + tag, + }) { + task.wake(); + } + + // Bump the notified count. + self.notified += 1; + } + } + } + + original_count - n + } +} + +struct ListLock<'a, 'b, T> { + lock: MutexGuard<'a, Inner>, + inner: &'b crate::Inner, +} + +impl Deref for ListLock<'_, '_, T> { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + &self.lock + } +} + +impl DerefMut for ListLock<'_, '_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.lock + } +} + +impl Drop for ListLock<'_, '_, T> { + fn drop(&mut self) { + let list = &mut **self; + + // Update the notified count. + let notified = if list.notified < list.len { + list.notified + } else { + usize::MAX + }; + + self.inner.notified.store(notified, Ordering::Release); + } +} + +pub(crate) struct Listener { + /// The inner link in the linked list. 
+ /// + /// # Safety + /// + /// This can only be accessed while the central mutex is locked. + link: UnsafeCell>, + + /// This listener cannot be moved after being pinned. + _pin: PhantomPinned, +} + +struct Link { + /// The current state of the listener. + state: Cell>, + + /// The previous link in the linked list. + prev: Cell>>>, + + /// The next link in the linked list. + next: Cell>>>, +} + +#[cfg(test)] +mod tests { + use super::*; + use futures_lite::pin; + + #[cfg(target_family = "wasm")] + use wasm_bindgen_test::wasm_bindgen_test as test; + + macro_rules! make_listeners { + ($($id:ident),*) => { + $( + let $id = Option::>::None; + pin!($id); + )* + }; + } + + #[test] + fn insert() { + let inner = crate::Inner::new(); + make_listeners!(listen1, listen2, listen3); + + // Register the listeners. + inner.insert(listen1.as_mut()); + inner.insert(listen2.as_mut()); + inner.insert(listen3.as_mut()); + + assert_eq!(inner.lock().len, 3); + + // Remove one. + assert_eq!(inner.remove(listen2, false), Some(State::Created)); + assert_eq!(inner.lock().len, 2); + + // Remove another. + assert_eq!(inner.remove(listen1, false), Some(State::Created)); + assert_eq!(inner.lock().len, 1); + } + + #[test] + fn drop_non_notified() { + let inner = crate::Inner::new(); + make_listeners!(listen1, listen2, listen3); + + // Register the listeners. + inner.insert(listen1.as_mut()); + inner.insert(listen2.as_mut()); + inner.insert(listen3.as_mut()); + + // Notify one. + inner.notify(GenericNotify::new(1, false, || ())); + + // Remove one. + inner.remove(listen3, true); + + // Remove the rest. 
+ inner.remove(listen1, true); + inner.remove(listen2, true); + } +} diff --git a/.cargo-vendor/event-listener/tests/loom.rs b/.cargo-vendor/event-listener/tests/loom.rs new file mode 100644 index 0000000000..1cd92e26ad --- /dev/null +++ b/.cargo-vendor/event-listener/tests/loom.rs @@ -0,0 +1,211 @@ +#![cfg(loom)] +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::Context; + +use event_listener::{Event, EventListener}; +use waker_fn::waker_fn; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +fn is_notified(listener: &mut EventListener) -> bool { + let waker = waker_fn(|| ()); + Pin::new(listener) + .poll(&mut Context::from_waker(&waker)) + .is_ready() +} + +#[test] +fn notify() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + + assert_eq!(event.notify(2), 2); + assert_eq!(event.notify(1), 0); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }); +} + +#[test] +fn notify_additional() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(1), 0); + assert_eq!(event.notify_additional(1), 1); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }) +} + +#[test] +fn notify_one() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l2)); + }); 
+} + +#[test] +fn notify_all() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(usize::MAX), 2); + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + }); +} + +#[test] +fn drop_notified() { + loom::model(|| { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }); +} + +#[test] +fn drop_notified2() { + loom::model(|| { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(2), 2); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + }); +} + +#[test] +fn drop_notified_additional() { + loom::model(|| { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + let mut l4 = event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(2), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(is_notified(&mut l3)); + assert!(!is_notified(&mut l4)); + }); +} + +#[test] +fn drop_non_notified() { + loom::model(|| { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let l3 = event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l3); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + }) +} + +#[test] +fn notify_all_fair() { + loom::model(|| { + let event = Event::new(); + let v = Arc::new(Mutex::new(vec![])); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + let waker1 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(1)) + }; + let waker2 = { + let v = v.clone(); + waker_fn(move 
|| v.lock().unwrap().push(2)) + }; + let waker3 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(3)) + }; + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_pending()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_pending()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_pending()); + + assert_eq!(event.notify(usize::MAX), 3); + assert_eq!(&*v.lock().unwrap(), &[1, 2, 3]); + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_ready()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_ready()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_ready()); + }) +} diff --git a/.cargo-vendor/event-listener/tests/notify.rs b/.cargo-vendor/event-listener/tests/notify.rs new file mode 100644 index 0000000000..c37dc9a784 --- /dev/null +++ b/.cargo-vendor/event-listener/tests/notify.rs @@ -0,0 +1,192 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::Context; + +use event_listener::{Event, EventListener}; +use waker_fn::waker_fn; + +#[cfg(target_family = "wasm")] +use wasm_bindgen_test::wasm_bindgen_test as test; + +fn is_notified(listener: &mut EventListener) -> bool { + let waker = waker_fn(|| ()); + Pin::new(listener) + .poll(&mut Context::from_waker(&waker)) + .is_ready() +} + +#[test] +fn notify() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); + + assert_eq!(event.notify(2), 2); + assert_eq!(event.notify(1), 0); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn notify_additional() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = 
event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(1), 0); + assert_eq!(event.notify_additional(1), 1); + + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn notify_one() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(1), 1); + assert!(is_notified(&mut l2)); +} + +#[test] +fn notify_all() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + + assert!(!is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); + + assert_eq!(event.notify(usize::MAX), 2); + assert!(is_notified(&mut l1)); + assert!(is_notified(&mut l2)); +} + +#[test] +fn drop_notified() { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn drop_notified2() { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + assert_eq!(event.notify(2), 2); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(!is_notified(&mut l3)); +} + +#[test] +fn drop_notified_additional() { + let event = Event::new(); + + let l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + let mut l4 = event.listen(); + + assert_eq!(event.notify_additional(1), 1); + assert_eq!(event.notify(2), 1); + drop(l1); + assert!(is_notified(&mut l2)); + assert!(is_notified(&mut l3)); + assert!(!is_notified(&mut l4)); +} + +#[test] +fn drop_non_notified() { + let event = Event::new(); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let l3 = 
event.listen(); + + assert_eq!(event.notify(1), 1); + drop(l3); + assert!(is_notified(&mut l1)); + assert!(!is_notified(&mut l2)); +} + +#[test] +fn notify_all_fair() { + let event = Event::new(); + let v = Arc::new(Mutex::new(vec![])); + + let mut l1 = event.listen(); + let mut l2 = event.listen(); + let mut l3 = event.listen(); + + let waker1 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(1)) + }; + let waker2 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(2)) + }; + let waker3 = { + let v = v.clone(); + waker_fn(move || v.lock().unwrap().push(3)) + }; + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_pending()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_pending()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_pending()); + + assert_eq!(event.notify(usize::MAX), 3); + assert_eq!(&*v.lock().unwrap(), &[1, 2, 3]); + + assert!(Pin::new(&mut l1) + .poll(&mut Context::from_waker(&waker1)) + .is_ready()); + assert!(Pin::new(&mut l2) + .poll(&mut Context::from_waker(&waker2)) + .is_ready()); + assert!(Pin::new(&mut l3) + .poll(&mut Context::from_waker(&waker3)) + .is_ready()); +} diff --git a/.cargo-vendor/http-0.2.12/.cargo-checksum.json b/.cargo-vendor/http-0.2.12/.cargo-checksum.json new file mode 100644 index 0000000000..6cff05060b --- /dev/null +++ b/.cargo-vendor/http-0.2.12/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"2df3a57e1185cbfc8eab81f0ea8b9eab3e28854e1c5f5e712ed5db24427f5054","Cargo.toml":"de90ca352de60a54cf5dfedeb4589b7a6ef6e523a2a90fea1a0f956a7e2f7caf","LICENSE-APACHE":"8bb1b50b0e5c9399ae33bd35fab2769010fa6c14e8860c729a52295d84896b7a","LICENSE-MIT":"dc91f8200e4b2a1f9261035d4c18c33c246911a6c0f7b543d75347e61b249cff","README.md":"2b08369b8ce261843a84103642fd4d8b1ab556af6d6397dbc78c19f7025d255a","src/byte_str.rs":"e1131683d8a3234f6b0983ad1f18a46794961ce401d590378370e58c60f4fbdc","src/convert.rs":"a31a4351cd3ee36a58ff4f5b30ce2c8967cde8486faea2d2673a8f8cb74b3204","src/error.rs":"e9a0c5c2af9eb98a23f967d9ff416095c80f6998fbd14a63acebeeca9e8aedac","src/extensions.rs":"5f85c3e1eef53d0fcbd4a24a6c13828790dac74ad60f71cad365e14d39b196a6","src/header/map.rs":"8f4f9022c899abf723294f6017348238f116df939abf54f891bcc74a95cbfe71","src/header/mod.rs":"aa07991ab517f1cef50bd8ebbec6ea76b95d8faedeaa61d3e410b8bf7f737da9","src/header/name.rs":"9554b80b81ea7cfd807c1d6ff52801a07ca675c8d0dffb0eee77c22a3a3a1a26","src/header/value.rs":"ffea8236f38178fa3dd600b893d1eb8b698e3a052aaad2dbdda4a14e1b3c7108","src/lib.rs":"d4bbd2761bc5fb93f71e037c838853c2460ac43e8e176c9e4b7739ece97c4060","src/method.rs":"a40a8219cdbe1071cd448bc154dbe88e78a29d755bca2bde095190bcd595f7dd","src/request.rs":"4bf726a91d5776f11f2d29b270090550838b1cebf812ef5acdd62d00878325fc","src/response.rs":"137adc01d53225ce07c06f8f64cd082af437bcbf297dce20a3a5907e3f2544fe","src/status.rs":"fd9d1c1670bde5f94934ff2a9fa9c7f2db5bbe32a750e4e202bf2775b5c5cac3","src/uri/authority.rs":"605ab42eed3ed6692746a846f845c8f2ba7e34c4738e929e5683714f17c7a162","src/uri/builder.rs":"875506b3a603a6e35557548ed0cf3beb7de0a4d1c898316e7293f3bc2ffb05c5","src/uri/mod.rs":"fd083d2bb380268a2c1c6236aed6f312d469a55cd259fd55b20a801e72e6c8b1","src/uri/path.rs":"1a87eaedf4ce65a0af9020eff5ca4e78d1eaba0a3d05a0a99ed2cc8912054f64","src/uri/port.rs":"a30793678abc96e833d026d96f060244183ab631e19eafbbad8e4643c7bb9d86","src/uri/scheme.rs":"59e6f12d3e1e1ee982e68a4a65
56f25e94073ca3d77c372b6d8d71daf8f62f2a","src/uri/tests.rs":"61f88b73490c2442ec12cb0829aa1ddd28f1bce874b4fc6dd7a544c80280aeb1","src/version.rs":"623ef60a450203b051f3457e2f095508b66aaaa799b1447fb1b34d92cb2e7d62","tests/header_map.rs":"749ef0461bff58a01d96b5072268da7b36105f60d0db585e0c616e7e440f1601","tests/header_map_fuzz.rs":"7f8be3f097ceb9e0c5c4b44ef6ae1ee209cd7b6d1ea4b4be45356142792190de","tests/status_code.rs":"4c1bd08baffa6265aad5e837b189c269a3bef9031984b37980c24a8c671ac22c"},"package":"601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"} \ No newline at end of file diff --git a/.cargo-vendor/http-0.2.12/CHANGELOG.md b/.cargo-vendor/http-0.2.12/CHANGELOG.md new file mode 100644 index 0000000000..40e0476adf --- /dev/null +++ b/.cargo-vendor/http-0.2.12/CHANGELOG.md @@ -0,0 +1,219 @@ +# 0.2.12 (March 4, 2024) + +* Add methods to allow trying to allocate in the `HeaderMap`, returning an error if oversize instead of panicking. +* Fix `HeaderName::from_lowercase` that could allow NUL bytes in some cases. + +# 0.2.11 (November 13, 2023) + +* Fix MIRI error in `header::Iter`. + +# 0.2.10 (November 10, 2023) + +* Fix parsing of `Authority` to handle square brackets in incorrect order. +* Fix `HeaderMap::with_capacity()` to handle arithmetic overflow. + +# 0.2.9 (February 17, 2023) + +* Add `HeaderName` constants for `cache-status` and `cdn-cache-control`. +* Implement `Hash` for `PathAndQuery`. +* Re-export `HeaderName` at crate root. + +# 0.2.8 (June 6, 2022) + +* Fix internal usage of uninitialized memory to use `MaybeUninit` inside `HeaderName`. + +# 0.2.7 (April 28, 2022) + +* MSRV bumped to `1.49`. +* Add `extend()` method to `Extensions`. +* Add `From` and `From` impls for `Uri`. +* Make `HeaderName::from_static` a `const fn`. + +# 0.2.6 (December 30, 2021) + +* Upgrade internal `itoa` dependency to 1.0. + +# 0.2.5 (September 21, 2021) + +* Add `is_empty()` and `len()` methods to `Extensions`. +* Add `version_ref()` method to `request::Builder`. 
+* Implement `TryFrom>` and `TryFrom` for `Authority`, `Uri`, `PathAndQuery`, and `HeaderName`. +* Make `HeaderValue::from_static` a `const fn`. + +# 0.2.4 (April 4, 2021) + +* Fix `Uri` parsing to allow `{`, `"`, and `}` in paths. + +# 0.2.3 (January 7, 2021) + +* Upgrade internal (private) `bytes` dependency to 1.0. + +# 0.2.2 (December 14, 2020) + +* Fix (potential double) panic of (`HeaderMap`) `OccupiedEntry::remove_entry` and + `remove_entry_mult` when multiple values are present. ([#446], [#449] dekellum) +* Safety audits of (priv) `ByteStr` and refactor of `Authority` ([#408], [#414] sbosnick) +* Fix `HeaderName` to error instead of panic when input is too long ([#432] [#433] acfoltzer) +* Allow `StatusCode` to encode values 100-999 without error. Use of the + unclassified range 600-999 remains discouraged. ([#144], [#438], [#443] quininer dekellum) +* Add `String` and `&String` fallible conversions to `PathAndQuery` ([#450] mkindahl) +* Fix `Authority` (and `Uri`) to error instead of panic on unbalanced brackets + ([#435], [#445] aeryz) + +# 0.2.1 (March 25, 2020) + +* Add `extensions_ref` and `extensions_mut` to `request::Builder` and `response::Builder`. + +# 0.2.0 (December 2, 2019) + +* Add `Version::HTTP_3` constant. +* Add `HeaderValue::from_maybe_shared`, `HeaderValue::from_maybe_shared_unchecked`, `Uri::from_maybe_shared`, `Authority::from_maybe_shared`, and `PathAndQuery::from_maybe_shared`. +* Change `request::Builder`, `response::Builder`, and `uri::Builder` to use by-value methods instead of by-ref. +* Change from `HttpTryFrom` trait to `std::convert::TryFrom`. +* Change `HeaderMap::entry` to no longer return a `Result`. +* Change `HeaderMap::drain` iterator to match the behavior of `IntoIter`. +* Change `Authority::port` to return an `Option` instead of `Option`. +* Change `Uri::scheme` to return `Option<&Scheme>` instead of `Option<&str>`. +* Change `Uri::authority` to return `Option<&Authority>` instead of `Option<&str>`. 
+* Remove `InvalidUriBytes`, `InvalidHeaderNameBytes`, and `InvalidHeaderValueBytes` error types. +* Remove `HeaderValue::from_shared`, `HeaderValue::from_shared_unchecked`, `Uri::from_shared`, `Authority::from_shared`, `Scheme::from_shared`, and `PathAndQuery::from_shared`. +* Remove `Authority::port_part`. +* Remove `Uri::scheme_part` and `Uri::authority_part`. + +# 0.1.20 (November 26, 2019) + +* Fix possible double-free if `header::Drain` iterator is `std::mem::forgot`en (#357). +* Fix possible data race if multiple `header::ValueDrain`s are iterated on different threads (#362). +* Fix `HeaderMap::reserve` capacity overflows (#360). +* Fix parsing long authority-form `Uri`s (#351). + +# 0.1.19 (October 15, 2019) + +* Allow `%` in IPv6 addresses in `Uri` (#343). + +# 0.1.18 (July 26, 2019) + +* Fix compilation of `HeaderName` parsing on WASM targets (#324). +* Implement `HttpTryFrom` for `HeaderMap` (#326). +* Export `http::header::HeaderValue` as `http::HeaderValue`. + +# 0.1.17 (April 5, 2019) + +* Add `Error::inner_ref()` to view the kind of error (#303) +* Add `headers_ref()` and `headers_mut()` methods to `request::Builder` and `response::Builder` (#293) + +# 0.1.16 (February 19, 2019) + +* Fix `Uri` to permit more characters in the `path` (#296) + +# 0.1.15 (January 22, 2019) + +* Fix `Uri::host()` to include brackets of IPv6 literals (#292) +* Add `scheme_str` and `port_u16` methods to `Uri` (#287) +* Add `method_ref`, `uri_ref`, and `headers_ref` to `request::Builder` (#284) + +# 0.1.14 (November 21, 2018) + +* Add `Port` struct (#252, #255, #265) +* Introduce `Uri` builder (#219) +* Empty `Method` no longer considered valid (#262) +* Fix `Uri` equality when terminating question mark is present (#270) +* Allow % character in userinfo (#269) +* Support additional tokens for header names (#271) +* Export `http::headers::{IterMut, ValuesMut}` (#278) + +# 0.1.13 (September 14, 2018) + +* impl `fmt::Display` for `HeaderName` (#249) +* Fix `uri::Authority` 
parsing when there is no host after an `@` (#248) +* Fix `Uri` parsing to allow more characters in query strings (#247) + +# 0.1.12 (September 7, 2018) + +* Fix `HeaderValue` parsing to allow HTABs (#244) + +# 0.1.11 (September 5, 2018) + +* Add `From<&Self>` for `HeaderValue`, `Method`, and `StatusCode` (#238) +* Add `Uri::from_static` (#240) + +# 0.1.10 (August 8, 2018) + +* `impl HttpTryFrom` for HeaderValue (#236) + +# 0.1.9 (August 7, 2018) + +* Fix double percent encoding (#233) +* Add additional HttpTryFrom impls (#234) + +# 0.1.8 (July 23, 2018) + +* Add fuller set of `PartialEq` for `Method` (#221) +* Reduce size of `HeaderMap` by using `Box<[Entry]>` instea of `Vec` (#224) +* Reduce size of `Extensions` by storing as `Option>` (#227) +* Implement `Iterator::size_hint` for most iterators in `header` (#226) + +# 0.1.7 (June 22, 2018) + +* Add `From for HeaderValue` for most integer types (#218). +* Add `Uri::into_parts()` inherent method (same as `Parts::from(uri)`) (#214). +* Fix converting `Uri`s in authority-form to `Parts` and then back into `Uri` (#216). +* Fix `Authority` parsing to reject multiple port sections (#215). +* Fix parsing 1 character authority-form `Uri`s into illegal forms (#220). + +# 0.1.6 (June 13, 2018) + +* Add `HeaderName::from_static()` constructor (#195). +* Add `Authority::from_static()` constructor (#186). +* Implement `From` for `HeaderValue` (#184). +* Fix duplicate keys when iterating over `header::Keys` (#201). + +# 0.1.5 (February 28, 2018) + +* Add websocket handshake related header constants (#162). +* Parsing `Authority` with an empty string now returns an error (#164). +* Implement `PartialEq` for `StatusCode` (#153). +* Implement `HttpTryFrom<&Uri>` for `Uri` (#165). +* Implement `FromStr` for `Method` (#167). +* Implement `HttpTryFrom` for `Uri` (#171). +* Add `into_body` fns to `Request` and `Response` (#172). +* Fix `Request::options` (#177). + +# 0.1.4 (January 4, 2018) + +* Add PathAndQuery::from_static (#148). 
+* Impl PartialOrd / PartialEq for Authority and PathAndQuery (#150). +* Add `map` fn to `Request` and `Response` (#151). + +# 0.1.3 (December 11, 2017) + +* Add `Scheme` associated consts for common protos. + +# 0.1.2 (November 29, 2017) + +* Add Uri accessor for scheme part. +* Fix Uri parsing bug (#134) + +# 0.1.1 (October 9, 2017) + +* Provide Uri accessors for parts (#129) +* Add Request builder helpers. (#123) +* Misc performance improvements (#126) + +# 0.1.0 (September 8, 2017) + +* Initial release. + +[#144]: https://github.com/hyperium/http/issues/144 +[#408]: https://github.com/hyperium/http/pull/408 +[#414]: https://github.com/hyperium/http/pull/414 +[#432]: https://github.com/hyperium/http/issues/432 +[#433]: https://github.com/hyperium/http/pull/433 +[#438]: https://github.com/hyperium/http/pull/438 +[#443]: https://github.com/hyperium/http/pull/443 +[#446]: https://github.com/hyperium/http/issues/446 +[#449]: https://github.com/hyperium/http/pull/449 +[#450]: https://github.com/hyperium/http/pull/450 +[#435]: https://github.com/hyperium/http/issues/435 +[#445]: https://github.com/hyperium/http/pull/445 + diff --git a/.cargo-vendor/http-0.2.12/Cargo.toml b/.cargo-vendor/http-0.2.12/Cargo.toml new file mode 100644 index 0000000000..d83b0e9cde --- /dev/null +++ b/.cargo-vendor/http-0.2.12/Cargo.toml @@ -0,0 +1,60 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.49.0" +name = "http" +version = "0.2.12" +authors = [ + "Alex Crichton ", + "Carl Lerche ", + "Sean McArthur ", +] +description = """ +A set of types for representing HTTP requests and responses. +""" +documentation = "https://docs.rs/http" +readme = "README.md" +keywords = ["http"] +categories = ["web-programming"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/hyperium/http" + +[dependencies.bytes] +version = "1" + +[dependencies.fnv] +version = "1.0.5" + +[dependencies.itoa] +version = "1" + +[dev-dependencies.doc-comment] +version = "0.3" + +[dev-dependencies.indexmap] +version = "<=1.8" + +[dev-dependencies.quickcheck] +version = "0.9.0" + +[dev-dependencies.rand] +version = "0.7.0" + +[dev-dependencies.seahash] +version = "3.0.5" + +[dev-dependencies.serde] +version = "1.0" + +[dev-dependencies.serde_json] +version = "1.0" diff --git a/.cargo-vendor/http-0.2.12/LICENSE-APACHE b/.cargo-vendor/http-0.2.12/LICENSE-APACHE new file mode 100644 index 0000000000..80176c2b23 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright 2017 http-rs authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo-vendor/http-0.2.12/LICENSE-MIT b/.cargo-vendor/http-0.2.12/LICENSE-MIT new file mode 100644 index 0000000000..0cbc550492 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2017 http-rs authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo-vendor/http-0.2.12/README.md b/.cargo-vendor/http-0.2.12/README.md new file mode 100644 index 0000000000..2ae8d56cd3 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/README.md @@ -0,0 +1,80 @@ +# HTTP + +A general purpose library of common HTTP types + +[![CI](https://github.com/hyperium/http/workflows/CI/badge.svg)](https://github.com/hyperium/http/actions?query=workflow%3ACI) +[![Crates.io](https://img.shields.io/crates/v/http.svg)](https://crates.io/crates/http) +[![Documentation](https://docs.rs/http/badge.svg)][dox] + +More information about this crate can be found in the [crate +documentation][dox]. + +[dox]: https://docs.rs/http + +## Usage + +To use `http`, first add this to your `Cargo.toml`: + +```toml +[dependencies] +http = "0.2" +``` + +Next, add this to your crate: + +```rust +use http::{Request, Response}; + +fn main() { + // ... +} +``` + +## Examples + +Create an HTTP request: + +```rust +use http::Request; + +fn main() { + let request = Request::builder() + .uri("https://www.rust-lang.org/") + .header("User-Agent", "awesome/1.0") + .body(()) + .unwrap(); +} +``` + +Create an HTTP response: + +```rust +use http::{Response, StatusCode}; + +fn main() { + let response = Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header("Location", "https://www.rust-lang.org/install.html") + .body(()) + .unwrap(); +} +``` + +# Supported Rust Versions + +This project follows the [Tokio MSRV][msrv] and is currently set to `1.49`. 
+ +[msrv]: https://github.com/tokio-rs/tokio/#supported-rust-versions + +# License + +Licensed under either of + +- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) + +# Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/.cargo-vendor/http-0.2.12/src/byte_str.rs b/.cargo-vendor/http-0.2.12/src/byte_str.rs new file mode 100644 index 0000000000..dec222b5f4 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/byte_str.rs @@ -0,0 +1,85 @@ +use bytes::Bytes; + +use std::{ops, str}; + +#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub(crate) struct ByteStr { + // Invariant: bytes contains valid UTF-8 + bytes: Bytes, +} + +impl ByteStr { + #[inline] + pub fn new() -> ByteStr { + ByteStr { + // Invariant: the empty slice is trivially valid UTF-8. + bytes: Bytes::new(), + } + } + + #[inline] + pub const fn from_static(val: &'static str) -> ByteStr { + ByteStr { + // Invariant: val is a str so contains valid UTF-8. + bytes: Bytes::from_static(val.as_bytes()), + } + } + + #[inline] + /// ## Panics + /// In a debug build this will panic if `bytes` is not valid UTF-8. + /// + /// ## Safety + /// `bytes` must contain valid UTF-8. In a release build it is undefined + /// behaviour to call this with `bytes` that is not valid UTF-8. + pub unsafe fn from_utf8_unchecked(bytes: Bytes) -> ByteStr { + if cfg!(debug_assertions) { + match str::from_utf8(&bytes) { + Ok(_) => (), + Err(err) => panic!( + "ByteStr::from_utf8_unchecked() with invalid bytes; error = {}, bytes = {:?}", + err, bytes + ), + } + } + // Invariant: assumed by the safety requirements of this function. 
+ ByteStr { bytes: bytes } + } +} + +impl ops::Deref for ByteStr { + type Target = str; + + #[inline] + fn deref(&self) -> &str { + let b: &[u8] = self.bytes.as_ref(); + // Safety: the invariant of `bytes` is that it contains valid UTF-8. + unsafe { str::from_utf8_unchecked(b) } + } +} + +impl From for ByteStr { + #[inline] + fn from(src: String) -> ByteStr { + ByteStr { + // Invariant: src is a String so contains valid UTF-8. + bytes: Bytes::from(src), + } + } +} + +impl<'a> From<&'a str> for ByteStr { + #[inline] + fn from(src: &'a str) -> ByteStr { + ByteStr { + // Invariant: src is a str so contains valid UTF-8. + bytes: Bytes::copy_from_slice(src.as_bytes()), + } + } +} + +impl From for Bytes { + fn from(src: ByteStr) -> Self { + src.bytes + } +} diff --git a/.cargo-vendor/http-0.2.12/src/convert.rs b/.cargo-vendor/http-0.2.12/src/convert.rs new file mode 100644 index 0000000000..aeee2219a9 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/convert.rs @@ -0,0 +1,17 @@ +macro_rules! if_downcast_into { + ($in_ty:ty, $out_ty:ty, $val:ident, $body:expr) => ({ + if std::any::TypeId::of::<$in_ty>() == std::any::TypeId::of::<$out_ty>() { + // Store the value in an `Option` so we can `take` + // it after casting to `&mut dyn Any`. + let mut slot = Some($val); + // Re-write the `$val` ident with the downcasted value. + let $val = (&mut slot as &mut dyn std::any::Any) + .downcast_mut::>() + .unwrap() + .take() + .unwrap(); + // Run the $body in scope of the replaced val. 
+ $body + } + }) +} diff --git a/.cargo-vendor/http-0.2.12/src/error.rs b/.cargo-vendor/http-0.2.12/src/error.rs new file mode 100644 index 0000000000..762ee1c26a --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/error.rs @@ -0,0 +1,160 @@ +use std::error; +use std::fmt; +use std::result; + +use crate::header; +use crate::header::MaxSizeReached; +use crate::method; +use crate::status; +use crate::uri; + +/// A generic "error" for HTTP connections +/// +/// This error type is less specific than the error returned from other +/// functions in this crate, but all other errors can be converted to this +/// error. Consumers of this crate can typically consume and work with this form +/// of error for conversions with the `?` operator. +pub struct Error { + inner: ErrorKind, +} + +/// A `Result` typedef to use with the `http::Error` type +pub type Result = result::Result; + +enum ErrorKind { + StatusCode(status::InvalidStatusCode), + Method(method::InvalidMethod), + Uri(uri::InvalidUri), + UriParts(uri::InvalidUriParts), + HeaderName(header::InvalidHeaderName), + HeaderValue(header::InvalidHeaderValue), + MaxSizeReached(MaxSizeReached), +} + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("http::Error") + // Skip the noise of the ErrorKind enum + .field(&self.get_ref()) + .finish() + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.get_ref(), f) + } +} + +impl Error { + /// Return true if the underlying error has the same type as T. + pub fn is(&self) -> bool { + self.get_ref().is::() + } + + /// Return a reference to the lower level, inner error. 
+ pub fn get_ref(&self) -> &(dyn error::Error + 'static) { + use self::ErrorKind::*; + + match self.inner { + StatusCode(ref e) => e, + Method(ref e) => e, + Uri(ref e) => e, + UriParts(ref e) => e, + HeaderName(ref e) => e, + HeaderValue(ref e) => e, + MaxSizeReached(ref e) => e, + } + } +} + +impl error::Error for Error { + // Return any available cause from the inner error. Note the inner error is + // not itself the cause. + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + self.get_ref().source() + } +} + +impl From for Error { + fn from(err: MaxSizeReached) -> Error { + Error { + inner: ErrorKind::MaxSizeReached(err), + } + } +} + +impl From for Error { + fn from(err: status::InvalidStatusCode) -> Error { + Error { + inner: ErrorKind::StatusCode(err), + } + } +} + +impl From for Error { + fn from(err: method::InvalidMethod) -> Error { + Error { + inner: ErrorKind::Method(err), + } + } +} + +impl From for Error { + fn from(err: uri::InvalidUri) -> Error { + Error { + inner: ErrorKind::Uri(err), + } + } +} + +impl From for Error { + fn from(err: uri::InvalidUriParts) -> Error { + Error { + inner: ErrorKind::UriParts(err), + } + } +} + +impl From for Error { + fn from(err: header::InvalidHeaderName) -> Error { + Error { + inner: ErrorKind::HeaderName(err), + } + } +} + +impl From for Error { + fn from(err: header::InvalidHeaderValue) -> Error { + Error { + inner: ErrorKind::HeaderValue(err), + } + } +} + +impl From for Error { + fn from(err: std::convert::Infallible) -> Error { + match err {} + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn inner_error_is_invalid_status_code() { + if let Err(e) = status::StatusCode::from_u16(6666) { + let err: Error = e.into(); + let ie = err.get_ref(); + assert!(!ie.is::()); + assert!(ie.is::()); + ie.downcast_ref::().unwrap(); + + assert!(!err.is::()); + assert!(err.is::()); + } else { + panic!("Bad status allowed!"); + } + } +} diff --git a/.cargo-vendor/http-0.2.12/src/extensions.rs 
b/.cargo-vendor/http-0.2.12/src/extensions.rs new file mode 100644 index 0000000000..7e815df772 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/extensions.rs @@ -0,0 +1,250 @@ +use std::any::{Any, TypeId}; +use std::collections::HashMap; +use std::fmt; +use std::hash::{BuildHasherDefault, Hasher}; + +type AnyMap = HashMap, BuildHasherDefault>; + +// With TypeIds as keys, there's no need to hash them. They are already hashes +// themselves, coming from the compiler. The IdHasher just holds the u64 of +// the TypeId, and then returns it, instead of doing any bit fiddling. +#[derive(Default)] +struct IdHasher(u64); + +impl Hasher for IdHasher { + fn write(&mut self, _: &[u8]) { + unreachable!("TypeId calls write_u64"); + } + + #[inline] + fn write_u64(&mut self, id: u64) { + self.0 = id; + } + + #[inline] + fn finish(&self) -> u64 { + self.0 + } +} + +/// A type map of protocol extensions. +/// +/// `Extensions` can be used by `Request` and `Response` to store +/// extra data derived from the underlying protocol. +#[derive(Default)] +pub struct Extensions { + // If extensions are never used, no need to carry around an empty HashMap. + // That's 3 words. Instead, this is only 1 word. + map: Option>, +} + +impl Extensions { + /// Create an empty `Extensions`. + #[inline] + pub fn new() -> Extensions { + Extensions { map: None } + } + + /// Insert a type into this `Extensions`. + /// + /// If a extension of this type already existed, it will + /// be returned. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// assert!(ext.insert(5i32).is_none()); + /// assert!(ext.insert(4u8).is_none()); + /// assert_eq!(ext.insert(9i32), Some(5i32)); + /// ``` + pub fn insert(&mut self, val: T) -> Option { + self.map + .get_or_insert_with(|| Box::new(HashMap::default())) + .insert(TypeId::of::(), Box::new(val)) + .and_then(|boxed| { + (boxed as Box) + .downcast() + .ok() + .map(|boxed| *boxed) + }) + } + + /// Get a reference to a type previously inserted on this `Extensions`. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// assert!(ext.get::().is_none()); + /// ext.insert(5i32); + /// + /// assert_eq!(ext.get::(), Some(&5i32)); + /// ``` + pub fn get(&self) -> Option<&T> { + self.map + .as_ref() + .and_then(|map| map.get(&TypeId::of::())) + .and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref()) + } + + /// Get a mutable reference to a type previously inserted on this `Extensions`. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// ext.insert(String::from("Hello")); + /// ext.get_mut::().unwrap().push_str(" World"); + /// + /// assert_eq!(ext.get::().unwrap(), "Hello World"); + /// ``` + pub fn get_mut(&mut self) -> Option<&mut T> { + self.map + .as_mut() + .and_then(|map| map.get_mut(&TypeId::of::())) + .and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut()) + } + + /// Remove a type from this `Extensions`. + /// + /// If a extension of this type existed, it will be returned. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// ext.insert(5i32); + /// assert_eq!(ext.remove::(), Some(5i32)); + /// assert!(ext.get::().is_none()); + /// ``` + pub fn remove(&mut self) -> Option { + self.map + .as_mut() + .and_then(|map| map.remove(&TypeId::of::())) + .and_then(|boxed| { + (boxed as Box) + .downcast() + .ok() + .map(|boxed| *boxed) + }) + } + + /// Clear the `Extensions` of all inserted extensions. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// ext.insert(5i32); + /// ext.clear(); + /// + /// assert!(ext.get::().is_none()); + /// ``` + #[inline] + pub fn clear(&mut self) { + if let Some(ref mut map) = self.map { + map.clear(); + } + } + + /// Check whether the extension set is empty or not. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// assert!(ext.is_empty()); + /// ext.insert(5i32); + /// assert!(!ext.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.map + .as_ref() + .map_or(true, |map| map.is_empty()) + } + + /// Get the numer of extensions available. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// assert_eq!(ext.len(), 0); + /// ext.insert(5i32); + /// assert_eq!(ext.len(), 1); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.map + .as_ref() + .map_or(0, |map| map.len()) + } + + /// Extends `self` with another `Extensions`. + /// + /// If an instance of a specific type exists in both, the one in `self` is overwritten with the + /// one from `other`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext_a = Extensions::new(); + /// ext_a.insert(8u8); + /// ext_a.insert(16u16); + /// + /// let mut ext_b = Extensions::new(); + /// ext_b.insert(4u8); + /// ext_b.insert("hello"); + /// + /// ext_a.extend(ext_b); + /// assert_eq!(ext_a.len(), 3); + /// assert_eq!(ext_a.get::(), Some(&4u8)); + /// assert_eq!(ext_a.get::(), Some(&16u16)); + /// assert_eq!(ext_a.get::<&'static str>().copied(), Some("hello")); + /// ``` + pub fn extend(&mut self, other: Self) { + if let Some(other) = other.map { + if let Some(map) = &mut self.map { + map.extend(*other); + } else { + self.map = Some(other); + } + } + } +} + +impl fmt::Debug for Extensions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Extensions").finish() + } +} + +#[test] +fn test_extensions() { + #[derive(Debug, PartialEq)] + struct MyType(i32); + + let mut extensions = Extensions::new(); + + extensions.insert(5i32); + extensions.insert(MyType(10)); + + assert_eq!(extensions.get(), Some(&5i32)); + assert_eq!(extensions.get_mut(), Some(&mut 5i32)); + + assert_eq!(extensions.remove::(), Some(5i32)); + assert!(extensions.get::().is_none()); + + assert_eq!(extensions.get::(), None); + assert_eq!(extensions.get(), Some(&MyType(10))); +} diff --git a/.cargo-vendor/http-0.2.12/src/header/map.rs b/.cargo-vendor/http-0.2.12/src/header/map.rs new file mode 100644 index 0000000000..36f1c92654 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/header/map.rs @@ -0,0 +1,3909 @@ +use std::collections::HashMap; +use std::collections::hash_map::RandomState; +use std::convert::TryFrom; +use std::hash::{BuildHasher, Hash, Hasher}; +use std::iter::{FromIterator, FusedIterator}; +use std::marker::PhantomData; +use std::{fmt, mem, ops, ptr, vec}; + +use crate::Error; + +use super::HeaderValue; +use super::name::{HdrName, HeaderName, InvalidHeaderName}; + +pub use self::as_header_name::AsHeaderName; +pub use 
self::into_header_name::IntoHeaderName; + +/// A set of HTTP headers +/// +/// `HeaderMap` is an multimap of [`HeaderName`] to values. +/// +/// [`HeaderName`]: struct.HeaderName.html +/// +/// # Examples +/// +/// Basic usage +/// +/// ``` +/// # use http::HeaderMap; +/// # use http::header::{CONTENT_LENGTH, HOST, LOCATION}; +/// let mut headers = HeaderMap::new(); +/// +/// headers.insert(HOST, "example.com".parse().unwrap()); +/// headers.insert(CONTENT_LENGTH, "123".parse().unwrap()); +/// +/// assert!(headers.contains_key(HOST)); +/// assert!(!headers.contains_key(LOCATION)); +/// +/// assert_eq!(headers[HOST], "example.com"); +/// +/// headers.remove(HOST); +/// +/// assert!(!headers.contains_key(HOST)); +/// ``` +#[derive(Clone)] +pub struct HeaderMap { + // Used to mask values to get an index + mask: Size, + indices: Box<[Pos]>, + entries: Vec>, + extra_values: Vec>, + danger: Danger, +} + +// # Implementation notes +// +// Below, you will find a fairly large amount of code. Most of this is to +// provide the necessary functions to efficiently manipulate the header +// multimap. The core hashing table is based on robin hood hashing [1]. While +// this is the same hashing algorithm used as part of Rust's `HashMap` in +// stdlib, many implementation details are different. The two primary reasons +// for this divergence are that `HeaderMap` is a multimap and the structure has +// been optimized to take advantage of the characteristics of HTTP headers. +// +// ## Structure Layout +// +// Most of the data contained by `HeaderMap` is *not* stored in the hash table. +// Instead, pairs of header name and *first* associated header value are stored +// in the `entries` vector. If the header name has more than one associated +// header value, then additional values are stored in `extra_values`. The actual +// hash table (`indices`) only maps hash codes to indices in `entries`. 
This +// means that, when an eviction happens, the actual header name and value stay +// put and only a tiny amount of memory has to be copied. +// +// Extra values associated with a header name are tracked using a linked list. +// Links are formed with offsets into `extra_values` and not pointers. +// +// [1]: https://en.wikipedia.org/wiki/Hash_table#Robin_Hood_hashing + +/// `HeaderMap` entry iterator. +/// +/// Yields `(&HeaderName, &value)` tuples. The same header name may be yielded +/// more than once if it has more than one associated value. +#[derive(Debug)] +pub struct Iter<'a, T> { + map: &'a HeaderMap, + entry: usize, + cursor: Option, +} + +/// `HeaderMap` mutable entry iterator +/// +/// Yields `(&HeaderName, &mut value)` tuples. The same header name may be +/// yielded more than once if it has more than one associated value. +#[derive(Debug)] +pub struct IterMut<'a, T> { + map: *mut HeaderMap, + entry: usize, + cursor: Option, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// An owning iterator over the entries of a `HeaderMap`. +/// +/// This struct is created by the `into_iter` method on `HeaderMap`. +#[derive(Debug)] +pub struct IntoIter { + // If None, pull from `entries` + next: Option, + entries: vec::IntoIter>, + extra_values: Vec>, +} + +/// An iterator over `HeaderMap` keys. +/// +/// Each header name is yielded only once, even if it has more than one +/// associated value. +#[derive(Debug)] +pub struct Keys<'a, T> { + inner: ::std::slice::Iter<'a, Bucket>, +} + +/// `HeaderMap` value iterator. +/// +/// Each value contained in the `HeaderMap` will be yielded. +#[derive(Debug)] +pub struct Values<'a, T> { + inner: Iter<'a, T>, +} + +/// `HeaderMap` mutable value iterator +#[derive(Debug)] +pub struct ValuesMut<'a, T> { + inner: IterMut<'a, T>, +} + +/// A drain iterator for `HeaderMap`. 
+#[derive(Debug)] +pub struct Drain<'a, T> { + idx: usize, + len: usize, + entries: *mut [Bucket], + // If None, pull from `entries` + next: Option, + extra_values: *mut Vec>, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// A view to all values stored in a single entry. +/// +/// This struct is returned by `HeaderMap::get_all`. +#[derive(Debug)] +pub struct GetAll<'a, T> { + map: &'a HeaderMap, + index: Option, +} + +/// A view into a single location in a `HeaderMap`, which may be vacant or occupied. +#[derive(Debug)] +pub enum Entry<'a, T: 'a> { + /// An occupied entry + Occupied(OccupiedEntry<'a, T>), + + /// A vacant entry + Vacant(VacantEntry<'a, T>), +} + +/// A view into a single empty location in a `HeaderMap`. +/// +/// This struct is returned as part of the `Entry` enum. +#[derive(Debug)] +pub struct VacantEntry<'a, T> { + map: &'a mut HeaderMap, + key: HeaderName, + hash: HashValue, + probe: usize, + danger: bool, +} + +/// A view into a single occupied location in a `HeaderMap`. +/// +/// This struct is returned as part of the `Entry` enum. +#[derive(Debug)] +pub struct OccupiedEntry<'a, T> { + map: &'a mut HeaderMap, + probe: usize, + index: usize, +} + +/// An iterator of all values associated with a single header name. +#[derive(Debug)] +pub struct ValueIter<'a, T> { + map: &'a HeaderMap, + index: usize, + front: Option, + back: Option, +} + +/// A mutable iterator of all values associated with a single header name. +#[derive(Debug)] +pub struct ValueIterMut<'a, T> { + map: *mut HeaderMap, + index: usize, + front: Option, + back: Option, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// An drain iterator of all values associated with a single header name. 
+#[derive(Debug)] +pub struct ValueDrain<'a, T> { + first: Option, + next: Option<::std::vec::IntoIter>, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// Error returned when max capacity of `HeaderMap` is exceeded +pub struct MaxSizeReached { + _priv: (), +} + +/// Tracks the value iterator state +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum Cursor { + Head, + Values(usize), +} + +/// Type used for representing the size of a HeaderMap value. +/// +/// 32,768 is more than enough entries for a single header map. Setting this +/// limit enables using `u16` to represent all offsets, which takes 2 bytes +/// instead of 8 on 64 bit processors. +/// +/// Setting this limit is especially beneficial for `indices`, making it more +/// cache friendly. More hash codes can fit in a cache line. +/// +/// You may notice that `u16` may represent more than 32,768 values. This is +/// true, but 32,768 should be plenty and it allows us to reserve the top bit +/// for future usage. +type Size = u16; + +/// This limit falls out from above. +const MAX_SIZE: usize = 1 << 15; + +/// An entry in the hash table. This represents the full hash code for an entry +/// as well as the position of the entry in the `entries` vector. +#[derive(Copy, Clone)] +struct Pos { + // Index in the `entries` vec + index: Size, + // Full hash value for the entry. + hash: HashValue, +} + +/// Hash values are limited to u16 as well. While `fast_hash` and `Hasher` +/// return `usize` hash codes, limiting the effective hash code to the lower 16 +/// bits is fine since we know that the `indices` vector will never grow beyond +/// that size. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +struct HashValue(u16); + +/// Stores the data associated with a `HeaderMap` entry. Only the first value is +/// included in this struct. If a header name has more than one associated +/// value, all extra values are stored in the `extra_values` vector. A doubly +/// linked list of entries is maintained. 
The doubly linked list is used so that +/// removing a value is constant time. This also has the nice property of +/// enabling double ended iteration. +#[derive(Debug, Clone)] +struct Bucket { + hash: HashValue, + key: HeaderName, + value: T, + links: Option, +} + +/// The head and tail of the value linked list. +#[derive(Debug, Copy, Clone)] +struct Links { + next: usize, + tail: usize, +} + +/// Access to the `links` value in a slice of buckets. +/// +/// It's important that no other field is accessed, since it may have been +/// freed in a `Drain` iterator. +#[derive(Debug)] +struct RawLinks(*mut [Bucket]); + +/// Node in doubly-linked list of header value entries +#[derive(Debug, Clone)] +struct ExtraValue { + value: T, + prev: Link, + next: Link, +} + +/// A header value node is either linked to another node in the `extra_values` +/// list or it points to an entry in `entries`. The entry in `entries` is the +/// start of the list and holds the associated header name. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum Link { + Entry(usize), + Extra(usize), +} + +/// Tracks the header map danger level! This relates to the adaptive hashing +/// algorithm. A HeaderMap starts in the "green" state, when a large number of +/// collisions are detected, it transitions to the yellow state. At this point, +/// the header map will either grow and switch back to the green state OR it +/// will transition to the red state. +/// +/// When in the red state, a safe hashing algorithm is used and all values in +/// the header map have to be rehashed. +#[derive(Clone)] +enum Danger { + Green, + Yellow, + Red(RandomState), +} + +// Constants related to detecting DOS attacks. +// +// Displacement is the number of entries that get shifted when inserting a new +// value. Forward shift is how far the entry gets stored from the ideal +// position. +// +// The current constant values were picked from another implementation. 
It could +// be that there are different values better suited to the header map case. +const DISPLACEMENT_THRESHOLD: usize = 128; +const FORWARD_SHIFT_THRESHOLD: usize = 512; + +// The default strategy for handling the yellow danger state is to increase the +// header map capacity in order to (hopefully) reduce the number of collisions. +// If growing the hash map would cause the load factor to drop bellow this +// threshold, then instead of growing, the headermap is switched to the red +// danger state and safe hashing is used instead. +const LOAD_FACTOR_THRESHOLD: f32 = 0.2; + +// Macro used to iterate the hash table starting at a given point, looping when +// the end is hit. +macro_rules! probe_loop { + ($label:tt: $probe_var: ident < $len: expr, $body: expr) => { + debug_assert!($len > 0); + $label: + loop { + if $probe_var < $len { + $body + $probe_var += 1; + } else { + $probe_var = 0; + } + } + }; + ($probe_var: ident < $len: expr, $body: expr) => { + debug_assert!($len > 0); + loop { + if $probe_var < $len { + $body + $probe_var += 1; + } else { + $probe_var = 0; + } + } + }; +} + +// First part of the robinhood algorithm. Given a key, find the slot in which it +// will be inserted. This is done by starting at the "ideal" spot. Then scanning +// until the destination slot is found. A destination slot is either the next +// empty slot or the next slot that is occupied by an entry that has a lower +// displacement (displacement is the distance from the ideal spot). +// +// This is implemented as a macro instead of a function that takes a closure in +// order to guarantee that it is "inlined". There is no way to annotate closures +// to guarantee inlining. +macro_rules! 
insert_phase_one { + ($map:ident, + $key:expr, + $probe:ident, + $pos:ident, + $hash:ident, + $danger:ident, + $vacant:expr, + $occupied:expr, + $robinhood:expr) => + {{ + let $hash = hash_elem_using(&$map.danger, &$key); + let mut $probe = desired_pos($map.mask, $hash); + let mut dist = 0; + let ret; + + // Start at the ideal position, checking all slots + probe_loop!('probe: $probe < $map.indices.len(), { + if let Some(($pos, entry_hash)) = $map.indices[$probe].resolve() { + // The slot is already occupied, but check if it has a lower + // displacement. + let their_dist = probe_distance($map.mask, entry_hash, $probe); + + if their_dist < dist { + // The new key's distance is larger, so claim this spot and + // displace the current entry. + // + // Check if this insertion is above the danger threshold. + let $danger = + dist >= FORWARD_SHIFT_THRESHOLD && !$map.danger.is_red(); + + ret = $robinhood; + break 'probe; + } else if entry_hash == $hash && $map.entries[$pos].key == $key { + // There already is an entry with the same key. + ret = $occupied; + break 'probe; + } + } else { + // The entry is vacant, use it for this key. + let $danger = + dist >= FORWARD_SHIFT_THRESHOLD && !$map.danger.is_red(); + + ret = $vacant; + break 'probe; + } + + dist += 1; + }); + + ret + }} +} + +// ===== impl HeaderMap ===== + +impl HeaderMap { + /// Create an empty `HeaderMap`. + /// + /// The map will be created without any capacity. This function will not + /// allocate. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let map = HeaderMap::new(); + /// + /// assert!(map.is_empty()); + /// assert_eq!(0, map.capacity()); + /// ``` + pub fn new() -> Self { + HeaderMap::try_with_capacity(0).unwrap() + } +} + +impl HeaderMap { + /// Create an empty `HeaderMap` with the specified capacity. + /// + /// The returned map will allocate internal storage in order to hold about + /// `capacity` elements without reallocating. 
However, this is a "best + /// effort" as there are usage patterns that could cause additional + /// allocations before `capacity` headers are stored in the map. + /// + /// More capacity than requested may be allocated. + /// + /// # Panics + /// + /// This method panics if capacity exceeds max `HeaderMap` capacity. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let map: HeaderMap = HeaderMap::with_capacity(10); + /// + /// assert!(map.is_empty()); + /// assert_eq!(12, map.capacity()); + /// ``` + pub fn with_capacity(capacity: usize) -> HeaderMap { + Self::try_with_capacity(capacity).expect("size overflows MAX_SIZE") + } + + /// Create an empty `HeaderMap` with the specified capacity. + /// + /// The returned map will allocate internal storage in order to hold about + /// `capacity` elements without reallocating. However, this is a "best + /// effort" as there are usage patterns that could cause additional + /// allocations before `capacity` headers are stored in the map. + /// + /// More capacity than requested may be allocated. 
+ /// + /// # Errors + /// + /// This function may return an error if `HeaderMap` exceeds max capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let map: HeaderMap = HeaderMap::try_with_capacity(10).unwrap(); + /// + /// assert!(map.is_empty()); + /// assert_eq!(12, map.capacity()); + /// ``` + pub fn try_with_capacity(capacity: usize) -> Result, MaxSizeReached> { + if capacity == 0 { + Ok(HeaderMap { + mask: 0, + indices: Box::new([]), // as a ZST, this doesn't actually allocate anything + entries: Vec::new(), + extra_values: Vec::new(), + danger: Danger::Green, + }) + } else { + let raw_cap = match to_raw_capacity(capacity).checked_next_power_of_two() { + Some(c) => c, + None => return Err(MaxSizeReached { _priv: () }), + }; + if raw_cap > MAX_SIZE { + return Err(MaxSizeReached { _priv: () }); + } + debug_assert!(raw_cap > 0); + + Ok(HeaderMap { + mask: (raw_cap - 1) as Size, + indices: vec![Pos::none(); raw_cap].into_boxed_slice(), + entries: Vec::with_capacity(raw_cap), + extra_values: Vec::new(), + danger: Danger::Green, + }) + } + } + + /// Returns the number of headers stored in the map. + /// + /// This number represents the total number of **values** stored in the map. + /// This number can be greater than or equal to the number of **keys** + /// stored given that a single key may have more than one associated value. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{ACCEPT, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(0, map.len()); + /// + /// map.insert(ACCEPT, "text/plain".parse().unwrap()); + /// map.insert(HOST, "localhost".parse().unwrap()); + /// + /// assert_eq!(2, map.len()); + /// + /// map.append(ACCEPT, "text/html".parse().unwrap()); + /// + /// assert_eq!(3, map.len()); + /// ``` + pub fn len(&self) -> usize { + self.entries.len() + self.extra_values.len() + } + + /// Returns the number of keys stored in the map. 
+ /// + /// This number will be less than or equal to `len()` as each key may have + /// more than one associated value. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{ACCEPT, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(0, map.keys_len()); + /// + /// map.insert(ACCEPT, "text/plain".parse().unwrap()); + /// map.insert(HOST, "localhost".parse().unwrap()); + /// + /// assert_eq!(2, map.keys_len()); + /// + /// map.insert(ACCEPT, "text/html".parse().unwrap()); + /// + /// assert_eq!(2, map.keys_len()); + /// ``` + pub fn keys_len(&self) -> usize { + self.entries.len() + } + + /// Returns true if the map contains no elements. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// + /// assert!(map.is_empty()); + /// + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// assert!(!map.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + self.entries.len() == 0 + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory + /// for reuse. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// map.clear(); + /// assert!(map.is_empty()); + /// assert!(map.capacity() > 0); + /// ``` + pub fn clear(&mut self) { + self.entries.clear(); + self.extra_values.clear(); + self.danger = Danger::Green; + + for e in self.indices.iter_mut() { + *e = Pos::none(); + } + } + + /// Returns the number of headers the map can hold without reallocating. + /// + /// This number is an approximation as certain usage patterns could cause + /// additional allocations before the returned capacity is filled. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(0, map.capacity()); + /// + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// assert_eq!(6, map.capacity()); + /// ``` + pub fn capacity(&self) -> usize { + usable_capacity(self.indices.len()) + } + + /// Reserves capacity for at least `additional` more headers to be inserted + /// into the `HeaderMap`. + /// + /// The header map may reserve more space to avoid frequent reallocations. + /// Like with `with_capacity`, this will be a "best effort" to avoid + /// allocations until `additional` more headers are inserted. Certain usage + /// patterns could cause additional allocations before the number is + /// reached. + /// + /// # Panics + /// + /// Panics if the new allocation size overflows `HeaderMap` `MAX_SIZE`. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.reserve(10); + /// # map.insert(HOST, "bar".parse().unwrap()); + /// ``` + pub fn reserve(&mut self, additional: usize) { + self.try_reserve(additional) + .expect("size overflows MAX_SIZE") + } + + /// Reserves capacity for at least `additional` more headers to be inserted + /// into the `HeaderMap`. + /// + /// The header map may reserve more space to avoid frequent reallocations. + /// Like with `with_capacity`, this will be a "best effort" to avoid + /// allocations until `additional` more headers are inserted. Certain usage + /// patterns could cause additional allocations before the number is + /// reached. + /// + /// # Errors + /// + /// This method differs from `reserve` by returning an error instead of + /// panicking if the value is too large. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.try_reserve(10).unwrap(); + /// # map.try_insert(HOST, "bar".parse().unwrap()).unwrap(); + /// ``` + pub fn try_reserve(&mut self, additional: usize) -> Result<(), MaxSizeReached> { + // TODO: This can't overflow if done properly... since the max # of + // elements is u16::MAX. + let cap = self + .entries + .len() + .checked_add(additional) + .ok_or_else(|| MaxSizeReached::new())?; + + if cap > self.indices.len() { + let cap = cap + .checked_next_power_of_two() + .ok_or_else(|| MaxSizeReached::new())?; + if cap > MAX_SIZE { + return Err(MaxSizeReached::new()); + } + + if self.entries.len() == 0 { + self.mask = cap as Size - 1; + self.indices = vec![Pos::none(); cap].into_boxed_slice(); + self.entries = Vec::with_capacity(usable_capacity(cap)); + } else { + self.try_grow(cap)?; + } + } + + Ok(()) + } + + /// Returns a reference to the value associated with the key. + /// + /// If there are multiple values associated with the key, then the first one + /// is returned. Use `get_all` to get all values associated with a given + /// key. Returns `None` if there are no values associated with the key. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.get("host").is_none()); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// assert_eq!(map.get(HOST).unwrap(), &"hello"); + /// assert_eq!(map.get("host").unwrap(), &"hello"); + /// + /// map.append(HOST, "world".parse().unwrap()); + /// assert_eq!(map.get("host").unwrap(), &"hello"); + /// ``` + pub fn get(&self, key: K) -> Option<&T> + where + K: AsHeaderName, + { + self.get2(&key) + } + + fn get2(&self, key: &K) -> Option<&T> + where + K: AsHeaderName, + { + match key.find(self) { + Some((_, found)) => { + let entry = &self.entries[found]; + Some(&entry.value) + } + None => None, + } + } + + /// Returns a mutable reference to the value associated with the key. + /// + /// If there are multiple values associated with the key, then the first one + /// is returned. Use `entry` to get all values associated with a given + /// key. Returns `None` if there are no values associated with the key. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "hello".to_string()); + /// map.get_mut("host").unwrap().push_str("-world"); + /// + /// assert_eq!(map.get(HOST).unwrap(), &"hello-world"); + /// ``` + pub fn get_mut(&mut self, key: K) -> Option<&mut T> + where + K: AsHeaderName, + { + match key.find(self) { + Some((_, found)) => { + let entry = &mut self.entries[found]; + Some(&mut entry.value) + } + None => None, + } + } + + /// Returns a view of all values associated with a key. + /// + /// The returned view does not incur any allocations and allows iterating + /// the values associated with the key. See [`GetAll`] for more details. + /// Returns `None` if there are no values associated with the key. 
+ /// + /// [`GetAll`]: struct.GetAll.html + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// + /// let view = map.get_all("host"); + /// + /// let mut iter = view.iter(); + /// assert_eq!(&"hello", iter.next().unwrap()); + /// assert_eq!(&"goodbye", iter.next().unwrap()); + /// assert!(iter.next().is_none()); + /// ``` + pub fn get_all(&self, key: K) -> GetAll<'_, T> + where + K: AsHeaderName, + { + GetAll { + map: self, + index: key.find(self).map(|(_, i)| i), + } + } + + /// Returns true if the map contains a value for the specified key. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(!map.contains_key(HOST)); + /// + /// map.insert(HOST, "world".parse().unwrap()); + /// assert!(map.contains_key("host")); + /// ``` + pub fn contains_key(&self, key: K) -> bool + where + K: AsHeaderName, + { + key.find(self).is_some() + } + + /// An iterator visiting all key-value pairs. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. Each key will be yielded once per associated + /// value. So, if a key has 3 associated values, it will be yielded 3 times. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// for (key, value) in map.iter() { + /// println!("{:?}: {:?}", key, value); + /// } + /// ``` + pub fn iter(&self) -> Iter<'_, T> { + Iter { + map: self, + entry: 0, + cursor: self.entries.first().map(|_| Cursor::Head), + } + } + + /// An iterator visiting all key-value pairs, with mutable value references. + /// + /// The iterator order is arbitrary, but consistent across platforms for the + /// same crate version. Each key will be yielded once per associated value, + /// so if a key has 3 associated values, it will be yielded 3 times. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::default(); + /// + /// map.insert(HOST, "hello".to_string()); + /// map.append(HOST, "goodbye".to_string()); + /// map.insert(CONTENT_LENGTH, "123".to_string()); + /// + /// for (key, value) in map.iter_mut() { + /// value.push_str("-boop"); + /// } + /// ``` + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + IterMut { + map: self as *mut _, + entry: 0, + cursor: self.entries.first().map(|_| Cursor::Head), + lt: PhantomData, + } + } + + /// An iterator visiting all keys. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. Each key will be yielded only once even if it + /// has multiple associated values. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// for key in map.keys() { + /// println!("{:?}", key); + /// } + /// ``` + pub fn keys(&self) -> Keys<'_, T> { + Keys { + inner: self.entries.iter(), + } + } + + /// An iterator visiting all values. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// for value in map.values() { + /// println!("{:?}", value); + /// } + /// ``` + pub fn values(&self) -> Values<'_, T> { + Values { inner: self.iter() } + } + + /// An iterator visiting all values mutably. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::default(); + /// + /// map.insert(HOST, "hello".to_string()); + /// map.append(HOST, "goodbye".to_string()); + /// map.insert(CONTENT_LENGTH, "123".to_string()); + /// + /// for value in map.values_mut() { + /// value.push_str("-boop"); + /// } + /// ``` + pub fn values_mut(&mut self) -> ValuesMut<'_, T> { + ValuesMut { + inner: self.iter_mut(), + } + } + + /// Clears the map, returning all entries as an iterator. + /// + /// The internal memory is kept for reuse. 
+ /// + /// For each yielded item that has `None` provided for the `HeaderName`, + /// then the associated header name is the same as that of the previously + /// yielded item. The first yielded item will have `HeaderName` set. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// let mut drain = map.drain(); + /// + /// + /// assert_eq!(drain.next(), Some((Some(HOST), "hello".parse().unwrap()))); + /// assert_eq!(drain.next(), Some((None, "goodbye".parse().unwrap()))); + /// + /// assert_eq!(drain.next(), Some((Some(CONTENT_LENGTH), "123".parse().unwrap()))); + /// + /// assert_eq!(drain.next(), None); + /// ``` + pub fn drain(&mut self) -> Drain<'_, T> { + for i in self.indices.iter_mut() { + *i = Pos::none(); + } + + // Memory safety + // + // When the Drain is first created, it shortens the length of + // the source vector to make sure no uninitialized or moved-from + // elements are accessible at all if the Drain's destructor never + // gets to run. + + let entries = &mut self.entries[..] 
as *mut _; + let extra_values = &mut self.extra_values as *mut _; + let len = self.entries.len(); + unsafe { self.entries.set_len(0); } + + Drain { + idx: 0, + len, + entries, + extra_values, + next: None, + lt: PhantomData, + } + } + + fn value_iter(&self, idx: Option) -> ValueIter<'_, T> { + use self::Cursor::*; + + if let Some(idx) = idx { + let back = { + let entry = &self.entries[idx]; + + entry.links.map(|l| Values(l.tail)).unwrap_or(Head) + }; + + ValueIter { + map: self, + index: idx, + front: Some(Head), + back: Some(back), + } + } else { + ValueIter { + map: self, + index: ::std::usize::MAX, + front: None, + back: None, + } + } + } + + fn value_iter_mut(&mut self, idx: usize) -> ValueIterMut<'_, T> { + use self::Cursor::*; + + let back = { + let entry = &self.entries[idx]; + + entry.links.map(|l| Values(l.tail)).unwrap_or(Head) + }; + + ValueIterMut { + map: self as *mut _, + index: idx, + front: Some(Head), + back: Some(back), + lt: PhantomData, + } + } + + /// Gets the given key's corresponding entry in the map for in-place + /// manipulation. + /// + /// # Panics + /// + /// This method panics if capacity exceeds max `HeaderMap` capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map: HeaderMap = HeaderMap::default(); + /// + /// let headers = &[ + /// "content-length", + /// "x-hello", + /// "Content-Length", + /// "x-world", + /// ]; + /// + /// for &header in headers { + /// let counter = map.entry(header).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(map["content-length"], 2); + /// assert_eq!(map["x-hello"], 1); + /// ``` + pub fn entry(&mut self, key: K) -> Entry<'_, T> + where + K: IntoHeaderName, + { + key.try_entry(self).expect("size overflows MAX_SIZE") + } + + /// Gets the given key's corresponding entry in the map for in-place + /// manipulation. 
+ /// + /// # Errors + /// + /// This method differs from `entry` by allowing types that may not be + /// valid `HeaderName`s to passed as the key (such as `String`). If they + /// do not parse as a valid `HeaderName`, this returns an + /// `InvalidHeaderName` error. + /// + /// If reserving space goes over the maximum, this will also return an + /// error. However, to prevent breaking changes to the return type, the + /// error will still say `InvalidHeaderName`, unlike other `try_*` methods + /// which return a `MaxSizeReached` error. + pub fn try_entry(&mut self, key: K) -> Result, InvalidHeaderName> + where + K: AsHeaderName, + { + key.try_entry(self).map_err(|err| match err { + as_header_name::TryEntryError::InvalidHeaderName(e) => e, + as_header_name::TryEntryError::MaxSizeReached(_e) => { + // Unfortunately, we cannot change the return type of this + // method, so the max size reached error needs to be converted + // into an InvalidHeaderName. Yay. + InvalidHeaderName::new() + } + }) + } + + fn try_entry2(&mut self, key: K) -> Result, MaxSizeReached> + where + K: Hash + Into, + HeaderName: PartialEq, + { + // Ensure that there is space in the map + self.try_reserve_one()?; + + Ok(insert_phase_one!( + self, + key, + probe, + pos, + hash, + danger, + Entry::Vacant(VacantEntry { + map: self, + hash: hash, + key: key.into(), + probe: probe, + danger: danger, + }), + Entry::Occupied(OccupiedEntry { + map: self, + index: pos, + probe: probe, + }), + Entry::Vacant(VacantEntry { + map: self, + hash: hash, + key: key.into(), + probe: probe, + danger: danger, + }) + )) + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not previously have this key present, then `None` is + /// returned. + /// + /// If the map did have this key present, the new value is associated with + /// the key and all previous values are removed. **Note** that only a single + /// one of the previous values is returned. 
If there are multiple values + /// that have been previously associated with the key, then the first one is + /// returned. See `insert_mult` on `OccupiedEntry` for an API that returns + /// all values. + /// + /// The key is not updated, though; this matters for types that can be `==` + /// without being identical. + /// + /// # Panics + /// + /// This method panics if capacity exceeds max `HeaderMap` capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.insert(HOST, "world".parse().unwrap()).is_none()); + /// assert!(!map.is_empty()); + /// + /// let mut prev = map.insert(HOST, "earth".parse().unwrap()).unwrap(); + /// assert_eq!("world", prev); + /// ``` + pub fn insert(&mut self, key: K, val: T) -> Option + where + K: IntoHeaderName, + { + self.try_insert(key, val).expect("size overflows MAX_SIZE") + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not previously have this key present, then `None` is + /// returned. + /// + /// If the map did have this key present, the new value is associated with + /// the key and all previous values are removed. **Note** that only a single + /// one of the previous values is returned. If there are multiple values + /// that have been previously associated with the key, then the first one is + /// returned. See `insert_mult` on `OccupiedEntry` for an API that returns + /// all values. + /// + /// The key is not updated, though; this matters for types that can be `==` + /// without being identical. 
+ /// + /// # Errors + /// + /// This function may return an error if `HeaderMap` exceeds max capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.try_insert(HOST, "world".parse().unwrap()).unwrap().is_none()); + /// assert!(!map.is_empty()); + /// + /// let mut prev = map.try_insert(HOST, "earth".parse().unwrap()).unwrap().unwrap(); + /// assert_eq!("world", prev); + /// ``` + pub fn try_insert(&mut self, key: K, val: T) -> Result, MaxSizeReached> + where + K: IntoHeaderName, + { + key.try_insert(self, val) + } + + #[inline] + fn try_insert2(&mut self, key: K, value: T) -> Result, MaxSizeReached> + where + K: Hash + Into, + HeaderName: PartialEq, + { + self.try_reserve_one()?; + + Ok(insert_phase_one!( + self, + key, + probe, + pos, + hash, + danger, + // Vacant + { + let _ = danger; // Make lint happy + let index = self.entries.len(); + self.try_insert_entry(hash, key.into(), value)?; + self.indices[probe] = Pos::new(index, hash); + None + }, + // Occupied + Some(self.insert_occupied(pos, value)), + // Robinhood + { + self.try_insert_phase_two(key.into(), value, hash, probe, danger)?; + None + } + )) + } + + /// Set an occupied bucket to the given value + #[inline] + fn insert_occupied(&mut self, index: usize, value: T) -> T { + if let Some(links) = self.entries[index].links { + self.remove_all_extra_values(links.next); + } + + let entry = &mut self.entries[index]; + mem::replace(&mut entry.value, value) + } + + fn insert_occupied_mult(&mut self, index: usize, value: T) -> ValueDrain<'_, T> { + let old; + let links; + + { + let entry = &mut self.entries[index]; + + old = mem::replace(&mut entry.value, value); + links = entry.links.take(); + } + + let raw_links = self.raw_links(); + let extra_values = &mut self.extra_values; + + let next = links.map(|l| { + drain_all_extra_values(raw_links, extra_values, l.next) + .into_iter() + }); + + ValueDrain { + 
first: Some(old), + next: next, + lt: PhantomData, + } + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not previously have this key present, then `false` is + /// returned. + /// + /// If the map did have this key present, the new value is pushed to the end + /// of the list of values currently associated with the key. The key is not + /// updated, though; this matters for types that can be `==` without being + /// identical. + /// + /// # Panics + /// + /// This method panics if capacity exceeds max `HeaderMap` capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.insert(HOST, "world".parse().unwrap()).is_none()); + /// assert!(!map.is_empty()); + /// + /// map.append(HOST, "earth".parse().unwrap()); + /// + /// let values = map.get_all("host"); + /// let mut i = values.iter(); + /// assert_eq!("world", *i.next().unwrap()); + /// assert_eq!("earth", *i.next().unwrap()); + /// ``` + pub fn append(&mut self, key: K, value: T) -> bool + where + K: IntoHeaderName, + { + self.try_append(key, value) + .expect("size overflows MAX_SIZE") + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not previously have this key present, then `false` is + /// returned. + /// + /// If the map did have this key present, the new value is pushed to the end + /// of the list of values currently associated with the key. The key is not + /// updated, though; this matters for types that can be `==` without being + /// identical. 
+ /// + /// # Errors + /// + /// This function may return an error if `HeaderMap` exceeds max capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.try_insert(HOST, "world".parse().unwrap()).unwrap().is_none()); + /// assert!(!map.is_empty()); + /// + /// map.try_append(HOST, "earth".parse().unwrap()).unwrap(); + /// + /// let values = map.get_all("host"); + /// let mut i = values.iter(); + /// assert_eq!("world", *i.next().unwrap()); + /// assert_eq!("earth", *i.next().unwrap()); + /// ``` + pub fn try_append(&mut self, key: K, value: T) -> Result + where + K: IntoHeaderName, + { + key.try_append(self, value) + } + + #[inline] + fn try_append2(&mut self, key: K, value: T) -> Result + where + K: Hash + Into, + HeaderName: PartialEq, + { + self.try_reserve_one()?; + + Ok(insert_phase_one!( + self, + key, + probe, + pos, + hash, + danger, + // Vacant + { + let _ = danger; + let index = self.entries.len(); + self.try_insert_entry(hash, key.into(), value)?; + self.indices[probe] = Pos::new(index, hash); + false + }, + // Occupied + { + append_value(pos, &mut self.entries[pos], &mut self.extra_values, value); + true + }, + // Robinhood + { + self.try_insert_phase_two(key.into(), value, hash, probe, danger)?; + + false + } + )) + } + + #[inline] + fn find(&self, key: &K) -> Option<(usize, usize)> + where + K: Hash + Into, + HeaderName: PartialEq, + { + if self.entries.is_empty() { + return None; + } + + let hash = hash_elem_using(&self.danger, key); + let mask = self.mask; + let mut probe = desired_pos(mask, hash); + let mut dist = 0; + + probe_loop!(probe < self.indices.len(), { + if let Some((i, entry_hash)) = self.indices[probe].resolve() { + if dist > probe_distance(mask, entry_hash, probe) { + // give up when probe distance is too long + return None; + } else if entry_hash == hash && self.entries[i].key == *key { + return Some((probe, i)); + } + } else { + 
return None; + } + + dist += 1; + }); + } + + /// phase 2 is post-insert where we forward-shift `Pos` in the indices. + #[inline] + fn try_insert_phase_two( + &mut self, + key: HeaderName, + value: T, + hash: HashValue, + probe: usize, + danger: bool, + ) -> Result { + // Push the value and get the index + let index = self.entries.len(); + self.try_insert_entry(hash, key, value)?; + + let num_displaced = do_insert_phase_two(&mut self.indices, probe, Pos::new(index, hash)); + + if danger || num_displaced >= DISPLACEMENT_THRESHOLD { + // Increase danger level + self.danger.to_yellow(); + } + + Ok(index) + } + + /// Removes a key from the map, returning the value associated with the key. + /// + /// Returns `None` if the map does not contain the key. If there are + /// multiple values associated with the key, then the first one is returned. + /// See `remove_entry_mult` on `OccupiedEntry` for an API that yields all + /// values. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// let prev = map.remove(HOST).unwrap(); + /// assert_eq!("hello.world", prev); + /// + /// assert!(map.remove(HOST).is_none()); + /// ``` + pub fn remove(&mut self, key: K) -> Option + where + K: AsHeaderName, + { + match key.find(self) { + Some((probe, idx)) => { + if let Some(links) = self.entries[idx].links { + self.remove_all_extra_values(links.next); + } + + let entry = self.remove_found(probe, idx); + + Some(entry.value) + } + None => None, + } + } + + /// Remove an entry from the map. + /// + /// Warning: To avoid inconsistent state, extra values _must_ be removed + /// for the `found` index (via `remove_all_extra_values` or similar) + /// _before_ this method is called. 
+ #[inline] + fn remove_found(&mut self, probe: usize, found: usize) -> Bucket { + // index `probe` and entry `found` is to be removed + // use swap_remove, but then we need to update the index that points + // to the other entry that has to move + self.indices[probe] = Pos::none(); + let entry = self.entries.swap_remove(found); + + // correct index that points to the entry that had to swap places + if let Some(entry) = self.entries.get(found) { + // was not last element + // examine new element in `found` and find it in indices + let mut probe = desired_pos(self.mask, entry.hash); + + probe_loop!(probe < self.indices.len(), { + if let Some((i, _)) = self.indices[probe].resolve() { + if i >= self.entries.len() { + // found it + self.indices[probe] = Pos::new(found, entry.hash); + break; + } + } + }); + + // Update links + if let Some(links) = entry.links { + self.extra_values[links.next].prev = Link::Entry(found); + self.extra_values[links.tail].next = Link::Entry(found); + } + } + + // backward shift deletion in self.indices + // after probe, shift all non-ideally placed indices backward + if self.entries.len() > 0 { + let mut last_probe = probe; + let mut probe = probe + 1; + + probe_loop!(probe < self.indices.len(), { + if let Some((_, entry_hash)) = self.indices[probe].resolve() { + if probe_distance(self.mask, entry_hash, probe) > 0 { + self.indices[last_probe] = self.indices[probe]; + self.indices[probe] = Pos::none(); + } else { + break; + } + } else { + break; + } + + last_probe = probe; + }); + } + + entry + } + + /// Removes the `ExtraValue` at the given index. 
+ #[inline] + fn remove_extra_value(&mut self, idx: usize) -> ExtraValue { + let raw_links = self.raw_links(); + remove_extra_value(raw_links, &mut self.extra_values, idx) + } + + fn remove_all_extra_values(&mut self, mut head: usize) { + loop { + let extra = self.remove_extra_value(head); + + if let Link::Extra(idx) = extra.next { + head = idx; + } else { + break; + } + } + } + + #[inline] + fn try_insert_entry( + &mut self, + hash: HashValue, + key: HeaderName, + value: T, + ) -> Result<(), MaxSizeReached> { + if self.entries.len() >= MAX_SIZE { + return Err(MaxSizeReached::new()); + } + + self.entries.push(Bucket { + hash: hash, + key: key, + value: value, + links: None, + }); + + Ok(()) + } + + fn rebuild(&mut self) { + // Loop over all entries and re-insert them into the map + 'outer: for (index, entry) in self.entries.iter_mut().enumerate() { + let hash = hash_elem_using(&self.danger, &entry.key); + let mut probe = desired_pos(self.mask, hash); + let mut dist = 0; + + // Update the entry's hash code + entry.hash = hash; + + probe_loop!(probe < self.indices.len(), { + if let Some((_, entry_hash)) = self.indices[probe].resolve() { + // if existing element probed less than us, swap + let their_dist = probe_distance(self.mask, entry_hash, probe); + + if their_dist < dist { + // Robinhood + break; + } + } else { + // Vacant slot + self.indices[probe] = Pos::new(index, hash); + continue 'outer; + } + + dist += 1; + }); + + do_insert_phase_two(&mut self.indices, probe, Pos::new(index, hash)); + } + } + + fn reinsert_entry_in_order(&mut self, pos: Pos) { + if let Some((_, entry_hash)) = pos.resolve() { + // Find first empty bucket and insert there + let mut probe = desired_pos(self.mask, entry_hash); + + probe_loop!(probe < self.indices.len(), { + if self.indices[probe].resolve().is_none() { + // empty bucket, insert here + self.indices[probe] = pos; + return; + } + }); + } + } + + fn try_reserve_one(&mut self) -> Result<(), MaxSizeReached> { + let len = 
self.entries.len(); + + if self.danger.is_yellow() { + let load_factor = self.entries.len() as f32 / self.indices.len() as f32; + + if load_factor >= LOAD_FACTOR_THRESHOLD { + // Transition back to green danger level + self.danger.to_green(); + + // Double the capacity + let new_cap = self.indices.len() * 2; + + // Grow the capacity + self.try_grow(new_cap)?; + } else { + self.danger.to_red(); + + // Rebuild hash table + for index in self.indices.iter_mut() { + *index = Pos::none(); + } + + self.rebuild(); + } + } else if len == self.capacity() { + if len == 0 { + let new_raw_cap = 8; + self.mask = 8 - 1; + self.indices = vec![Pos::none(); new_raw_cap].into_boxed_slice(); + self.entries = Vec::with_capacity(usable_capacity(new_raw_cap)); + } else { + let raw_cap = self.indices.len(); + self.try_grow(raw_cap << 1)?; + } + } + + Ok(()) + } + + #[inline] + fn try_grow(&mut self, new_raw_cap: usize) -> Result<(), MaxSizeReached> { + if new_raw_cap > MAX_SIZE { + return Err(MaxSizeReached::new()); + } + + // find first ideally placed element -- start of cluster + let mut first_ideal = 0; + + for (i, pos) in self.indices.iter().enumerate() { + if let Some((_, entry_hash)) = pos.resolve() { + if 0 == probe_distance(self.mask, entry_hash, i) { + first_ideal = i; + break; + } + } + } + + // visit the entries in an order where we can simply reinsert them + // into self.indices without any bucket stealing. + let old_indices = mem::replace( + &mut self.indices, + vec![Pos::none(); new_raw_cap].into_boxed_slice(), + ); + self.mask = new_raw_cap.wrapping_sub(1) as Size; + + for &pos in &old_indices[first_ideal..] { + self.reinsert_entry_in_order(pos); + } + + for &pos in &old_indices[..first_ideal] { + self.reinsert_entry_in_order(pos); + } + + // Reserve additional entry slots + let more = self.capacity() - self.entries.len(); + self.entries.reserve_exact(more); + Ok(()) + } + + #[inline] + fn raw_links(&mut self) -> RawLinks { + RawLinks(&mut self.entries[..] 
as *mut _) + } +} + +/// Removes the `ExtraValue` at the given index. +#[inline] +fn remove_extra_value( + mut raw_links: RawLinks, + extra_values: &mut Vec>, + idx: usize) + -> ExtraValue +{ + let prev; + let next; + + { + debug_assert!(extra_values.len() > idx); + let extra = &extra_values[idx]; + prev = extra.prev; + next = extra.next; + } + + // First unlink the extra value + match (prev, next) { + (Link::Entry(prev), Link::Entry(next)) => { + debug_assert_eq!(prev, next); + + raw_links[prev] = None; + } + (Link::Entry(prev), Link::Extra(next)) => { + debug_assert!(raw_links[prev].is_some()); + + raw_links[prev].as_mut().unwrap() + .next = next; + + debug_assert!(extra_values.len() > next); + extra_values[next].prev = Link::Entry(prev); + } + (Link::Extra(prev), Link::Entry(next)) => { + debug_assert!(raw_links[next].is_some()); + + raw_links[next].as_mut().unwrap() + .tail = prev; + + debug_assert!(extra_values.len() > prev); + extra_values[prev].next = Link::Entry(next); + } + (Link::Extra(prev), Link::Extra(next)) => { + debug_assert!(extra_values.len() > next); + debug_assert!(extra_values.len() > prev); + + extra_values[prev].next = Link::Extra(next); + extra_values[next].prev = Link::Extra(prev); + } + } + + // Remove the extra value + let mut extra = extra_values.swap_remove(idx); + + // This is the index of the value that was moved (possibly `extra`) + let old_idx = extra_values.len(); + + // Update the links + if extra.prev == Link::Extra(old_idx) { + extra.prev = Link::Extra(idx); + } + + if extra.next == Link::Extra(old_idx) { + extra.next = Link::Extra(idx); + } + + // Check if another entry was displaced. If it was, then the links + // need to be fixed. 
+ if idx != old_idx { + let next; + let prev; + + { + debug_assert!(extra_values.len() > idx); + let moved = &extra_values[idx]; + next = moved.next; + prev = moved.prev; + } + + // An entry was moved, we have to the links + match prev { + Link::Entry(entry_idx) => { + // It is critical that we do not attempt to read the + // header name or value as that memory may have been + // "released" already. + debug_assert!(raw_links[entry_idx].is_some()); + + let links = raw_links[entry_idx].as_mut().unwrap(); + links.next = idx; + } + Link::Extra(extra_idx) => { + debug_assert!(extra_values.len() > extra_idx); + extra_values[extra_idx].next = Link::Extra(idx); + } + } + + match next { + Link::Entry(entry_idx) => { + debug_assert!(raw_links[entry_idx].is_some()); + + let links = raw_links[entry_idx].as_mut().unwrap(); + links.tail = idx; + } + Link::Extra(extra_idx) => { + debug_assert!(extra_values.len() > extra_idx); + extra_values[extra_idx].prev = Link::Extra(idx); + } + } + } + + debug_assert!({ + for v in &*extra_values { + assert!(v.next != Link::Extra(old_idx)); + assert!(v.prev != Link::Extra(old_idx)); + } + + true + }); + + extra +} + +fn drain_all_extra_values( + raw_links: RawLinks, + extra_values: &mut Vec>, + mut head: usize) + -> Vec +{ + let mut vec = Vec::new(); + loop { + let extra = remove_extra_value(raw_links, extra_values, head); + vec.push(extra.value); + + if let Link::Extra(idx) = extra.next { + head = idx; + } else { + break; + } + } + vec +} + +impl<'a, T> IntoIterator for &'a HeaderMap { + type Item = (&'a HeaderName, &'a T); + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +impl<'a, T> IntoIterator for &'a mut HeaderMap { + type Item = (&'a HeaderName, &'a mut T); + type IntoIter = IterMut<'a, T>; + + fn into_iter(self) -> IterMut<'a, T> { + self.iter_mut() + } +} + +impl IntoIterator for HeaderMap { + type Item = (Option, T); + type IntoIter = IntoIter; + + /// Creates a consuming iterator, that 
is, one that moves keys and values + /// out of the map in arbitrary order. The map cannot be used after calling + /// this. + /// + /// For each yielded item that has `None` provided for the `HeaderName`, + /// then the associated header name is the same as that of the previously + /// yielded item. The first yielded item will have `HeaderName` set. + /// + /// # Examples + /// + /// Basic usage. + /// + /// ``` + /// # use http::header; + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// map.insert(header::CONTENT_LENGTH, "123".parse().unwrap()); + /// map.insert(header::CONTENT_TYPE, "json".parse().unwrap()); + /// + /// let mut iter = map.into_iter(); + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_LENGTH), "123".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_TYPE), "json".parse().unwrap()))); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// Multiple values per key. + /// + /// ``` + /// # use http::header; + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// + /// map.append(header::CONTENT_LENGTH, "123".parse().unwrap()); + /// map.append(header::CONTENT_LENGTH, "456".parse().unwrap()); + /// + /// map.append(header::CONTENT_TYPE, "json".parse().unwrap()); + /// map.append(header::CONTENT_TYPE, "html".parse().unwrap()); + /// map.append(header::CONTENT_TYPE, "xml".parse().unwrap()); + /// + /// let mut iter = map.into_iter(); + /// + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_LENGTH), "123".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((None, "456".parse().unwrap()))); + /// + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_TYPE), "json".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((None, "html".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((None, "xml".parse().unwrap()))); + /// assert!(iter.next().is_none()); + /// ``` + fn into_iter(self) -> IntoIter { + IntoIter { + next: None, + entries: self.entries.into_iter(), + 
extra_values: self.extra_values, + } + } +} + +impl FromIterator<(HeaderName, T)> for HeaderMap { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let mut map = HeaderMap::default(); + map.extend(iter); + map + } +} + +/// Try to convert a `HashMap` into a `HeaderMap`. +/// +/// # Examples +/// +/// ``` +/// use std::collections::HashMap; +/// use std::convert::TryInto; +/// use http::HeaderMap; +/// +/// let mut map = HashMap::new(); +/// map.insert("X-Custom-Header".to_string(), "my value".to_string()); +/// +/// let headers: HeaderMap = (&map).try_into().expect("valid headers"); +/// assert_eq!(headers["X-Custom-Header"], "my value"); +/// ``` +impl<'a, K, V, T> TryFrom<&'a HashMap> for HeaderMap + where + K: Eq + Hash, + HeaderName: TryFrom<&'a K>, + >::Error: Into, + T: TryFrom<&'a V>, + T::Error: Into, +{ + type Error = Error; + + fn try_from(c: &'a HashMap) -> Result { + c.into_iter() + .map(|(k, v)| -> crate::Result<(HeaderName, T)> { + let name = TryFrom::try_from(k).map_err(Into::into)?; + let value = TryFrom::try_from(v).map_err(Into::into)?; + Ok((name, value)) + }) + .collect() + } +} + +impl Extend<(Option, T)> for HeaderMap { + /// Extend a `HeaderMap` with the contents of another `HeaderMap`. + /// + /// This function expects the yielded items to follow the same structure as + /// `IntoIter`. + /// + /// # Panics + /// + /// This panics if the first yielded item does not have a `HeaderName`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(ACCEPT, "text/plain".parse().unwrap()); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// let mut extra = HeaderMap::new(); + /// + /// extra.insert(HOST, "foo.bar".parse().unwrap()); + /// extra.insert(COOKIE, "hello".parse().unwrap()); + /// extra.append(COOKIE, "world".parse().unwrap()); + /// + /// map.extend(extra); + /// + /// assert_eq!(map["host"], "foo.bar"); + /// assert_eq!(map["accept"], "text/plain"); + /// assert_eq!(map["cookie"], "hello"); + /// + /// let v = map.get_all("host"); + /// assert_eq!(1, v.iter().count()); + /// + /// let v = map.get_all("cookie"); + /// assert_eq!(2, v.iter().count()); + /// ``` + fn extend, T)>>(&mut self, iter: I) { + let mut iter = iter.into_iter(); + + // The structure of this is a bit weird, but it is mostly to make the + // borrow checker happy. + let (mut key, mut val) = match iter.next() { + Some((Some(key), val)) => (key, val), + Some((None, _)) => panic!("expected a header name, but got None"), + None => return, + }; + + 'outer: loop { + let mut entry = match self.try_entry2(key).expect("size overflows MAX_SIZE") { + Entry::Occupied(mut e) => { + // Replace all previous values while maintaining a handle to + // the entry. + e.insert(val); + e + } + Entry::Vacant(e) => e.insert_entry(val), + }; + + // As long as `HeaderName` is none, keep inserting the value into + // the current entry + loop { + match iter.next() { + Some((Some(k), v)) => { + key = k; + val = v; + continue 'outer; + } + Some((None, v)) => { + entry.append(v); + } + None => { + return; + } + } + } + } + } +} + +impl Extend<(HeaderName, T)> for HeaderMap { + fn extend>(&mut self, iter: I) { + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. 
+ // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case. + let iter = iter.into_iter(); + + let reserve = if self.is_empty() { + iter.size_hint().0 + } else { + (iter.size_hint().0 + 1) / 2 + }; + + self.reserve(reserve); + + for (k, v) in iter { + self.append(k, v); + } + } +} + +impl PartialEq for HeaderMap { + fn eq(&self, other: &HeaderMap) -> bool { + if self.len() != other.len() { + return false; + } + + self.keys() + .all(|key| self.get_all(key) == other.get_all(key)) + } +} + +impl Eq for HeaderMap {} + +impl fmt::Debug for HeaderMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map().entries(self.iter()).finish() + } +} + +impl Default for HeaderMap { + fn default() -> Self { + HeaderMap::try_with_capacity(0).expect("zero capacity should never fail") + } +} + +impl<'a, K, T> ops::Index for HeaderMap +where + K: AsHeaderName, +{ + type Output = T; + + /// # Panics + /// Using the index operator will cause a panic if the header you're querying isn't set. + #[inline] + fn index(&self, index: K) -> &T { + match self.get2(&index) { + Some(val) => val, + None => panic!("no entry found for key {:?}", index.as_str()), + } + } +} + +/// phase 2 is post-insert where we forward-shift `Pos` in the indices. 
+/// +/// returns the number of displaced elements +#[inline] +fn do_insert_phase_two(indices: &mut [Pos], mut probe: usize, mut old_pos: Pos) -> usize { + let mut num_displaced = 0; + + probe_loop!(probe < indices.len(), { + let pos = &mut indices[probe]; + + if pos.is_none() { + *pos = old_pos; + break; + } else { + num_displaced += 1; + old_pos = mem::replace(pos, old_pos); + } + }); + + num_displaced +} + +#[inline] +fn append_value( + entry_idx: usize, + entry: &mut Bucket, + extra: &mut Vec>, + value: T, +) { + match entry.links { + Some(links) => { + let idx = extra.len(); + extra.push(ExtraValue { + value: value, + prev: Link::Extra(links.tail), + next: Link::Entry(entry_idx), + }); + + extra[links.tail].next = Link::Extra(idx); + + entry.links = Some(Links { tail: idx, ..links }); + } + None => { + let idx = extra.len(); + extra.push(ExtraValue { + value: value, + prev: Link::Entry(entry_idx), + next: Link::Entry(entry_idx), + }); + + entry.links = Some(Links { + next: idx, + tail: idx, + }); + } + } +} + +// ===== impl Iter ===== + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = (&'a HeaderName, &'a T); + + fn next(&mut self) -> Option { + use self::Cursor::*; + + if self.cursor.is_none() { + if (self.entry + 1) >= self.map.entries.len() { + return None; + } + + self.entry += 1; + self.cursor = Some(Cursor::Head); + } + + let entry = &self.map.entries[self.entry]; + + match self.cursor.unwrap() { + Head => { + self.cursor = entry.links.map(|l| Values(l.next)); + Some((&entry.key, &entry.value)) + } + Values(idx) => { + let extra = &self.map.extra_values[idx]; + + match extra.next { + Link::Entry(_) => self.cursor = None, + Link::Extra(i) => self.cursor = Some(Values(i)), + } + + Some((&entry.key, &extra.value)) + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let map = self.map; + debug_assert!(map.entries.len() >= self.entry); + + let lower = map.entries.len() - self.entry; + // We could pessimistically guess at the upper bound, saying + 
// that its lower + map.extra_values.len(). That could be + // way over though, such as if we're near the end, and have + // already gone through several extra values... + (lower, None) + } +} + +impl<'a, T> FusedIterator for Iter<'a, T> {} + +unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {} +unsafe impl<'a, T: Sync> Send for Iter<'a, T> {} + +// ===== impl IterMut ===== + +impl<'a, T> IterMut<'a, T> { + fn next_unsafe(&mut self) -> Option<(&'a HeaderName, *mut T)> { + use self::Cursor::*; + + if self.cursor.is_none() { + if (self.entry + 1) >= unsafe { &*self.map }.entries.len() { + return None; + } + + self.entry += 1; + self.cursor = Some(Cursor::Head); + } + + let entry = unsafe { &mut (*self.map).entries[self.entry] }; + + match self.cursor.unwrap() { + Head => { + self.cursor = entry.links.map(|l| Values(l.next)); + Some((&entry.key, &mut entry.value as *mut _)) + } + Values(idx) => { + let extra = unsafe { &mut (*self.map).extra_values[idx] }; + + match extra.next { + Link::Entry(_) => self.cursor = None, + Link::Extra(i) => self.cursor = Some(Values(i)), + } + + Some((&entry.key, &mut extra.value as *mut _)) + } + } + } +} + +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = (&'a HeaderName, &'a mut T); + + fn next(&mut self) -> Option { + self.next_unsafe() + .map(|(key, ptr)| (key, unsafe { &mut *ptr })) + } + + fn size_hint(&self) -> (usize, Option) { + let map = unsafe { &*self.map }; + debug_assert!(map.entries.len() >= self.entry); + + let lower = map.entries.len() - self.entry; + // We could pessimistically guess at the upper bound, saying + // that its lower + map.extra_values.len(). That could be + // way over though, such as if we're near the end, and have + // already gone through several extra values... 
+ (lower, None) + } +} + +impl<'a, T> FusedIterator for IterMut<'a, T> {} + +unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {} +unsafe impl<'a, T: Send> Send for IterMut<'a, T> {} + +// ===== impl Keys ===== + +impl<'a, T> Iterator for Keys<'a, T> { + type Item = &'a HeaderName; + + fn next(&mut self) -> Option { + self.inner.next().map(|b| &b.key) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl<'a, T> ExactSizeIterator for Keys<'a, T> {} +impl<'a, T> FusedIterator for Keys<'a, T> {} + +// ===== impl Values ==== + +impl<'a, T> Iterator for Values<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + self.inner.next().map(|(_, v)| v) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl<'a, T> FusedIterator for Values<'a, T> {} + +// ===== impl ValuesMut ==== + +impl<'a, T> Iterator for ValuesMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + self.inner.next().map(|(_, v)| v) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl<'a, T> FusedIterator for ValuesMut<'a, T> {} + +// ===== impl Drain ===== + +impl<'a, T> Iterator for Drain<'a, T> { + type Item = (Option, T); + + fn next(&mut self) -> Option { + if let Some(next) = self.next { + // Remove the extra value + + let raw_links = RawLinks(self.entries); + let extra = unsafe { + remove_extra_value(raw_links, &mut *self.extra_values, next) + }; + + match extra.next { + Link::Extra(idx) => self.next = Some(idx), + Link::Entry(_) => self.next = None, + } + + return Some((None, extra.value)); + } + + let idx = self.idx; + + if idx == self.len { + return None; + } + + self.idx += 1; + + unsafe { + let entry = &(*self.entries)[idx]; + + // Read the header name + let key = ptr::read(&entry.key as *const _); + let value = ptr::read(&entry.value as *const _); + self.next = entry.links.map(|l| l.next); + + Some((Some(key), value)) + } + } + + fn size_hint(&self) -> (usize, 
Option) { + // At least this many names... It's unknown if the user wants + // to count the extra_values on top. + // + // For instance, extending a new `HeaderMap` wouldn't need to + // reserve the upper-bound in `entries`, only the lower-bound. + let lower = self.len - self.idx; + let upper = unsafe { (*self.extra_values).len() } + lower; + (lower, Some(upper)) + } +} + +impl<'a, T> FusedIterator for Drain<'a, T> {} + +impl<'a, T> Drop for Drain<'a, T> { + fn drop(&mut self) { + for _ in self {} + } +} + +unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {} +unsafe impl<'a, T: Send> Send for Drain<'a, T> {} + +// ===== impl Entry ===== + +impl<'a, T> Entry<'a, T> { + /// Ensures a value is in the entry by inserting the default if empty. + /// + /// Returns a mutable reference to the **first** value in the entry. + /// + /// # Panics + /// + /// This method panics if capacity exceeds max `HeaderMap` capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map: HeaderMap = HeaderMap::default(); + /// + /// let headers = &[ + /// "content-length", + /// "x-hello", + /// "Content-Length", + /// "x-world", + /// ]; + /// + /// for &header in headers { + /// let counter = map.entry(header) + /// .or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(map["content-length"], 2); + /// assert_eq!(map["x-hello"], 1); + /// ``` + pub fn or_insert(self, default: T) -> &'a mut T { + self.or_try_insert(default) + .expect("size overflows MAX_SIZE") + } + + /// Ensures a value is in the entry by inserting the default if empty. + /// + /// Returns a mutable reference to the **first** value in the entry. 
+ /// + /// # Errors + /// + /// This function may return an error if `HeaderMap` exceeds max capacity + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map: HeaderMap = HeaderMap::default(); + /// + /// let headers = &[ + /// "content-length", + /// "x-hello", + /// "Content-Length", + /// "x-world", + /// ]; + /// + /// for &header in headers { + /// let counter = map.entry(header) + /// .or_try_insert(0) + /// .unwrap(); + /// *counter += 1; + /// } + /// + /// assert_eq!(map["content-length"], 2); + /// assert_eq!(map["x-hello"], 1); + /// ``` + pub fn or_try_insert(self, default: T) -> Result<&'a mut T, MaxSizeReached> { + use self::Entry::*; + + match self { + Occupied(e) => Ok(e.into_mut()), + Vacant(e) => e.try_insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default + /// function if empty. + /// + /// The default function is not called if the entry exists in the map. + /// Returns a mutable reference to the **first** value in the entry. + /// + /// # Examples + /// + /// Basic usage. + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map = HeaderMap::new(); + /// + /// let res = map.entry("x-hello") + /// .or_insert_with(|| "world".parse().unwrap()); + /// + /// assert_eq!(res, "world"); + /// ``` + /// + /// The default function is not called if the entry exists in the map. + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.try_insert(HOST, "world".parse().unwrap()).unwrap(); + /// + /// let res = map.try_entry("host") + /// .unwrap() + /// .or_try_insert_with(|| unreachable!()) + /// .unwrap(); + /// + /// + /// assert_eq!(res, "world"); + /// ``` + pub fn or_insert_with T>(self, default: F) -> &'a mut T { + self.or_try_insert_with(default) + .expect("size overflows MAX_SIZE") + } + + /// Ensures a value is in the entry by inserting the result of the default + /// function if empty. 
+ /// + /// The default function is not called if the entry exists in the map. + /// Returns a mutable reference to the **first** value in the entry. + /// + /// # Examples + /// + /// Basic usage. + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map = HeaderMap::new(); + /// + /// let res = map.entry("x-hello") + /// .or_insert_with(|| "world".parse().unwrap()); + /// + /// assert_eq!(res, "world"); + /// ``` + /// + /// The default function is not called if the entry exists in the map. + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.try_insert(HOST, "world".parse().unwrap()).unwrap(); + /// + /// let res = map.try_entry("host") + /// .unwrap() + /// .or_try_insert_with(|| unreachable!()) + /// .unwrap(); + /// + /// + /// assert_eq!(res, "world"); + /// ``` + pub fn or_try_insert_with T>( + self, + default: F, + ) -> Result<&'a mut T, MaxSizeReached> { + use self::Entry::*; + + match self { + Occupied(e) => Ok(e.into_mut()), + Vacant(e) => e.try_insert(default()), + } + } + + /// Returns a reference to the entry's key + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(map.entry("x-hello").key(), "x-hello"); + /// ``` + pub fn key(&self) -> &HeaderName { + use self::Entry::*; + + match *self { + Vacant(ref e) => e.key(), + Occupied(ref e) => e.key(), + } + } +} + +// ===== impl VacantEntry ===== + +impl<'a, T> VacantEntry<'a, T> { + /// Returns a reference to the entry's key + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(map.entry("x-hello").key().as_str(), "x-hello"); + /// ``` + pub fn key(&self) -> &HeaderName { + &self.key + } + + /// Take ownership of the key + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry}; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = 
map.entry("x-hello") { + /// assert_eq!(v.into_key().as_str(), "x-hello"); + /// } + /// ``` + pub fn into_key(self) -> HeaderName { + self.key + } + + /// Insert the value into the entry. + /// + /// The value will be associated with this entry's key. A mutable reference + /// to the inserted value will be returned. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry}; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("x-hello") { + /// v.insert("world".parse().unwrap()); + /// } + /// + /// assert_eq!(map["x-hello"], "world"); + /// ``` + pub fn insert(self, value: T) -> &'a mut T { + self.try_insert(value).expect("size overflows MAX_SIZE") + } + + /// Insert the value into the entry. + /// + /// The value will be associated with this entry's key. A mutable reference + /// to the inserted value will be returned. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry}; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("x-hello") { + /// v.insert("world".parse().unwrap()); + /// } + /// + /// assert_eq!(map["x-hello"], "world"); + /// ``` + pub fn try_insert(self, value: T) -> Result<&'a mut T, MaxSizeReached> { + // Ensure that there is space in the map + let index = + self.map + .try_insert_phase_two(self.key, value, self.hash, self.probe, self.danger)?; + + Ok(&mut self.map.entries[index].value) + } + + /// Insert the value into the entry. + /// + /// The value will be associated with this entry's key. The new + /// `OccupiedEntry` is returned, allowing for further manipulation. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = map.try_entry("x-hello").unwrap() { + /// let mut e = v.try_insert_entry("world".parse().unwrap()).unwrap(); + /// e.insert("world2".parse().unwrap()); + /// } + /// + /// assert_eq!(map["x-hello"], "world2"); + /// ``` + pub fn insert_entry(self, value: T) -> OccupiedEntry<'a, T> { + self.try_insert_entry(value) + .expect("size overflows MAX_SIZE") + } + + /// Insert the value into the entry. + /// + /// The value will be associated with this entry's key. The new + /// `OccupiedEntry` is returned, allowing for further manipulation. + /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = map.try_entry("x-hello").unwrap() { + /// let mut e = v.try_insert_entry("world".parse().unwrap()).unwrap(); + /// e.insert("world2".parse().unwrap()); + /// } + /// + /// assert_eq!(map["x-hello"], "world2"); + /// ``` + pub fn try_insert_entry(self, value: T) -> Result, MaxSizeReached> { + // Ensure that there is space in the map + let index = + self.map + .try_insert_phase_two(self.key, value, self.hash, self.probe, self.danger)?; + + Ok(OccupiedEntry { + map: self.map, + index: index, + probe: self.probe, + }) + } +} + +// ===== impl GetAll ===== + +impl<'a, T: 'a> GetAll<'a, T> { + /// Returns an iterator visiting all values associated with the entry. + /// + /// Values are iterated in insertion order. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// map.append(HOST, "hello.earth".parse().unwrap()); + /// + /// let values = map.get_all("host"); + /// let mut iter = values.iter(); + /// assert_eq!(&"hello.world", iter.next().unwrap()); + /// assert_eq!(&"hello.earth", iter.next().unwrap()); + /// assert!(iter.next().is_none()); + /// ``` + pub fn iter(&self) -> ValueIter<'a, T> { + // This creates a new GetAll struct so that the lifetime + // isn't bound to &self. + GetAll { + map: self.map, + index: self.index, + } + .into_iter() + } +} + +impl<'a, T: PartialEq> PartialEq for GetAll<'a, T> { + fn eq(&self, other: &Self) -> bool { + self.iter().eq(other.iter()) + } +} + +impl<'a, T> IntoIterator for GetAll<'a, T> { + type Item = &'a T; + type IntoIter = ValueIter<'a, T>; + + fn into_iter(self) -> ValueIter<'a, T> { + self.map.value_iter(self.index) + } +} + +impl<'a, 'b: 'a, T> IntoIterator for &'b GetAll<'a, T> { + type Item = &'a T; + type IntoIter = ValueIter<'a, T>; + + fn into_iter(self) -> ValueIter<'a, T> { + self.map.value_iter(self.index) + } +} + +// ===== impl ValueIter ===== + +impl<'a, T: 'a> Iterator for ValueIter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + use self::Cursor::*; + + match self.front { + Some(Head) => { + let entry = &self.map.entries[self.index]; + + if self.back == Some(Head) { + self.front = None; + self.back = None; + } else { + // Update the iterator state + match entry.links { + Some(links) => { + self.front = Some(Values(links.next)); + } + None => unreachable!(), + } + } + + Some(&entry.value) + } + Some(Values(idx)) => { + let extra = &self.map.extra_values[idx]; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.next { + Link::Entry(_) => self.front = None, + Link::Extra(i) => self.front = Some(Values(i)), + 
} + } + + Some(&extra.value) + } + None => None, + } + } + + fn size_hint(&self) -> (usize, Option) { + match (self.front, self.back) { + // Exactly 1 value... + (Some(Cursor::Head), Some(Cursor::Head)) => (1, Some(1)), + // At least 1... + (Some(_), _) => (1, None), + // No more values... + (None, _) => (0, Some(0)), + } + } +} + +impl<'a, T: 'a> DoubleEndedIterator for ValueIter<'a, T> { + fn next_back(&mut self) -> Option { + use self::Cursor::*; + + match self.back { + Some(Head) => { + self.front = None; + self.back = None; + Some(&self.map.entries[self.index].value) + } + Some(Values(idx)) => { + let extra = &self.map.extra_values[idx]; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.prev { + Link::Entry(_) => self.back = Some(Head), + Link::Extra(idx) => self.back = Some(Values(idx)), + } + } + + Some(&extra.value) + } + None => None, + } + } +} + +impl<'a, T> FusedIterator for ValueIter<'a, T> {} + +// ===== impl ValueIterMut ===== + +impl<'a, T: 'a> Iterator for ValueIterMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + use self::Cursor::*; + + let entry = unsafe { &mut (*self.map).entries[self.index] }; + + match self.front { + Some(Head) => { + if self.back == Some(Head) { + self.front = None; + self.back = None; + } else { + // Update the iterator state + match entry.links { + Some(links) => { + self.front = Some(Values(links.next)); + } + None => unreachable!(), + } + } + + Some(&mut entry.value) + } + Some(Values(idx)) => { + let extra = unsafe { &mut (*self.map).extra_values[idx] }; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.next { + Link::Entry(_) => self.front = None, + Link::Extra(i) => self.front = Some(Values(i)), + } + } + + Some(&mut extra.value) + } + None => None, + } + } +} + +impl<'a, T: 'a> DoubleEndedIterator for ValueIterMut<'a, T> { + fn next_back(&mut self) -> Option { + use self::Cursor::*; + + let entry 
= unsafe { &mut (*self.map).entries[self.index] }; + + match self.back { + Some(Head) => { + self.front = None; + self.back = None; + Some(&mut entry.value) + } + Some(Values(idx)) => { + let extra = unsafe { &mut (*self.map).extra_values[idx] }; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.prev { + Link::Entry(_) => self.back = Some(Head), + Link::Extra(idx) => self.back = Some(Values(idx)), + } + } + + Some(&mut extra.value) + } + None => None, + } + } +} + +impl<'a, T> FusedIterator for ValueIterMut<'a, T> {} + +unsafe impl<'a, T: Sync> Sync for ValueIterMut<'a, T> {} +unsafe impl<'a, T: Send> Send for ValueIterMut<'a, T> {} + +// ===== impl IntoIter ===== + +impl Iterator for IntoIter { + type Item = (Option, T); + + fn next(&mut self) -> Option { + if let Some(next) = self.next { + self.next = match self.extra_values[next].next { + Link::Entry(_) => None, + Link::Extra(v) => Some(v), + }; + + let value = unsafe { ptr::read(&self.extra_values[next].value) }; + + return Some((None, value)); + } + + if let Some(bucket) = self.entries.next() { + self.next = bucket.links.map(|l| l.next); + let name = Some(bucket.key); + let value = bucket.value; + + return Some((name, value)); + } + + None + } + + fn size_hint(&self) -> (usize, Option) { + let (lower, _) = self.entries.size_hint(); + // There could be more than just the entries upper, as there + // could be items in the `extra_values`. We could guess, saying + // `upper + extra_values.len()`, but that could overestimate by a lot. + (lower, None) + } +} + +impl FusedIterator for IntoIter {} + +impl Drop for IntoIter { + fn drop(&mut self) { + // Ensure the iterator is consumed + for _ in self.by_ref() {} + + // All the values have already been yielded out. + unsafe { + self.extra_values.set_len(0); + } + } +} + +// ===== impl OccupiedEntry ===== + +impl<'a, T> OccupiedEntry<'a, T> { + /// Returns a reference to the entry's key. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host") { + /// assert_eq!("host", e.key()); + /// } + /// ``` + pub fn key(&self) -> &HeaderName { + &self.map.entries[self.index].key + } + + /// Get a reference to the first value in the entry. + /// + /// Values are stored in insertion order. + /// + /// # Panics + /// + /// `get` panics if there are no values associated with the entry. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host") { + /// assert_eq!(e.get(), &"hello.world"); + /// + /// e.append("hello.earth".parse().unwrap()); + /// + /// assert_eq!(e.get(), &"hello.world"); + /// } + /// ``` + pub fn get(&self) -> &T { + &self.map.entries[self.index].value + } + + /// Get a mutable reference to the first value in the entry. + /// + /// Values are stored in insertion order. + /// + /// # Panics + /// + /// `get_mut` panics if there are no values associated with the entry. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "hello.world".to_string()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host") { + /// e.get_mut().push_str("-2"); + /// assert_eq!(e.get(), &"hello.world-2"); + /// } + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.map.entries[self.index].value + } + + /// Converts the `OccupiedEntry` into a mutable reference to the **first** + /// value. + /// + /// The lifetime of the returned reference is bound to the original map. + /// + /// # Panics + /// + /// `into_mut` panics if there are no values associated with the entry. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "hello.world".to_string()); + /// map.append(HOST, "hello.earth".to_string()); + /// + /// if let Entry::Occupied(e) = map.entry("host") { + /// e.into_mut().push_str("-2"); + /// } + /// + /// assert_eq!("hello.world-2", map["host"]); + /// ``` + pub fn into_mut(self) -> &'a mut T { + &mut self.map.entries[self.index].value + } + + /// Sets the value of the entry. + /// + /// All previous values associated with the entry are removed and the first + /// one is returned. See `insert_mult` for an API that returns all values. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host") { + /// let mut prev = e.insert("earth".parse().unwrap()); + /// assert_eq!("hello.world", prev); + /// } + /// + /// assert_eq!("earth", map["host"]); + /// ``` + pub fn insert(&mut self, value: T) -> T { + self.map.insert_occupied(self.index, value.into()) + } + + /// Sets the value of the entry. + /// + /// This function does the same as `insert` except it returns an iterator + /// that yields all values previously associated with the key. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// map.append(HOST, "world2".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host") { + /// let mut prev = e.insert_mult("earth".parse().unwrap()); + /// assert_eq!("world", prev.next().unwrap()); + /// assert_eq!("world2", prev.next().unwrap()); + /// assert!(prev.next().is_none()); + /// } + /// + /// assert_eq!("earth", map["host"]); + /// ``` + pub fn insert_mult(&mut self, value: T) -> ValueDrain<'_, T> { + self.map.insert_occupied_mult(self.index, value.into()) + } + + /// Insert the value into the entry. + /// + /// The new value is appended to the end of the entry's value list. All + /// previous values associated with the entry are retained. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host") { + /// e.append("earth".parse().unwrap()); + /// } + /// + /// let values = map.get_all("host"); + /// let mut i = values.iter(); + /// assert_eq!("world", *i.next().unwrap()); + /// assert_eq!("earth", *i.next().unwrap()); + /// ``` + pub fn append(&mut self, value: T) { + let idx = self.index; + let entry = &mut self.map.entries[idx]; + append_value(idx, entry, &mut self.map.extra_values, value.into()); + } + + /// Remove the entry from the map. + /// + /// All values associated with the entry are removed and the first one is + /// returned. See `remove_entry_mult` for an API that returns all values. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host") { + /// let mut prev = e.remove(); + /// assert_eq!("world", prev); + /// } + /// + /// assert!(!map.contains_key("host")); + /// ``` + pub fn remove(self) -> T { + self.remove_entry().1 + } + + /// Remove the entry from the map. + /// + /// The key and all values associated with the entry are removed and the + /// first one is returned. See `remove_entry_mult` for an API that returns + /// all values. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host") { + /// let (key, mut prev) = e.remove_entry(); + /// assert_eq!("host", key.as_str()); + /// assert_eq!("world", prev); + /// } + /// + /// assert!(!map.contains_key("host")); + /// ``` + pub fn remove_entry(self) -> (HeaderName, T) { + if let Some(links) = self.map.entries[self.index].links { + self.map.remove_all_extra_values(links.next); + } + + let entry = self.map.remove_found(self.probe, self.index); + + (entry.key, entry.value) + } + + /// Remove the entry from the map. + /// + /// The key and all values associated with the entry are removed and + /// returned. 
+ pub fn remove_entry_mult(self) -> (HeaderName, ValueDrain<'a, T>) { + let raw_links = self.map.raw_links(); + let extra_values = &mut self.map.extra_values; + + let next = self.map.entries[self.index].links.map(|l| { + drain_all_extra_values(raw_links, extra_values, l.next) + .into_iter() + }); + + let entry = self.map.remove_found(self.probe, self.index); + + let drain = ValueDrain { + first: Some(entry.value), + next, + lt: PhantomData, + }; + (entry.key, drain) + } + + /// Returns an iterator visiting all values associated with the entry. + /// + /// Values are iterated in insertion order. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// map.append(HOST, "earth".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host") { + /// let mut iter = e.iter(); + /// assert_eq!(&"world", iter.next().unwrap()); + /// assert_eq!(&"earth", iter.next().unwrap()); + /// assert!(iter.next().is_none()); + /// } + /// ``` + pub fn iter(&self) -> ValueIter<'_, T> { + self.map.value_iter(Some(self.index)) + } + + /// Returns an iterator mutably visiting all values associated with the + /// entry. + /// + /// Values are iterated in insertion order. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "world".to_string()); + /// map.append(HOST, "earth".to_string()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host") { + /// for e in e.iter_mut() { + /// e.push_str("-boop"); + /// } + /// } + /// + /// let mut values = map.get_all("host"); + /// let mut i = values.iter(); + /// assert_eq!(&"world-boop", i.next().unwrap()); + /// assert_eq!(&"earth-boop", i.next().unwrap()); + /// ``` + pub fn iter_mut(&mut self) -> ValueIterMut<'_, T> { + self.map.value_iter_mut(self.index) + } +} + +impl<'a, T> IntoIterator for OccupiedEntry<'a, T> { + type Item = &'a mut T; + type IntoIter = ValueIterMut<'a, T>; + + fn into_iter(self) -> ValueIterMut<'a, T> { + self.map.value_iter_mut(self.index) + } +} + +impl<'a, 'b: 'a, T> IntoIterator for &'b OccupiedEntry<'a, T> { + type Item = &'a T; + type IntoIter = ValueIter<'a, T>; + + fn into_iter(self) -> ValueIter<'a, T> { + self.iter() + } +} + +impl<'a, 'b: 'a, T> IntoIterator for &'b mut OccupiedEntry<'a, T> { + type Item = &'a mut T; + type IntoIter = ValueIterMut<'a, T>; + + fn into_iter(self) -> ValueIterMut<'a, T> { + self.iter_mut() + } +} + +// ===== impl ValueDrain ===== + +impl<'a, T> Iterator for ValueDrain<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + if self.first.is_some() { + self.first.take() + } else if let Some(ref mut extras) = self.next { + extras.next() + } else { + None + } + } + + fn size_hint(&self) -> (usize, Option) { + match (&self.first, &self.next) { + // Exactly 1 + (&Some(_), &None) => (1, Some(1)), + // 1 + extras + (&Some(_), &Some(ref extras)) => { + let (l, u) = extras.size_hint(); + (l + 1, u.map(|u| u + 1)) + }, + // Extras only + (&None, &Some(ref extras)) => extras.size_hint(), + // No more + (&None, &None) => (0, Some(0)), + } + } +} + +impl<'a, T> FusedIterator for ValueDrain<'a, T> {} + +impl<'a, T> Drop for 
ValueDrain<'a, T> { + fn drop(&mut self) { + while let Some(_) = self.next() {} + } +} + +unsafe impl<'a, T: Sync> Sync for ValueDrain<'a, T> {} +unsafe impl<'a, T: Send> Send for ValueDrain<'a, T> {} + +// ===== impl RawLinks ===== + +impl Clone for RawLinks { + fn clone(&self) -> RawLinks { + *self + } +} + +impl Copy for RawLinks {} + +impl ops::Index for RawLinks { + type Output = Option; + + fn index(&self, idx: usize) -> &Self::Output { + unsafe { + &(*self.0)[idx].links + } + } +} + +impl ops::IndexMut for RawLinks { + fn index_mut(&mut self, idx: usize) -> &mut Self::Output { + unsafe { + &mut (*self.0)[idx].links + } + } +} + +// ===== impl Pos ===== + +impl Pos { + #[inline] + fn new(index: usize, hash: HashValue) -> Self { + debug_assert!(index < MAX_SIZE); + Pos { + index: index as Size, + hash: hash, + } + } + + #[inline] + fn none() -> Self { + Pos { + index: !0, + hash: HashValue(0), + } + } + + #[inline] + fn is_some(&self) -> bool { + !self.is_none() + } + + #[inline] + fn is_none(&self) -> bool { + self.index == !0 + } + + #[inline] + fn resolve(&self) -> Option<(usize, HashValue)> { + if self.is_some() { + Some((self.index as usize, self.hash)) + } else { + None + } + } +} + +impl Danger { + fn is_red(&self) -> bool { + match *self { + Danger::Red(_) => true, + _ => false, + } + } + + fn to_red(&mut self) { + debug_assert!(self.is_yellow()); + *self = Danger::Red(RandomState::new()); + } + + fn is_yellow(&self) -> bool { + match *self { + Danger::Yellow => true, + _ => false, + } + } + + fn to_yellow(&mut self) { + match *self { + Danger::Green => { + *self = Danger::Yellow; + } + _ => {} + } + } + + fn to_green(&mut self) { + debug_assert!(self.is_yellow()); + *self = Danger::Green; + } +} + +// ===== impl MaxSizeReached ===== + +impl MaxSizeReached { + fn new() -> Self { + MaxSizeReached { _priv: () } + } +} + +impl fmt::Debug for MaxSizeReached { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("MaxSizeReached") + // 
skip _priv noise + .finish() + } +} + +impl fmt::Display for MaxSizeReached { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("max size reached") + } +} + +impl std::error::Error for MaxSizeReached {} + +// ===== impl Utils ===== + +#[inline] +fn usable_capacity(cap: usize) -> usize { + cap - cap / 4 +} + +#[inline] +fn to_raw_capacity(n: usize) -> usize { + match n.checked_add(n / 3) { + Some(n) => n, + None => panic!( + "requested capacity {} too large: overflow while converting to raw capacity", + n + ), + } +} + +#[inline] +fn desired_pos(mask: Size, hash: HashValue) -> usize { + (hash.0 & mask) as usize +} + +/// The number of steps that `current` is forward of the desired position for hash +#[inline] +fn probe_distance(mask: Size, hash: HashValue, current: usize) -> usize { + current.wrapping_sub(desired_pos(mask, hash)) & mask as usize +} + +fn hash_elem_using(danger: &Danger, k: &K) -> HashValue +where + K: Hash, +{ + use fnv::FnvHasher; + + const MASK: u64 = (MAX_SIZE as u64) - 1; + + let hash = match *danger { + // Safe hash + Danger::Red(ref hasher) => { + let mut h = hasher.build_hasher(); + k.hash(&mut h); + h.finish() + } + // Fast hash + _ => { + let mut h = FnvHasher::default(); + k.hash(&mut h); + h.finish() + } + }; + + HashValue((hash & MASK) as u16) +} + +/* + * + * ===== impl IntoHeaderName / AsHeaderName ===== + * + */ + +mod into_header_name { + use super::{Entry, HdrName, HeaderMap, HeaderName, MaxSizeReached}; + + /// A marker trait used to identify values that can be used as insert keys + /// to a `HeaderMap`. + pub trait IntoHeaderName: Sealed {} + + // All methods are on this pub(super) trait, instead of `IntoHeaderName`, + // so that they aren't publicly exposed to the world. + // + // Being on the `IntoHeaderName` trait would mean users could call + // `"host".insert(&mut map, "localhost")`. 
+ // + // Ultimately, this allows us to adjust the signatures of these methods + // without breaking any external crate. + pub trait Sealed { + #[doc(hidden)] + fn try_insert(self, map: &mut HeaderMap, val: T) + -> Result, MaxSizeReached>; + + #[doc(hidden)] + fn try_append(self, map: &mut HeaderMap, val: T) -> Result; + + #[doc(hidden)] + fn try_entry(self, map: &mut HeaderMap) -> Result, MaxSizeReached>; + } + + // ==== impls ==== + + impl Sealed for HeaderName { + #[inline] + fn try_insert( + self, + map: &mut HeaderMap, + val: T, + ) -> Result, MaxSizeReached> { + map.try_insert2(self, val) + } + + #[inline] + fn try_append(self, map: &mut HeaderMap, val: T) -> Result { + map.try_append2(self, val) + } + + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, MaxSizeReached> { + map.try_entry2(self) + } + } + + impl IntoHeaderName for HeaderName {} + + impl<'a> Sealed for &'a HeaderName { + #[inline] + fn try_insert( + self, + map: &mut HeaderMap, + val: T, + ) -> Result, MaxSizeReached> { + map.try_insert2(self, val) + } + #[inline] + fn try_append(self, map: &mut HeaderMap, val: T) -> Result { + map.try_append2(self, val) + } + + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, MaxSizeReached> { + map.try_entry2(self) + } + } + + impl<'a> IntoHeaderName for &'a HeaderName {} + + impl Sealed for &'static str { + #[inline] + fn try_insert( + self, + map: &mut HeaderMap, + val: T, + ) -> Result, MaxSizeReached> { + HdrName::from_static(self, move |hdr| map.try_insert2(hdr, val)) + } + #[inline] + fn try_append(self, map: &mut HeaderMap, val: T) -> Result { + HdrName::from_static(self, move |hdr| map.try_append2(hdr, val)) + } + + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, MaxSizeReached> { + HdrName::from_static(self, move |hdr| map.try_entry2(hdr)) + } + } + + impl IntoHeaderName for &'static str {} +} + +mod as_header_name { + use super::{Entry, HdrName, HeaderMap, HeaderName, InvalidHeaderName, MaxSizeReached}; + + 
/// A marker trait used to identify values that can be used as search keys + /// to a `HeaderMap`. + pub trait AsHeaderName: Sealed {} + + // Debug not currently needed, save on compiling it + #[allow(missing_debug_implementations)] + pub enum TryEntryError { + InvalidHeaderName(InvalidHeaderName), + MaxSizeReached(MaxSizeReached), + } + + impl From for TryEntryError { + fn from(e: InvalidHeaderName) -> TryEntryError { + TryEntryError::InvalidHeaderName(e) + } + } + + impl From for TryEntryError { + fn from(e: MaxSizeReached) -> TryEntryError { + TryEntryError::MaxSizeReached(e) + } + } + + // All methods are on this pub(super) trait, instead of `AsHeaderName`, + // so that they aren't publicly exposed to the world. + // + // Being on the `AsHeaderName` trait would mean users could call + // `"host".find(&map)`. + // + // Ultimately, this allows us to adjust the signatures of these methods + // without breaking any external crate. + pub trait Sealed { + #[doc(hidden)] + fn try_entry(self, map: &mut HeaderMap) -> Result, TryEntryError>; + + #[doc(hidden)] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)>; + + #[doc(hidden)] + fn as_str(&self) -> &str; + } + + // ==== impls ==== + + impl Sealed for HeaderName { + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, TryEntryError> { + Ok(map.try_entry2(self)?) + } + + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + map.find(self) + } + + fn as_str(&self) -> &str { + ::as_str(self) + } + } + + impl AsHeaderName for HeaderName {} + + impl<'a> Sealed for &'a HeaderName { + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, TryEntryError> { + Ok(map.try_entry2(self)?) 
+ } + + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + map.find(*self) + } + + fn as_str(&self) -> &str { + ::as_str(*self) + } + } + + impl<'a> AsHeaderName for &'a HeaderName {} + + impl<'a> Sealed for &'a str { + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, TryEntryError> { + Ok(HdrName::from_bytes(self.as_bytes(), move |hdr| { + map.try_entry2(hdr) + })??) + } + + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + HdrName::from_bytes(self.as_bytes(), move |hdr| map.find(&hdr)).unwrap_or(None) + } + + fn as_str(&self) -> &str { + self + } + } + + impl<'a> AsHeaderName for &'a str {} + + impl Sealed for String { + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, TryEntryError> { + Ok(self.as_str().try_entry(map)?) + } + + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + Sealed::find(&self.as_str(), map) + } + + fn as_str(&self) -> &str { + self + } + } + + impl AsHeaderName for String {} + + impl<'a> Sealed for &'a String { + #[inline] + fn try_entry(self, map: &mut HeaderMap) -> Result, TryEntryError> { + self.as_str().try_entry(map) + } + + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + Sealed::find(*self, map) + } + + fn as_str(&self) -> &str { + *self + } + } + + impl<'a> AsHeaderName for &'a String {} +} + +#[test] +fn test_bounds() { + fn check_bounds() {} + + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); +} + +#[test] +fn skip_duplicates_during_key_iteration() { + let mut map = HeaderMap::new(); + map.try_append("a", HeaderValue::from_static("a")).unwrap(); + map.try_append("a", HeaderValue::from_static("b")).unwrap(); + assert_eq!(map.keys().count(), map.keys_len()); +} diff --git 
a/.cargo-vendor/http-0.2.12/src/header/mod.rs b/.cargo-vendor/http-0.2.12/src/header/mod.rs new file mode 100644 index 0000000000..4995541209 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/header/mod.rs @@ -0,0 +1,175 @@ +//! HTTP header types +//! +//! The module provides [`HeaderName`], [`HeaderMap`], and a number of types +//! used for interacting with `HeaderMap`. These types allow representing both +//! HTTP/1 and HTTP/2 headers. +//! +//! # `HeaderName` +//! +//! The `HeaderName` type represents both standard header names as well as +//! custom header names. The type handles the case insensitive nature of header +//! names and is used as the key portion of `HeaderMap`. Header names are +//! normalized to lower case. In other words, when creating a `HeaderName` with +//! a string, even if upper case characters are included, when getting a string +//! representation of the `HeaderName`, it will be all lower case. This allows +//! for faster `HeaderMap` comparison operations. +//! +//! The internal representation is optimized to efficiently handle the cases +//! most commonly encountered when working with HTTP. Standard header names are +//! special cased and are represented internally as an enum. Short custom +//! headers will be stored directly in the `HeaderName` struct and will not +//! incur any allocation overhead, however longer strings will require an +//! allocation for storage. +//! +//! ## Limitations +//! +//! `HeaderName` has a max length of 32,768 for header names. Attempting to +//! parse longer names will result in a panic. +//! +//! # `HeaderMap` +//! +//! `HeaderMap` is a map structure of header names highly optimized for use +//! cases common with HTTP. It is a [multimap] structure, where each header name +//! may have multiple associated header values. Given this, some of the APIs +//! diverge from [`HashMap`]. +//! +//! ## Overview +//! +//! Just like `HashMap` in Rust's stdlib, `HeaderMap` is based on [Robin Hood +//! hashing]. 
This algorithm tends to reduce the worst case search times in the +//! table and enables high load factors without seriously affecting performance. +//! Internally, keys and values are stored in vectors. As such, each insertion +//! will not incur allocation overhead. However, once the underlying vector +//! storage is full, a larger vector must be allocated and all values copied. +//! +//! ## Deterministic ordering +//! +//! Unlike Rust's `HashMap`, values in `HeaderMap` are deterministically +//! ordered. Roughly, values are ordered by insertion. This means that a +//! function that deterministically operates on a header map can rely on the +//! iteration order to remain consistent across processes and platforms. +//! +//! ## Adaptive hashing +//! +//! `HeaderMap` uses an adaptive hashing strategy in order to efficiently handle +//! most common cases. All standard headers have statically computed hash values +//! which removes the need to perform any hashing of these headers at runtime. +//! The default hash function emphasizes performance over robustness. However, +//! `HeaderMap` detects high collision rates and switches to a secure hash +//! function in those events. The threshold is set such that only denial of +//! service attacks should trigger it. +//! +//! ## Limitations +//! +//! `HeaderMap` can store a maximum of 32,768 headers (header name / value +//! pairs). Attempting to insert more will result in a panic. +//! +//! [`HeaderName`]: struct.HeaderName.html +//! [`HeaderMap`]: struct.HeaderMap.html +//! [multimap]: https://en.wikipedia.org/wiki/Multimap +//! [`HashMap`]: https://doc.rust-lang.org/std/collections/struct.HashMap.html +//! 
[Robin Hood hashing]: https://en.wikipedia.org/wiki/Hash_table#Robin_Hood_hashing + +mod map; +mod name; +mod value; + +pub use self::map::{ + AsHeaderName, Drain, Entry, GetAll, HeaderMap, IntoHeaderName, IntoIter, Iter, IterMut, Keys, + MaxSizeReached, OccupiedEntry, VacantEntry, ValueDrain, ValueIter, ValueIterMut, Values, + ValuesMut, +}; +pub use self::name::{HeaderName, InvalidHeaderName}; +pub use self::value::{HeaderValue, InvalidHeaderValue, ToStrError}; + +// Use header name constants +pub use self::name::{ + ACCEPT, + ACCEPT_CHARSET, + ACCEPT_ENCODING, + ACCEPT_LANGUAGE, + ACCEPT_RANGES, + ACCESS_CONTROL_ALLOW_CREDENTIALS, + ACCESS_CONTROL_ALLOW_HEADERS, + ACCESS_CONTROL_ALLOW_METHODS, + ACCESS_CONTROL_ALLOW_ORIGIN, + ACCESS_CONTROL_EXPOSE_HEADERS, + ACCESS_CONTROL_MAX_AGE, + ACCESS_CONTROL_REQUEST_HEADERS, + ACCESS_CONTROL_REQUEST_METHOD, + AGE, + ALLOW, + ALT_SVC, + AUTHORIZATION, + CACHE_CONTROL, + CACHE_STATUS, + CDN_CACHE_CONTROL, + CONNECTION, + CONTENT_DISPOSITION, + CONTENT_ENCODING, + CONTENT_LANGUAGE, + CONTENT_LENGTH, + CONTENT_LOCATION, + CONTENT_RANGE, + CONTENT_SECURITY_POLICY, + CONTENT_SECURITY_POLICY_REPORT_ONLY, + CONTENT_TYPE, + COOKIE, + DNT, + DATE, + ETAG, + EXPECT, + EXPIRES, + FORWARDED, + FROM, + HOST, + IF_MATCH, + IF_MODIFIED_SINCE, + IF_NONE_MATCH, + IF_RANGE, + IF_UNMODIFIED_SINCE, + LAST_MODIFIED, + LINK, + LOCATION, + MAX_FORWARDS, + ORIGIN, + PRAGMA, + PROXY_AUTHENTICATE, + PROXY_AUTHORIZATION, + PUBLIC_KEY_PINS, + PUBLIC_KEY_PINS_REPORT_ONLY, + RANGE, + REFERER, + REFERRER_POLICY, + REFRESH, + RETRY_AFTER, + SEC_WEBSOCKET_ACCEPT, + SEC_WEBSOCKET_EXTENSIONS, + SEC_WEBSOCKET_KEY, + SEC_WEBSOCKET_PROTOCOL, + SEC_WEBSOCKET_VERSION, + SERVER, + SET_COOKIE, + STRICT_TRANSPORT_SECURITY, + TE, + TRAILER, + TRANSFER_ENCODING, + UPGRADE, + UPGRADE_INSECURE_REQUESTS, + USER_AGENT, + VARY, + VIA, + WARNING, + WWW_AUTHENTICATE, + X_CONTENT_TYPE_OPTIONS, + X_DNS_PREFETCH_CONTROL, + X_FRAME_OPTIONS, + X_XSS_PROTECTION, +}; + +/// 
Maximum length of a header name +/// +/// Generally, 64kb for a header name is WAY too much than would ever be needed +/// in practice. Restricting it to this size enables using `u16` values to +/// represent offsets when dealing with header names. +const MAX_HEADER_NAME_LEN: usize = (1 << 16) - 1; diff --git a/.cargo-vendor/http-0.2.12/src/header/name.rs b/.cargo-vendor/http-0.2.12/src/header/name.rs new file mode 100644 index 0000000000..e51bc355f7 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/header/name.rs @@ -0,0 +1,1880 @@ +use crate::byte_str::ByteStr; +use bytes::{Bytes, BytesMut}; + +use std::borrow::Borrow; +use std::error::Error; +use std::convert::{TryFrom}; +use std::hash::{Hash, Hasher}; +use std::mem::MaybeUninit; +use std::str::FromStr; +use std::fmt; + +/// Represents an HTTP header field name +/// +/// Header field names identify the header. Header sets may include multiple +/// headers with the same name. The HTTP specification defines a number of +/// standard headers, but HTTP messages may include non-standard header names as +/// well as long as they adhere to the specification. +/// +/// `HeaderName` is used as the [`HeaderMap`] key. Constants are available for +/// all standard header names in the [`header`] module. +/// +/// # Representation +/// +/// `HeaderName` represents standard header names using an `enum`, as such they +/// will not require an allocation for storage. All custom header names are +/// lower cased upon conversion to a `HeaderName` value. This avoids the +/// overhead of dynamically doing lower case conversion during the hash code +/// computation and the comparison operation. 
+/// +/// [`HeaderMap`]: struct.HeaderMap.html +/// [`header`]: index.html +#[derive(Clone, Eq, PartialEq, Hash)] +pub struct HeaderName { + inner: Repr, +} + +// Almost a full `HeaderName` +#[derive(Debug, Hash)] +pub struct HdrName<'a> { + inner: Repr>, +} + +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +enum Repr { + Standard(StandardHeader), + Custom(T), +} + +// Used to hijack the Hash impl +#[derive(Debug, Clone, Eq, PartialEq)] +struct Custom(ByteStr); + +#[derive(Debug, Clone)] +// Invariant: If lower then buf is valid UTF-8. +struct MaybeLower<'a> { + buf: &'a [u8], + lower: bool, +} + +/// A possible error when converting a `HeaderName` from another type. +pub struct InvalidHeaderName { + _priv: (), +} + +macro_rules! standard_headers { + ( + $( + $(#[$docs:meta])* + ($konst:ident, $upcase:ident, $name_bytes:literal); + )+ + ) => { + #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] + enum StandardHeader { + $( + $konst, + )+ + } + + $( + $(#[$docs])* + pub const $upcase: HeaderName = HeaderName { + inner: Repr::Standard(StandardHeader::$konst), + }; + )+ + + impl StandardHeader { + #[inline] + fn as_str(&self) -> &'static str { + match *self { + // Safety: test_parse_standard_headers ensures these &[u8]s are &str-safe. 
+ $( + StandardHeader::$konst => unsafe { std::str::from_utf8_unchecked( $name_bytes ) }, + )+ + } + } + + const fn from_bytes(name_bytes: &[u8]) -> Option { + match name_bytes { + $( + $name_bytes => Some(StandardHeader::$konst), + )+ + _ => None, + } + } + } + + #[cfg(test)] + const TEST_HEADERS: &'static [(StandardHeader, &'static [u8])] = &[ + $( + (StandardHeader::$konst, $name_bytes), + )+ + ]; + + #[test] + fn test_parse_standard_headers() { + for &(std, name_bytes) in TEST_HEADERS { + // Test lower case + assert_eq!(HeaderName::from_bytes(name_bytes).unwrap(), HeaderName::from(std)); + + // Test upper case + let upper = std::str::from_utf8(name_bytes).expect("byte string constants are all utf-8").to_uppercase(); + assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), HeaderName::from(std)); + } + } + + #[test] + fn test_standard_headers_into_bytes() { + for &(std, name_bytes) in TEST_HEADERS { + let name = std::str::from_utf8(name_bytes).unwrap(); + let std = HeaderName::from(std); + // Test lower case + let bytes: Bytes = + HeaderName::from_bytes(name_bytes).unwrap().inner.into(); + assert_eq!(bytes, name); + assert_eq!(HeaderName::from_bytes(name_bytes).unwrap(), std); + + // Test upper case + let upper = name.to_uppercase(); + let bytes: Bytes = + HeaderName::from_bytes(upper.as_bytes()).unwrap().inner.into(); + assert_eq!(bytes, name_bytes); + assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), + std); + } + + } + } +} + +// Generate constants for all standard HTTP headers. This includes a static hash +// code for the "fast hash" path. The hash code for static headers *do not* have +// to match the text representation of those headers. This is because header +// strings are always converted to the static values (when they match) before +// being hashed. This means that it is impossible to compare the static hash +// code of CONTENT_LENGTH with "content-length". +standard_headers! 
{ + /// Advertises which content types the client is able to understand. + /// + /// The Accept request HTTP header advertises which content types, expressed + /// as MIME types, the client is able to understand. Using content + /// negotiation, the server then selects one of the proposals, uses it and + /// informs the client of its choice with the Content-Type response header. + /// Browsers set adequate values for this header depending of the context + /// where the request is done: when fetching a CSS stylesheet a different + /// value is set for the request than when fetching an image, video or a + /// script. + (Accept, ACCEPT, b"accept"); + + /// Advertises which character set the client is able to understand. + /// + /// The Accept-Charset request HTTP header advertises which character set + /// the client is able to understand. Using content negotiation, the server + /// then selects one of the proposals, uses it and informs the client of its + /// choice within the Content-Type response header. Browsers usually don't + /// set this header as the default value for each content type is usually + /// correct and transmitting it would allow easier fingerprinting. + /// + /// If the server cannot serve any matching character set, it can + /// theoretically send back a 406 (Not Acceptable) error code. But, for a + /// better user experience, this is rarely done and the more common way is + /// to ignore the Accept-Charset header in this case. + (AcceptCharset, ACCEPT_CHARSET, b"accept-charset"); + + /// Advertises which content encoding the client is able to understand. + /// + /// The Accept-Encoding request HTTP header advertises which content + /// encoding, usually a compression algorithm, the client is able to + /// understand. Using content negotiation, the server selects one of the + /// proposals, uses it and informs the client of its choice with the + /// Content-Encoding response header. 
+ /// + /// Even if both the client and the server supports the same compression + /// algorithms, the server may choose not to compress the body of a + /// response, if the identity value is also acceptable. Two common cases + /// lead to this: + /// + /// * The data to be sent is already compressed and a second compression + /// won't lead to smaller data to be transmitted. This may the case with + /// some image formats; + /// + /// * The server is overloaded and cannot afford the computational overhead + /// induced by the compression requirement. Typically, Microsoft recommends + /// not to compress if a server use more than 80 % of its computational + /// power. + /// + /// As long as the identity value, meaning no encryption, is not explicitly + /// forbidden, by an identity;q=0 or a *;q=0 without another explicitly set + /// value for identity, the server must never send back a 406 Not Acceptable + /// error. + (AcceptEncoding, ACCEPT_ENCODING, b"accept-encoding"); + + /// Advertises which languages the client is able to understand. + /// + /// The Accept-Language request HTTP header advertises which languages the + /// client is able to understand, and which locale variant is preferred. + /// Using content negotiation, the server then selects one of the proposals, + /// uses it and informs the client of its choice with the Content-Language + /// response header. Browsers set adequate values for this header according + /// their user interface language and even if a user can change it, this + /// happens rarely (and is frown upon as it leads to fingerprinting). + /// + /// This header is a hint to be used when the server has no way of + /// determining the language via another way, like a specific URL, that is + /// controlled by an explicit user decision. It is recommended that the + /// server never overrides an explicit decision. 
The content of the + /// Accept-Language is often out of the control of the user (like when + /// traveling and using an Internet Cafe in a different country); the user + /// may also want to visit a page in another language than the locale of + /// their user interface. + /// + /// If the server cannot serve any matching language, it can theoretically + /// send back a 406 (Not Acceptable) error code. But, for a better user + /// experience, this is rarely done and more common way is to ignore the + /// Accept-Language header in this case. + (AcceptLanguage, ACCEPT_LANGUAGE, b"accept-language"); + + /// Marker used by the server to advertise partial request support. + /// + /// The Accept-Ranges response HTTP header is a marker used by the server to + /// advertise its support of partial requests. The value of this field + /// indicates the unit that can be used to define a range. + /// + /// In presence of an Accept-Ranges header, the browser may try to resume an + /// interrupted download, rather than to start it from the start again. + (AcceptRanges, ACCEPT_RANGES, b"accept-ranges"); + + /// Preflight response indicating if the response to the request can be + /// exposed to the page. + /// + /// The Access-Control-Allow-Credentials response header indicates whether + /// or not the response to the request can be exposed to the page. It can be + /// exposed when the true value is returned; it can't in other cases. + /// + /// Credentials are cookies, authorization headers or TLS client + /// certificates. + /// + /// When used as part of a response to a preflight request, this indicates + /// whether or not the actual request can be made using credentials. Note + /// that simple GET requests are not preflighted, and so if a request is + /// made for a resource with credentials, if this header is not returned + /// with the resource, the response is ignored by the browser and not + /// returned to web content. 
+ /// + /// The Access-Control-Allow-Credentials header works in conjunction with + /// the XMLHttpRequest.withCredentials property or with the credentials + /// option in the Request() constructor of the Fetch API. Credentials must + /// be set on both sides (the Access-Control-Allow-Credentials header and in + /// the XHR or Fetch request) in order for the CORS request with credentials + /// to succeed. + (AccessControlAllowCredentials, ACCESS_CONTROL_ALLOW_CREDENTIALS, b"access-control-allow-credentials"); + + /// Preflight response indicating permitted HTTP headers. + /// + /// The Access-Control-Allow-Headers response header is used in response to + /// a preflight request to indicate which HTTP headers will be available via + /// Access-Control-Expose-Headers when making the actual request. + /// + /// The simple headers, Accept, Accept-Language, Content-Language, + /// Content-Type (but only with a MIME type of its parsed value (ignoring + /// parameters) of either application/x-www-form-urlencoded, + /// multipart/form-data, or text/plain), are always available and don't need + /// to be listed by this header. + /// + /// This header is required if the request has an + /// Access-Control-Request-Headers header. + (AccessControlAllowHeaders, ACCESS_CONTROL_ALLOW_HEADERS, b"access-control-allow-headers"); + + /// Preflight header response indicating permitted access methods. + /// + /// The Access-Control-Allow-Methods response header specifies the method or + /// methods allowed when accessing the resource in response to a preflight + /// request. + (AccessControlAllowMethods, ACCESS_CONTROL_ALLOW_METHODS, b"access-control-allow-methods"); + + /// Indicates whether the response can be shared with resources with the + /// given origin. + (AccessControlAllowOrigin, ACCESS_CONTROL_ALLOW_ORIGIN, b"access-control-allow-origin"); + + /// Indicates which headers can be exposed as part of the response by + /// listing their names. 
+ (AccessControlExposeHeaders, ACCESS_CONTROL_EXPOSE_HEADERS, b"access-control-expose-headers"); + + /// Indicates how long the results of a preflight request can be cached. + (AccessControlMaxAge, ACCESS_CONTROL_MAX_AGE, b"access-control-max-age"); + + /// Informs the server which HTTP headers will be used when an actual + /// request is made. + (AccessControlRequestHeaders, ACCESS_CONTROL_REQUEST_HEADERS, b"access-control-request-headers"); + + /// Informs the server know which HTTP method will be used when the actual + /// request is made. + (AccessControlRequestMethod, ACCESS_CONTROL_REQUEST_METHOD, b"access-control-request-method"); + + /// Indicates the time in seconds the object has been in a proxy cache. + /// + /// The Age header is usually close to zero. If it is Age: 0, it was + /// probably just fetched from the origin server; otherwise It is usually + /// calculated as a difference between the proxy's current date and the Date + /// general header included in the HTTP response. + (Age, AGE, b"age"); + + /// Lists the set of methods support by a resource. + /// + /// This header must be sent if the server responds with a 405 Method Not + /// Allowed status code to indicate which request methods can be used. An + /// empty Allow header indicates that the resource allows no request + /// methods, which might occur temporarily for a given resource, for + /// example. + (Allow, ALLOW, b"allow"); + + /// Advertises the availability of alternate services to clients. + (AltSvc, ALT_SVC, b"alt-svc"); + + /// Contains the credentials to authenticate a user agent with a server. + /// + /// Usually this header is included after the server has responded with a + /// 401 Unauthorized status and the WWW-Authenticate header. + (Authorization, AUTHORIZATION, b"authorization"); + + /// Specifies directives for caching mechanisms in both requests and + /// responses. 
+ /// + /// Caching directives are unidirectional, meaning that a given directive in + /// a request is not implying that the same directive is to be given in the + /// response. + (CacheControl, CACHE_CONTROL, b"cache-control"); + + /// Indicates how caches have handled a response and its corresponding request. + /// + /// See [RFC 9211](https://www.rfc-editor.org/rfc/rfc9211.html). + (CacheStatus, CACHE_STATUS, b"cache-status"); + + /// Specifies directives that allow origin servers to control the behavior of CDN caches + /// interposed between them and clients separately from other caches that might handle the + /// response. + /// + /// See [RFC 9213](https://www.rfc-editor.org/rfc/rfc9213.html). + (CdnCacheControl, CDN_CACHE_CONTROL, b"cdn-cache-control"); + + /// Controls whether or not the network connection stays open after the + /// current transaction finishes. + /// + /// If the value sent is keep-alive, the connection is persistent and not + /// closed, allowing for subsequent requests to the same server to be done. + /// + /// Except for the standard hop-by-hop headers (Keep-Alive, + /// Transfer-Encoding, TE, Connection, Trailer, Upgrade, Proxy-Authorization + /// and Proxy-Authenticate), any hop-by-hop headers used by the message must + /// be listed in the Connection header, so that the first proxy knows he has + /// to consume them and not to forward them further. Standard hop-by-hop + /// headers can be listed too (it is often the case of Keep-Alive, but this + /// is not mandatory. + (Connection, CONNECTION, b"connection"); + + /// Indicates if the content is expected to be displayed inline. + /// + /// In a regular HTTP response, the Content-Disposition response header is a + /// header indicating if the content is expected to be displayed inline in + /// the browser, that is, as a Web page or as part of a Web page, or as an + /// attachment, that is downloaded and saved locally. 
+ /// + /// In a multipart/form-data body, the HTTP Content-Disposition general + /// header is a header that can be used on the subpart of a multipart body + /// to give information about the field it applies to. The subpart is + /// delimited by the boundary defined in the Content-Type header. Used on + /// the body itself, Content-Disposition has no effect. + /// + /// The Content-Disposition header is defined in the larger context of MIME + /// messages for e-mail, but only a subset of the possible parameters apply + /// to HTTP forms and POST requests. Only the value form-data, as well as + /// the optional directive name and filename, can be used in the HTTP + /// context. + (ContentDisposition, CONTENT_DISPOSITION, b"content-disposition"); + + /// Used to compress the media-type. + /// + /// When present, its value indicates what additional content encoding has + /// been applied to the entity-body. It lets the client know, how to decode + /// in order to obtain the media-type referenced by the Content-Type header. + /// + /// It is recommended to compress data as much as possible and therefore to + /// use this field, but some types of resources, like jpeg images, are + /// already compressed. Sometimes using additional compression doesn't + /// reduce payload size and can even make the payload longer. + (ContentEncoding, CONTENT_ENCODING, b"content-encoding"); + + /// Used to describe the languages intended for the audience. + /// + /// This header allows a user to differentiate according to the users' own + /// preferred language. For example, if "Content-Language: de-DE" is set, it + /// says that the document is intended for German language speakers + /// (however, it doesn't indicate the document is written in German. For + /// example, it might be written in English as part of a language course for + /// German speakers). + /// + /// If no Content-Language is specified, the default is that the content is + /// intended for all language audiences. 
Multiple language tags are also + /// possible, as well as applying the Content-Language header to various + /// media types and not only to textual documents. + (ContentLanguage, CONTENT_LANGUAGE, b"content-language"); + + /// Indicates the size of the entity-body. + /// + /// The header value must be a decimal indicating the number of octets sent + /// to the recipient. + (ContentLength, CONTENT_LENGTH, b"content-length"); + + /// Indicates an alternate location for the returned data. + /// + /// The principal use case is to indicate the URL of the resource + /// transmitted as the result of content negotiation. + /// + /// Location and Content-Location are different: Location indicates the + /// target of a redirection (or the URL of a newly created document), while + /// Content-Location indicates the direct URL to use to access the resource, + /// without the need of further content negotiation. Location is a header + /// associated with the response, while Content-Location is associated with + /// the entity returned. + (ContentLocation, CONTENT_LOCATION, b"content-location"); + + /// Indicates where in a full body message a partial message belongs. + (ContentRange, CONTENT_RANGE, b"content-range"); + + /// Allows controlling resources the user agent is allowed to load for a + /// given page. + /// + /// With a few exceptions, policies mostly involve specifying server origins + /// and script endpoints. This helps guard against cross-site scripting + /// attacks (XSS). + (ContentSecurityPolicy, CONTENT_SECURITY_POLICY, b"content-security-policy"); + + /// Allows experimenting with policies by monitoring their effects. + /// + /// The HTTP Content-Security-Policy-Report-Only response header allows web + /// developers to experiment with policies by monitoring (but not enforcing) + /// their effects. These violation reports consist of JSON documents sent + /// via an HTTP POST request to the specified URI. 
+ (ContentSecurityPolicyReportOnly, CONTENT_SECURITY_POLICY_REPORT_ONLY, b"content-security-policy-report-only"); + + /// Used to indicate the media type of the resource. + /// + /// In responses, a Content-Type header tells the client what the content + /// type of the returned content actually is. Browsers will do MIME sniffing + /// in some cases and will not necessarily follow the value of this header; + /// to prevent this behavior, the header X-Content-Type-Options can be set + /// to nosniff. + /// + /// In requests, (such as POST or PUT), the client tells the server what + /// type of data is actually sent. + (ContentType, CONTENT_TYPE, b"content-type"); + + /// Contains stored HTTP cookies previously sent by the server with the + /// Set-Cookie header. + /// + /// The Cookie header might be omitted entirely, if the privacy setting of + /// the browser are set to block them, for example. + (Cookie, COOKIE, b"cookie"); + + /// Indicates the client's tracking preference. + /// + /// This header lets users indicate whether they would prefer privacy rather + /// than personalized content. + (Dnt, DNT, b"dnt"); + + /// Contains the date and time at which the message was originated. + (Date, DATE, b"date"); + + /// Identifier for a specific version of a resource. + /// + /// This header allows caches to be more efficient, and saves bandwidth, as + /// a web server does not need to send a full response if the content has + /// not changed. On the other side, if the content has changed, etags are + /// useful to help prevent simultaneous updates of a resource from + /// overwriting each other ("mid-air collisions"). + /// + /// If the resource at a given URL changes, a new Etag value must be + /// generated. Etags are therefore similar to fingerprints and might also be + /// used for tracking purposes by some servers. 
A comparison of them allows + /// to quickly determine whether two representations of a resource are the + /// same, but they might also be set to persist indefinitely by a tracking + /// server. + (Etag, ETAG, b"etag"); + + /// Indicates expectations that need to be fulfilled by the server in order + /// to properly handle the request. + /// + /// The only expectation defined in the specification is Expect: + /// 100-continue, to which the server shall respond with: + /// + /// * 100 if the information contained in the header is sufficient to cause + /// an immediate success, + /// + /// * 417 (Expectation Failed) if it cannot meet the expectation; or any + /// other 4xx status otherwise. + /// + /// For example, the server may reject a request if its Content-Length is + /// too large. + /// + /// No common browsers send the Expect header, but some other clients such + /// as cURL do so by default. + (Expect, EXPECT, b"expect"); + + /// Contains the date/time after which the response is considered stale. + /// + /// Invalid dates, like the value 0, represent a date in the past and mean + /// that the resource is already expired. + /// + /// If there is a Cache-Control header with the "max-age" or "s-max-age" + /// directive in the response, the Expires header is ignored. + (Expires, EXPIRES, b"expires"); + + /// Contains information from the client-facing side of proxy servers that + /// is altered or lost when a proxy is involved in the path of the request. + /// + /// The alternative and de-facto standard versions of this header are the + /// X-Forwarded-For, X-Forwarded-Host and X-Forwarded-Proto headers. + /// + /// This header is used for debugging, statistics, and generating + /// location-dependent content and by design it exposes privacy sensitive + /// information, such as the IP address of the client. Therefore the user's + /// privacy must be kept in mind when deploying this header. 
+ (Forwarded, FORWARDED, b"forwarded"); + + /// Contains an Internet email address for a human user who controls the + /// requesting user agent. + /// + /// If you are running a robotic user agent (e.g. a crawler), the From + /// header should be sent, so you can be contacted if problems occur on + /// servers, such as if the robot is sending excessive, unwanted, or invalid + /// requests. + (From, FROM, b"from"); + + /// Specifies the domain name of the server and (optionally) the TCP port + /// number on which the server is listening. + /// + /// If no port is given, the default port for the service requested (e.g., + /// "80" for an HTTP URL) is implied. + /// + /// A Host header field must be sent in all HTTP/1.1 request messages. A 400 + /// (Bad Request) status code will be sent to any HTTP/1.1 request message + /// that lacks a Host header field or contains more than one. + (Host, HOST, b"host"); + + /// Makes a request conditional based on the E-Tag. + /// + /// For GET and HEAD methods, the server will send back the requested + /// resource only if it matches one of the listed ETags. For PUT and other + /// non-safe methods, it will only upload the resource in this case. + /// + /// The comparison with the stored ETag uses the strong comparison + /// algorithm, meaning two files are considered identical byte to byte only. + /// This is weakened when the W/ prefix is used in front of the ETag. + /// + /// There are two common use cases: + /// + /// * For GET and HEAD methods, used in combination with an Range header, it + /// can guarantee that the new ranges requested comes from the same resource + /// than the previous one. If it doesn't match, then a 416 (Range Not + /// Satisfiable) response is returned. + /// + /// * For other methods, and in particular for PUT, If-Match can be used to + /// prevent the lost update problem. 
It can check if the modification of a + /// resource that the user wants to upload will not override another change + /// that has been done since the original resource was fetched. If the + /// request cannot be fulfilled, the 412 (Precondition Failed) response is + /// returned. + (IfMatch, IF_MATCH, b"if-match"); + + /// Makes a request conditional based on the modification date. + /// + /// The If-Modified-Since request HTTP header makes the request conditional: + /// the server will send back the requested resource, with a 200 status, + /// only if it has been last modified after the given date. If the request + /// has not been modified since, the response will be a 304 without any + /// body; the Last-Modified header will contain the date of last + /// modification. Unlike If-Unmodified-Since, If-Modified-Since can only be + /// used with a GET or HEAD. + /// + /// When used in combination with If-None-Match, it is ignored, unless the + /// server doesn't support If-None-Match. + /// + /// The most common use case is to update a cached entity that has no + /// associated ETag. + (IfModifiedSince, IF_MODIFIED_SINCE, b"if-modified-since"); + + /// Makes a request conditional based on the E-Tag. + /// + /// The If-None-Match HTTP request header makes the request conditional. For + /// GET and HEAD methods, the server will send back the requested resource, + /// with a 200 status, only if it doesn't have an ETag matching the given + /// ones. For other methods, the request will be processed only if the + /// eventually existing resource's ETag doesn't match any of the values + /// listed. + /// + /// When the condition fails for GET and HEAD methods, then the server must + /// return HTTP status code 304 (Not Modified). For methods that apply + /// server-side changes, the status code 412 (Precondition Failed) is used. 
+ /// Note that the server generating a 304 response MUST generate any of the + /// following header fields that would have been sent in a 200 (OK) response + /// to the same request: Cache-Control, Content-Location, Date, ETag, + /// Expires, and Vary. + /// + /// The comparison with the stored ETag uses the weak comparison algorithm, + /// meaning two files are considered identical not only if they are + /// identical byte to byte, but if the content is equivalent. For example, + /// two pages that would differ only by the date of generation in the footer + /// would be considered as identical. + /// + /// When used in combination with If-Modified-Since, it has precedence (if + /// the server supports it). + /// + /// There are two common use cases: + /// + /// * For `GET` and `HEAD` methods, to update a cached entity that has an associated ETag. + /// * For other methods, and in particular for `PUT`, `If-None-Match` used with + /// the `*` value can be used to save a file not known to exist, + /// guaranteeing that another upload didn't happen before, losing the data + /// of the previous put; this problems is the variation of the lost update + /// problem. + (IfNoneMatch, IF_NONE_MATCH, b"if-none-match"); + + /// Makes a request conditional based on range. + /// + /// The If-Range HTTP request header makes a range request conditional: if + /// the condition is fulfilled, the range request will be issued and the + /// server sends back a 206 Partial Content answer with the appropriate + /// body. If the condition is not fulfilled, the full resource is sent back, + /// with a 200 OK status. + /// + /// This header can be used either with a Last-Modified validator, or with + /// an ETag, but not with both. + /// + /// The most common use case is to resume a download, to guarantee that the + /// stored resource has not been modified since the last fragment has been + /// received. 
+ (IfRange, IF_RANGE, b"if-range"); + + /// Makes the request conditional based on the last modification date. + /// + /// The If-Unmodified-Since request HTTP header makes the request + /// conditional: the server will send back the requested resource, or accept + /// it in the case of a POST or another non-safe method, only if it has not + /// been last modified after the given date. If the request has been + /// modified after the given date, the response will be a 412 (Precondition + /// Failed) error. + /// + /// There are two common use cases: + /// + /// * In conjunction non-safe methods, like POST, it can be used to + /// implement an optimistic concurrency control, like done by some wikis: + /// editions are rejected if the stored document has been modified since the + /// original has been retrieved. + /// + /// * In conjunction with a range request with a If-Range header, it can be + /// used to ensure that the new fragment requested comes from an unmodified + /// document. + (IfUnmodifiedSince, IF_UNMODIFIED_SINCE, b"if-unmodified-since"); + + /// Content-Types that are acceptable for the response. + (LastModified, LAST_MODIFIED, b"last-modified"); + + /// Allows the server to point an interested client to another resource + /// containing metadata about the requested resource. + (Link, LINK, b"link"); + + /// Indicates the URL to redirect a page to. + /// + /// The Location response header indicates the URL to redirect a page to. It + /// only provides a meaning when served with a 3xx status response. 
+ /// + /// The HTTP method used to make the new request to fetch the page pointed + /// to by Location depends of the original method and of the kind of + /// redirection: + /// + /// * If 303 (See Also) responses always lead to the use of a GET method, + /// 307 (Temporary Redirect) and 308 (Permanent Redirect) don't change the + /// method used in the original request; + /// + /// * 301 (Permanent Redirect) and 302 (Found) doesn't change the method + /// most of the time, though older user-agents may (so you basically don't + /// know). + /// + /// All responses with one of these status codes send a Location header. + /// + /// Beside redirect response, messages with 201 (Created) status also + /// include the Location header. It indicates the URL to the newly created + /// resource. + /// + /// Location and Content-Location are different: Location indicates the + /// target of a redirection (or the URL of a newly created resource), while + /// Content-Location indicates the direct URL to use to access the resource + /// when content negotiation happened, without the need of further content + /// negotiation. Location is a header associated with the response, while + /// Content-Location is associated with the entity returned. + (Location, LOCATION, b"location"); + + /// Indicates the max number of intermediaries the request should be sent + /// through. + (MaxForwards, MAX_FORWARDS, b"max-forwards"); + + /// Indicates where a fetch originates from. + /// + /// It doesn't include any path information, but only the server name. It is + /// sent with CORS requests, as well as with POST requests. It is similar to + /// the Referer header, but, unlike this header, it doesn't disclose the + /// whole path. + (Origin, ORIGIN, b"origin"); + + /// HTTP/1.0 header usually used for backwards compatibility. + /// + /// The Pragma HTTP/1.0 general header is an implementation-specific header + /// that may have various effects along the request-response chain. 
It is + /// used for backwards compatibility with HTTP/1.0 caches where the + /// Cache-Control HTTP/1.1 header is not yet present. + (Pragma, PRAGMA, b"pragma"); + + /// Defines the authentication method that should be used to gain access to + /// a proxy. + /// + /// Unlike `www-authenticate`, the `proxy-authenticate` header field applies + /// only to the next outbound client on the response chain. This is because + /// only the client that chose a given proxy is likely to have the + /// credentials necessary for authentication. However, when multiple proxies + /// are used within the same administrative domain, such as office and + /// regional caching proxies within a large corporate network, it is common + /// for credentials to be generated by the user agent and passed through the + /// hierarchy until consumed. Hence, in such a configuration, it will appear + /// as if Proxy-Authenticate is being forwarded because each proxy will send + /// the same challenge set. + /// + /// The `proxy-authenticate` header is sent along with a `407 Proxy + /// Authentication Required`. + (ProxyAuthenticate, PROXY_AUTHENTICATE, b"proxy-authenticate"); + + /// Contains the credentials to authenticate a user agent to a proxy server. + /// + /// This header is usually included after the server has responded with a + /// 407 Proxy Authentication Required status and the Proxy-Authenticate + /// header. + (ProxyAuthorization, PROXY_AUTHORIZATION, b"proxy-authorization"); + + /// Associates a specific cryptographic public key with a certain server. + /// + /// This decreases the risk of MITM attacks with forged certificates. If one + /// or several keys are pinned and none of them are used by the server, the + /// browser will not accept the response as legitimate, and will not display + /// it. + (PublicKeyPins, PUBLIC_KEY_PINS, b"public-key-pins"); + + /// Sends reports of pinning violation to the report-uri specified in the + /// header. 
+ /// + /// Unlike `Public-Key-Pins`, this header still allows browsers to connect + /// to the server if the pinning is violated. + (PublicKeyPinsReportOnly, PUBLIC_KEY_PINS_REPORT_ONLY, b"public-key-pins-report-only"); + + /// Indicates the part of a document that the server should return. + /// + /// Several parts can be requested with one Range header at once, and the + /// server may send back these ranges in a multipart document. If the server + /// sends back ranges, it uses the 206 Partial Content for the response. If + /// the ranges are invalid, the server returns the 416 Range Not Satisfiable + /// error. The server can also ignore the Range header and return the whole + /// document with a 200 status code. + (Range, RANGE, b"range"); + + /// Contains the address of the previous web page from which a link to the + /// currently requested page was followed. + /// + /// The Referer header allows servers to identify where people are visiting + /// them from and may use that data for analytics, logging, or optimized + /// caching, for example. + (Referer, REFERER, b"referer"); + + /// Governs which referrer information should be included with requests + /// made. + (ReferrerPolicy, REFERRER_POLICY, b"referrer-policy"); + + /// Informs the web browser that the current page or frame should be + /// refreshed. + (Refresh, REFRESH, b"refresh"); + + /// The Retry-After response HTTP header indicates how long the user agent + /// should wait before making a follow-up request. There are two main cases + /// this header is used: + /// + /// * When sent with a 503 (Service Unavailable) response, it indicates how + /// long the service is expected to be unavailable. + /// + /// * When sent with a redirect response, such as 301 (Moved Permanently), + /// it indicates the minimum time that the user agent is asked to wait + /// before issuing the redirected request. 
+ (RetryAfter, RETRY_AFTER, b"retry-after"); + + /// The |Sec-WebSocket-Accept| header field is used in the WebSocket + /// opening handshake. It is sent from the server to the client to + /// confirm that the server is willing to initiate the WebSocket + /// connection. + (SecWebSocketAccept, SEC_WEBSOCKET_ACCEPT, b"sec-websocket-accept"); + + /// The |Sec-WebSocket-Extensions| header field is used in the WebSocket + /// opening handshake. It is initially sent from the client to the + /// server, and then subsequently sent from the server to the client, to + /// agree on a set of protocol-level extensions to use for the duration + /// of the connection. + (SecWebSocketExtensions, SEC_WEBSOCKET_EXTENSIONS, b"sec-websocket-extensions"); + + /// The |Sec-WebSocket-Key| header field is used in the WebSocket opening + /// handshake. It is sent from the client to the server to provide part + /// of the information used by the server to prove that it received a + /// valid WebSocket opening handshake. This helps ensure that the server + /// does not accept connections from non-WebSocket clients (e.g., HTTP + /// clients) that are being abused to send data to unsuspecting WebSocket + /// servers. + (SecWebSocketKey, SEC_WEBSOCKET_KEY, b"sec-websocket-key"); + + /// The |Sec-WebSocket-Protocol| header field is used in the WebSocket + /// opening handshake. It is sent from the client to the server and back + /// from the server to the client to confirm the subprotocol of the + /// connection. This enables scripts to both select a subprotocol and be + /// sure that the server agreed to serve that subprotocol. + (SecWebSocketProtocol, SEC_WEBSOCKET_PROTOCOL, b"sec-websocket-protocol"); + + /// The |Sec-WebSocket-Version| header field is used in the WebSocket + /// opening handshake. It is sent from the client to the server to + /// indicate the protocol version of the connection. 
This enables + /// servers to correctly interpret the opening handshake and subsequent + /// data being sent from the data, and close the connection if the server + /// cannot interpret that data in a safe manner. + (SecWebSocketVersion, SEC_WEBSOCKET_VERSION, b"sec-websocket-version"); + + /// Contains information about the software used by the origin server to + /// handle the request. + /// + /// Overly long and detailed Server values should be avoided as they + /// potentially reveal internal implementation details that might make it + /// (slightly) easier for attackers to find and exploit known security + /// holes. + (Server, SERVER, b"server"); + + /// Used to send cookies from the server to the user agent. + (SetCookie, SET_COOKIE, b"set-cookie"); + + /// Tells the client to communicate with HTTPS instead of using HTTP. + (StrictTransportSecurity, STRICT_TRANSPORT_SECURITY, b"strict-transport-security"); + + /// Informs the server of transfer encodings willing to be accepted as part + /// of the response. + /// + /// See also the Transfer-Encoding response header for more details on + /// transfer encodings. Note that chunked is always acceptable for HTTP/1.1 + /// recipients and you that don't have to specify "chunked" using the TE + /// header. However, it is useful for setting if the client is accepting + /// trailer fields in a chunked transfer coding using the "trailers" value. + (Te, TE, b"te"); + + /// Allows the sender to include additional fields at the end of chunked + /// messages. + (Trailer, TRAILER, b"trailer"); + + /// Specifies the form of encoding used to safely transfer the entity to the + /// client. + /// + /// `transfer-encoding` is a hop-by-hop header, that is applying to a + /// message between two nodes, not to a resource itself. Each segment of a + /// multi-node connection can use different `transfer-encoding` values. 
If + /// you want to compress data over the whole connection, use the end-to-end + /// header `content-encoding` header instead. + /// + /// When present on a response to a `HEAD` request that has no body, it + /// indicates the value that would have applied to the corresponding `GET` + /// message. + (TransferEncoding, TRANSFER_ENCODING, b"transfer-encoding"); + + /// Contains a string that allows identifying the requesting client's + /// software. + (UserAgent, USER_AGENT, b"user-agent"); + + /// Used as part of the exchange to upgrade the protocol. + (Upgrade, UPGRADE, b"upgrade"); + + /// Sends a signal to the server expressing the client’s preference for an + /// encrypted and authenticated response. + (UpgradeInsecureRequests, UPGRADE_INSECURE_REQUESTS, b"upgrade-insecure-requests"); + + /// Determines how to match future requests with cached responses. + /// + /// The `vary` HTTP response header determines how to match future request + /// headers to decide whether a cached response can be used rather than + /// requesting a fresh one from the origin server. It is used by the server + /// to indicate which headers it used when selecting a representation of a + /// resource in a content negotiation algorithm. + /// + /// The `vary` header should be set on a 304 Not Modified response exactly + /// like it would have been set on an equivalent 200 OK response. + (Vary, VARY, b"vary"); + + /// Added by proxies to track routing. + /// + /// The `via` general header is added by proxies, both forward and reverse + /// proxies, and can appear in the request headers and the response headers. + /// It is used for tracking message forwards, avoiding request loops, and + /// identifying the protocol capabilities of senders along the + /// request/response chain. + (Via, VIA, b"via"); + + /// General HTTP header contains information about possible problems with + /// the status of the message. + /// + /// More than one `warning` header may appear in a response. 
Warning header + /// fields can in general be applied to any message, however some warn-codes + /// are specific to caches and can only be applied to response messages. + (Warning, WARNING, b"warning"); + + /// Defines the authentication method that should be used to gain access to + /// a resource. + (WwwAuthenticate, WWW_AUTHENTICATE, b"www-authenticate"); + + /// Marker used by the server to indicate that the MIME types advertised in + /// the `content-type` headers should not be changed and be followed. + /// + /// This allows to opt-out of MIME type sniffing, or, in other words, it is + /// a way to say that the webmasters knew what they were doing. + /// + /// This header was introduced by Microsoft in IE 8 as a way for webmasters + /// to block content sniffing that was happening and could transform + /// non-executable MIME types into executable MIME types. Since then, other + /// browsers have introduced it, even if their MIME sniffing algorithms were + /// less aggressive. + /// + /// Site security testers usually expect this header to be set. + (XContentTypeOptions, X_CONTENT_TYPE_OPTIONS, b"x-content-type-options"); + + /// Controls DNS prefetching. + /// + /// The `x-dns-prefetch-control` HTTP response header controls DNS + /// prefetching, a feature by which browsers proactively perform domain name + /// resolution on both links that the user may choose to follow as well as + /// URLs for items referenced by the document, including images, CSS, + /// JavaScript, and so forth. + /// + /// This prefetching is performed in the background, so that the DNS is + /// likely to have been resolved by the time the referenced items are + /// needed. This reduces latency when the user clicks a link. + (XDnsPrefetchControl, X_DNS_PREFETCH_CONTROL, b"x-dns-prefetch-control"); + + /// Indicates whether or not a browser should be allowed to render a page in + /// a frame. 
+ /// + /// Sites can use this to avoid clickjacking attacks, by ensuring that their + /// content is not embedded into other sites. + /// + /// The added security is only provided if the user accessing the document + /// is using a browser supporting `x-frame-options`. + (XFrameOptions, X_FRAME_OPTIONS, b"x-frame-options"); + + /// Stop pages from loading when an XSS attack is detected. + /// + /// The HTTP X-XSS-Protection response header is a feature of Internet + /// Explorer, Chrome and Safari that stops pages from loading when they + /// detect reflected cross-site scripting (XSS) attacks. Although these + /// protections are largely unnecessary in modern browsers when sites + /// implement a strong Content-Security-Policy that disables the use of + /// inline JavaScript ('unsafe-inline'), they can still provide protections + /// for users of older web browsers that don't yet support CSP. + (XXssProtection, X_XSS_PROTECTION, b"x-xss-protection"); +} + +/// Valid header name characters +/// +/// ```not_rust +/// field-name = token +/// separators = "(" | ")" | "<" | ">" | "@" +/// | "," | ";" | ":" | "\" | <"> +/// | "/" | "[" | "]" | "?" | "=" +/// | "{" | "}" | SP | HT +/// token = 1*tchar +/// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +/// / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +/// / DIGIT / ALPHA +/// ; any VCHAR, except delimiters +/// ``` +// HEADER_CHARS maps every byte that is 128 or larger to 0 so everything that is +// mapped by HEADER_CHARS, maps to a valid single-byte UTF-8 codepoint. 
// Lookup table indexed by raw input byte. A zero entry marks a byte that is
// not valid in an HTTP/1.x header name; a non-zero entry is the normalized
// (ASCII-lowercased) byte to use instead, so the table both validates and
// case-folds in a single indexed read.
const HEADER_CHARS: [u8; 256] = [
    //  0      1      2      3      4      5      6      7      8      9
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, //   x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, //  1x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, //  2x
        0,     0,     0,  b'!',  b'"',  b'#',  b'$',  b'%',  b'&', b'\'', //  3x
        0,     0,  b'*',  b'+',     0,  b'-',  b'.',     0,  b'0',  b'1', //  4x
     b'2',  b'3',  b'4',  b'5',  b'6',  b'7',  b'8',  b'9',     0,     0, //  5x
        0,     0,     0,     0,     0,  b'a',  b'b',  b'c',  b'd',  b'e', //  6x
     b'f',  b'g',  b'h',  b'i',  b'j',  b'k',  b'l',  b'm',  b'n',  b'o', //  7x
     b'p',  b'q',  b'r',  b's',  b't',  b'u',  b'v',  b'w',  b'x',  b'y', //  8x
     b'z',     0,     0,     0,  b'^',  b'_',  b'`',  b'a',  b'b',  b'c', //  9x
     b'd',  b'e',  b'f',  b'g',  b'h',  b'i',  b'j',  b'k',  b'l',  b'm', // 10x
     b'n',  b'o',  b'p',  b'q',  b'r',  b's',  b't',  b'u',  b'v',  b'w', // 11x
     b'x',  b'y',  b'z',     0,  b'|',     0,  b'~',     0,     0,     0, // 12x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 13x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 14x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 15x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 16x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 17x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 18x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 19x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 20x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 21x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 22x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 23x
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0, // 24x
        0,     0,     0,     0,     0,     0                              // 25x
];

/// Valid header name characters for HTTP/2.0 and HTTP/3.0
// HEADER_CHARS_H2 maps every byte that is 128 or larger to 0 so everything that is
// mapped by HEADER_CHARS_H2, maps to a valid single-byte UTF-8 codepoint.
+const HEADER_CHARS_H2: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, b'!', b'"', b'#', b'$', b'%', b'&', b'\'', // 3x + 0, 0, b'*', b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', 0, 0, // 5x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 6x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 7x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8x + 0, 0, 0, 0, b'^', b'_', b'`', b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', 0, b'|', 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +fn parse_hdr<'a>( + data: &'a [u8], + b: &'a mut [MaybeUninit; SCRATCH_BUF_SIZE], + table: &[u8; 256], +) -> Result, InvalidHeaderName> { + match data.len() { + 0 => Err(InvalidHeaderName::new()), + len @ 1..=SCRATCH_BUF_SIZE => { + // Read from data into the buffer - transforming using `table` as we go + data.iter() + .zip(b.iter_mut()) + .for_each(|(index, out)| *out = MaybeUninit::new(table[*index as usize])); + // Safety: len bytes of b were just initialized. 
+ let name: &'a [u8] = unsafe { slice_assume_init(&b[0..len]) }; + match StandardHeader::from_bytes(name) { + Some(sh) => Ok(sh.into()), + None => { + if name.contains(&0) { + Err(InvalidHeaderName::new()) + } else { + Ok(HdrName::custom(name, true)) + } + } + } + } + SCRATCH_BUF_OVERFLOW..=super::MAX_HEADER_NAME_LEN => Ok(HdrName::custom(data, false)), + _ => Err(InvalidHeaderName::new()), + } +} + + + +impl<'a> From for HdrName<'a> { + fn from(hdr: StandardHeader) -> HdrName<'a> { + HdrName { inner: Repr::Standard(hdr) } + } +} + +impl HeaderName { + /// Converts a slice of bytes to an HTTP header name. + /// + /// This function normalizes the input. + pub fn from_bytes(src: &[u8]) -> Result { + let mut buf = uninit_u8_array(); + // Precondition: HEADER_CHARS is a valid table for parse_hdr(). + match parse_hdr(src, &mut buf, &HEADER_CHARS)?.inner { + Repr::Standard(std) => Ok(std.into()), + Repr::Custom(MaybeLower { buf, lower: true }) => { + let buf = Bytes::copy_from_slice(buf); + // Safety: the invariant on MaybeLower ensures buf is valid UTF-8. + let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; + Ok(Custom(val).into()) + } + Repr::Custom(MaybeLower { buf, lower: false }) => { + use bytes::{BufMut}; + let mut dst = BytesMut::with_capacity(buf.len()); + + for b in buf.iter() { + // HEADER_CHARS maps all bytes to valid single-byte UTF-8 + let b = HEADER_CHARS[*b as usize]; + + if b == 0 { + return Err(InvalidHeaderName::new()); + } + + dst.put_u8(b); + } + + // Safety: the loop above maps all bytes in buf to valid single byte + // UTF-8 before copying them into dst. This means that dst (and hence + // dst.freeze()) is valid UTF-8. + let val = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) }; + + Ok(Custom(val).into()) + } + } + } + + /// Converts a slice of bytes to an HTTP header name. + /// + /// This function expects the input to only contain lowercase characters. + /// This is useful when decoding HTTP/2.0 or HTTP/3.0 headers. 
Both + /// require that all headers be represented in lower case. + /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// + /// // Parsing a lower case header + /// let hdr = HeaderName::from_lowercase(b"content-length").unwrap(); + /// assert_eq!(CONTENT_LENGTH, hdr); + /// + /// // Parsing a header that contains uppercase characters + /// assert!(HeaderName::from_lowercase(b"Content-Length").is_err()); + /// ``` + pub fn from_lowercase(src: &[u8]) -> Result { + let mut buf = uninit_u8_array(); + // Precondition: HEADER_CHARS_H2 is a valid table for parse_hdr() + match parse_hdr(src, &mut buf, &HEADER_CHARS_H2)?.inner { + Repr::Standard(std) => Ok(std.into()), + Repr::Custom(MaybeLower { buf, lower: true }) => { + let buf = Bytes::copy_from_slice(buf); + // Safety: the invariant on MaybeLower ensures buf is valid UTF-8. + let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; + Ok(Custom(val).into()) + } + Repr::Custom(MaybeLower { buf, lower: false }) => { + for &b in buf.iter() { + // HEADER_CHARS_H2 maps all bytes that are not valid single-byte + // UTF-8 to 0 so this check returns an error for invalid UTF-8. + if HEADER_CHARS_H2[b as usize] == 0 { + return Err(InvalidHeaderName::new()); + } + } + + let buf = Bytes::copy_from_slice(buf); + // Safety: the loop above checks that each byte of buf (either + // version) is valid UTF-8. + let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; + Ok(Custom(val).into()) + } + } + } + + /// Converts a static string to a HTTP header name. + /// + /// This function requires the static string to only contain lowercase + /// characters, numerals and symbols, as per the HTTP/2.0 specification + /// and header names internal representation within this library. + /// + /// # Panics + /// + /// This function panics when the static string is a invalid header. 
+ /// + /// Until [Allow panicking in constants](https://github.com/rust-lang/rfcs/pull/2345) + /// makes its way into stable, the panic message at compile-time is + /// going to look cryptic, but should at least point at your header value: + /// + /// ```text + /// error: any use of this value will cause an error + /// --> http/src/header/name.rs:1241:13 + /// | + /// 1241 | ([] as [u8; 0])[0]; // Invalid header name + /// | ^^^^^^^^^^^^^^^^^^ + /// | | + /// | index out of bounds: the length is 0 but the index is 0 + /// | inside `http::HeaderName::from_static` at http/src/header/name.rs:1241:13 + /// | inside `INVALID_NAME` at src/main.rs:3:34 + /// | + /// ::: src/main.rs:3:1 + /// | + /// 3 | const INVALID_NAME: HeaderName = HeaderName::from_static("Capitalized"); + /// | ------------------------------------------------------------------------ + /// ``` + /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// // Parsing a standard header + /// let hdr = HeaderName::from_static("content-length"); + /// assert_eq!(CONTENT_LENGTH, hdr); + /// + /// // Parsing a custom header + /// let CUSTOM_HEADER: &'static str = "custom-header"; + /// + /// let a = HeaderName::from_lowercase(b"custom-header").unwrap(); + /// let b = HeaderName::from_static(CUSTOM_HEADER); + /// assert_eq!(a, b); + /// ``` + /// + /// ```should_panic + /// # use http::header::*; + /// # + /// // Parsing a header that contains invalid symbols(s): + /// HeaderName::from_static("content{}{}length"); // This line panics! + /// + /// // Parsing a header that contains invalid uppercase characters. + /// let a = HeaderName::from_static("foobar"); + /// let b = HeaderName::from_static("FOOBAR"); // This line panics! 
+ /// ``` + #[allow(unconditional_panic)] // required for the panic circumvention + pub const fn from_static(src: &'static str) -> HeaderName { + let name_bytes = src.as_bytes(); + if let Some(standard) = StandardHeader::from_bytes(name_bytes) { + return HeaderName{ + inner: Repr::Standard(standard), + }; + } + + if name_bytes.len() == 0 || name_bytes.len() > super::MAX_HEADER_NAME_LEN || { + let mut i = 0; + loop { + if i >= name_bytes.len() { + break false; + } else if HEADER_CHARS_H2[name_bytes[i] as usize] == 0 { + break true; + } + i += 1; + } + } { + ([] as [u8; 0])[0]; // Invalid header name + } + + HeaderName { + inner: Repr::Custom(Custom(ByteStr::from_static(src))) + } + } + + /// Returns a `str` representation of the header. + /// + /// The returned string will always be lower case. + #[inline] + pub fn as_str(&self) -> &str { + match self.inner { + Repr::Standard(v) => v.as_str(), + Repr::Custom(ref v) => &*v.0, + } + } + + pub(super) fn into_bytes(self) -> Bytes { + self.inner.into() + } +} + +impl FromStr for HeaderName { + type Err = InvalidHeaderName; + + fn from_str(s: &str) -> Result { + HeaderName::from_bytes(s.as_bytes()).map_err(|_| InvalidHeaderName { _priv: () }) + } +} + +impl AsRef for HeaderName { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl AsRef<[u8]> for HeaderName { + fn as_ref(&self) -> &[u8] { + self.as_str().as_bytes() + } +} + +impl Borrow for HeaderName { + fn borrow(&self) -> &str { + self.as_str() + } +} + +impl fmt::Debug for HeaderName { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(self.as_str(), fmt) + } +} + +impl fmt::Display for HeaderName { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.as_str(), fmt) + } +} + +impl InvalidHeaderName { + pub(super) fn new() -> InvalidHeaderName { + InvalidHeaderName { _priv: () } + } +} + +impl<'a> From<&'a HeaderName> for HeaderName { + fn from(src: &'a HeaderName) -> HeaderName { + src.clone() + 
} +} + +#[doc(hidden)] +impl From> for Bytes +where + T: Into, +{ + fn from(repr: Repr) -> Bytes { + match repr { + Repr::Standard(header) => Bytes::from_static(header.as_str().as_bytes()), + Repr::Custom(header) => header.into(), + } + } +} + +impl From for Bytes { + #[inline] + fn from(Custom(inner): Custom) -> Bytes { + Bytes::from(inner) + } +} + +impl<'a> TryFrom<&'a str> for HeaderName { + type Error = InvalidHeaderName; + #[inline] + fn try_from(s: &'a str) -> Result { + Self::from_bytes(s.as_bytes()) + } +} + +impl<'a> TryFrom<&'a String> for HeaderName { + type Error = InvalidHeaderName; + #[inline] + fn try_from(s: &'a String) -> Result { + Self::from_bytes(s.as_bytes()) + } +} + +impl<'a> TryFrom<&'a [u8]> for HeaderName { + type Error = InvalidHeaderName; + #[inline] + fn try_from(s: &'a [u8]) -> Result { + Self::from_bytes(s) + } +} + +impl TryFrom for HeaderName { + type Error = InvalidHeaderName; + + #[inline] + fn try_from(s: String) -> Result { + Self::from_bytes(s.as_bytes()) + } +} + +impl TryFrom> for HeaderName { + type Error = InvalidHeaderName; + + #[inline] + fn try_from(vec: Vec) -> Result { + Self::from_bytes(&vec) + } +} + +#[doc(hidden)] +impl From for HeaderName { + fn from(src: StandardHeader) -> HeaderName { + HeaderName { + inner: Repr::Standard(src), + } + } +} + +#[doc(hidden)] +impl From for HeaderName { + fn from(src: Custom) -> HeaderName { + HeaderName { + inner: Repr::Custom(src), + } + } +} + +impl<'a> PartialEq<&'a HeaderName> for HeaderName { + #[inline] + fn eq(&self, other: &&'a HeaderName) -> bool { + *self == **other + } +} + +impl<'a> PartialEq for &'a HeaderName { + #[inline] + fn eq(&self, other: &HeaderName) -> bool { + *other == *self + } +} + +impl PartialEq for HeaderName { + /// Performs a case-insensitive comparison of the string against the header + /// name + /// + /// # Examples + /// + /// ``` + /// use http::header::CONTENT_LENGTH; + /// + /// assert_eq!(CONTENT_LENGTH, "content-length"); + /// 
assert_eq!(CONTENT_LENGTH, "Content-Length"); + /// assert_ne!(CONTENT_LENGTH, "content length"); + /// ``` + #[inline] + fn eq(&self, other: &str) -> bool { + eq_ignore_ascii_case(self.as_ref(), other.as_bytes()) + } +} + +impl PartialEq for str { + /// Performs a case-insensitive comparison of the string against the header + /// name + /// + /// # Examples + /// + /// ``` + /// use http::header::CONTENT_LENGTH; + /// + /// assert_eq!(CONTENT_LENGTH, "content-length"); + /// assert_eq!(CONTENT_LENGTH, "Content-Length"); + /// assert_ne!(CONTENT_LENGTH, "content length"); + /// ``` + #[inline] + fn eq(&self, other: &HeaderName) -> bool { + *other == *self + } +} + +impl<'a> PartialEq<&'a str> for HeaderName { + /// Performs a case-insensitive comparison of the string against the header + /// name + #[inline] + fn eq(&self, other: &&'a str) -> bool { + *self == **other + } +} + +impl<'a> PartialEq for &'a str { + /// Performs a case-insensitive comparison of the string against the header + /// name + #[inline] + fn eq(&self, other: &HeaderName) -> bool { + *other == *self + } +} + +impl fmt::Debug for InvalidHeaderName { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("InvalidHeaderName") + // skip _priv noise + .finish() + } +} + +impl fmt::Display for InvalidHeaderName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid HTTP header name") + } +} + +impl Error for InvalidHeaderName {} + +// ===== HdrName ===== + +impl<'a> HdrName<'a> { + // Precondition: if lower then buf is valid UTF-8 + fn custom(buf: &'a [u8], lower: bool) -> HdrName<'a> { + HdrName { + // Invariant (on MaybeLower): follows from the precondition + inner: Repr::Custom(MaybeLower { + buf: buf, + lower: lower, + }), + } + } + + pub fn from_bytes(hdr: &[u8], f: F) -> Result + where F: FnOnce(HdrName<'_>) -> U, + { + let mut buf = uninit_u8_array(); + // Precondition: HEADER_CHARS is a valid table for parse_hdr(). 
+ let hdr = parse_hdr(hdr, &mut buf, &HEADER_CHARS)?; + Ok(f(hdr)) + } + + pub fn from_static(hdr: &'static str, f: F) -> U + where + F: FnOnce(HdrName<'_>) -> U, + { + let mut buf = uninit_u8_array(); + let hdr = + // Precondition: HEADER_CHARS is a valid table for parse_hdr(). + parse_hdr(hdr.as_bytes(), &mut buf, &HEADER_CHARS).expect("static str is invalid name"); + f(hdr) + } +} + +#[doc(hidden)] +impl<'a> From> for HeaderName { + fn from(src: HdrName<'a>) -> HeaderName { + match src.inner { + Repr::Standard(s) => HeaderName { + inner: Repr::Standard(s), + }, + Repr::Custom(maybe_lower) => { + if maybe_lower.lower { + let buf = Bytes::copy_from_slice(&maybe_lower.buf[..]); + // Safety: the invariant on MaybeLower ensures buf is valid UTF-8. + let byte_str = unsafe { ByteStr::from_utf8_unchecked(buf) }; + + HeaderName { + inner: Repr::Custom(Custom(byte_str)), + } + } else { + use bytes::BufMut; + let mut dst = BytesMut::with_capacity(maybe_lower.buf.len()); + + for b in maybe_lower.buf.iter() { + // HEADER_CHARS maps each byte to a valid single-byte UTF-8 + // codepoint. + dst.put_u8(HEADER_CHARS[*b as usize]); + } + + // Safety: the loop above maps each byte of maybe_lower.buf to a + // valid single-byte UTF-8 codepoint before copying it into dst. + // dst (and hence dst.freeze()) is thus valid UTF-8. 
+ let buf = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) }; + + HeaderName { + inner: Repr::Custom(Custom(buf)), + } + } + } + } + } +} + +#[doc(hidden)] +impl<'a> PartialEq> for HeaderName { + #[inline] + fn eq(&self, other: &HdrName<'a>) -> bool { + match self.inner { + Repr::Standard(a) => match other.inner { + Repr::Standard(b) => a == b, + _ => false, + }, + Repr::Custom(Custom(ref a)) => match other.inner { + Repr::Custom(ref b) => { + if b.lower { + a.as_bytes() == b.buf + } else { + eq_ignore_ascii_case(a.as_bytes(), b.buf) + } + } + _ => false, + }, + } + } +} + +// ===== Custom ===== + +impl Hash for Custom { + #[inline] + fn hash(&self, hasher: &mut H) { + hasher.write(self.0.as_bytes()) + } +} + +// ===== MaybeLower ===== + +impl<'a> Hash for MaybeLower<'a> { + #[inline] + fn hash(&self, hasher: &mut H) { + if self.lower { + hasher.write(self.buf); + } else { + for &b in self.buf { + hasher.write(&[HEADER_CHARS[b as usize]]); + } + } + } +} + +// Assumes that the left hand side is already lower case +#[inline] +fn eq_ignore_ascii_case(lower: &[u8], s: &[u8]) -> bool { + if lower.len() != s.len() { + return false; + } + + lower.iter().zip(s).all(|(a, b)| { + *a == HEADER_CHARS[*b as usize] + }) +} + +// Utility functions for MaybeUninit<>. These are drawn from unstable API's on +// MaybeUninit<> itself. +const SCRATCH_BUF_SIZE: usize = 64; +const SCRATCH_BUF_OVERFLOW: usize = SCRATCH_BUF_SIZE + 1; + +fn uninit_u8_array() -> [MaybeUninit; SCRATCH_BUF_SIZE] { + let arr = MaybeUninit::<[MaybeUninit; SCRATCH_BUF_SIZE]>::uninit(); + // Safety: assume_init() is claiming that an array of MaybeUninit<> + // has been initilized, but MaybeUninit<>'s do not require initilizaton. + unsafe { arr.assume_init() } +} + +// Assuming all the elements are initilized, get a slice of them. +// +// Safety: All elements of `slice` must be initilized to prevent +// undefined behavior. 
+unsafe fn slice_assume_init(slice: &[MaybeUninit]) -> &[T] { + &*(slice as *const [MaybeUninit] as *const [T]) +} + +#[cfg(test)] +mod tests { + use super::*; + use self::StandardHeader::Vary; + + #[test] + fn test_bounds() { + fn check_bounds() {} + check_bounds::(); + } + + #[test] + fn test_parse_invalid_headers() { + for i in 0..128 { + let hdr = vec![1u8; i]; + assert!(HeaderName::from_bytes(&hdr).is_err(), "{} invalid header chars did not fail", i); + } + } + + const ONE_TOO_LONG: &[u8] = &[b'a'; super::super::MAX_HEADER_NAME_LEN+1]; + + #[test] + fn test_invalid_name_lengths() { + assert!( + HeaderName::from_bytes(&[]).is_err(), + "zero-length header name is an error", + ); + + let long = &ONE_TOO_LONG[0..super::super::MAX_HEADER_NAME_LEN]; + + let long_str = std::str::from_utf8(long).unwrap(); + assert_eq!(HeaderName::from_static(long_str), long_str); // shouldn't panic! + + assert!( + HeaderName::from_bytes(long).is_ok(), + "max header name length is ok", + ); + assert!( + HeaderName::from_bytes(ONE_TOO_LONG).is_err(), + "longer than max header name length is an error", + ); + } + + #[test] + #[should_panic] + fn test_static_invalid_name_lengths() { + // Safety: ONE_TOO_LONG contains only the UTF-8 safe, single-byte codepoint b'a'. 
+ let _ = HeaderName::from_static(unsafe { std::str::from_utf8_unchecked(ONE_TOO_LONG) }); + } + + #[test] + fn test_from_hdr_name() { + use self::StandardHeader::Vary; + + let name = HeaderName::from(HdrName { + inner: Repr::Standard(Vary), + }); + + assert_eq!(name.inner, Repr::Standard(Vary)); + + let name = HeaderName::from(HdrName { + inner: Repr::Custom(MaybeLower { + buf: b"hello-world", + lower: true, + }), + }); + + assert_eq!(name.inner, Repr::Custom(Custom(ByteStr::from_static("hello-world")))); + + let name = HeaderName::from(HdrName { + inner: Repr::Custom(MaybeLower { + buf: b"Hello-World", + lower: false, + }), + }); + + assert_eq!(name.inner, Repr::Custom(Custom(ByteStr::from_static("hello-world")))); + } + + #[test] + fn test_eq_hdr_name() { + use self::StandardHeader::Vary; + + let a = HeaderName { inner: Repr::Standard(Vary) }; + let b = HdrName { inner: Repr::Standard(Vary) }; + + assert_eq!(a, b); + + let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("vaary"))) }; + assert_ne!(a, b); + + let b = HdrName { inner: Repr::Custom(MaybeLower { + buf: b"vaary", + lower: true, + })}; + + assert_eq!(a, b); + + let b = HdrName { inner: Repr::Custom(MaybeLower { + buf: b"vaary", + lower: false, + })}; + + assert_eq!(a, b); + + let b = HdrName { inner: Repr::Custom(MaybeLower { + buf: b"VAARY", + lower: false, + })}; + + assert_eq!(a, b); + + let a = HeaderName { inner: Repr::Standard(Vary) }; + assert_ne!(a, b); + } + + #[test] + fn test_from_static_std() { + let a = HeaderName { inner: Repr::Standard(Vary) }; + + let b = HeaderName::from_static("vary"); + assert_eq!(a, b); + + let b = HeaderName::from_static("vaary"); + assert_ne!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_std_uppercase() { + HeaderName::from_static("Vary"); + } + + #[test] + #[should_panic] + fn test_from_static_std_symbol() { + HeaderName::from_static("vary{}"); + } + + // MaybeLower { lower: true } + #[test] + fn test_from_static_custom_short() { 
+ let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("customheader"))) }; + let b = HeaderName::from_static("customheader"); + assert_eq!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_custom_short_uppercase() { + HeaderName::from_static("custom header"); + } + + #[test] + #[should_panic] + fn test_from_static_custom_short_symbol() { + HeaderName::from_static("CustomHeader"); + } + + // MaybeLower { lower: false } + #[test] + fn test_from_static_custom_long() { + let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static( + "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" + ))) }; + let b = HeaderName::from_static( + "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" + ); + assert_eq!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_custom_long_uppercase() { + HeaderName::from_static( + "Longer-Than-63--ThisHeaderIsLongerThanSixtyThreeCharactersAndThusHandledDifferent" + ); + } + + #[test] + #[should_panic] + fn test_from_static_custom_long_symbol() { + HeaderName::from_static( + "longer-than-63--thisheader{}{}{}{}islongerthansixtythreecharactersandthushandleddifferent" + ); + } + + #[test] + fn test_from_static_custom_single_char() { + let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("a"))) }; + let b = HeaderName::from_static("a"); + assert_eq!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_empty() { + HeaderName::from_static(""); + } + + #[test] + fn test_all_tokens() { + HeaderName::from_static("!#$%&'*+-.^_`|~0123456789abcdefghijklmnopqrstuvwxyz"); + } + + #[test] + fn test_from_lowercase() { + HeaderName::from_lowercase(&[0; 10]).unwrap_err(); + HeaderName::from_lowercase(&[b'A'; 10]).unwrap_err(); + HeaderName::from_lowercase(&[0x1; 10]).unwrap_err(); + HeaderName::from_lowercase(&[0xFF; 10]).unwrap_err(); + //HeaderName::from_lowercase(&[0; 100]).unwrap_err(); + HeaderName::from_lowercase(&[b'A'; 
100]).unwrap_err(); + HeaderName::from_lowercase(&[0x1; 100]).unwrap_err(); + HeaderName::from_lowercase(&[0xFF; 100]).unwrap_err(); + } +} diff --git a/.cargo-vendor/http-0.2.12/src/header/value.rs b/.cargo-vendor/http-0.2.12/src/header/value.rs new file mode 100644 index 0000000000..bf05f16f4e --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/header/value.rs @@ -0,0 +1,795 @@ +use bytes::{Bytes, BytesMut}; + +use std::convert::TryFrom; +use std::error::Error; +use std::fmt::Write; +use std::str::FromStr; +use std::{cmp, fmt, mem, str}; + +use crate::header::name::HeaderName; + +/// Represents an HTTP header field value. +/// +/// In practice, HTTP header field values are usually valid ASCII. However, the +/// HTTP spec allows for a header value to contain opaque bytes as well. In this +/// case, the header field value is not able to be represented as a string. +/// +/// To handle this, the `HeaderValue` is useable as a type and can be compared +/// with strings and implements `Debug`. A `to_str` fn is provided that returns +/// an `Err` if the header value contains non visible ascii characters. +#[derive(Clone, Hash)] +pub struct HeaderValue { + inner: Bytes, + is_sensitive: bool, +} + +/// A possible error when converting a `HeaderValue` from a string or byte +/// slice. +pub struct InvalidHeaderValue { + _priv: (), +} + +/// A possible error when converting a `HeaderValue` to a string representation. +/// +/// Header field values may contain opaque bytes, in which case it is not +/// possible to represent the value as a string. +#[derive(Debug)] +pub struct ToStrError { + _priv: (), +} + +impl HeaderValue { + /// Convert a static string to a `HeaderValue`. + /// + /// This function will not perform any copying, however the string is + /// checked to ensure that no invalid characters are present. Only visible + /// ASCII characters (32-127) are permitted. 
+ /// + /// # Panics + /// + /// This function panics if the argument contains invalid header value + /// characters. + /// + /// Until [Allow panicking in constants](https://github.com/rust-lang/rfcs/pull/2345) + /// makes its way into stable, the panic message at compile-time is + /// going to look cryptic, but should at least point at your header value: + /// + /// ```text + /// error: any use of this value will cause an error + /// --> http/src/header/value.rs:67:17 + /// | + /// 67 | ([] as [u8; 0])[0]; // Invalid header value + /// | ^^^^^^^^^^^^^^^^^^ + /// | | + /// | index out of bounds: the length is 0 but the index is 0 + /// | inside `HeaderValue::from_static` at http/src/header/value.rs:67:17 + /// | inside `INVALID_HEADER` at src/main.rs:73:33 + /// | + /// ::: src/main.rs:73:1 + /// | + /// 73 | const INVALID_HEADER: HeaderValue = HeaderValue::from_static("жsome value"); + /// | ---------------------------------------------------------------------------- + /// ``` + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val, "hello"); + /// ``` + #[inline] + #[allow(unconditional_panic)] // required for the panic circumvention + pub const fn from_static(src: &'static str) -> HeaderValue { + let bytes = src.as_bytes(); + let mut i = 0; + while i < bytes.len() { + if !is_visible_ascii(bytes[i]) { + ([] as [u8; 0])[0]; // Invalid header value + } + i += 1; + } + + HeaderValue { + inner: Bytes::from_static(bytes), + is_sensitive: false, + } + } + + /// Attempt to convert a string to a `HeaderValue`. + /// + /// If the argument contains invalid header value characters, an error is + /// returned. Only visible ASCII characters (32-127) are permitted. Use + /// `from_bytes` to create a `HeaderValue` that includes opaque octets + /// (128-255). 
+ /// + /// This function is intended to be replaced in the future by a `TryFrom` + /// implementation once the trait is stabilized in std. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_str("hello").unwrap(); + /// assert_eq!(val, "hello"); + /// ``` + /// + /// An invalid value + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_str("\n"); + /// assert!(val.is_err()); + /// ``` + #[inline] + pub fn from_str(src: &str) -> Result { + HeaderValue::try_from_generic(src, |s| Bytes::copy_from_slice(s.as_bytes())) + } + + /// Converts a HeaderName into a HeaderValue + /// + /// Since every valid HeaderName is a valid HeaderValue this is done infallibly. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderValue, HeaderName}; + /// # use http::header::ACCEPT; + /// let val = HeaderValue::from_name(ACCEPT); + /// assert_eq!(val, HeaderValue::from_bytes(b"accept").unwrap()); + /// ``` + #[inline] + pub fn from_name(name: HeaderName) -> HeaderValue { + name.into() + } + + /// Attempt to convert a byte slice to a `HeaderValue`. + /// + /// If the argument contains invalid header value bytes, an error is + /// returned. Only byte values between 32 and 255 (inclusive) are permitted, + /// excluding byte 127 (DEL). + /// + /// This function is intended to be replaced in the future by a `TryFrom` + /// implementation once the trait is stabilized in std. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_bytes(b"hello\xfa").unwrap(); + /// assert_eq!(val, &b"hello\xfa"[..]); + /// ``` + /// + /// An invalid value + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_bytes(b"\n"); + /// assert!(val.is_err()); + /// ``` + #[inline] + pub fn from_bytes(src: &[u8]) -> Result { + HeaderValue::try_from_generic(src, Bytes::copy_from_slice) + } + + /// Attempt to convert a `Bytes` buffer to a `HeaderValue`. + /// + /// This will try to prevent a copy if the type passed is the type used + /// internally, and will copy the data if it is not. + pub fn from_maybe_shared(src: T) -> Result + where + T: AsRef<[u8]> + 'static, + { + if_downcast_into!(T, Bytes, src, { + return HeaderValue::from_shared(src); + }); + + HeaderValue::from_bytes(src.as_ref()) + } + + /// Convert a `Bytes` directly into a `HeaderValue` without validating. + /// + /// This function does NOT validate that illegal bytes are not contained + /// within the buffer. 
+ pub unsafe fn from_maybe_shared_unchecked(src: T) -> HeaderValue + where + T: AsRef<[u8]> + 'static, + { + if cfg!(debug_assertions) { + match HeaderValue::from_maybe_shared(src) { + Ok(val) => val, + Err(_err) => { + panic!("HeaderValue::from_maybe_shared_unchecked() with invalid bytes"); + } + } + } else { + + if_downcast_into!(T, Bytes, src, { + return HeaderValue { + inner: src, + is_sensitive: false, + }; + }); + + let src = Bytes::copy_from_slice(src.as_ref()); + HeaderValue { + inner: src, + is_sensitive: false, + } + } + } + + fn from_shared(src: Bytes) -> Result { + HeaderValue::try_from_generic(src, std::convert::identity) + } + + fn try_from_generic, F: FnOnce(T) -> Bytes>(src: T, into: F) -> Result { + for &b in src.as_ref() { + if !is_valid(b) { + return Err(InvalidHeaderValue { _priv: () }); + } + } + Ok(HeaderValue { + inner: into(src), + is_sensitive: false, + }) + } + + /// Yields a `&str` slice if the `HeaderValue` only contains visible ASCII + /// chars. + /// + /// This function will perform a scan of the header value, checking all the + /// characters. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val.to_str().unwrap(), "hello"); + /// ``` + pub fn to_str(&self) -> Result<&str, ToStrError> { + let bytes = self.as_ref(); + + for &b in bytes { + if !is_visible_ascii(b) { + return Err(ToStrError { _priv: () }); + } + } + + unsafe { Ok(str::from_utf8_unchecked(bytes)) } + } + + /// Returns the length of `self`. + /// + /// This length is in bytes. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val.len(), 5); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.as_ref().len() + } + + /// Returns true if the `HeaderValue` has a length of zero bytes. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static(""); + /// assert!(val.is_empty()); + /// + /// let val = HeaderValue::from_static("hello"); + /// assert!(!val.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Converts a `HeaderValue` to a byte slice. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val.as_bytes(), b"hello"); + /// ``` + #[inline] + pub fn as_bytes(&self) -> &[u8] { + self.as_ref() + } + + /// Mark that the header value represents sensitive information. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let mut val = HeaderValue::from_static("my secret"); + /// + /// val.set_sensitive(true); + /// assert!(val.is_sensitive()); + /// + /// val.set_sensitive(false); + /// assert!(!val.is_sensitive()); + /// ``` + #[inline] + pub fn set_sensitive(&mut self, val: bool) { + self.is_sensitive = val; + } + + /// Returns `true` if the value represents sensitive data. + /// + /// Sensitive data could represent passwords or other data that should not + /// be stored on disk or in memory. By marking header values as sensitive, + /// components using this crate can be instructed to treat them with special + /// care for security reasons. For example, caches can avoid storing + /// sensitive values, and HPACK encoders used by HTTP/2.0 implementations + /// can choose not to compress them. + /// + /// Additionally, sensitive values will be masked by the `Debug` + /// implementation of `HeaderValue`. + /// + /// Note that sensitivity is not factored into equality or ordering. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let mut val = HeaderValue::from_static("my secret"); + /// + /// val.set_sensitive(true); + /// assert!(val.is_sensitive()); + /// + /// val.set_sensitive(false); + /// assert!(!val.is_sensitive()); + /// ``` + #[inline] + pub fn is_sensitive(&self) -> bool { + self.is_sensitive + } +} + +impl AsRef<[u8]> for HeaderValue { + #[inline] + fn as_ref(&self) -> &[u8] { + self.inner.as_ref() + } +} + +impl fmt::Debug for HeaderValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.is_sensitive { + f.write_str("Sensitive") + } else { + f.write_str("\"")?; + let mut from = 0; + let bytes = self.as_bytes(); + for (i, &b) in bytes.iter().enumerate() { + if !is_visible_ascii(b) || b == b'"' { + if from != i { + f.write_str(unsafe { str::from_utf8_unchecked(&bytes[from..i]) })?; + } + if b == b'"' { + f.write_str("\\\"")?; + } else { + write!(f, "\\x{:x}", b)?; + } + from = i + 1; + } + } + + f.write_str(unsafe { str::from_utf8_unchecked(&bytes[from..]) })?; + f.write_str("\"") + } + } +} + +impl From for HeaderValue { + #[inline] + fn from(h: HeaderName) -> HeaderValue { + HeaderValue { + inner: h.into_bytes(), + is_sensitive: false, + } + } +} + +macro_rules! from_integers { + ($($name:ident: $t:ident => $max_len:expr),*) => {$( + impl From<$t> for HeaderValue { + fn from(num: $t) -> HeaderValue { + let mut buf = if mem::size_of::() - 1 < $max_len { + // On 32bit platforms, BytesMut max inline size + // is 15 bytes, but the $max_len could be bigger. + // + // The likelihood of the number *actually* being + // that big is very small, so only allocate + // if the number needs that space. + // + // The largest decimal number in 15 digits: + // It wold be 10.pow(15) - 1, but this is a constant + // version. + if num as u64 > 999_999_999_999_999_999 { + BytesMut::with_capacity($max_len) + } else { + // fits inline... 
+ BytesMut::new() + } + } else { + // full value fits inline, so don't allocate! + BytesMut::new() + }; + let _ = buf.write_str(::itoa::Buffer::new().format(num)); + HeaderValue { + inner: buf.freeze(), + is_sensitive: false, + } + } + } + + #[test] + fn $name() { + let n: $t = 55; + let val = HeaderValue::from(n); + assert_eq!(val, &n.to_string()); + + let n = ::std::$t::MAX; + let val = HeaderValue::from(n); + assert_eq!(val, &n.to_string()); + } + )*}; +} + +from_integers! { + // integer type => maximum decimal length + + // u8 purposely left off... HeaderValue::from(b'3') could be confusing + from_u16: u16 => 5, + from_i16: i16 => 6, + from_u32: u32 => 10, + from_i32: i32 => 11, + from_u64: u64 => 20, + from_i64: i64 => 20 +} + +#[cfg(target_pointer_width = "16")] +from_integers! { + from_usize: usize => 5, + from_isize: isize => 6 +} + +#[cfg(target_pointer_width = "32")] +from_integers! { + from_usize: usize => 10, + from_isize: isize => 11 +} + +#[cfg(target_pointer_width = "64")] +from_integers! 
{ + from_usize: usize => 20, + from_isize: isize => 20 +} + +#[cfg(test)] +mod from_header_name_tests { + use super::*; + use crate::header::map::HeaderMap; + use crate::header::name; + + #[test] + fn it_can_insert_header_name_as_header_value() { + let mut map = HeaderMap::new(); + map.insert(name::UPGRADE, name::SEC_WEBSOCKET_PROTOCOL.into()); + map.insert( + name::ACCEPT, + name::HeaderName::from_bytes(b"hello-world").unwrap().into(), + ); + + assert_eq!( + map.get(name::UPGRADE).unwrap(), + HeaderValue::from_bytes(b"sec-websocket-protocol").unwrap() + ); + + assert_eq!( + map.get(name::ACCEPT).unwrap(), + HeaderValue::from_bytes(b"hello-world").unwrap() + ); + } +} + +impl FromStr for HeaderValue { + type Err = InvalidHeaderValue; + + #[inline] + fn from_str(s: &str) -> Result { + HeaderValue::from_str(s) + } +} + +impl<'a> From<&'a HeaderValue> for HeaderValue { + #[inline] + fn from(t: &'a HeaderValue) -> Self { + t.clone() + } +} + +impl<'a> TryFrom<&'a str> for HeaderValue { + type Error = InvalidHeaderValue; + + #[inline] + fn try_from(t: &'a str) -> Result { + t.parse() + } +} + +impl<'a> TryFrom<&'a String> for HeaderValue { + type Error = InvalidHeaderValue; + #[inline] + fn try_from(s: &'a String) -> Result { + Self::from_bytes(s.as_bytes()) + } +} + +impl<'a> TryFrom<&'a [u8]> for HeaderValue { + type Error = InvalidHeaderValue; + + #[inline] + fn try_from(t: &'a [u8]) -> Result { + HeaderValue::from_bytes(t) + } +} + +impl TryFrom for HeaderValue { + type Error = InvalidHeaderValue; + + #[inline] + fn try_from(t: String) -> Result { + HeaderValue::from_shared(t.into()) + } +} + +impl TryFrom> for HeaderValue { + type Error = InvalidHeaderValue; + + #[inline] + fn try_from(vec: Vec) -> Result { + HeaderValue::from_shared(vec.into()) + } +} + +#[cfg(test)] +mod try_from_header_name_tests { + use super::*; + use crate::header::name; + + #[test] + fn it_converts_using_try_from() { + assert_eq!( + HeaderValue::try_from(name::UPGRADE).unwrap(), + 
HeaderValue::from_bytes(b"upgrade").unwrap() + ); + } +} + +const fn is_visible_ascii(b: u8) -> bool { + b >= 32 && b < 127 || b == b'\t' +} + +#[inline] +fn is_valid(b: u8) -> bool { + b >= 32 && b != 127 || b == b'\t' +} + +impl fmt::Debug for InvalidHeaderValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("InvalidHeaderValue") + // skip _priv noise + .finish() + } +} + +impl fmt::Display for InvalidHeaderValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("failed to parse header value") + } +} + +impl Error for InvalidHeaderValue {} + +impl fmt::Display for ToStrError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("failed to convert header to a str") + } +} + +impl Error for ToStrError {} + +// ===== PartialEq / PartialOrd ===== + +impl PartialEq for HeaderValue { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + self.inner == other.inner + } +} + +impl Eq for HeaderValue {} + +impl PartialOrd for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.inner.partial_cmp(&other.inner) + } +} + +impl Ord for HeaderValue { + #[inline] + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.inner.cmp(&other.inner) + } +} + +impl PartialEq for HeaderValue { + #[inline] + fn eq(&self, other: &str) -> bool { + self.inner == other.as_bytes() + } +} + +impl PartialEq<[u8]> for HeaderValue { + #[inline] + fn eq(&self, other: &[u8]) -> bool { + self.inner == other + } +} + +impl PartialOrd for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &str) -> Option { + (*self.inner).partial_cmp(other.as_bytes()) + } +} + +impl PartialOrd<[u8]> for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &[u8]) -> Option { + (*self.inner).partial_cmp(other) + } +} + +impl PartialEq for str { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + *other == *self + } +} + +impl PartialEq for [u8] { + #[inline] + fn eq(&self, other: 
&HeaderValue) -> bool { + *other == *self + } +} + +impl PartialOrd for str { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.as_bytes().partial_cmp(other.as_bytes()) + } +} + +impl PartialOrd for [u8] { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.partial_cmp(other.as_bytes()) + } +} + +impl PartialEq for HeaderValue { + #[inline] + fn eq(&self, other: &String) -> bool { + *self == &other[..] + } +} + +impl PartialOrd for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &String) -> Option { + self.inner.partial_cmp(other.as_bytes()) + } +} + +impl PartialEq for String { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + *other == *self + } +} + +impl PartialOrd for String { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.as_bytes().partial_cmp(other.as_bytes()) + } +} + +impl<'a> PartialEq for &'a HeaderValue { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + **self == *other + } +} + +impl<'a> PartialOrd for &'a HeaderValue { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + (**self).partial_cmp(other) + } +} + +impl<'a, T: ?Sized> PartialEq<&'a T> for HeaderValue +where + HeaderValue: PartialEq, +{ + #[inline] + fn eq(&self, other: &&'a T) -> bool { + *self == **other + } +} + +impl<'a, T: ?Sized> PartialOrd<&'a T> for HeaderValue +where + HeaderValue: PartialOrd, +{ + #[inline] + fn partial_cmp(&self, other: &&'a T) -> Option { + self.partial_cmp(*other) + } +} + +impl<'a> PartialEq for &'a str { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + *other == *self + } +} + +impl<'a> PartialOrd for &'a str { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.as_bytes().partial_cmp(other.as_bytes()) + } +} + +#[test] +fn test_try_from() { + HeaderValue::try_from(vec![127]).unwrap_err(); +} + +#[test] +fn test_debug() { + let cases = &[ + ("hello", "\"hello\""), + ("hello \"world\"", "\"hello 
\\\"world\\\"\""), + ("\u{7FFF}hello", "\"\\xe7\\xbf\\xbfhello\""), + ]; + + for &(value, expected) in cases { + let val = HeaderValue::from_bytes(value.as_bytes()).unwrap(); + let actual = format!("{:?}", val); + assert_eq!(expected, actual); + } + + let mut sensitive = HeaderValue::from_static("password"); + sensitive.set_sensitive(true); + assert_eq!("Sensitive", format!("{:?}", sensitive)); +} diff --git a/.cargo-vendor/http-0.2.12/src/lib.rs b/.cargo-vendor/http-0.2.12/src/lib.rs new file mode 100644 index 0000000000..d5b3e2d0b3 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/lib.rs @@ -0,0 +1,211 @@ +#![doc(html_root_url = "https://docs.rs/http/0.2.11")] + +//! A general purpose library of common HTTP types +//! +//! This crate is a general purpose library for common types found when working +//! with the HTTP protocol. You'll find `Request` and `Response` types for +//! working as either a client or a server as well as all of their components. +//! Notably you'll find `Uri` for what a `Request` is requesting, a `Method` +//! for how it's being requested, a `StatusCode` for what sort of response came +//! back, a `Version` for how this was communicated, and +//! `HeaderName`/`HeaderValue` definitions to get grouped in a `HeaderMap` to +//! work with request/response headers. +//! +//! You will notably *not* find an implementation of sending requests or +//! spinning up a server in this crate. It's intended that this crate is the +//! "standard library" for HTTP clients and servers without dictating any +//! particular implementation. Note that this crate is still early on in its +//! lifecycle so the support libraries that integrate with the `http` crate are +//! a work in progress! Stay tuned and we'll be sure to highlight crates here +//! in the future. +//! +//! ## Requests and Responses +//! +//! Perhaps the main two types in this crate are the `Request` and `Response` +//! types. A `Request` could either be constructed to get sent off as a client +//! 
or it can also be received to generate a `Response` for a server. Similarly +//! as a client a `Response` is what you get after sending a `Request`, whereas +//! on a server you'll be manufacturing a `Response` to send back to the client. +//! +//! Each type has a number of accessors for the component fields. For as a +//! server you might want to inspect a requests URI to dispatch it: +//! +//! ``` +//! use http::{Request, Response}; +//! +//! fn response(req: Request<()>) -> http::Result> { +//! match req.uri().path() { +//! "/" => index(req), +//! "/foo" => foo(req), +//! "/bar" => bar(req), +//! _ => not_found(req), +//! } +//! } +//! # fn index(_req: Request<()>) -> http::Result> { panic!() } +//! # fn foo(_req: Request<()>) -> http::Result> { panic!() } +//! # fn bar(_req: Request<()>) -> http::Result> { panic!() } +//! # fn not_found(_req: Request<()>) -> http::Result> { panic!() } +//! ``` +//! +//! On a `Request` you'll also find accessors like `method` to return a +//! `Method` and `headers` to inspect the various headers. A `Response` +//! has similar methods for headers, the status code, etc. +//! +//! In addition to getters, request/response types also have mutable accessors +//! to edit the request/response: +//! +//! ``` +//! use http::{HeaderValue, Response, StatusCode}; +//! use http::header::CONTENT_TYPE; +//! +//! fn add_server_headers(response: &mut Response) { +//! response.headers_mut() +//! .insert(CONTENT_TYPE, HeaderValue::from_static("text/html")); +//! *response.status_mut() = StatusCode::OK; +//! } +//! ``` +//! +//! And finally, one of the most important aspects of requests/responses, the +//! body! The `Request` and `Response` types in this crate are *generic* in +//! what their body is. This allows downstream libraries to use different +//! representations such as `Request>`, `Response`, +//! `Request, Error = _>>`, or even +//! `Response` where the custom type was deserialized from JSON. +//! +//! 
The body representation is intentionally flexible to give downstream +//! libraries maximal flexibility in implementing the body as appropriate. +//! +//! ## HTTP Headers +//! +//! Another major piece of functionality in this library is HTTP header +//! interpretation and generation. The `HeaderName` type serves as a way to +//! define header *names*, or what's to the left of the colon. A `HeaderValue` +//! conversely is the header *value*, or what's to the right of a colon. +//! +//! For example, if you have an HTTP request that looks like: +//! +//! ```http +//! GET /foo HTTP/1.1 +//! Accept: text/html +//! ``` +//! +//! Then `"Accept"` is a `HeaderName` while `"text/html"` is a `HeaderValue`. +//! Each of these is a dedicated type to allow for a number of interesting +//! optimizations and to also encode the static guarantees of each type. For +//! example a `HeaderName` is always a valid `&str`, but a `HeaderValue` may +//! not be valid UTF-8. +//! +//! The most common header names are already defined for you as constant values +//! in the `header` module of this crate. For example: +//! +//! ``` +//! use http::header::{self, HeaderName}; +//! +//! let name: HeaderName = header::ACCEPT; +//! assert_eq!(name.as_str(), "accept"); +//! ``` +//! +//! You can, however, also parse header names from strings: +//! +//! ``` +//! use http::header::{self, HeaderName}; +//! +//! let name = "Accept".parse::().unwrap(); +//! assert_eq!(name, header::ACCEPT); +//! ``` +//! +//! Header values can be created from string literals through the `from_static` +//! function: +//! +//! ``` +//! use http::HeaderValue; +//! +//! let value = HeaderValue::from_static("text/html"); +//! assert_eq!(value.as_bytes(), b"text/html"); +//! ``` +//! +//! And header values can also be parsed like names: +//! +//! ``` +//! use http::HeaderValue; +//! +//! let value = "text/html"; +//! let value = value.parse::().unwrap(); +//! ``` +//! +//! 
Most HTTP requests and responses tend to come with more than one header, so +//! it's not too useful to just work with names and values only! This crate also +//! provides a `HeaderMap` type which is a specialized hash map for keys as +//! `HeaderName` and generic values. This type, like header names, is optimized +//! for common usage but should continue to scale with your needs over time. +//! +//! # URIs +//! +//! Each HTTP `Request` has an associated URI with it. This may just be a path +//! like `/index.html` but it could also be an absolute URL such as +//! `https://www.rust-lang.org/index.html`. A `URI` has a number of accessors to +//! interpret it: +//! +//! ``` +//! use http::Uri; +//! use http::uri::Scheme; +//! +//! let uri = "https://www.rust-lang.org/index.html".parse::().unwrap(); +//! +//! assert_eq!(uri.scheme(), Some(&Scheme::HTTPS)); +//! assert_eq!(uri.host(), Some("www.rust-lang.org")); +//! assert_eq!(uri.path(), "/index.html"); +//! assert_eq!(uri.query(), None); +//! ``` + +#![deny(missing_docs, missing_debug_implementations)] + +#[cfg(test)] +#[macro_use] +extern crate doc_comment; + +#[cfg(test)] +doctest!("../README.md"); + +#[macro_use] +mod convert; + +pub mod header; +pub mod method; +pub mod request; +pub mod response; +pub mod status; +pub mod uri; +pub mod version; + +mod byte_str; +mod error; +mod extensions; + +pub use crate::error::{Error, Result}; +pub use crate::extensions::Extensions; +#[doc(no_inline)] +pub use crate::header::{HeaderMap, HeaderName, HeaderValue}; +pub use crate::method::Method; +pub use crate::request::Request; +pub use crate::response::Response; +pub use crate::status::StatusCode; +pub use crate::uri::Uri; +pub use crate::version::Version; + +fn _assert_types() { + fn assert_send() {} + fn assert_sync() {} + + assert_send::>(); + assert_send::>(); + + assert_sync::>(); + assert_sync::>(); +} + +mod sealed { + /// Private trait to this crate to prevent traits from being implemented in + /// downstream crates. 
+ pub trait Sealed {} +} diff --git a/.cargo-vendor/http-0.2.12/src/method.rs b/.cargo-vendor/http-0.2.12/src/method.rs new file mode 100644 index 0000000000..04261a37ab --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/method.rs @@ -0,0 +1,473 @@ +//! The HTTP request method +//! +//! This module contains HTTP-method related structs and errors and such. The +//! main type of this module, `Method`, is also reexported at the root of the +//! crate as `http::Method` and is intended for import through that location +//! primarily. +//! +//! # Examples +//! +//! ``` +//! use http::Method; +//! +//! assert_eq!(Method::GET, Method::from_bytes(b"GET").unwrap()); +//! assert!(Method::GET.is_idempotent()); +//! assert_eq!(Method::POST.as_str(), "POST"); +//! ``` + +use self::Inner::*; +use self::extension::{InlineExtension, AllocatedExtension}; + +use std::convert::AsRef; +use std::error::Error; +use std::str::FromStr; +use std::convert::TryFrom; +use std::{fmt, str}; + +/// The Request Method (VERB) +/// +/// This type also contains constants for a number of common HTTP methods such +/// as GET, POST, etc. +/// +/// Currently includes 8 variants representing the 8 methods defined in +/// [RFC 7230](https://tools.ietf.org/html/rfc7231#section-4.1), plus PATCH, +/// and an Extension variant for all extensions. +/// +/// # Examples +/// +/// ``` +/// use http::Method; +/// +/// assert_eq!(Method::GET, Method::from_bytes(b"GET").unwrap()); +/// assert!(Method::GET.is_idempotent()); +/// assert_eq!(Method::POST.as_str(), "POST"); +/// ``` +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct Method(Inner); + +/// A possible error value when converting `Method` from bytes. 
+pub struct InvalidMethod { + _priv: (), +} + +#[derive(Clone, PartialEq, Eq, Hash)] +enum Inner { + Options, + Get, + Post, + Put, + Delete, + Head, + Trace, + Connect, + Patch, + // If the extension is short enough, store it inline + ExtensionInline(InlineExtension), + // Otherwise, allocate it + ExtensionAllocated(AllocatedExtension), +} + + +impl Method { + /// GET + pub const GET: Method = Method(Get); + + /// POST + pub const POST: Method = Method(Post); + + /// PUT + pub const PUT: Method = Method(Put); + + /// DELETE + pub const DELETE: Method = Method(Delete); + + /// HEAD + pub const HEAD: Method = Method(Head); + + /// OPTIONS + pub const OPTIONS: Method = Method(Options); + + /// CONNECT + pub const CONNECT: Method = Method(Connect); + + /// PATCH + pub const PATCH: Method = Method(Patch); + + /// TRACE + pub const TRACE: Method = Method(Trace); + + /// Converts a slice of bytes to an HTTP method. + pub fn from_bytes(src: &[u8]) -> Result { + match src.len() { + 0 => Err(InvalidMethod::new()), + 3 => match src { + b"GET" => Ok(Method(Get)), + b"PUT" => Ok(Method(Put)), + _ => Method::extension_inline(src), + }, + 4 => match src { + b"POST" => Ok(Method(Post)), + b"HEAD" => Ok(Method(Head)), + _ => Method::extension_inline(src), + }, + 5 => match src { + b"PATCH" => Ok(Method(Patch)), + b"TRACE" => Ok(Method(Trace)), + _ => Method::extension_inline(src), + }, + 6 => match src { + b"DELETE" => Ok(Method(Delete)), + _ => Method::extension_inline(src), + }, + 7 => match src { + b"OPTIONS" => Ok(Method(Options)), + b"CONNECT" => Ok(Method(Connect)), + _ => Method::extension_inline(src), + }, + _ => { + if src.len() < InlineExtension::MAX { + Method::extension_inline(src) + } else { + let allocated = AllocatedExtension::new(src)?; + + Ok(Method(ExtensionAllocated(allocated))) + } + } + } + } + + fn extension_inline(src: &[u8]) -> Result { + let inline = InlineExtension::new(src)?; + + Ok(Method(ExtensionInline(inline))) + } + + /// Whether a method is 
considered "safe", meaning the request is + /// essentially read-only. + /// + /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.1) + /// for more words. + pub fn is_safe(&self) -> bool { + match self.0 { + Get | Head | Options | Trace => true, + _ => false, + } + } + + /// Whether a method is considered "idempotent", meaning the request has + /// the same result if executed multiple times. + /// + /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.2) for + /// more words. + pub fn is_idempotent(&self) -> bool { + match self.0 { + Put | Delete => true, + _ => self.is_safe(), + } + } + + /// Return a &str representation of the HTTP method + #[inline] + pub fn as_str(&self) -> &str { + match self.0 { + Options => "OPTIONS", + Get => "GET", + Post => "POST", + Put => "PUT", + Delete => "DELETE", + Head => "HEAD", + Trace => "TRACE", + Connect => "CONNECT", + Patch => "PATCH", + ExtensionInline(ref inline) => inline.as_str(), + ExtensionAllocated(ref allocated) => allocated.as_str(), + } + } +} + +impl AsRef for Method { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl<'a> PartialEq<&'a Method> for Method { + #[inline] + fn eq(&self, other: &&'a Method) -> bool { + self == *other + } +} + +impl<'a> PartialEq for &'a Method { + #[inline] + fn eq(&self, other: &Method) -> bool { + *self == other + } +} + +impl PartialEq for Method { + #[inline] + fn eq(&self, other: &str) -> bool { + self.as_ref() == other + } +} + +impl PartialEq for str { + #[inline] + fn eq(&self, other: &Method) -> bool { + self == other.as_ref() + } +} + +impl<'a> PartialEq<&'a str> for Method { + #[inline] + fn eq(&self, other: &&'a str) -> bool { + self.as_ref() == *other + } +} + +impl<'a> PartialEq for &'a str { + #[inline] + fn eq(&self, other: &Method) -> bool { + *self == other.as_ref() + } +} + +impl fmt::Debug for Method { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_ref()) + } +} + +impl 
fmt::Display for Method { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.write_str(self.as_ref()) + } +} + +impl Default for Method { + #[inline] + fn default() -> Method { + Method::GET + } +} + +impl<'a> From<&'a Method> for Method { + #[inline] + fn from(t: &'a Method) -> Self { + t.clone() + } +} + +impl<'a> TryFrom<&'a [u8]> for Method { + type Error = InvalidMethod; + + #[inline] + fn try_from(t: &'a [u8]) -> Result { + Method::from_bytes(t) + } +} + +impl<'a> TryFrom<&'a str> for Method { + type Error = InvalidMethod; + + #[inline] + fn try_from(t: &'a str) -> Result { + TryFrom::try_from(t.as_bytes()) + } +} + +impl FromStr for Method { + type Err = InvalidMethod; + + #[inline] + fn from_str(t: &str) -> Result { + TryFrom::try_from(t) + } +} + +impl InvalidMethod { + fn new() -> InvalidMethod { + InvalidMethod { _priv: () } + } +} + +impl fmt::Debug for InvalidMethod { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("InvalidMethod") + // skip _priv noise + .finish() + } +} + +impl fmt::Display for InvalidMethod { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid HTTP method") + } +} + +impl Error for InvalidMethod {} + +mod extension { + use super::InvalidMethod; + use std::str; + + #[derive(Clone, PartialEq, Eq, Hash)] + // Invariant: the first self.1 bytes of self.0 are valid UTF-8. + pub struct InlineExtension([u8; InlineExtension::MAX], u8); + + #[derive(Clone, PartialEq, Eq, Hash)] + // Invariant: self.0 contains valid UTF-8. + pub struct AllocatedExtension(Box<[u8]>); + + impl InlineExtension { + // Method::from_bytes() assumes this is at least 7 + pub const MAX: usize = 15; + + pub fn new(src: &[u8]) -> Result { + let mut data: [u8; InlineExtension::MAX] = Default::default(); + + write_checked(src, &mut data)?; + + // Invariant: write_checked ensures that the first src.len() bytes + // of data are valid UTF-8. 
+ Ok(InlineExtension(data, src.len() as u8)) + } + + pub fn as_str(&self) -> &str { + let InlineExtension(ref data, len) = self; + // Safety: the invariant of InlineExtension ensures that the first + // len bytes of data contain valid UTF-8. + unsafe {str::from_utf8_unchecked(&data[..*len as usize])} + } + } + + impl AllocatedExtension { + pub fn new(src: &[u8]) -> Result { + let mut data: Vec = vec![0; src.len()]; + + write_checked(src, &mut data)?; + + // Invariant: data is exactly src.len() long and write_checked + // ensures that the first src.len() bytes of data are valid UTF-8. + Ok(AllocatedExtension(data.into_boxed_slice())) + } + + pub fn as_str(&self) -> &str { + // Safety: the invariant of AllocatedExtension ensures that self.0 + // contains valid UTF-8. + unsafe {str::from_utf8_unchecked(&self.0)} + } + } + + // From the HTTP spec section 5.1.1, the HTTP method is case-sensitive and can + // contain the following characters: + // + // ``` + // method = token + // token = 1*tchar + // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / + // "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA + // ``` + // + // https://www.w3.org/Protocols/HTTP/1.1/draft-ietf-http-v11-spec-01#Method + // + // Note that this definition means that any &[u8] that consists solely of valid + // characters is also valid UTF-8 because the valid method characters are a + // subset of the valid 1 byte UTF-8 encoding. 
+ const METHOD_CHARS: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 1x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 2x + b'\0', b'\0', b'\0', b'!', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 3x + b'\0', b'\0', b'*', b'+', b'\0', b'-', b'.', b'\0', b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'\0', b'\0', // 5x + b'\0', b'\0', b'\0', b'\0', b'\0', b'A', b'B', b'C', b'D', b'E', // 6x + b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x + b'Z', b'\0', b'\0', b'\0', b'^', b'_', b'`', b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', b'\0', b'|', b'\0', b'~', b'\0', b'\0', b'\0', // 12x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 13x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 14x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 15x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 16x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 17x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 18x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 19x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 20x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 21x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 22x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 23x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 24x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0' // 
25x + ]; + + // write_checked ensures (among other things) that the first src.len() bytes + // of dst are valid UTF-8 + fn write_checked(src: &[u8], dst: &mut [u8]) -> Result<(), InvalidMethod> { + for (i, &b) in src.iter().enumerate() { + let b = METHOD_CHARS[b as usize]; + + if b == 0 { + return Err(InvalidMethod::new()); + } + + dst[i] = b; + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_method_eq() { + assert_eq!(Method::GET, Method::GET); + assert_eq!(Method::GET, "GET"); + assert_eq!(&Method::GET, "GET"); + + assert_eq!("GET", Method::GET); + assert_eq!("GET", &Method::GET); + + assert_eq!(&Method::GET, Method::GET); + assert_eq!(Method::GET, &Method::GET); + } + + #[test] + fn test_invalid_method() { + assert!(Method::from_str("").is_err()); + assert!(Method::from_bytes(b"").is_err()); + assert!(Method::from_bytes(&[0xC0]).is_err()); // invalid utf-8 + assert!(Method::from_bytes(&[0x10]).is_err()); // invalid method characters + } + + #[test] + fn test_is_idempotent() { + assert!(Method::OPTIONS.is_idempotent()); + assert!(Method::GET.is_idempotent()); + assert!(Method::PUT.is_idempotent()); + assert!(Method::DELETE.is_idempotent()); + assert!(Method::HEAD.is_idempotent()); + assert!(Method::TRACE.is_idempotent()); + + assert!(!Method::POST.is_idempotent()); + assert!(!Method::CONNECT.is_idempotent()); + assert!(!Method::PATCH.is_idempotent()); + } + + #[test] + fn test_extension_method() { + assert_eq!(Method::from_str("WOW").unwrap(), "WOW"); + assert_eq!(Method::from_str("wOw!!").unwrap(), "wOw!!"); + + let long_method = "This_is_a_very_long_method.It_is_valid_but_unlikely."; + assert_eq!(Method::from_str(&long_method).unwrap(), long_method); + } +} diff --git a/.cargo-vendor/http-0.2.12/src/request.rs b/.cargo-vendor/http-0.2.12/src/request.rs new file mode 100644 index 0000000000..9940ae0824 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/request.rs @@ -0,0 +1,1087 @@ +//! HTTP request types. +//! +//! 
This module contains structs related to HTTP requests, notably the +//! `Request` type itself as well as a builder to create requests. Typically +//! you'll import the `http::Request` type rather than reaching into this +//! module itself. +//! +//! # Examples +//! +//! Creating a `Request` to send +//! +//! ```no_run +//! use http::{Request, Response}; +//! +//! let mut request = Request::builder() +//! .uri("https://www.rust-lang.org/") +//! .header("User-Agent", "my-awesome-agent/1.0"); +//! +//! if needs_awesome_header() { +//! request = request.header("Awesome", "yes"); +//! } +//! +//! let response = send(request.body(()).unwrap()); +//! +//! # fn needs_awesome_header() -> bool { +//! # true +//! # } +//! # +//! fn send(req: Request<()>) -> Response<()> { +//! // ... +//! # panic!() +//! } +//! ``` +//! +//! Inspecting a request to see what was sent. +//! +//! ``` +//! use http::{Request, Response, StatusCode}; +//! +//! fn respond_to(req: Request<()>) -> http::Result> { +//! if req.uri() != "/awesome-url" { +//! return Response::builder() +//! .status(StatusCode::NOT_FOUND) +//! .body(()) +//! } +//! +//! let has_awesome_header = req.headers().contains_key("Awesome"); +//! let body = req.body(); +//! +//! // ... +//! # panic!() +//! } +//! ``` + +use std::any::Any; +use std::convert::{TryFrom}; +use std::fmt; + +use crate::header::{HeaderMap, HeaderName, HeaderValue}; +use crate::method::Method; +use crate::version::Version; +use crate::{Extensions, Result, Uri}; + +/// Represents an HTTP request. +/// +/// An HTTP request consists of a head and a potentially optional body. The body +/// component is generic, enabling arbitrary types to represent the HTTP body. +/// For example, the body could be `Vec`, a `Stream` of byte chunks, or a +/// value that has been deserialized. 
+/// +/// # Examples +/// +/// Creating a `Request` to send +/// +/// ```no_run +/// use http::{Request, Response}; +/// +/// let mut request = Request::builder() +/// .uri("https://www.rust-lang.org/") +/// .header("User-Agent", "my-awesome-agent/1.0"); +/// +/// if needs_awesome_header() { +/// request = request.header("Awesome", "yes"); +/// } +/// +/// let response = send(request.body(()).unwrap()); +/// +/// # fn needs_awesome_header() -> bool { +/// # true +/// # } +/// # +/// fn send(req: Request<()>) -> Response<()> { +/// // ... +/// # panic!() +/// } +/// ``` +/// +/// Inspecting a request to see what was sent. +/// +/// ``` +/// use http::{Request, Response, StatusCode}; +/// +/// fn respond_to(req: Request<()>) -> http::Result> { +/// if req.uri() != "/awesome-url" { +/// return Response::builder() +/// .status(StatusCode::NOT_FOUND) +/// .body(()) +/// } +/// +/// let has_awesome_header = req.headers().contains_key("Awesome"); +/// let body = req.body(); +/// +/// // ... +/// # panic!() +/// } +/// ``` +/// +/// Deserialize a request of bytes via json: +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Request; +/// use serde::de; +/// +/// fn deserialize(req: Request>) -> serde_json::Result> +/// where for<'de> T: de::Deserialize<'de>, +/// { +/// let (parts, body) = req.into_parts(); +/// let body = serde_json::from_slice(&body)?; +/// Ok(Request::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` +/// +/// Or alternatively, serialize the body of a request to json +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Request; +/// use serde::ser; +/// +/// fn serialize(req: Request) -> serde_json::Result>> +/// where T: ser::Serialize, +/// { +/// let (parts, body) = req.into_parts(); +/// let body = serde_json::to_vec(&body)?; +/// Ok(Request::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` 
+pub struct Request { + head: Parts, + body: T, +} + +/// Component parts of an HTTP `Request` +/// +/// The HTTP request head consists of a method, uri, version, and a set of +/// header fields. +pub struct Parts { + /// The request's method + pub method: Method, + + /// The request's URI + pub uri: Uri, + + /// The request's version + pub version: Version, + + /// The request's headers + pub headers: HeaderMap, + + /// The request's extensions + pub extensions: Extensions, + + _priv: (), +} + +/// An HTTP request builder +/// +/// This type can be used to construct an instance or `Request` +/// through a builder-like pattern. +#[derive(Debug)] +pub struct Builder { + inner: Result, +} + +impl Request<()> { + /// Creates a new builder-style object to manufacture a `Request` + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::builder() + /// .method("GET") + /// .uri("https://www.rust-lang.org/") + /// .header("X-Custom-Foo", "Bar") + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::new() + } + + /// Creates a new `Builder` initialized with a GET method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::get("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn get(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + + { + Builder::new().method(Method::GET).uri(uri) + } + + /// Creates a new `Builder` initialized with a PUT method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::put("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn put(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + + { + Builder::new().method(Method::PUT).uri(uri) + } + + /// Creates a new `Builder` initialized with a POST method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::post("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn post(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + + { + Builder::new().method(Method::POST).uri(uri) + } + + /// Creates a new `Builder` initialized with a DELETE method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::delete("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn delete(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + + { + Builder::new().method(Method::DELETE).uri(uri) + } + + /// Creates a new `Builder` initialized with an OPTIONS method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::options("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// # assert_eq!(*request.method(), Method::OPTIONS); + /// ``` + pub fn options(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + + { + Builder::new().method(Method::OPTIONS).uri(uri) + } + + /// Creates a new `Builder` initialized with a HEAD method and the given URI. 
+ /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::head("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn head(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + + { + Builder::new().method(Method::HEAD).uri(uri) + } + + /// Creates a new `Builder` initialized with a CONNECT method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::connect("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn connect(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + + { + Builder::new().method(Method::CONNECT).uri(uri) + } + + /// Creates a new `Builder` initialized with a PATCH method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::patch("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn patch(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + { + Builder::new().method(Method::PATCH).uri(uri) + } + + /// Creates a new `Builder` initialized with a TRACE method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::trace("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn trace(uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + { + Builder::new().method(Method::TRACE).uri(uri) + } +} + +impl Request { + /// Creates a new blank `Request` with the body + /// + /// The component parts of this request will be set to their default, e.g. + /// the GET method, no headers, etc. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::new("hello world"); + /// + /// assert_eq!(*request.method(), Method::GET); + /// assert_eq!(*request.body(), "hello world"); + /// ``` + #[inline] + pub fn new(body: T) -> Request { + Request { + head: Parts::new(), + body: body, + } + } + + /// Creates a new `Request` with the given components parts and body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::new("hello world"); + /// let (mut parts, body) = request.into_parts(); + /// parts.method = Method::POST; + /// + /// let request = Request::from_parts(parts, body); + /// ``` + #[inline] + pub fn from_parts(parts: Parts, body: T) -> Request { + Request { + head: parts, + body: body, + } + } + + /// Returns a reference to the associated HTTP method. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert_eq!(*request.method(), Method::GET); + /// ``` + #[inline] + pub fn method(&self) -> &Method { + &self.head.method + } + + /// Returns a mutable reference to the associated HTTP method. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request<()> = Request::default(); + /// *request.method_mut() = Method::PUT; + /// assert_eq!(*request.method(), Method::PUT); + /// ``` + #[inline] + pub fn method_mut(&mut self) -> &mut Method { + &mut self.head.method + } + + /// Returns a reference to the associated URI. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert_eq!(*request.uri(), *"/"); + /// ``` + #[inline] + pub fn uri(&self) -> &Uri { + &self.head.uri + } + + /// Returns a mutable reference to the associated URI. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request<()> = Request::default(); + /// *request.uri_mut() = "/hello".parse().unwrap(); + /// assert_eq!(*request.uri(), *"/hello"); + /// ``` + #[inline] + pub fn uri_mut(&mut self) -> &mut Uri { + &mut self.head.uri + } + + /// Returns the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert_eq!(request.version(), Version::HTTP_11); + /// ``` + #[inline] + pub fn version(&self) -> Version { + self.head.version + } + + /// Returns a mutable reference to the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request<()> = Request::default(); + /// *request.version_mut() = Version::HTTP_2; + /// assert_eq!(request.version(), Version::HTTP_2); + /// ``` + #[inline] + pub fn version_mut(&mut self) -> &mut Version { + &mut self.head.version + } + + /// Returns a reference to the associated header field map. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert!(request.headers().is_empty()); + /// ``` + #[inline] + pub fn headers(&self) -> &HeaderMap { + &self.head.headers + } + + /// Returns a mutable reference to the associated header field map. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut request: Request<()> = Request::default(); + /// request.headers_mut().insert(HOST, HeaderValue::from_static("world")); + /// assert!(!request.headers().is_empty()); + /// ``` + #[inline] + pub fn headers_mut(&mut self) -> &mut HeaderMap { + &mut self.head.headers + } + + /// Returns a reference to the associated extensions. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert!(request.extensions().get::().is_none()); + /// ``` + #[inline] + pub fn extensions(&self) -> &Extensions { + &self.head.extensions + } + + /// Returns a mutable reference to the associated extensions. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut request: Request<()> = Request::default(); + /// request.extensions_mut().insert("hello"); + /// assert_eq!(request.extensions().get(), Some(&"hello")); + /// ``` + #[inline] + pub fn extensions_mut(&mut self) -> &mut Extensions { + &mut self.head.extensions + } + + /// Returns a reference to the associated HTTP body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request = Request::default(); + /// assert!(request.body().is_empty()); + /// ``` + #[inline] + pub fn body(&self) -> &T { + &self.body + } + + /// Returns a mutable reference to the associated HTTP body. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request = Request::default(); + /// request.body_mut().push_str("hello world"); + /// assert!(!request.body().is_empty()); + /// ``` + #[inline] + pub fn body_mut(&mut self) -> &mut T { + &mut self.body + } + + /// Consumes the request, returning just the body. + /// + /// # Examples + /// + /// ``` + /// # use http::Request; + /// let request = Request::new(10); + /// let body = request.into_body(); + /// assert_eq!(body, 10); + /// ``` + #[inline] + pub fn into_body(self) -> T { + self.body + } + + /// Consumes the request returning the head and body parts. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::new(()); + /// let (parts, body) = request.into_parts(); + /// assert_eq!(parts.method, Method::GET); + /// ``` + #[inline] + pub fn into_parts(self) -> (Parts, T) { + (self.head, self.body) + } + + /// Consumes the request returning a new request with body mapped to the + /// return type of the passed in function. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::builder().body("some string").unwrap(); + /// let mapped_request: Request<&[u8]> = request.map(|b| { + /// assert_eq!(b, "some string"); + /// b.as_bytes() + /// }); + /// assert_eq!(mapped_request.body(), &"some string".as_bytes()); + /// ``` + #[inline] + pub fn map(self, f: F) -> Request + where + F: FnOnce(T) -> U, + { + Request { + body: f(self.body), + head: self.head, + } + } +} + +impl Default for Request { + fn default() -> Request { + Request::new(T::default()) + } +} + +impl fmt::Debug for Request { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Request") + .field("method", self.method()) + .field("uri", self.uri()) + .field("version", &self.version()) + .field("headers", self.headers()) + // omits Extensions because not useful + .field("body", self.body()) + .finish() + } +} + +impl Parts { + /// Creates a new default instance of `Parts` + fn new() -> Parts { + Parts { + method: Method::default(), + uri: Uri::default(), + version: Version::default(), + headers: HeaderMap::default(), + extensions: Extensions::default(), + _priv: (), + } + } +} + +impl fmt::Debug for Parts { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Parts") + .field("method", &self.method) + .field("uri", &self.uri) + .field("version", &self.version) + .field("headers", &self.headers) + // omits Extensions because not useful + // omits _priv because not useful + .finish() + } +} + +impl Builder { + /// Creates a new default instance of `Builder` to construct a `Request`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = request::Builder::new() + /// .method("POST") + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn new() -> Builder { + Builder::default() + } + + /// Set the HTTP method for this request. + /// + /// By default this is `GET`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .method("POST") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn method(self, method: T) -> Builder + where + Method: TryFrom, + >::Error: Into, + { + self.and_then(move |mut head| { + let method = TryFrom::try_from(method).map_err(Into::into)?; + head.method = method; + Ok(head) + }) + } + + /// Get the HTTP Method for this request. + /// + /// By default this is `GET`. If builder has error, returns None. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let mut req = Request::builder(); + /// assert_eq!(req.method_ref(),Some(&Method::GET)); + /// + /// req = req.method("POST"); + /// assert_eq!(req.method_ref(),Some(&Method::POST)); + /// ``` + pub fn method_ref(&self) -> Option<&Method> { + self.inner.as_ref().ok().map(|h| &h.method) + } + + /// Set the URI for this request. + /// + /// By default this is `/`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .uri("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn uri(self, uri: T) -> Builder + where + Uri: TryFrom, + >::Error: Into, + { + self.and_then(move |mut head| { + head.uri = TryFrom::try_from(uri).map_err(Into::into)?; + Ok(head) + }) + } + + /// Get the URI for this request + /// + /// By default this is `/`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let mut req = Request::builder(); + /// assert_eq!(req.uri_ref().unwrap(), "/" ); + /// + /// req = req.uri("https://www.rust-lang.org/"); + /// assert_eq!(req.uri_ref().unwrap(), "https://www.rust-lang.org/" ); + /// ``` + pub fn uri_ref(&self) -> Option<&Uri> { + self.inner.as_ref().ok().map(|h| &h.uri) + } + + /// Set the HTTP version for this request. 
+ /// + /// By default this is HTTP/1.1 + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .version(Version::HTTP_2) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn version(self, version: Version) -> Builder { + self.and_then(move |mut head| { + head.version = version; + Ok(head) + }) + } + + /// Get the HTTP version for this request + /// + /// By default this is HTTP/1.1. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let mut req = Request::builder(); + /// assert_eq!(req.version_ref().unwrap(), &Version::HTTP_11 ); + /// + /// req = req.version(Version::HTTP_2); + /// assert_eq!(req.version_ref().unwrap(), &Version::HTTP_2 ); + /// ``` + pub fn version_ref(&self) -> Option<&Version> { + self.inner.as_ref().ok().map(|h| &h.version) + } + + /// Appends a header to this request builder. + /// + /// This function will append the provided key/value as a header to the + /// internal `HeaderMap` being constructed. Essentially this is equivalent + /// to calling `HeaderMap::append`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::HeaderValue; + /// + /// let req = Request::builder() + /// .header("Accept", "text/html") + /// .header("X-Custom-Foo", "bar") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn header(self, key: K, value: V) -> Builder + where + HeaderName: TryFrom, + >::Error: Into, + HeaderValue: TryFrom, + >::Error: Into, + { + self.and_then(move |mut head| { + let name = >::try_from(key).map_err(Into::into)?; + let value = >::try_from(value).map_err(Into::into)?; + head.headers.try_append(name, value)?; + Ok(head) + }) + } + + /// Get header on this request builder. 
+ /// when builder has error returns None + /// + /// # Example + /// + /// ``` + /// # use http::Request; + /// let req = Request::builder() + /// .header("Accept", "text/html") + /// .header("X-Custom-Foo", "bar"); + /// let headers = req.headers_ref().unwrap(); + /// assert_eq!( headers["Accept"], "text/html" ); + /// assert_eq!( headers["X-Custom-Foo"], "bar" ); + /// ``` + pub fn headers_ref(&self) -> Option<&HeaderMap> { + self.inner.as_ref().ok().map(|h| &h.headers) + } + + /// Get headers on this request builder. + /// + /// When builder has error returns None. + /// + /// # Example + /// + /// ``` + /// # use http::{header::HeaderValue, Request}; + /// let mut req = Request::builder(); + /// { + /// let headers = req.headers_mut().unwrap(); + /// headers.insert("Accept", HeaderValue::from_static("text/html")); + /// headers.insert("X-Custom-Foo", HeaderValue::from_static("bar")); + /// } + /// let headers = req.headers_ref().unwrap(); + /// assert_eq!( headers["Accept"], "text/html" ); + /// assert_eq!( headers["X-Custom-Foo"], "bar" ); + /// ``` + pub fn headers_mut(&mut self) -> Option<&mut HeaderMap> { + self.inner.as_mut().ok().map(|h| &mut h.headers) + } + + /// Adds an extension to this builder + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .extension("My Extension") + /// .body(()) + /// .unwrap(); + /// + /// assert_eq!(req.extensions().get::<&'static str>(), + /// Some(&"My Extension")); + /// ``` + pub fn extension(self, extension: T) -> Builder + where + T: Any + Send + Sync + 'static, + { + self.and_then(move |mut head| { + head.extensions.insert(extension); + Ok(head) + }) + } + + /// Get a reference to the extensions for this request builder. + /// + /// If the builder has an error, this returns `None`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Request; + /// let req = Request::builder().extension("My Extension").extension(5u32); + /// let extensions = req.extensions_ref().unwrap(); + /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); + /// assert_eq!(extensions.get::(), Some(&5u32)); + /// ``` + pub fn extensions_ref(&self) -> Option<&Extensions> { + self.inner.as_ref().ok().map(|h| &h.extensions) + } + + /// Get a mutable reference to the extensions for this request builder. + /// + /// If the builder has an error, this returns `None`. + /// + /// # Example + /// + /// ``` + /// # use http::Request; + /// let mut req = Request::builder().extension("My Extension"); + /// let mut extensions = req.extensions_mut().unwrap(); + /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); + /// extensions.insert(5u32); + /// assert_eq!(extensions.get::(), Some(&5u32)); + /// ``` + pub fn extensions_mut(&mut self) -> Option<&mut Extensions> { + self.inner.as_mut().ok().map(|h| &mut h.extensions) + } + + /// "Consumes" this builder, using the provided `body` to return a + /// constructed `Request`. + /// + /// # Errors + /// + /// This function may return an error if any previously configured argument + /// failed to parse or get converted to the internal representation. For + /// example if an invalid `head` was specified via `header("Foo", + /// "Bar\r\n")` the error will be returned when this function is called + /// rather than when `header` was called. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::builder() + /// .body(()) + /// .unwrap(); + /// ``` + pub fn body(self, body: T) -> Result> { + self.inner.map(move |head| { + Request { + head, + body, + } + }) + } + + // private + + fn and_then(self, func: F) -> Self + where + F: FnOnce(Parts) -> Result + { + Builder { + inner: self.inner.and_then(func), + } + } +} + +impl Default for Builder { + #[inline] + fn default() -> Builder { + Builder { + inner: Ok(Parts::new()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_can_map_a_body_from_one_type_to_another() { + let request = Request::builder().body("some string").unwrap(); + let mapped_request = request.map(|s| { + assert_eq!(s, "some string"); + 123u32 + }); + assert_eq!(mapped_request.body(), &123u32); + } +} diff --git a/.cargo-vendor/http-0.2.12/src/response.rs b/.cargo-vendor/http-0.2.12/src/response.rs new file mode 100644 index 0000000000..1e88a3e55b --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/response.rs @@ -0,0 +1,793 @@ +//! HTTP response types. +//! +//! This module contains structs related to HTTP responses, notably the +//! `Response` type itself as well as a builder to create responses. Typically +//! you'll import the `http::Response` type rather than reaching into this +//! module itself. +//! +//! # Examples +//! +//! Creating a `Response` to return +//! +//! ``` +//! use http::{Request, Response, StatusCode}; +//! +//! fn respond_to(req: Request<()>) -> http::Result> { +//! let mut builder = Response::builder() +//! .header("Foo", "Bar") +//! .status(StatusCode::OK); +//! +//! if req.headers().contains_key("Another-Header") { +//! builder = builder.header("Another-Header", "Ack"); +//! } +//! +//! builder.body(()) +//! } +//! ``` +//! +//! A simple 404 handler +//! +//! ``` +//! use http::{Request, Response, StatusCode}; +//! +//! fn not_found(_req: Request<()>) -> http::Result> { +//! Response::builder() +//! 
.status(StatusCode::NOT_FOUND) +//! .body(()) +//! } +//! ``` +//! +//! Or otherwise inspecting the result of a request: +//! +//! ```no_run +//! use http::{Request, Response}; +//! +//! fn get(url: &str) -> http::Result> { +//! // ... +//! # panic!() +//! } +//! +//! let response = get("https://www.rust-lang.org/").unwrap(); +//! +//! if !response.status().is_success() { +//! panic!("failed to get a successful response status!"); +//! } +//! +//! if let Some(date) = response.headers().get("Date") { +//! // we've got a `Date` header! +//! } +//! +//! let body = response.body(); +//! // ... +//! ``` + +use std::any::Any; +use std::convert::TryFrom; +use std::fmt; + +use crate::header::{HeaderMap, HeaderName, HeaderValue}; +use crate::status::StatusCode; +use crate::version::Version; +use crate::{Extensions, Result}; + +/// Represents an HTTP response +/// +/// An HTTP response consists of a head and a potentially optional body. The body +/// component is generic, enabling arbitrary types to represent the HTTP body. +/// For example, the body could be `Vec`, a `Stream` of byte chunks, or a +/// value that has been deserialized. +/// +/// Typically you'll work with responses on the client side as the result of +/// sending a `Request` and on the server you'll be generating a `Response` to +/// send back to the client. 
+/// +/// # Examples +/// +/// Creating a `Response` to return +/// +/// ``` +/// use http::{Request, Response, StatusCode}; +/// +/// fn respond_to(req: Request<()>) -> http::Result> { +/// let mut builder = Response::builder() +/// .header("Foo", "Bar") +/// .status(StatusCode::OK); +/// +/// if req.headers().contains_key("Another-Header") { +/// builder = builder.header("Another-Header", "Ack"); +/// } +/// +/// builder.body(()) +/// } +/// ``` +/// +/// A simple 404 handler +/// +/// ``` +/// use http::{Request, Response, StatusCode}; +/// +/// fn not_found(_req: Request<()>) -> http::Result> { +/// Response::builder() +/// .status(StatusCode::NOT_FOUND) +/// .body(()) +/// } +/// ``` +/// +/// Or otherwise inspecting the result of a request: +/// +/// ```no_run +/// use http::{Request, Response}; +/// +/// fn get(url: &str) -> http::Result> { +/// // ... +/// # panic!() +/// } +/// +/// let response = get("https://www.rust-lang.org/").unwrap(); +/// +/// if !response.status().is_success() { +/// panic!("failed to get a successful response status!"); +/// } +/// +/// if let Some(date) = response.headers().get("Date") { +/// // we've got a `Date` header! +/// } +/// +/// let body = response.body(); +/// // ... 
+/// ``` +/// +/// Deserialize a response of bytes via json: +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Response; +/// use serde::de; +/// +/// fn deserialize(res: Response>) -> serde_json::Result> +/// where for<'de> T: de::Deserialize<'de>, +/// { +/// let (parts, body) = res.into_parts(); +/// let body = serde_json::from_slice(&body)?; +/// Ok(Response::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` +/// +/// Or alternatively, serialize the body of a response to json +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Response; +/// use serde::ser; +/// +/// fn serialize(res: Response) -> serde_json::Result>> +/// where T: ser::Serialize, +/// { +/// let (parts, body) = res.into_parts(); +/// let body = serde_json::to_vec(&body)?; +/// Ok(Response::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` +pub struct Response { + head: Parts, + body: T, +} + +/// Component parts of an HTTP `Response` +/// +/// The HTTP response head consists of a status, version, and a set of +/// header fields. +pub struct Parts { + /// The response's status + pub status: StatusCode, + + /// The response's version + pub version: Version, + + /// The response's headers + pub headers: HeaderMap, + + /// The response's extensions + pub extensions: Extensions, + + _priv: (), +} + +/// An HTTP response builder +/// +/// This type can be used to construct an instance of `Response` through a +/// builder-like pattern. +#[derive(Debug)] +pub struct Builder { + inner: Result, +} + +impl Response<()> { + /// Creates a new builder-style object to manufacture a `Response` + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Response`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::builder() + /// .status(200) + /// .header("X-Custom-Foo", "Bar") + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::new() + } +} + +impl Response { + /// Creates a new blank `Response` with the body + /// + /// The component ports of this response will be set to their default, e.g. + /// the ok status, no headers, etc. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::new("hello world"); + /// + /// assert_eq!(response.status(), StatusCode::OK); + /// assert_eq!(*response.body(), "hello world"); + /// ``` + #[inline] + pub fn new(body: T) -> Response { + Response { + head: Parts::new(), + body: body, + } + } + + /// Creates a new `Response` with the given head and body + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::new("hello world"); + /// let (mut parts, body) = response.into_parts(); + /// + /// parts.status = StatusCode::BAD_REQUEST; + /// let response = Response::from_parts(parts, body); + /// + /// assert_eq!(response.status(), StatusCode::BAD_REQUEST); + /// assert_eq!(*response.body(), "hello world"); + /// ``` + #[inline] + pub fn from_parts(parts: Parts, body: T) -> Response { + Response { + head: parts, + body: body, + } + } + + /// Returns the `StatusCode`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert_eq!(response.status(), StatusCode::OK); + /// ``` + #[inline] + pub fn status(&self) -> StatusCode { + self.head.status + } + + /// Returns a mutable reference to the associated `StatusCode`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut response: Response<()> = Response::default(); + /// *response.status_mut() = StatusCode::CREATED; + /// assert_eq!(response.status(), StatusCode::CREATED); + /// ``` + #[inline] + pub fn status_mut(&mut self) -> &mut StatusCode { + &mut self.head.status + } + + /// Returns a reference to the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert_eq!(response.version(), Version::HTTP_11); + /// ``` + #[inline] + pub fn version(&self) -> Version { + self.head.version + } + + /// Returns a mutable reference to the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut response: Response<()> = Response::default(); + /// *response.version_mut() = Version::HTTP_2; + /// assert_eq!(response.version(), Version::HTTP_2); + /// ``` + #[inline] + pub fn version_mut(&mut self) -> &mut Version { + &mut self.head.version + } + + /// Returns a reference to the associated header field map. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert!(response.headers().is_empty()); + /// ``` + #[inline] + pub fn headers(&self) -> &HeaderMap { + &self.head.headers + } + + /// Returns a mutable reference to the associated header field map. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut response: Response<()> = Response::default(); + /// response.headers_mut().insert(HOST, HeaderValue::from_static("world")); + /// assert!(!response.headers().is_empty()); + /// ``` + #[inline] + pub fn headers_mut(&mut self) -> &mut HeaderMap { + &mut self.head.headers + } + + /// Returns a reference to the associated extensions. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert!(response.extensions().get::().is_none()); + /// ``` + #[inline] + pub fn extensions(&self) -> &Extensions { + &self.head.extensions + } + + /// Returns a mutable reference to the associated extensions. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut response: Response<()> = Response::default(); + /// response.extensions_mut().insert("hello"); + /// assert_eq!(response.extensions().get(), Some(&"hello")); + /// ``` + #[inline] + pub fn extensions_mut(&mut self) -> &mut Extensions { + &mut self.head.extensions + } + + /// Returns a reference to the associated HTTP body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response = Response::default(); + /// assert!(response.body().is_empty()); + /// ``` + #[inline] + pub fn body(&self) -> &T { + &self.body + } + + /// Returns a mutable reference to the associated HTTP body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut response: Response = Response::default(); + /// response.body_mut().push_str("hello world"); + /// assert!(!response.body().is_empty()); + /// ``` + #[inline] + pub fn body_mut(&mut self) -> &mut T { + &mut self.body + } + + /// Consumes the response, returning just the body. + /// + /// # Examples + /// + /// ``` + /// # use http::Response; + /// let response = Response::new(10); + /// let body = response.into_body(); + /// assert_eq!(body, 10); + /// ``` + #[inline] + pub fn into_body(self) -> T { + self.body + } + + /// Consumes the response returning the head and body parts. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// let (parts, body) = response.into_parts(); + /// assert_eq!(parts.status, StatusCode::OK); + /// ``` + #[inline] + pub fn into_parts(self) -> (Parts, T) { + (self.head, self.body) + } + + /// Consumes the response returning a new response with body mapped to the + /// return type of the passed in function. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::builder().body("some string").unwrap(); + /// let mapped_response: Response<&[u8]> = response.map(|b| { + /// assert_eq!(b, "some string"); + /// b.as_bytes() + /// }); + /// assert_eq!(mapped_response.body(), &"some string".as_bytes()); + /// ``` + #[inline] + pub fn map(self, f: F) -> Response + where + F: FnOnce(T) -> U, + { + Response { + body: f(self.body), + head: self.head, + } + } +} + +impl Default for Response { + #[inline] + fn default() -> Response { + Response::new(T::default()) + } +} + +impl fmt::Debug for Response { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Response") + .field("status", &self.status()) + .field("version", &self.version()) + .field("headers", self.headers()) + // omits Extensions because not useful + .field("body", self.body()) + .finish() + } +} + +impl Parts { + /// Creates a new default instance of `Parts` + fn new() -> Parts { + Parts { + status: StatusCode::default(), + version: Version::default(), + headers: HeaderMap::default(), + extensions: Extensions::default(), + _priv: (), + } + } +} + +impl fmt::Debug for Parts { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Parts") + .field("status", &self.status) + .field("version", &self.version) + .field("headers", &self.headers) + // omits Extensions because not useful + // omits _priv because not useful + .finish() + } +} + +impl Builder { + /// Creates a new default instance of `Builder` to construct 
either a + /// `Head` or a `Response`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = response::Builder::new() + /// .status(200) + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn new() -> Builder { + Builder::default() + } + + /// Set the HTTP status for this response. + /// + /// By default this is `200`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .status(200) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn status(self, status: T) -> Builder + where + StatusCode: TryFrom, + >::Error: Into, + { + self.and_then(move |mut head| { + head.status = TryFrom::try_from(status).map_err(Into::into)?; + Ok(head) + }) + } + + /// Set the HTTP version for this response. + /// + /// By default this is HTTP/1.1 + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .version(Version::HTTP_2) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn version(self, version: Version) -> Builder { + self.and_then(move |mut head| { + head.version = version; + Ok(head) + }) + } + + /// Appends a header to this response builder. + /// + /// This function will append the provided key/value as a header to the + /// internal `HeaderMap` being constructed. Essentially this is equivalent + /// to calling `HeaderMap::append`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::HeaderValue; + /// + /// let response = Response::builder() + /// .header("Content-Type", "text/html") + /// .header("X-Custom-Foo", "bar") + /// .header("content-length", 0) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn header(self, key: K, value: V) -> Builder + where + HeaderName: TryFrom, + >::Error: Into, + HeaderValue: TryFrom, + >::Error: Into, + { + self.and_then(move |mut head| { + let name = >::try_from(key).map_err(Into::into)?; + let value = >::try_from(value).map_err(Into::into)?; + head.headers.try_append(name, value)?; + Ok(head) + }) + } + + /// Get header on this response builder. + /// + /// When builder has error returns None. + /// + /// # Example + /// + /// ``` + /// # use http::Response; + /// # use http::header::HeaderValue; + /// let res = Response::builder() + /// .header("Accept", "text/html") + /// .header("X-Custom-Foo", "bar"); + /// let headers = res.headers_ref().unwrap(); + /// assert_eq!( headers["Accept"], "text/html" ); + /// assert_eq!( headers["X-Custom-Foo"], "bar" ); + /// ``` + pub fn headers_ref(&self) -> Option<&HeaderMap> { + self.inner.as_ref().ok().map(|h| &h.headers) + } + + /// Get header on this response builder. 
+ /// when builder has error returns None + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// # use http::header::HeaderValue; + /// # use http::response::Builder; + /// let mut res = Response::builder(); + /// { + /// let headers = res.headers_mut().unwrap(); + /// headers.insert("Accept", HeaderValue::from_static("text/html")); + /// headers.insert("X-Custom-Foo", HeaderValue::from_static("bar")); + /// } + /// let headers = res.headers_ref().unwrap(); + /// assert_eq!( headers["Accept"], "text/html" ); + /// assert_eq!( headers["X-Custom-Foo"], "bar" ); + /// ``` + pub fn headers_mut(&mut self) -> Option<&mut HeaderMap> { + self.inner.as_mut().ok().map(|h| &mut h.headers) + } + + /// Adds an extension to this builder + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .extension("My Extension") + /// .body(()) + /// .unwrap(); + /// + /// assert_eq!(response.extensions().get::<&'static str>(), + /// Some(&"My Extension")); + /// ``` + pub fn extension(self, extension: T) -> Builder + where + T: Any + Send + Sync + 'static, + { + self.and_then(move |mut head| { + head.extensions.insert(extension); + Ok(head) + }) + } + + /// Get a reference to the extensions for this response builder. + /// + /// If the builder has an error, this returns `None`. + /// + /// # Example + /// + /// ``` + /// # use http::Response; + /// let res = Response::builder().extension("My Extension").extension(5u32); + /// let extensions = res.extensions_ref().unwrap(); + /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); + /// assert_eq!(extensions.get::(), Some(&5u32)); + /// ``` + pub fn extensions_ref(&self) -> Option<&Extensions> { + self.inner.as_ref().ok().map(|h| &h.extensions) + } + + /// Get a mutable reference to the extensions for this response builder. + /// + /// If the builder has an error, this returns `None`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Response; + /// let mut res = Response::builder().extension("My Extension"); + /// let mut extensions = res.extensions_mut().unwrap(); + /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); + /// extensions.insert(5u32); + /// assert_eq!(extensions.get::(), Some(&5u32)); + /// ``` + pub fn extensions_mut(&mut self) -> Option<&mut Extensions> { + self.inner.as_mut().ok().map(|h| &mut h.extensions) + } + + /// "Consumes" this builder, using the provided `body` to return a + /// constructed `Response`. + /// + /// # Errors + /// + /// This function may return an error if any previously configured argument + /// failed to parse or get converted to the internal representation. For + /// example if an invalid `head` was specified via `header("Foo", + /// "Bar\r\n")` the error will be returned when this function is called + /// rather than when `header` was called. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .body(()) + /// .unwrap(); + /// ``` + pub fn body(self, body: T) -> Result> { + self.inner.map(move |head| { + Response { + head, + body, + } + }) + } + + // private + + fn and_then(self, func: F) -> Self + where + F: FnOnce(Parts) -> Result + { + Builder { + inner: self.inner.and_then(func), + } + } +} + +impl Default for Builder { + #[inline] + fn default() -> Builder { + Builder { + inner: Ok(Parts::new()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_can_map_a_body_from_one_type_to_another() { + let response = Response::builder().body("some string").unwrap(); + let mapped_response = response.map(|s| { + assert_eq!(s, "some string"); + 123u32 + }); + assert_eq!(mapped_response.body(), &123u32); + } +} diff --git a/.cargo-vendor/http-0.2.12/src/status.rs b/.cargo-vendor/http-0.2.12/src/status.rs new file mode 100644 index 0000000000..d98d24c3d9 --- /dev/null +++ 
b/.cargo-vendor/http-0.2.12/src/status.rs @@ -0,0 +1,588 @@ +//! HTTP status codes +//! +//! This module contains HTTP-status code related structs an errors. The main +//! type in this module is `StatusCode` which is not intended to be used through +//! this module but rather the `http::StatusCode` type. +//! +//! # Examples +//! +//! ``` +//! use http::StatusCode; +//! +//! assert_eq!(StatusCode::from_u16(200).unwrap(), StatusCode::OK); +//! assert_eq!(StatusCode::NOT_FOUND, 404); +//! assert!(StatusCode::OK.is_success()); +//! ``` + +use std::convert::TryFrom; +use std::num::NonZeroU16; +use std::error::Error; +use std::fmt; +use std::str::FromStr; + +/// An HTTP status code (`status-code` in RFC 7230 et al.). +/// +/// Constants are provided for known status codes, including those in the IANA +/// [HTTP Status Code Registry]( +/// https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml). +/// +/// Status code values in the range 100-999 (inclusive) are supported by this +/// type. Values in the range 100-599 are semantically classified by the most +/// significant digit. See [`StatusCode::is_success`], etc. Values above 599 +/// are unclassified but allowed for legacy compatibility, though their use is +/// discouraged. Applications may interpret such values as protocol errors. +/// +/// # Examples +/// +/// ``` +/// use http::StatusCode; +/// +/// assert_eq!(StatusCode::from_u16(200).unwrap(), StatusCode::OK); +/// assert_eq!(StatusCode::NOT_FOUND.as_u16(), 404); +/// assert!(StatusCode::OK.is_success()); +/// ``` +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct StatusCode(NonZeroU16); + +/// A possible error value when converting a `StatusCode` from a `u16` or `&str` +/// +/// This error indicates that the supplied input was not a valid number, was less +/// than 100, or was greater than 999. +pub struct InvalidStatusCode { + _priv: (), +} + +impl StatusCode { + /// Converts a u16 to a status code. 
+ /// + /// The function validates the correctness of the supplied u16. It must be + /// greater or equal to 100 and less than 1000. + /// + /// # Example + /// + /// ``` + /// use http::StatusCode; + /// + /// let ok = StatusCode::from_u16(200).unwrap(); + /// assert_eq!(ok, StatusCode::OK); + /// + /// let err = StatusCode::from_u16(99); + /// assert!(err.is_err()); + /// ``` + #[inline] + pub fn from_u16(src: u16) -> Result { + if src < 100 || src >= 1000 { + return Err(InvalidStatusCode::new()); + } + + NonZeroU16::new(src) + .map(StatusCode) + .ok_or_else(InvalidStatusCode::new) + } + + /// Converts a &[u8] to a status code + pub fn from_bytes(src: &[u8]) -> Result { + if src.len() != 3 { + return Err(InvalidStatusCode::new()); + } + + let a = src[0].wrapping_sub(b'0') as u16; + let b = src[1].wrapping_sub(b'0') as u16; + let c = src[2].wrapping_sub(b'0') as u16; + + if a == 0 || a > 9 || b > 9 || c > 9 { + return Err(InvalidStatusCode::new()); + } + + let status = (a * 100) + (b * 10) + c; + NonZeroU16::new(status) + .map(StatusCode) + .ok_or_else(InvalidStatusCode::new) + } + + /// Returns the `u16` corresponding to this `StatusCode`. + /// + /// # Note + /// + /// This is the same as the `From` implementation, but + /// included as an inherent method because that implementation doesn't + /// appear in rustdocs, as well as a way to force the type instead of + /// relying on inference. + /// + /// # Example + /// + /// ``` + /// let status = http::StatusCode::OK; + /// assert_eq!(status.as_u16(), 200); + /// ``` + #[inline] + pub fn as_u16(&self) -> u16 { + (*self).into() + } + + /// Returns a &str representation of the `StatusCode` + /// + /// The return value only includes a numerical representation of the + /// status code. The canonical reason is not included. 
+ /// + /// # Example + /// + /// ``` + /// let status = http::StatusCode::OK; + /// assert_eq!(status.as_str(), "200"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + let offset = (self.0.get() - 100) as usize; + let offset = offset * 3; + + // Invariant: self has checked range [100, 999] and CODE_DIGITS is + // ASCII-only, of length 900 * 3 = 2700 bytes + + #[cfg(debug_assertions)] + { &CODE_DIGITS[offset..offset+3] } + + #[cfg(not(debug_assertions))] + unsafe { CODE_DIGITS.get_unchecked(offset..offset+3) } + } + + /// Get the standardised `reason-phrase` for this status code. + /// + /// This is mostly here for servers writing responses, but could potentially have application + /// at other times. + /// + /// The reason phrase is defined as being exclusively for human readers. You should avoid + /// deriving any meaning from it at all costs. + /// + /// Bear in mind also that in HTTP/2.0 and HTTP/3.0 the reason phrase is abolished from + /// transmission, and so this canonical reason phrase really is the only reason phrase you’ll + /// find. + /// + /// # Example + /// + /// ``` + /// let status = http::StatusCode::OK; + /// assert_eq!(status.canonical_reason(), Some("OK")); + /// ``` + pub fn canonical_reason(&self) -> Option<&'static str> { + canonical_reason(self.0.get()) + } + + /// Check if status is within 100-199. + #[inline] + pub fn is_informational(&self) -> bool { + 200 > self.0.get() && self.0.get() >= 100 + } + + /// Check if status is within 200-299. + #[inline] + pub fn is_success(&self) -> bool { + 300 > self.0.get() && self.0.get() >= 200 + } + + /// Check if status is within 300-399. + #[inline] + pub fn is_redirection(&self) -> bool { + 400 > self.0.get() && self.0.get() >= 300 + } + + /// Check if status is within 400-499. + #[inline] + pub fn is_client_error(&self) -> bool { + 500 > self.0.get() && self.0.get() >= 400 + } + + /// Check if status is within 500-599. 
+ #[inline] + pub fn is_server_error(&self) -> bool { + 600 > self.0.get() && self.0.get() >= 500 + } +} + +impl fmt::Debug for StatusCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.0, f) + } +} + +/// Formats the status code, *including* the canonical reason. +/// +/// # Example +/// +/// ``` +/// # use http::StatusCode; +/// assert_eq!(format!("{}", StatusCode::OK), "200 OK"); +/// ``` +impl fmt::Display for StatusCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} {}", + u16::from(*self), + self.canonical_reason().unwrap_or("") + ) + } +} + +impl Default for StatusCode { + #[inline] + fn default() -> StatusCode { + StatusCode::OK + } +} + +impl PartialEq for StatusCode { + #[inline] + fn eq(&self, other: &u16) -> bool { + self.as_u16() == *other + } +} + +impl PartialEq for u16 { + #[inline] + fn eq(&self, other: &StatusCode) -> bool { + *self == other.as_u16() + } +} + +impl From for u16 { + #[inline] + fn from(status: StatusCode) -> u16 { + status.0.get() + } +} + +impl FromStr for StatusCode { + type Err = InvalidStatusCode; + + fn from_str(s: &str) -> Result { + StatusCode::from_bytes(s.as_ref()) + } +} + +impl<'a> From<&'a StatusCode> for StatusCode { + #[inline] + fn from(t: &'a StatusCode) -> Self { + t.clone() + } +} + +impl<'a> TryFrom<&'a [u8]> for StatusCode { + type Error = InvalidStatusCode; + + #[inline] + fn try_from(t: &'a [u8]) -> Result { + StatusCode::from_bytes(t) + } +} + +impl<'a> TryFrom<&'a str> for StatusCode { + type Error = InvalidStatusCode; + + #[inline] + fn try_from(t: &'a str) -> Result { + t.parse() + } +} + +impl TryFrom for StatusCode { + type Error = InvalidStatusCode; + + #[inline] + fn try_from(t: u16) -> Result { + StatusCode::from_u16(t) + } +} + +macro_rules! 
status_codes { + ( + $( + $(#[$docs:meta])* + ($num:expr, $konst:ident, $phrase:expr); + )+ + ) => { + impl StatusCode { + $( + $(#[$docs])* + pub const $konst: StatusCode = StatusCode(unsafe { NonZeroU16::new_unchecked($num) }); + )+ + + } + + fn canonical_reason(num: u16) -> Option<&'static str> { + match num { + $( + $num => Some($phrase), + )+ + _ => None + } + } + } +} + +status_codes! { + /// 100 Continue + /// [[RFC7231, Section 6.2.1](https://tools.ietf.org/html/rfc7231#section-6.2.1)] + (100, CONTINUE, "Continue"); + /// 101 Switching Protocols + /// [[RFC7231, Section 6.2.2](https://tools.ietf.org/html/rfc7231#section-6.2.2)] + (101, SWITCHING_PROTOCOLS, "Switching Protocols"); + /// 102 Processing + /// [[RFC2518](https://tools.ietf.org/html/rfc2518)] + (102, PROCESSING, "Processing"); + + /// 200 OK + /// [[RFC7231, Section 6.3.1](https://tools.ietf.org/html/rfc7231#section-6.3.1)] + (200, OK, "OK"); + /// 201 Created + /// [[RFC7231, Section 6.3.2](https://tools.ietf.org/html/rfc7231#section-6.3.2)] + (201, CREATED, "Created"); + /// 202 Accepted + /// [[RFC7231, Section 6.3.3](https://tools.ietf.org/html/rfc7231#section-6.3.3)] + (202, ACCEPTED, "Accepted"); + /// 203 Non-Authoritative Information + /// [[RFC7231, Section 6.3.4](https://tools.ietf.org/html/rfc7231#section-6.3.4)] + (203, NON_AUTHORITATIVE_INFORMATION, "Non Authoritative Information"); + /// 204 No Content + /// [[RFC7231, Section 6.3.5](https://tools.ietf.org/html/rfc7231#section-6.3.5)] + (204, NO_CONTENT, "No Content"); + /// 205 Reset Content + /// [[RFC7231, Section 6.3.6](https://tools.ietf.org/html/rfc7231#section-6.3.6)] + (205, RESET_CONTENT, "Reset Content"); + /// 206 Partial Content + /// [[RFC7233, Section 4.1](https://tools.ietf.org/html/rfc7233#section-4.1)] + (206, PARTIAL_CONTENT, "Partial Content"); + /// 207 Multi-Status + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (207, MULTI_STATUS, "Multi-Status"); + /// 208 Already Reported + /// 
[[RFC5842](https://tools.ietf.org/html/rfc5842)] + (208, ALREADY_REPORTED, "Already Reported"); + + /// 226 IM Used + /// [[RFC3229](https://tools.ietf.org/html/rfc3229)] + (226, IM_USED, "IM Used"); + + /// 300 Multiple Choices + /// [[RFC7231, Section 6.4.1](https://tools.ietf.org/html/rfc7231#section-6.4.1)] + (300, MULTIPLE_CHOICES, "Multiple Choices"); + /// 301 Moved Permanently + /// [[RFC7231, Section 6.4.2](https://tools.ietf.org/html/rfc7231#section-6.4.2)] + (301, MOVED_PERMANENTLY, "Moved Permanently"); + /// 302 Found + /// [[RFC7231, Section 6.4.3](https://tools.ietf.org/html/rfc7231#section-6.4.3)] + (302, FOUND, "Found"); + /// 303 See Other + /// [[RFC7231, Section 6.4.4](https://tools.ietf.org/html/rfc7231#section-6.4.4)] + (303, SEE_OTHER, "See Other"); + /// 304 Not Modified + /// [[RFC7232, Section 4.1](https://tools.ietf.org/html/rfc7232#section-4.1)] + (304, NOT_MODIFIED, "Not Modified"); + /// 305 Use Proxy + /// [[RFC7231, Section 6.4.5](https://tools.ietf.org/html/rfc7231#section-6.4.5)] + (305, USE_PROXY, "Use Proxy"); + /// 307 Temporary Redirect + /// [[RFC7231, Section 6.4.7](https://tools.ietf.org/html/rfc7231#section-6.4.7)] + (307, TEMPORARY_REDIRECT, "Temporary Redirect"); + /// 308 Permanent Redirect + /// [[RFC7238](https://tools.ietf.org/html/rfc7238)] + (308, PERMANENT_REDIRECT, "Permanent Redirect"); + + /// 400 Bad Request + /// [[RFC7231, Section 6.5.1](https://tools.ietf.org/html/rfc7231#section-6.5.1)] + (400, BAD_REQUEST, "Bad Request"); + /// 401 Unauthorized + /// [[RFC7235, Section 3.1](https://tools.ietf.org/html/rfc7235#section-3.1)] + (401, UNAUTHORIZED, "Unauthorized"); + /// 402 Payment Required + /// [[RFC7231, Section 6.5.2](https://tools.ietf.org/html/rfc7231#section-6.5.2)] + (402, PAYMENT_REQUIRED, "Payment Required"); + /// 403 Forbidden + /// [[RFC7231, Section 6.5.3](https://tools.ietf.org/html/rfc7231#section-6.5.3)] + (403, FORBIDDEN, "Forbidden"); + /// 404 Not Found + /// [[RFC7231, Section 
6.5.4](https://tools.ietf.org/html/rfc7231#section-6.5.4)] + (404, NOT_FOUND, "Not Found"); + /// 405 Method Not Allowed + /// [[RFC7231, Section 6.5.5](https://tools.ietf.org/html/rfc7231#section-6.5.5)] + (405, METHOD_NOT_ALLOWED, "Method Not Allowed"); + /// 406 Not Acceptable + /// [[RFC7231, Section 6.5.6](https://tools.ietf.org/html/rfc7231#section-6.5.6)] + (406, NOT_ACCEPTABLE, "Not Acceptable"); + /// 407 Proxy Authentication Required + /// [[RFC7235, Section 3.2](https://tools.ietf.org/html/rfc7235#section-3.2)] + (407, PROXY_AUTHENTICATION_REQUIRED, "Proxy Authentication Required"); + /// 408 Request Timeout + /// [[RFC7231, Section 6.5.7](https://tools.ietf.org/html/rfc7231#section-6.5.7)] + (408, REQUEST_TIMEOUT, "Request Timeout"); + /// 409 Conflict + /// [[RFC7231, Section 6.5.8](https://tools.ietf.org/html/rfc7231#section-6.5.8)] + (409, CONFLICT, "Conflict"); + /// 410 Gone + /// [[RFC7231, Section 6.5.9](https://tools.ietf.org/html/rfc7231#section-6.5.9)] + (410, GONE, "Gone"); + /// 411 Length Required + /// [[RFC7231, Section 6.5.10](https://tools.ietf.org/html/rfc7231#section-6.5.10)] + (411, LENGTH_REQUIRED, "Length Required"); + /// 412 Precondition Failed + /// [[RFC7232, Section 4.2](https://tools.ietf.org/html/rfc7232#section-4.2)] + (412, PRECONDITION_FAILED, "Precondition Failed"); + /// 413 Payload Too Large + /// [[RFC7231, Section 6.5.11](https://tools.ietf.org/html/rfc7231#section-6.5.11)] + (413, PAYLOAD_TOO_LARGE, "Payload Too Large"); + /// 414 URI Too Long + /// [[RFC7231, Section 6.5.12](https://tools.ietf.org/html/rfc7231#section-6.5.12)] + (414, URI_TOO_LONG, "URI Too Long"); + /// 415 Unsupported Media Type + /// [[RFC7231, Section 6.5.13](https://tools.ietf.org/html/rfc7231#section-6.5.13)] + (415, UNSUPPORTED_MEDIA_TYPE, "Unsupported Media Type"); + /// 416 Range Not Satisfiable + /// [[RFC7233, Section 4.4](https://tools.ietf.org/html/rfc7233#section-4.4)] + (416, RANGE_NOT_SATISFIABLE, "Range Not Satisfiable"); + /// 417 
Expectation Failed + /// [[RFC7231, Section 6.5.14](https://tools.ietf.org/html/rfc7231#section-6.5.14)] + (417, EXPECTATION_FAILED, "Expectation Failed"); + /// 418 I'm a teapot + /// [curiously not registered by IANA but [RFC2324](https://tools.ietf.org/html/rfc2324)] + (418, IM_A_TEAPOT, "I'm a teapot"); + + /// 421 Misdirected Request + /// [RFC7540, Section 9.1.2](http://tools.ietf.org/html/rfc7540#section-9.1.2) + (421, MISDIRECTED_REQUEST, "Misdirected Request"); + /// 422 Unprocessable Entity + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (422, UNPROCESSABLE_ENTITY, "Unprocessable Entity"); + /// 423 Locked + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (423, LOCKED, "Locked"); + /// 424 Failed Dependency + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (424, FAILED_DEPENDENCY, "Failed Dependency"); + + /// 426 Upgrade Required + /// [[RFC7231, Section 6.5.15](https://tools.ietf.org/html/rfc7231#section-6.5.15)] + (426, UPGRADE_REQUIRED, "Upgrade Required"); + + /// 428 Precondition Required + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (428, PRECONDITION_REQUIRED, "Precondition Required"); + /// 429 Too Many Requests + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (429, TOO_MANY_REQUESTS, "Too Many Requests"); + + /// 431 Request Header Fields Too Large + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (431, REQUEST_HEADER_FIELDS_TOO_LARGE, "Request Header Fields Too Large"); + + /// 451 Unavailable For Legal Reasons + /// [[RFC7725](http://tools.ietf.org/html/rfc7725)] + (451, UNAVAILABLE_FOR_LEGAL_REASONS, "Unavailable For Legal Reasons"); + + /// 500 Internal Server Error + /// [[RFC7231, Section 6.6.1](https://tools.ietf.org/html/rfc7231#section-6.6.1)] + (500, INTERNAL_SERVER_ERROR, "Internal Server Error"); + /// 501 Not Implemented + /// [[RFC7231, Section 6.6.2](https://tools.ietf.org/html/rfc7231#section-6.6.2)] + (501, NOT_IMPLEMENTED, "Not Implemented"); + /// 502 Bad Gateway + /// 
[[RFC7231, Section 6.6.3](https://tools.ietf.org/html/rfc7231#section-6.6.3)] + (502, BAD_GATEWAY, "Bad Gateway"); + /// 503 Service Unavailable + /// [[RFC7231, Section 6.6.4](https://tools.ietf.org/html/rfc7231#section-6.6.4)] + (503, SERVICE_UNAVAILABLE, "Service Unavailable"); + /// 504 Gateway Timeout + /// [[RFC7231, Section 6.6.5](https://tools.ietf.org/html/rfc7231#section-6.6.5)] + (504, GATEWAY_TIMEOUT, "Gateway Timeout"); + /// 505 HTTP Version Not Supported + /// [[RFC7231, Section 6.6.6](https://tools.ietf.org/html/rfc7231#section-6.6.6)] + (505, HTTP_VERSION_NOT_SUPPORTED, "HTTP Version Not Supported"); + /// 506 Variant Also Negotiates + /// [[RFC2295](https://tools.ietf.org/html/rfc2295)] + (506, VARIANT_ALSO_NEGOTIATES, "Variant Also Negotiates"); + /// 507 Insufficient Storage + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (507, INSUFFICIENT_STORAGE, "Insufficient Storage"); + /// 508 Loop Detected + /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] + (508, LOOP_DETECTED, "Loop Detected"); + + /// 510 Not Extended + /// [[RFC2774](https://tools.ietf.org/html/rfc2774)] + (510, NOT_EXTENDED, "Not Extended"); + /// 511 Network Authentication Required + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (511, NETWORK_AUTHENTICATION_REQUIRED, "Network Authentication Required"); +} + +impl InvalidStatusCode { + fn new() -> InvalidStatusCode { + InvalidStatusCode { + _priv: (), + } + } +} + +impl fmt::Debug for InvalidStatusCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("InvalidStatusCode") + // skip _priv noise + .finish() + } +} + +impl fmt::Display for InvalidStatusCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid status code") + } +} + +impl Error for InvalidStatusCode {} + +// A string of packed 3-ASCII-digit status code values for the supported range +// of [100, 999] (900 codes, 2700 bytes). 
+const CODE_DIGITS: &'static str = "\ +100101102103104105106107108109110111112113114115116117118119\ +120121122123124125126127128129130131132133134135136137138139\ +140141142143144145146147148149150151152153154155156157158159\ +160161162163164165166167168169170171172173174175176177178179\ +180181182183184185186187188189190191192193194195196197198199\ +200201202203204205206207208209210211212213214215216217218219\ +220221222223224225226227228229230231232233234235236237238239\ +240241242243244245246247248249250251252253254255256257258259\ +260261262263264265266267268269270271272273274275276277278279\ +280281282283284285286287288289290291292293294295296297298299\ +300301302303304305306307308309310311312313314315316317318319\ +320321322323324325326327328329330331332333334335336337338339\ +340341342343344345346347348349350351352353354355356357358359\ +360361362363364365366367368369370371372373374375376377378379\ +380381382383384385386387388389390391392393394395396397398399\ +400401402403404405406407408409410411412413414415416417418419\ +420421422423424425426427428429430431432433434435436437438439\ +440441442443444445446447448449450451452453454455456457458459\ +460461462463464465466467468469470471472473474475476477478479\ +480481482483484485486487488489490491492493494495496497498499\ +500501502503504505506507508509510511512513514515516517518519\ +520521522523524525526527528529530531532533534535536537538539\ +540541542543544545546547548549550551552553554555556557558559\ +560561562563564565566567568569570571572573574575576577578579\ +580581582583584585586587588589590591592593594595596597598599\ +600601602603604605606607608609610611612613614615616617618619\ +620621622623624625626627628629630631632633634635636637638639\ +640641642643644645646647648649650651652653654655656657658659\ +660661662663664665666667668669670671672673674675676677678679\ +680681682683684685686687688689690691692693694695696697698699\ +700701702703704705706707708709710711712713714715716717718719\ 
+720721722723724725726727728729730731732733734735736737738739\ +740741742743744745746747748749750751752753754755756757758759\ +760761762763764765766767768769770771772773774775776777778779\ +780781782783784785786787788789790791792793794795796797798799\ +800801802803804805806807808809810811812813814815816817818819\ +820821822823824825826827828829830831832833834835836837838839\ +840841842843844845846847848849850851852853854855856857858859\ +860861862863864865866867868869870871872873874875876877878879\ +880881882883884885886887888889890891892893894895896897898899\ +900901902903904905906907908909910911912913914915916917918919\ +920921922923924925926927928929930931932933934935936937938939\ +940941942943944945946947948949950951952953954955956957958959\ +960961962963964965966967968969970971972973974975976977978979\ +980981982983984985986987988989990991992993994995996997998999"; diff --git a/.cargo-vendor/http-0.2.12/src/uri/authority.rs b/.cargo-vendor/http-0.2.12/src/uri/authority.rs new file mode 100644 index 0000000000..f41ddd19cb --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/uri/authority.rs @@ -0,0 +1,684 @@ +use std::convert::TryFrom; +use std::hash::{Hash, Hasher}; +use std::str::FromStr; +use std::{cmp, fmt, str}; + +use bytes::Bytes; + +use super::{ErrorKind, InvalidUri, Port, URI_CHARS}; +use crate::byte_str::ByteStr; + +/// Represents the authority component of a URI. +#[derive(Clone)] +pub struct Authority { + pub(super) data: ByteStr, +} + +impl Authority { + pub(super) fn empty() -> Self { + Authority { + data: ByteStr::new(), + } + } + + // Not public while `bytes` is unstable. + pub(super) fn from_shared(s: Bytes) -> Result { + // Precondition on create_authority: trivially satisfied by the + // identity clousre + create_authority(s, |s| s) + } + + /// Attempt to convert an `Authority` from a static string. + /// + /// This function will not perform any copying, and the string will be + /// checked if it is empty or contains an invalid character. 
+ /// + /// # Panics + /// + /// This function panics if the argument contains invalid characters or + /// is empty. + /// + /// # Examples + /// + /// ``` + /// # use http::uri::Authority; + /// let authority = Authority::from_static("example.com"); + /// assert_eq!(authority.host(), "example.com"); + /// ``` + pub fn from_static(src: &'static str) -> Self { + Authority::from_shared(Bytes::from_static(src.as_bytes())) + .expect("static str is not valid authority") + } + + /// Attempt to convert a `Bytes` buffer to a `Authority`. + /// + /// This will try to prevent a copy if the type passed is the type used + /// internally, and will copy the data if it is not. + pub fn from_maybe_shared(src: T) -> Result + where + T: AsRef<[u8]> + 'static, + { + if_downcast_into!(T, Bytes, src, { + return Authority::from_shared(src); + }); + + Authority::try_from(src.as_ref()) + } + + // Note: this may return an *empty* Authority. You might want `parse_non_empty`. + // Postcondition: for all Ok() returns, s[..ret.unwrap()] is valid UTF-8 where + // ret is the return value. + pub(super) fn parse(s: &[u8]) -> Result { + let mut colon_cnt = 0u32; + let mut start_bracket = false; + let mut end_bracket = false; + let mut has_percent = false; + let mut end = s.len(); + let mut at_sign_pos = None; + const MAX_COLONS: u32 = 8; // e.g., [FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80 + + // Among other things, this loop checks that every byte in s up to the + // first '/', '?', or '#' is a valid URI character (or in some contexts, + // a '%'). This means that each such byte is a valid single-byte UTF-8 + // code point. + for (i, &b) in s.iter().enumerate() { + match URI_CHARS[b as usize] { + b'/' | b'?' | b'#' => { + end = i; + break; + } + b':' => { + if colon_cnt >= MAX_COLONS { + return Err(ErrorKind::InvalidAuthority.into()); + } + colon_cnt += 1; + } + b'[' => { + if has_percent || start_bracket { + // Something other than the userinfo has a `%`, so reject it. 
+ return Err(ErrorKind::InvalidAuthority.into()); + } + start_bracket = true; + } + b']' => { + if (!start_bracket) || end_bracket { + return Err(ErrorKind::InvalidAuthority.into()); + } + end_bracket = true; + + // Those were part of an IPv6 hostname, so forget them... + colon_cnt = 0; + has_percent = false; + } + b'@' => { + at_sign_pos = Some(i); + + // Those weren't a port colon, but part of the + // userinfo, so it needs to be forgotten. + colon_cnt = 0; + has_percent = false; + } + 0 if b == b'%' => { + // Per https://tools.ietf.org/html/rfc3986#section-3.2.1 and + // https://url.spec.whatwg.org/#authority-state + // the userinfo can have a percent-encoded username and password, + // so record that a `%` was found. If this turns out to be + // part of the userinfo, this flag will be cleared. + // Also per https://tools.ietf.org/html/rfc6874, percent-encoding can + // be used to indicate a zone identifier. + // If the flag hasn't been cleared at the end, that means this + // was part of the hostname (and not part of an IPv6 address), and + // will fail with an error. + has_percent = true; + } + 0 => { + return Err(ErrorKind::InvalidUriChar.into()); + } + _ => {} + } + } + + if start_bracket ^ end_bracket { + return Err(ErrorKind::InvalidAuthority.into()); + } + + if colon_cnt > 1 { + // Things like 'localhost:8080:3030' are rejected. + return Err(ErrorKind::InvalidAuthority.into()); + } + + if end > 0 && at_sign_pos == Some(end - 1) { + // If there's nothing after an `@`, this is bonkers. + return Err(ErrorKind::InvalidAuthority.into()); + } + + if has_percent { + // Something after the userinfo has a `%`, so reject it. + return Err(ErrorKind::InvalidAuthority.into()); + } + + Ok(end) + } + + // Parse bytes as an Authority, not allowing an empty string. + // + // This should be used by functions that allow a user to parse + // an `Authority` by itself. 
+ // + // Postcondition: for all Ok() returns, s[..ret.unwrap()] is valid UTF-8 where + // ret is the return value. + fn parse_non_empty(s: &[u8]) -> Result { + if s.is_empty() { + return Err(ErrorKind::Empty.into()); + } + Authority::parse(s) + } + + /// Get the host of this `Authority`. + /// + /// The host subcomponent of authority is identified by an IP literal + /// encapsulated within square brackets, an IPv4 address in dotted- decimal + /// form, or a registered name. The host subcomponent is **case-insensitive**. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |---------| + /// | + /// host + /// ``` + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let authority: Authority = "example.org:80".parse().unwrap(); + /// + /// assert_eq!(authority.host(), "example.org"); + /// ``` + #[inline] + pub fn host(&self) -> &str { + host(self.as_str()) + } + + /// Get the port part of this `Authority`. + /// + /// The port subcomponent of authority is designated by an optional port + /// number following the host and delimited from it by a single colon (":") + /// character. It can be turned into a decimal port number with the `as_u16` + /// method or as a `str` with the `as_str` method. 
+ /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-| + /// | + /// port + /// ``` + /// + /// # Examples + /// + /// Authority with port + /// + /// ``` + /// # use http::uri::Authority; + /// let authority: Authority = "example.org:80".parse().unwrap(); + /// + /// let port = authority.port().unwrap(); + /// assert_eq!(port.as_u16(), 80); + /// assert_eq!(port.as_str(), "80"); + /// ``` + /// + /// Authority without port + /// + /// ``` + /// # use http::uri::Authority; + /// let authority: Authority = "example.org".parse().unwrap(); + /// + /// assert!(authority.port().is_none()); + /// ``` + pub fn port(&self) -> Option> { + let bytes = self.as_str(); + bytes + .rfind(":") + .and_then(|i| Port::from_str(&bytes[i + 1..]).ok()) + } + + /// Get the port of this `Authority` as a `u16`. + /// + /// # Example + /// + /// ``` + /// # use http::uri::Authority; + /// let authority: Authority = "example.org:80".parse().unwrap(); + /// + /// assert_eq!(authority.port_u16(), Some(80)); + /// ``` + pub fn port_u16(&self) -> Option { + self.port().and_then(|p| Some(p.as_u16())) + } + + /// Return a str representation of the authority + #[inline] + pub fn as_str(&self) -> &str { + &self.data[..] + } +} + +// Purposefully not public while `bytes` is unstable. 
+// impl TryFrom for Authority + +impl AsRef for Authority { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl PartialEq for Authority { + fn eq(&self, other: &Authority) -> bool { + self.data.eq_ignore_ascii_case(&other.data) + } +} + +impl Eq for Authority {} + +/// Case-insensitive equality +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Authority; +/// let authority: Authority = "HELLO.com".parse().unwrap(); +/// assert_eq!(authority, "hello.coM"); +/// assert_eq!("hello.com", authority); +/// ``` +impl PartialEq for Authority { + fn eq(&self, other: &str) -> bool { + self.data.eq_ignore_ascii_case(other) + } +} + +impl PartialEq for str { + fn eq(&self, other: &Authority) -> bool { + self.eq_ignore_ascii_case(other.as_str()) + } +} + +impl<'a> PartialEq for &'a str { + fn eq(&self, other: &Authority) -> bool { + self.eq_ignore_ascii_case(other.as_str()) + } +} + +impl<'a> PartialEq<&'a str> for Authority { + fn eq(&self, other: &&'a str) -> bool { + self.data.eq_ignore_ascii_case(other) + } +} + +impl PartialEq for Authority { + fn eq(&self, other: &String) -> bool { + self.data.eq_ignore_ascii_case(other.as_str()) + } +} + +impl PartialEq for String { + fn eq(&self, other: &Authority) -> bool { + self.as_str().eq_ignore_ascii_case(other.as_str()) + } +} + +/// Case-insensitive ordering +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Authority; +/// let authority: Authority = "DEF.com".parse().unwrap(); +/// assert!(authority < "ghi.com"); +/// assert!(authority > "abc.com"); +/// ``` +impl PartialOrd for Authority { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for Authority { + fn partial_cmp(&self, other: &str) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = 
other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for str { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl<'a> PartialOrd for &'a str { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl<'a> PartialOrd<&'a str> for Authority { + fn partial_cmp(&self, other: &&'a str) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for Authority { + fn partial_cmp(&self, other: &String) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for String { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +/// Case-insensitive hashing +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Authority; +/// # use std::hash::{Hash, Hasher}; +/// # use std::collections::hash_map::DefaultHasher; +/// +/// let a: Authority = "HELLO.com".parse().unwrap(); +/// let b: Authority = "hello.coM".parse().unwrap(); +/// +/// let mut s = DefaultHasher::new(); +/// a.hash(&mut s); +/// let a = s.finish(); +/// +/// let mut s = DefaultHasher::new(); +/// b.hash(&mut s); +/// let b = s.finish(); +/// +/// assert_eq!(a, b); +/// ``` +impl Hash for Authority { + fn hash(&self, 
state: &mut H) + where + H: Hasher, + { + self.data.len().hash(state); + for &b in self.data.as_bytes() { + state.write_u8(b.to_ascii_lowercase()); + } + } +} + +impl<'a> TryFrom<&'a [u8]> for Authority { + type Error = InvalidUri; + #[inline] + fn try_from(s: &'a [u8]) -> Result { + // parse first, and only turn into Bytes if valid + + // Preconditon on create_authority: copy_from_slice() copies all of + // bytes from the [u8] parameter into a new Bytes + create_authority(s, |s| Bytes::copy_from_slice(s)) + } +} + +impl<'a> TryFrom<&'a str> for Authority { + type Error = InvalidUri; + #[inline] + fn try_from(s: &'a str) -> Result { + TryFrom::try_from(s.as_bytes()) + } +} + +impl TryFrom> for Authority { + type Error = InvalidUri; + + #[inline] + fn try_from(vec: Vec) -> Result { + Authority::from_shared(vec.into()) + } +} + +impl TryFrom for Authority { + type Error = InvalidUri; + + #[inline] + fn try_from(t: String) -> Result { + Authority::from_shared(t.into()) + } +} + +impl FromStr for Authority { + type Err = InvalidUri; + + fn from_str(s: &str) -> Result { + TryFrom::try_from(s) + } +} + +impl fmt::Debug for Authority { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +impl fmt::Display for Authority { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +fn host(auth: &str) -> &str { + let host_port = auth + .rsplitn(2, '@') + .next() + .expect("split always has at least 1 item"); + + if host_port.as_bytes()[0] == b'[' { + let i = host_port + .find(']') + .expect("parsing should validate brackets"); + // ..= ranges aren't available in 1.20, our minimum Rust version... + &host_port[0..i + 1] + } else { + host_port + .split(':') + .next() + .expect("split always has at least 1 item") + } +} + +// Precondition: f converts all of the bytes in the passed in B into the +// returned Bytes. 
+fn create_authority(b: B, f: F) -> Result +where + B: AsRef<[u8]>, + F: FnOnce(B) -> Bytes, +{ + let s = b.as_ref(); + let authority_end = Authority::parse_non_empty(s)?; + + if authority_end != s.len() { + return Err(ErrorKind::InvalidUriChar.into()); + } + + let bytes = f(b); + + Ok(Authority { + // Safety: the postcondition on parse_non_empty() and the check against + // s.len() ensure that b is valid UTF-8. The precondition on f ensures + // that this is carried through to bytes. + data: unsafe { ByteStr::from_utf8_unchecked(bytes) }, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_empty_string_is_error() { + let err = Authority::parse_non_empty(b"").unwrap_err(); + assert_eq!(err.0, ErrorKind::Empty); + } + + #[test] + fn equal_to_self_of_same_authority() { + let authority1: Authority = "example.com".parse().unwrap(); + let authority2: Authority = "EXAMPLE.COM".parse().unwrap(); + assert_eq!(authority1, authority2); + assert_eq!(authority2, authority1); + } + + #[test] + fn not_equal_to_self_of_different_authority() { + let authority1: Authority = "example.com".parse().unwrap(); + let authority2: Authority = "test.com".parse().unwrap(); + assert_ne!(authority1, authority2); + assert_ne!(authority2, authority1); + } + + #[test] + fn equates_with_a_str() { + let authority: Authority = "example.com".parse().unwrap(); + assert_eq!(&authority, "EXAMPLE.com"); + assert_eq!("EXAMPLE.com", &authority); + assert_eq!(authority, "EXAMPLE.com"); + assert_eq!("EXAMPLE.com", authority); + } + + #[test] + fn from_static_equates_with_a_str() { + let authority = Authority::from_static("example.com"); + assert_eq!(authority, "example.com"); + } + + #[test] + fn not_equal_with_a_str_of_a_different_authority() { + let authority: Authority = "example.com".parse().unwrap(); + assert_ne!(&authority, "test.com"); + assert_ne!("test.com", &authority); + assert_ne!(authority, "test.com"); + assert_ne!("test.com", authority); + } + + #[test] + fn 
equates_with_a_string() { + let authority: Authority = "example.com".parse().unwrap(); + assert_eq!(authority, "EXAMPLE.com".to_string()); + assert_eq!("EXAMPLE.com".to_string(), authority); + } + + #[test] + fn equates_with_a_string_of_a_different_authority() { + let authority: Authority = "example.com".parse().unwrap(); + assert_ne!(authority, "test.com".to_string()); + assert_ne!("test.com".to_string(), authority); + } + + #[test] + fn compares_to_self() { + let authority1: Authority = "abc.com".parse().unwrap(); + let authority2: Authority = "def.com".parse().unwrap(); + assert!(authority1 < authority2); + assert!(authority2 > authority1); + } + + #[test] + fn compares_with_a_str() { + let authority: Authority = "def.com".parse().unwrap(); + // with ref + assert!(&authority < "ghi.com"); + assert!("ghi.com" > &authority); + assert!(&authority > "abc.com"); + assert!("abc.com" < &authority); + + // no ref + assert!(authority < "ghi.com"); + assert!("ghi.com" > authority); + assert!(authority > "abc.com"); + assert!("abc.com" < authority); + } + + #[test] + fn compares_with_a_string() { + let authority: Authority = "def.com".parse().unwrap(); + assert!(authority < "ghi.com".to_string()); + assert!("ghi.com".to_string() > authority); + assert!(authority > "abc.com".to_string()); + assert!("abc.com".to_string() < authority); + } + + #[test] + fn allows_percent_in_userinfo() { + let authority_str = "a%2f:b%2f@example.com"; + let authority: Authority = authority_str.parse().unwrap(); + assert_eq!(authority, authority_str); + } + + #[test] + fn rejects_percent_in_hostname() { + let err = Authority::parse_non_empty(b"example%2f.com").unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidAuthority); + + let err = Authority::parse_non_empty(b"a%2f:b%2f@example%2f.com").unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidAuthority); + } + + #[test] + fn allows_percent_in_ipv6_address() { + let authority_str = "[fe80::1:2:3:4%25eth0]"; + let result: Authority = 
authority_str.parse().unwrap(); + assert_eq!(result, authority_str); + } + + #[test] + fn reject_obviously_invalid_ipv6_address() { + let err = Authority::parse_non_empty(b"[0:1:2:3:4:5:6:7:8:9:10:11:12:13:14]").unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidAuthority); + } + + #[test] + fn rejects_percent_outside_ipv6_address() { + let err = Authority::parse_non_empty(b"1234%20[fe80::1:2:3:4]").unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidAuthority); + + let err = Authority::parse_non_empty(b"[fe80::1:2:3:4]%20").unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidAuthority); + } + + #[test] + fn rejects_invalid_utf8() { + let err = Authority::try_from([0xc0u8].as_ref()).unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidUriChar); + + let err = Authority::from_shared(Bytes::from_static([0xc0u8].as_ref())).unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidUriChar); + } + + #[test] + fn rejects_invalid_use_of_brackets() { + let err = Authority::parse_non_empty(b"[]@[").unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidAuthority); + + // reject tie-fighter + let err = Authority::parse_non_empty(b"]o[").unwrap_err(); + assert_eq!(err.0, ErrorKind::InvalidAuthority); + } +} diff --git a/.cargo-vendor/http-0.2.12/src/uri/builder.rs b/.cargo-vendor/http-0.2.12/src/uri/builder.rs new file mode 100644 index 0000000000..825c0fafcc --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/uri/builder.rs @@ -0,0 +1,197 @@ +use std::convert::{TryFrom, TryInto}; + +use super::{Authority, Parts, PathAndQuery, Scheme}; +use crate::Uri; + +/// A builder for `Uri`s. +/// +/// This type can be used to construct an instance of `Uri` +/// through a builder pattern. +#[derive(Debug)] +pub struct Builder { + parts: Result, +} + +impl Builder { + /// Creates a new default instance of `Builder` to construct a `Uri`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let uri = uri::Builder::new() + /// .scheme("https") + /// .authority("hyper.rs") + /// .path_and_query("/") + /// .build() + /// .unwrap(); + /// ``` + #[inline] + pub fn new() -> Builder { + Builder::default() + } + + /// Set the `Scheme` for this URI. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let mut builder = uri::Builder::new(); + /// builder.scheme("https"); + /// ``` + pub fn scheme(self, scheme: T) -> Self + where + Scheme: TryFrom, + >::Error: Into, + { + self.map(move |mut parts| { + let scheme = scheme.try_into().map_err(Into::into)?; + parts.scheme = Some(scheme); + Ok(parts) + }) + } + + /// Set the `Authority` for this URI. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let uri = uri::Builder::new() + /// .authority("tokio.rs") + /// .build() + /// .unwrap(); + /// ``` + pub fn authority(self, auth: T) -> Self + where + Authority: TryFrom, + >::Error: Into, + { + self.map(move |mut parts| { + let auth = auth.try_into().map_err(Into::into)?; + parts.authority = Some(auth); + Ok(parts) + }) + } + + /// Set the `PathAndQuery` for this URI. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let uri = uri::Builder::new() + /// .path_and_query("/hello?foo=bar") + /// .build() + /// .unwrap(); + /// ``` + pub fn path_and_query(self, p_and_q: T) -> Self + where + PathAndQuery: TryFrom, + >::Error: Into, + { + self.map(move |mut parts| { + let p_and_q = p_and_q.try_into().map_err(Into::into)?; + parts.path_and_query = Some(p_and_q); + Ok(parts) + }) + } + + /// Consumes this builder, and tries to construct a valid `Uri` from + /// the configured pieces. + /// + /// # Errors + /// + /// This function may return an error if any previously configured argument + /// failed to parse or get converted to the internal representation. 
For + /// example if an invalid `scheme` was specified via `scheme("!@#%/^")` + /// the error will be returned when this function is called rather than + /// when `scheme` was called. + /// + /// Additionally, the various forms of URI require certain combinations of + /// parts to be set to be valid. If the parts don't fit into any of the + /// valid forms of URI, a new error is returned. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let uri = Uri::builder() + /// .build() + /// .unwrap(); + /// ``` + pub fn build(self) -> Result { + let parts = self.parts?; + Uri::from_parts(parts).map_err(Into::into) + } + + // private + + fn map(self, func: F) -> Self + where + F: FnOnce(Parts) -> Result, + { + + Builder { + parts: self.parts.and_then(func), + } + } +} + +impl Default for Builder { + #[inline] + fn default() -> Builder { + Builder { + parts: Ok(Parts::default()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn build_from_str() { + let uri = Builder::new() + .scheme(Scheme::HTTP) + .authority("hyper.rs") + .path_and_query("/foo?a=1") + .build() + .unwrap(); + assert_eq!(uri.scheme_str(), Some("http")); + assert_eq!(uri.authority().unwrap().host(), "hyper.rs"); + assert_eq!(uri.path(), "/foo"); + assert_eq!(uri.query(), Some("a=1")); + } + + #[test] + fn build_from_string() { + for i in 1..10 { + let uri = Builder::new() + .path_and_query(format!("/foo?a={}", i)) + .build() + .unwrap(); + let expected_query = format!("a={}", i); + assert_eq!(uri.path(), "/foo"); + assert_eq!(uri.query(), Some(expected_query.as_str())); + } + } + + #[test] + fn build_from_string_ref() { + for i in 1..10 { + let p_a_q = format!("/foo?a={}", i); + let uri = Builder::new().path_and_query(&p_a_q).build().unwrap(); + let expected_query = format!("a={}", i); + assert_eq!(uri.path(), "/foo"); + assert_eq!(uri.query(), Some(expected_query.as_str())); + } + } +} diff --git a/.cargo-vendor/http-0.2.12/src/uri/mod.rs 
b/.cargo-vendor/http-0.2.12/src/uri/mod.rs new file mode 100644 index 0000000000..5ebd47b6f5 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/uri/mod.rs @@ -0,0 +1,1118 @@ +//! URI component of request and response lines +//! +//! This module primarily contains the `Uri` type which is a component of all +//! HTTP requests and also reexports this type at the root of the crate. A URI +//! is not always a "full URL" in the sense of something you'd type into a web +//! browser, but HTTP requests may only have paths on servers but may have full +//! schemes and hostnames on clients. +//! +//! # Examples +//! +//! ``` +//! use http::Uri; +//! +//! let uri = "/foo/bar?baz".parse::().unwrap(); +//! assert_eq!(uri.path(), "/foo/bar"); +//! assert_eq!(uri.query(), Some("baz")); +//! assert_eq!(uri.host(), None); +//! +//! let uri = "https://www.rust-lang.org/install.html".parse::().unwrap(); +//! assert_eq!(uri.scheme_str(), Some("https")); +//! assert_eq!(uri.host(), Some("www.rust-lang.org")); +//! assert_eq!(uri.path(), "/install.html"); +//! ``` + +use crate::byte_str::ByteStr; +use std::convert::TryFrom; + +use bytes::Bytes; + +use std::error::Error; +use std::hash::{Hash, Hasher}; +use std::str::{self, FromStr}; +use std::{fmt, u16, u8}; + +use self::scheme::Scheme2; + +pub use self::authority::Authority; +pub use self::builder::Builder; +pub use self::path::PathAndQuery; +pub use self::port::Port; +pub use self::scheme::Scheme; + +mod authority; +mod builder; +mod path; +mod port; +mod scheme; +#[cfg(test)] +mod tests; + +/// The URI component of a request. +/// +/// For HTTP 1, this is included as part of the request line. From Section 5.3, +/// Request Target: +/// +/// > Once an inbound connection is obtained, the client sends an HTTP +/// > request message (Section 3) with a request-target derived from the +/// > target URI. 
There are four distinct formats for the request-target, +/// > depending on both the method being requested and whether the request +/// > is to a proxy. +/// > +/// > ```notrust +/// > request-target = origin-form +/// > / absolute-form +/// > / authority-form +/// > / asterisk-form +/// > ``` +/// +/// The URI is structured as follows: +/// +/// ```notrust +/// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 +/// |-| |-------------------------------||--------| |-------------------| |-----| +/// | | | | | +/// scheme authority path query fragment +/// ``` +/// +/// For HTTP 2.0, the URI is encoded using pseudoheaders. +/// +/// # Examples +/// +/// ``` +/// use http::Uri; +/// +/// let uri = "/foo/bar?baz".parse::().unwrap(); +/// assert_eq!(uri.path(), "/foo/bar"); +/// assert_eq!(uri.query(), Some("baz")); +/// assert_eq!(uri.host(), None); +/// +/// let uri = "https://www.rust-lang.org/install.html".parse::().unwrap(); +/// assert_eq!(uri.scheme_str(), Some("https")); +/// assert_eq!(uri.host(), Some("www.rust-lang.org")); +/// assert_eq!(uri.path(), "/install.html"); +/// ``` +#[derive(Clone)] +pub struct Uri { + scheme: Scheme, + authority: Authority, + path_and_query: PathAndQuery, +} + +/// The various parts of a URI. +/// +/// This struct is used to provide to and retrieve from a URI. +#[derive(Debug, Default)] +pub struct Parts { + /// The scheme component of a URI + pub scheme: Option, + + /// The authority component of a URI + pub authority: Option, + + /// The origin-form component of a URI + pub path_and_query: Option, + + /// Allow extending in the future + _priv: (), +} + +/// An error resulting from a failed attempt to construct a URI. +#[derive(Debug)] +pub struct InvalidUri(ErrorKind); + +/// An error resulting from a failed attempt to construct a URI. 
+#[derive(Debug)] +pub struct InvalidUriParts(InvalidUri); + +#[derive(Debug, Eq, PartialEq)] +enum ErrorKind { + InvalidUriChar, + InvalidScheme, + InvalidAuthority, + InvalidPort, + InvalidFormat, + SchemeMissing, + AuthorityMissing, + PathAndQueryMissing, + TooLong, + Empty, + SchemeTooLong, +} + +// u16::MAX is reserved for None +const MAX_LEN: usize = (u16::MAX - 1) as usize; + +// URI_CHARS is a table of valid characters in a URI. An entry in the table is +// 0 for invalid characters. For valid characters the entry is itself (i.e. +// the entry for 33 is b'!' because b'!' == 33u8). An important characteristic +// of this table is that all entries above 127 are invalid. This makes all of the +// valid entries a valid single-byte UTF-8 code point. This means that a slice +// of such valid entries is valid UTF-8. +const URI_CHARS: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, b'!', 0, b'#', b'$', 0, b'&', b'\'', // 3x + b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', // 5x + 0, b'=', 0, b'?', b'@', b'A', b'B', b'C', b'D', b'E', // 6x + b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x + b'Z', b'[', 0, b']', 0, b'_', 0, b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', 0, 0, 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +impl Uri { + /// Creates a new builder-style object to manufacture a `Uri`. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Uri`. + /// + /// # Examples + /// + /// ``` + /// use http::Uri; + /// + /// let uri = Uri::builder() + /// .scheme("https") + /// .authority("hyper.rs") + /// .path_and_query("/") + /// .build() + /// .unwrap(); + /// ``` + pub fn builder() -> Builder { + Builder::new() + } + + /// Attempt to convert a `Parts` into a `Uri`. + /// + /// # Examples + /// + /// Relative URI + /// + /// ``` + /// # use http::uri::*; + /// let mut parts = Parts::default(); + /// parts.path_and_query = Some("/foo".parse().unwrap()); + /// + /// let uri = Uri::from_parts(parts).unwrap(); + /// + /// assert_eq!(uri.path(), "/foo"); + /// + /// assert!(uri.scheme().is_none()); + /// assert!(uri.authority().is_none()); + /// ``` + /// + /// Absolute URI + /// + /// ``` + /// # use http::uri::*; + /// let mut parts = Parts::default(); + /// parts.scheme = Some("http".parse().unwrap()); + /// parts.authority = Some("foo.com".parse().unwrap()); + /// parts.path_and_query = Some("/foo".parse().unwrap()); + /// + /// let uri = Uri::from_parts(parts).unwrap(); + /// + /// assert_eq!(uri.scheme().unwrap().as_str(), "http"); + /// assert_eq!(uri.authority().unwrap(), "foo.com"); + /// assert_eq!(uri.path(), "/foo"); + /// ``` + pub fn from_parts(src: Parts) -> Result { + if src.scheme.is_some() { + if src.authority.is_none() { + return Err(ErrorKind::AuthorityMissing.into()); + } + + if src.path_and_query.is_none() { + return Err(ErrorKind::PathAndQueryMissing.into()); + } + } else { + if src.authority.is_some() && src.path_and_query.is_some() { + return Err(ErrorKind::SchemeMissing.into()); + } + } + + let scheme = match src.scheme { + Some(scheme) => scheme, + None => Scheme { + inner:
Scheme2::None, + }, + }; + + let authority = match src.authority { + Some(authority) => authority, + None => Authority::empty(), + }; + + let path_and_query = match src.path_and_query { + Some(path_and_query) => path_and_query, + None => PathAndQuery::empty(), + }; + + Ok(Uri { + scheme: scheme, + authority: authority, + path_and_query: path_and_query, + }) + } + + /// Attempt to convert a `Bytes` buffer to a `Uri`. + /// + /// This will try to prevent a copy if the type passed is the type used + /// internally, and will copy the data if it is not. + pub fn from_maybe_shared(src: T) -> Result + where + T: AsRef<[u8]> + 'static, + { + if_downcast_into!(T, Bytes, src, { + return Uri::from_shared(src); + }); + + Uri::try_from(src.as_ref()) + } + + // Not public while `bytes` is unstable. + fn from_shared(s: Bytes) -> Result { + use self::ErrorKind::*; + + if s.len() > MAX_LEN { + return Err(TooLong.into()); + } + + match s.len() { + 0 => { + return Err(Empty.into()); + } + 1 => match s[0] { + b'/' => { + return Ok(Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::slash(), + }); + } + b'*' => { + return Ok(Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::star(), + }); + } + _ => { + let authority = Authority::from_shared(s)?; + + return Ok(Uri { + scheme: Scheme::empty(), + authority: authority, + path_and_query: PathAndQuery::empty(), + }); + } + }, + _ => {} + } + + if s[0] == b'/' { + return Ok(Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::from_shared(s)?, + }); + } + + parse_full(s) + } + + /// Convert a `Uri` from a static string. + /// + /// This function will not perform any copying, however the string is + /// checked to ensure that it is valid. + /// + /// # Panics + /// + /// This function panics if the argument is an invalid URI. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::uri::Uri; + /// let uri = Uri::from_static("http://example.com/foo"); + /// + /// assert_eq!(uri.host().unwrap(), "example.com"); + /// assert_eq!(uri.path(), "/foo"); + /// ``` + pub fn from_static(src: &'static str) -> Self { + let s = Bytes::from_static(src.as_bytes()); + match Uri::from_shared(s) { + Ok(uri) => uri, + Err(e) => panic!("static str is not valid URI: {}", e), + } + } + + /// Convert a `Uri` into `Parts`. + /// + /// # Note + /// + /// This is just an inherent method providing the same functionality as + /// `let parts: Parts = uri.into()` + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let uri: Uri = "/foo".parse().unwrap(); + /// + /// let parts = uri.into_parts(); + /// + /// assert_eq!(parts.path_and_query.unwrap(), "/foo"); + /// + /// assert!(parts.scheme.is_none()); + /// assert!(parts.authority.is_none()); + /// ``` + #[inline] + pub fn into_parts(self) -> Parts { + self.into() + } + + /// Returns the path & query components of the Uri + #[inline] + pub fn path_and_query(&self) -> Option<&PathAndQuery> { + if !self.scheme.inner.is_none() || self.authority.data.is_empty() { + Some(&self.path_and_query) + } else { + None + } + } + + /// Get the path of this `Uri`. + /// + /// Both relative and absolute URIs contain a path component, though it + /// might be the empty string. The path component is **case sensitive**. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |--------| + /// | + /// path + /// ``` + /// + /// If the URI is `*` then the path component is equal to `*`. 
+ /// + /// # Examples + /// + /// A relative URI + /// + /// ``` + /// # use http::Uri; + /// + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.path(), "/hello/world"); + /// ``` + /// + /// An absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.path(), "/hello/world"); + /// ``` + #[inline] + pub fn path(&self) -> &str { + if self.has_path() { + self.path_and_query.path() + } else { + "" + } + } + + /// Get the scheme of this `Uri`. + /// + /// The URI scheme refers to a specification for assigning identifiers + /// within that scheme. Only absolute URIs contain a scheme component, but + /// not all absolute URIs will contain a scheme component. Although scheme + /// names are case-insensitive, the canonical form is lowercase. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-| + /// | + /// scheme + /// ``` + /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// use http::uri::{Scheme, Uri}; + /// + /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.scheme(), Some(&Scheme::HTTP)); + /// ``` + /// + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.scheme().is_none()); + /// ``` + #[inline] + pub fn scheme(&self) -> Option<&Scheme> { + if self.scheme.inner.is_none() { + None + } else { + Some(&self.scheme) + } + } + + /// Get the scheme of this `Uri` as a `&str`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.scheme_str(), Some("http")); + /// ``` + #[inline] + pub fn scheme_str(&self) -> Option<&str> { + if self.scheme.inner.is_none() { + None + } else { + Some(self.scheme.as_str()) + } + } + + /// Get the authority of this `Uri`. + /// + /// The authority is a hierarchical element for naming authority such that + /// the remainder of the URI is delegated to that authority. For HTTP, the + /// authority consists of the host and port. The host portion of the + /// authority is **case-insensitive**. + /// + /// The authority also includes a `username:password` component, however + /// the use of this is deprecated and should be avoided. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-------------------------------| + /// | + /// authority + /// ``` + /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.authority().map(|a| a.as_str()), Some("example.org:80")); + /// ``` + /// + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.authority().is_none()); + /// ``` + #[inline] + pub fn authority(&self) -> Option<&Authority> { + if self.authority.data.is_empty() { + None + } else { + Some(&self.authority) + } + } + + /// Get the host of this `Uri`. + /// + /// The host subcomponent of authority is identified by an IP literal + /// encapsulated within square brackets, an IPv4 address in dotted- decimal + /// form, or a registered name. The host subcomponent is **case-insensitive**. 
+ /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |---------| + /// | + /// host + /// ``` + /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.host(), Some("example.org")); + /// ``` + /// + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.host().is_none()); + /// ``` + #[inline] + pub fn host(&self) -> Option<&str> { + self.authority().map(|a| a.host()) + } + + /// Get the port part of this `Uri`. + /// + /// The port subcomponent of authority is designated by an optional port + /// number following the host and delimited from it by a single colon (":") + /// character. It can be turned into a decimal port number with the `as_u16` + /// method or as a `str` with the `as_str` method. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-| + /// | + /// port + /// ``` + /// + /// # Examples + /// + /// Absolute URI with port + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); + /// + /// let port = uri.port().unwrap(); + /// assert_eq!(port.as_u16(), 80); + /// ``` + /// + /// Absolute URI without port + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); + /// + /// assert!(uri.port().is_none()); + /// ``` + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.port().is_none()); + /// ``` + pub fn port(&self) -> Option> { + self.authority().and_then(|a| a.port()) + } + + /// Get the port of this `Uri` as a `u16`. 
+ /// + /// + /// # Example + /// + /// ``` + /// # use http::{Uri, uri::Port}; + /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.port_u16(), Some(80)); + /// ``` + pub fn port_u16(&self) -> Option { + self.port().and_then(|p| Some(p.as_u16())) + } + + /// Get the query string of this `Uri`, starting after the `?`. + /// + /// The query component contains non-hierarchical data that, along with data + /// in the path component, serves to identify a resource within the scope of + /// the URI's scheme and naming authority (if any). The query component is + /// indicated by the first question mark ("?") character and terminated by a + /// number sign ("#") character or by the end of the URI. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-------------------| + /// | + /// query + /// ``` + /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world?key=value".parse().unwrap(); + /// + /// assert_eq!(uri.query(), Some("key=value")); + /// ``` + /// + /// Relative URI with a query string component + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world?key=value&foo=bar".parse().unwrap(); + /// + /// assert_eq!(uri.query(), Some("key=value&foo=bar")); + /// ``` + /// + /// Relative URI without a query string component + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.query().is_none()); + /// ``` + #[inline] + pub fn query(&self) -> Option<&str> { + self.path_and_query.query() + } + + fn has_path(&self) -> bool { + !self.path_and_query.data.is_empty() || !self.scheme.inner.is_none() + } +} + +impl<'a> TryFrom<&'a [u8]> for Uri { + type Error = InvalidUri; + + #[inline] + fn try_from(t: &'a [u8]) -> Result { + Uri::from_shared(Bytes::copy_from_slice(t)) + } +} + +impl<'a> TryFrom<&'a str> for 
Uri { + type Error = InvalidUri; + + #[inline] + fn try_from(t: &'a str) -> Result { + t.parse() + } +} + +impl<'a> TryFrom<&'a String> for Uri { + type Error = InvalidUri; + + #[inline] + fn try_from(t: &'a String) -> Result { + t.parse() + } +} + +impl TryFrom for Uri { + type Error = InvalidUri; + + #[inline] + fn try_from(t: String) -> Result { + Uri::from_shared(Bytes::from(t)) + } +} + +impl<'a> TryFrom> for Uri { + type Error = InvalidUri; + + #[inline] + fn try_from(vec: Vec) -> Result { + Uri::from_shared(Bytes::from(vec)) + } +} + +impl TryFrom for Uri { + type Error = InvalidUriParts; + + #[inline] + fn try_from(src: Parts) -> Result { + Uri::from_parts(src) + } +} + +impl<'a> TryFrom<&'a Uri> for Uri { + type Error = crate::Error; + + #[inline] + fn try_from(src: &'a Uri) -> Result { + Ok(src.clone()) + } +} + +/// Convert an `Authority` into a `Uri`. +impl From for Uri { + fn from(authority: Authority) -> Self { + Self { + scheme: Scheme::empty(), + authority, + path_and_query: PathAndQuery::empty(), + } + } +} + +/// Convert a `PathAndQuery` into a `Uri`. +impl From for Uri { + fn from(path_and_query: PathAndQuery) -> Self { + Self { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query, + } + } +} + +/// Convert a `Uri` into `Parts` +impl From for Parts { + fn from(src: Uri) -> Self { + let path_and_query = if src.has_path() { + Some(src.path_and_query) + } else { + None + }; + + let scheme = match src.scheme.inner { + Scheme2::None => None, + _ => Some(src.scheme), + }; + + let authority = if src.authority.data.is_empty() { + None + } else { + Some(src.authority) + }; + + Parts { + scheme: scheme, + authority: authority, + path_and_query: path_and_query, + _priv: (), + } + } +} + +fn parse_full(mut s: Bytes) -> Result { + // Parse the scheme + let scheme = match Scheme2::parse(&s[..])? 
{ + Scheme2::None => Scheme2::None, + Scheme2::Standard(p) => { + // TODO: use truncate + let _ = s.split_to(p.len() + 3); + Scheme2::Standard(p) + } + Scheme2::Other(n) => { + // Grab the protocol + let mut scheme = s.split_to(n + 3); + + // Strip ://, TODO: truncate + let _ = scheme.split_off(n); + + // Allocate the ByteStr + let val = unsafe { ByteStr::from_utf8_unchecked(scheme) }; + + Scheme2::Other(Box::new(val)) + } + }; + + // Find the end of the authority. The scheme will already have been + // extracted. + let authority_end = Authority::parse(&s[..])?; + + if scheme.is_none() { + if authority_end != s.len() { + return Err(ErrorKind::InvalidFormat.into()); + } + + let authority = Authority { + data: unsafe { ByteStr::from_utf8_unchecked(s) }, + }; + + return Ok(Uri { + scheme: scheme.into(), + authority: authority, + path_and_query: PathAndQuery::empty(), + }); + } + + // Authority is required when absolute + if authority_end == 0 { + return Err(ErrorKind::InvalidFormat.into()); + } + + let authority = s.split_to(authority_end); + let authority = Authority { + data: unsafe { ByteStr::from_utf8_unchecked(authority) }, + }; + + Ok(Uri { + scheme: scheme.into(), + authority: authority, + path_and_query: PathAndQuery::from_shared(s)?, + }) +} + +impl FromStr for Uri { + type Err = InvalidUri; + + #[inline] + fn from_str(s: &str) -> Result { + Uri::try_from(s.as_bytes()) + } +} + +impl PartialEq for Uri { + fn eq(&self, other: &Uri) -> bool { + if self.scheme() != other.scheme() { + return false; + } + + if self.authority() != other.authority() { + return false; + } + + if self.path() != other.path() { + return false; + } + + if self.query() != other.query() { + return false; + } + + true + } +} + +impl PartialEq for Uri { + fn eq(&self, other: &str) -> bool { + let mut other = other.as_bytes(); + let mut absolute = false; + + if let Some(scheme) = self.scheme() { + let scheme = scheme.as_str().as_bytes(); + absolute = true; + + if other.len() < scheme.len() + 
3 { + return false; + } + + if !scheme.eq_ignore_ascii_case(&other[..scheme.len()]) { + return false; + } + + other = &other[scheme.len()..]; + + if &other[..3] != b"://" { + return false; + } + + other = &other[3..]; + } + + if let Some(auth) = self.authority() { + let len = auth.data.len(); + absolute = true; + + if other.len() < len { + return false; + } + + if !auth.data.as_bytes().eq_ignore_ascii_case(&other[..len]) { + return false; + } + + other = &other[len..]; + } + + let path = self.path(); + + if other.len() < path.len() || path.as_bytes() != &other[..path.len()] { + if absolute && path == "/" { + // PathAndQuery can be omitted, fall through + } else { + return false; + } + } else { + other = &other[path.len()..]; + } + + if let Some(query) = self.query() { + if other.len() == 0 { + return query.len() == 0; + } + + if other[0] != b'?' { + return false; + } + + other = &other[1..]; + + if other.len() < query.len() { + return false; + } + + if query.as_bytes() != &other[..query.len()] { + return false; + } + + other = &other[query.len()..]; + } + + other.is_empty() || other[0] == b'#' + } +} + +impl PartialEq for str { + fn eq(&self, uri: &Uri) -> bool { + uri == self + } +} + +impl<'a> PartialEq<&'a str> for Uri { + fn eq(&self, other: &&'a str) -> bool { + self == *other + } +} + +impl<'a> PartialEq for &'a str { + fn eq(&self, uri: &Uri) -> bool { + uri == *self + } +} + +impl Eq for Uri {} + +/// Returns a `Uri` representing `/` +impl Default for Uri { + #[inline] + fn default() -> Uri { + Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::slash(), + } + } +} + +impl fmt::Display for Uri { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(scheme) = self.scheme() { + write!(f, "{}://", scheme)?; + } + + if let Some(authority) = self.authority() { + write!(f, "{}", authority)?; + } + + write!(f, "{}", self.path())?; + + if let Some(query) = self.query() { + write!(f, "?{}", query)?; 
+ } + + Ok(()) + } +} + +impl fmt::Debug for Uri { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl From for InvalidUri { + fn from(src: ErrorKind) -> InvalidUri { + InvalidUri(src) + } +} + +impl From for InvalidUriParts { + fn from(src: ErrorKind) -> InvalidUriParts { + InvalidUriParts(src.into()) + } +} + +impl InvalidUri { + fn s(&self) -> &str { + match self.0 { + ErrorKind::InvalidUriChar => "invalid uri character", + ErrorKind::InvalidScheme => "invalid scheme", + ErrorKind::InvalidAuthority => "invalid authority", + ErrorKind::InvalidPort => "invalid port", + ErrorKind::InvalidFormat => "invalid format", + ErrorKind::SchemeMissing => "scheme missing", + ErrorKind::AuthorityMissing => "authority missing", + ErrorKind::PathAndQueryMissing => "path missing", + ErrorKind::TooLong => "uri too long", + ErrorKind::Empty => "empty string", + ErrorKind::SchemeTooLong => "scheme too long", + } + } +} + +impl fmt::Display for InvalidUri { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.s().fmt(f) + } +} + +impl Error for InvalidUri {} + +impl fmt::Display for InvalidUriParts { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl Error for InvalidUriParts {} + +impl Hash for Uri { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + if !self.scheme.inner.is_none() { + self.scheme.hash(state); + state.write_u8(0xff); + } + + if let Some(auth) = self.authority() { + auth.hash(state); + } + + Hash::hash_slice(self.path().as_bytes(), state); + + if let Some(query) = self.query() { + b'?'.hash(state); + Hash::hash_slice(query.as_bytes(), state); + } + } +} diff --git a/.cargo-vendor/http-0.2.12/src/uri/path.rs b/.cargo-vendor/http-0.2.12/src/uri/path.rs new file mode 100644 index 0000000000..be2cb65c1b --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/uri/path.rs @@ -0,0 +1,564 @@ +use std::convert::TryFrom; +use std::str::FromStr; +use std::{cmp, fmt, 
hash, str}; + +use bytes::Bytes; + +use super::{ErrorKind, InvalidUri}; +use crate::byte_str::ByteStr; + +/// Represents the path component of a URI +#[derive(Clone)] +pub struct PathAndQuery { + pub(super) data: ByteStr, + pub(super) query: u16, +} + +const NONE: u16 = ::std::u16::MAX; + +impl PathAndQuery { + // Not public while `bytes` is unstable. + pub(super) fn from_shared(mut src: Bytes) -> Result { + let mut query = NONE; + let mut fragment = None; + + // block for iterator borrow + { + let mut iter = src.as_ref().iter().enumerate(); + + // path ... + for (i, &b) in &mut iter { + // See https://url.spec.whatwg.org/#path-state + match b { + b'?' => { + debug_assert_eq!(query, NONE); + query = i as u16; + break; + } + b'#' => { + fragment = Some(i); + break; + } + + // This is the range of bytes that don't need to be + // percent-encoded in the path. If it should have been + // percent-encoded, then error. + 0x21 | + 0x24..=0x3B | + 0x3D | + 0x40..=0x5F | + 0x61..=0x7A | + 0x7C | + 0x7E => {}, + + // These are code points that are supposed to be + // percent-encoded in the path but there are clients + // out there sending them as is and httparse accepts + // to parse those requests, so they are allowed here + // for parity. + // + // For reference, those are code points that are used + // to send requests with JSON directly embedded in + // the URI path. Yes, those things happen for real. + b'"' | + b'{' | b'}' => {}, + + _ => return Err(ErrorKind::InvalidUriChar.into()), + } + } + + // query ... + if query != NONE { + for (i, &b) in iter { + match b { + // While queries *should* be percent-encoded, most + // bytes are actually allowed... 
+ // See https://url.spec.whatwg.org/#query-state + // + // Allowed: 0x21 / 0x24 - 0x3B / 0x3D / 0x3F - 0x7E + 0x21 | + 0x24..=0x3B | + 0x3D | + 0x3F..=0x7E => {}, + + b'#' => { + fragment = Some(i); + break; + } + + _ => return Err(ErrorKind::InvalidUriChar.into()), + } + } + } + } + + if let Some(i) = fragment { + src.truncate(i); + } + + Ok(PathAndQuery { + data: unsafe { ByteStr::from_utf8_unchecked(src) }, + query: query, + }) + } + + /// Convert a `PathAndQuery` from a static string. + /// + /// This function will not perform any copying, however the string is + /// checked to ensure that it is valid. + /// + /// # Panics + /// + /// This function panics if the argument is an invalid path and query. + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let v = PathAndQuery::from_static("/hello?world"); + /// + /// assert_eq!(v.path(), "/hello"); + /// assert_eq!(v.query(), Some("world")); + /// ``` + #[inline] + pub fn from_static(src: &'static str) -> Self { + let src = Bytes::from_static(src.as_bytes()); + + PathAndQuery::from_shared(src).unwrap() + } + + /// Attempt to convert a `Bytes` buffer to a `PathAndQuery`. + /// + /// This will try to prevent a copy if the type passed is the type used + /// internally, and will copy the data if it is not. + pub fn from_maybe_shared(src: T) -> Result + where + T: AsRef<[u8]> + 'static, + { + if_downcast_into!(T, Bytes, src, { + return PathAndQuery::from_shared(src); + }); + + PathAndQuery::try_from(src.as_ref()) + } + + pub(super) fn empty() -> Self { + PathAndQuery { + data: ByteStr::new(), + query: NONE, + } + } + + pub(super) fn slash() -> Self { + PathAndQuery { + data: ByteStr::from_static("/"), + query: NONE, + } + } + + pub(super) fn star() -> Self { + PathAndQuery { + data: ByteStr::from_static("*"), + query: NONE, + } + } + + /// Returns the path component + /// + /// The path component is **case sensitive**. 
+ /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |--------| + /// | + /// path + /// ``` + /// + /// If the URI is `*` then the path component is equal to `*`. + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// + /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); + /// + /// assert_eq!(path_and_query.path(), "/hello/world"); + /// ``` + #[inline] + pub fn path(&self) -> &str { + let ret = if self.query == NONE { + &self.data[..] + } else { + &self.data[..self.query as usize] + }; + + if ret.is_empty() { + return "/"; + } + + ret + } + + /// Returns the query string component + /// + /// The query component contains non-hierarchical data that, along with data + /// in the path component, serves to identify a resource within the scope of + /// the URI's scheme and naming authority (if any). The query component is + /// indicated by the first question mark ("?") character and terminated by a + /// number sign ("#") character or by the end of the URI. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-------------------| + /// | + /// query + /// ``` + /// + /// # Examples + /// + /// With a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world?key=value&foo=bar".parse().unwrap(); + /// + /// assert_eq!(path_and_query.query(), Some("key=value&foo=bar")); + /// ``` + /// + /// Without a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); + /// + /// assert!(path_and_query.query().is_none()); + /// ``` + #[inline] + pub fn query(&self) -> Option<&str> { + if self.query == NONE { + None + } else { + let i = self.query + 1; + Some(&self.data[i as usize..]) + } + } + + /// Returns the path and query as a string component. 
+ /// + /// # Examples + /// + /// With a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world?key=value&foo=bar".parse().unwrap(); + /// + /// assert_eq!(path_and_query.as_str(), "/hello/world?key=value&foo=bar"); + /// ``` + /// + /// Without a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); + /// + /// assert_eq!(path_and_query.as_str(), "/hello/world"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + let ret = &self.data[..]; + if ret.is_empty() { + return "/"; + } + ret + } +} + +impl<'a> TryFrom<&'a [u8]> for PathAndQuery { + type Error = InvalidUri; + #[inline] + fn try_from(s: &'a [u8]) -> Result { + PathAndQuery::from_shared(Bytes::copy_from_slice(s)) + } +} + +impl<'a> TryFrom<&'a str> for PathAndQuery { + type Error = InvalidUri; + #[inline] + fn try_from(s: &'a str) -> Result { + TryFrom::try_from(s.as_bytes()) + } +} + +impl<'a> TryFrom> for PathAndQuery { + type Error = InvalidUri; + #[inline] + fn try_from(vec: Vec) -> Result { + PathAndQuery::from_shared(vec.into()) + } +} + +impl TryFrom for PathAndQuery { + type Error = InvalidUri; + #[inline] + fn try_from(s: String) -> Result { + PathAndQuery::from_shared(s.into()) + } +} + +impl TryFrom<&String> for PathAndQuery { + type Error = InvalidUri; + #[inline] + fn try_from(s: &String) -> Result { + TryFrom::try_from(s.as_bytes()) + } +} + +impl FromStr for PathAndQuery { + type Err = InvalidUri; + #[inline] + fn from_str(s: &str) -> Result { + TryFrom::try_from(s) + } +} + +impl fmt::Debug for PathAndQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for PathAndQuery { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + if !self.data.is_empty() { + match self.data.as_bytes()[0] { + b'/' | b'*' => write!(fmt, "{}", &self.data[..]), + _ => write!(fmt, 
"/{}", &self.data[..]), + } + } else { + write!(fmt, "/") + } + } +} + +impl hash::Hash for PathAndQuery { + fn hash(&self, state: &mut H) { + self.data.hash(state); + } +} + +// ===== PartialEq / PartialOrd ===== + +impl PartialEq for PathAndQuery { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self.data == other.data + } +} + +impl Eq for PathAndQuery {} + +impl PartialEq for PathAndQuery { + #[inline] + fn eq(&self, other: &str) -> bool { + self.as_str() == other + } +} + +impl<'a> PartialEq for &'a str { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self == &other.as_str() + } +} + +impl<'a> PartialEq<&'a str> for PathAndQuery { + #[inline] + fn eq(&self, other: &&'a str) -> bool { + self.as_str() == *other + } +} + +impl PartialEq for str { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self == other.as_str() + } +} + +impl PartialEq for PathAndQuery { + #[inline] + fn eq(&self, other: &String) -> bool { + self.as_str() == other.as_str() + } +} + +impl PartialEq for String { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self.as_str() == other.as_str() + } +} + +impl PartialOrd for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &PathAndQuery) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +impl PartialOrd for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &str) -> Option { + self.as_str().partial_cmp(other) + } +} + +impl PartialOrd for str { + #[inline] + fn partial_cmp(&self, other: &PathAndQuery) -> Option { + self.partial_cmp(other.as_str()) + } +} + +impl<'a> PartialOrd<&'a str> for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &&'a str) -> Option { + self.as_str().partial_cmp(*other) + } +} + +impl<'a> PartialOrd for &'a str { + #[inline] + fn partial_cmp(&self, other: &PathAndQuery) -> Option { + self.partial_cmp(&other.as_str()) + } +} + +impl PartialOrd for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &String) -> Option { + 
self.as_str().partial_cmp(other.as_str()) + } +} + +impl PartialOrd for String { + #[inline] + fn partial_cmp(&self, other: &PathAndQuery) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn equal_to_self_of_same_path() { + let p1: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + let p2: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_eq!(p1, p2); + assert_eq!(p2, p1); + } + + #[test] + fn not_equal_to_self_of_different_path() { + let p1: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + let p2: PathAndQuery = "/world&foo=bar".parse().unwrap(); + assert_ne!(p1, p2); + assert_ne!(p2, p1); + } + + #[test] + fn equates_with_a_str() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_eq!(&path_and_query, "/hello/world&foo=bar"); + assert_eq!("/hello/world&foo=bar", &path_and_query); + assert_eq!(path_and_query, "/hello/world&foo=bar"); + assert_eq!("/hello/world&foo=bar", path_and_query); + } + + #[test] + fn not_equal_with_a_str_of_a_different_path() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + // as a reference + assert_ne!(&path_and_query, "/hello&foo=bar"); + assert_ne!("/hello&foo=bar", &path_and_query); + // without reference + assert_ne!(path_and_query, "/hello&foo=bar"); + assert_ne!("/hello&foo=bar", path_and_query); + } + + #[test] + fn equates_with_a_string() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_eq!(path_and_query, "/hello/world&foo=bar".to_string()); + assert_eq!("/hello/world&foo=bar".to_string(), path_and_query); + } + + #[test] + fn not_equal_with_a_string_of_a_different_path() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_ne!(path_and_query, "/hello&foo=bar".to_string()); + assert_ne!("/hello&foo=bar".to_string(), path_and_query); + } + + #[test] + fn compares_to_self() { + let p1: 
PathAndQuery = "/a/world&foo=bar".parse().unwrap(); + let p2: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); + assert!(p1 < p2); + assert!(p2 > p1); + } + + #[test] + fn compares_with_a_str() { + let path_and_query: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); + // by ref + assert!(&path_and_query < "/c/world&foo=bar"); + assert!("/c/world&foo=bar" > &path_and_query); + assert!(&path_and_query > "/a/world&foo=bar"); + assert!("/a/world&foo=bar" < &path_and_query); + + // by val + assert!(path_and_query < "/c/world&foo=bar"); + assert!("/c/world&foo=bar" > path_and_query); + assert!(path_and_query > "/a/world&foo=bar"); + assert!("/a/world&foo=bar" < path_and_query); + } + + #[test] + fn compares_with_a_string() { + let path_and_query: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); + assert!(path_and_query < "/c/world&foo=bar".to_string()); + assert!("/c/world&foo=bar".to_string() > path_and_query); + assert!(path_and_query > "/a/world&foo=bar".to_string()); + assert!("/a/world&foo=bar".to_string() < path_and_query); + } + + #[test] + fn ignores_valid_percent_encodings() { + assert_eq!("/a%20b", pq("/a%20b?r=1").path()); + assert_eq!("qr=%31", pq("/a/b?qr=%31").query().unwrap()); + } + + #[test] + fn ignores_invalid_percent_encodings() { + assert_eq!("/a%%b", pq("/a%%b?r=1").path()); + assert_eq!("/aaa%", pq("/aaa%").path()); + assert_eq!("/aaa%", pq("/aaa%?r=1").path()); + assert_eq!("/aa%2", pq("/aa%2").path()); + assert_eq!("/aa%2", pq("/aa%2?r=1").path()); + assert_eq!("qr=%3", pq("/a/b?qr=%3").query().unwrap()); + } + + #[test] + fn json_is_fine() { + assert_eq!(r#"/{"bread":"baguette"}"#, pq(r#"/{"bread":"baguette"}"#).path()); + } + + fn pq(s: &str) -> PathAndQuery { + s.parse().expect(&format!("parsing {}", s)) + } +} diff --git a/.cargo-vendor/http-0.2.12/src/uri/port.rs b/.cargo-vendor/http-0.2.12/src/uri/port.rs new file mode 100644 index 0000000000..8f5c5f3f7d --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/uri/port.rs @@ -0,0 +1,151 
@@ +use std::fmt; + +use super::{ErrorKind, InvalidUri}; + +/// The port component of a URI. +pub struct Port { + port: u16, + repr: T, +} + +impl Port { + /// Returns the port number as a `u16`. + /// + /// # Examples + /// + /// Port as `u16`. + /// + /// ``` + /// # use http::uri::Authority; + /// let authority: Authority = "example.org:80".parse().unwrap(); + /// + /// let port = authority.port().unwrap(); + /// assert_eq!(port.as_u16(), 80); + /// ``` + pub fn as_u16(&self) -> u16 { + self.port + } +} + +impl Port +where + T: AsRef, +{ + /// Converts a `str` to a port number. + /// + /// The supplied `str` must be a valid u16. + pub(crate) fn from_str(bytes: T) -> Result { + bytes + .as_ref() + .parse::() + .map(|port| Port { port, repr: bytes }) + .map_err(|_| ErrorKind::InvalidPort.into()) + } + + /// Returns the port number as a `str`. + /// + /// # Examples + /// + /// Port as `str`. + /// + /// ``` + /// # use http::uri::Authority; + /// let authority: Authority = "example.org:80".parse().unwrap(); + /// + /// let port = authority.port().unwrap(); + /// assert_eq!(port.as_str(), "80"); + /// ``` + pub fn as_str(&self) -> &str { + self.repr.as_ref() + } +} + +impl fmt::Debug for Port +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Port").field(&self.port).finish() + } +} + +impl fmt::Display for Port { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Use `u16::fmt` so that it respects any formatting flags that + // may have been set (like padding, align, etc). 
+ fmt::Display::fmt(&self.port, f) + } +} + +impl From> for u16 { + fn from(port: Port) -> Self { + port.as_u16() + } +} + +impl AsRef for Port +where + T: AsRef, +{ + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl PartialEq> for Port { + fn eq(&self, other: &Port) -> bool { + self.port == other.port + } +} + +impl PartialEq for Port { + fn eq(&self, other: &u16) -> bool { + self.port == *other + } +} + +impl PartialEq> for u16 { + fn eq(&self, other: &Port) -> bool { + other.port == *self + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn partialeq_port() { + let port_a = Port::from_str("8080").unwrap(); + let port_b = Port::from_str("8080").unwrap(); + assert_eq!(port_a, port_b); + } + + #[test] + fn partialeq_port_different_reprs() { + let port_a = Port { + repr: "8081", + port: 8081, + }; + let port_b = Port { + repr: String::from("8081"), + port: 8081, + }; + assert_eq!(port_a, port_b); + assert_eq!(port_b, port_a); + } + + #[test] + fn partialeq_u16() { + let port = Port::from_str("8080").unwrap(); + // test equals in both directions + assert_eq!(port, 8080); + assert_eq!(8080, port); + } + + #[test] + fn u16_from_port() { + let port = Port::from_str("8080").unwrap(); + assert_eq!(8080, u16::from(port)); + } +} diff --git a/.cargo-vendor/http-0.2.12/src/uri/scheme.rs b/.cargo-vendor/http-0.2.12/src/uri/scheme.rs new file mode 100644 index 0000000000..682b11eeea --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/uri/scheme.rs @@ -0,0 +1,363 @@ +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::str::FromStr; + +use bytes::Bytes; + +use super::{ErrorKind, InvalidUri}; +use crate::byte_str::ByteStr; + +/// Represents the scheme component of a URI +#[derive(Clone)] +pub struct Scheme { + pub(super) inner: Scheme2, +} + +#[derive(Clone, Debug)] +pub(super) enum Scheme2> { + None, + Standard(Protocol), + Other(T), +} + +#[derive(Copy, Clone, Debug)] +pub(super) enum Protocol { + Http, + Https, +} + +impl 
Scheme { + /// HTTP protocol scheme + pub const HTTP: Scheme = Scheme { + inner: Scheme2::Standard(Protocol::Http), + }; + + /// HTTP protocol over TLS. + pub const HTTPS: Scheme = Scheme { + inner: Scheme2::Standard(Protocol::Https), + }; + + pub(super) fn empty() -> Self { + Scheme { + inner: Scheme2::None, + } + } + + /// Return a str representation of the scheme + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let scheme: Scheme = "http".parse().unwrap(); + /// assert_eq!(scheme.as_str(), "http"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + use self::Protocol::*; + use self::Scheme2::*; + + match self.inner { + Standard(Http) => "http", + Standard(Https) => "https", + Other(ref v) => &v[..], + None => unreachable!(), + } + } +} + +impl<'a> TryFrom<&'a [u8]> for Scheme { + type Error = InvalidUri; + #[inline] + fn try_from(s: &'a [u8]) -> Result { + use self::Scheme2::*; + + match Scheme2::parse_exact(s)? { + None => Err(ErrorKind::InvalidScheme.into()), + Standard(p) => Ok(Standard(p).into()), + Other(_) => { + let bytes = Bytes::copy_from_slice(s); + + // Safety: postcondition on parse_exact() means that s and + // hence bytes are valid UTF-8. 
+ let string = unsafe { ByteStr::from_utf8_unchecked(bytes) }; + + Ok(Other(Box::new(string)).into()) + } + } + } +} + +impl<'a> TryFrom<&'a str> for Scheme { + type Error = InvalidUri; + #[inline] + fn try_from(s: &'a str) -> Result { + TryFrom::try_from(s.as_bytes()) + } +} + +impl FromStr for Scheme { + type Err = InvalidUri; + + fn from_str(s: &str) -> Result { + TryFrom::try_from(s) + } +} + +impl fmt::Debug for Scheme { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(self.as_str(), f) + } +} + +impl fmt::Display for Scheme { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +impl AsRef for Scheme { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl PartialEq for Scheme { + fn eq(&self, other: &Scheme) -> bool { + use self::Protocol::*; + use self::Scheme2::*; + + match (&self.inner, &other.inner) { + (&Standard(Http), &Standard(Http)) => true, + (&Standard(Https), &Standard(Https)) => true, + (&Other(ref a), &Other(ref b)) => a.eq_ignore_ascii_case(b), + (&None, _) | (_, &None) => unreachable!(), + _ => false, + } + } +} + +impl Eq for Scheme {} + +/// Case-insensitive equality +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Scheme; +/// let scheme: Scheme = "HTTP".parse().unwrap(); +/// assert_eq!(scheme, *"http"); +/// ``` +impl PartialEq for Scheme { + fn eq(&self, other: &str) -> bool { + self.as_str().eq_ignore_ascii_case(other) + } +} + +/// Case-insensitive equality +impl PartialEq for str { + fn eq(&self, other: &Scheme) -> bool { + other == self + } +} + +/// Case-insensitive hashing +impl Hash for Scheme { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + match self.inner { + Scheme2::None => (), + Scheme2::Standard(Protocol::Http) => state.write_u8(1), + Scheme2::Standard(Protocol::Https) => state.write_u8(2), + Scheme2::Other(ref other) => { + other.len().hash(state); + for &b in other.as_bytes() { + 
state.write_u8(b.to_ascii_lowercase()); + } + } + } + } +} + +impl Scheme2 { + pub(super) fn is_none(&self) -> bool { + match *self { + Scheme2::None => true, + _ => false, + } + } +} + +// Require the scheme to not be too long in order to enable further +// optimizations later. +const MAX_SCHEME_LEN: usize = 64; + +// scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) +// +// SCHEME_CHARS is a table of valid characters in the scheme part of a URI. An +// entry in the table is 0 for invalid characters. For valid characters the +// entry is itself (i.e. the entry for 43 is b'+' because b'+' == 43u8). An +// important characteristic of this table is that all entries above 127 are +// invalid. This makes all of the valid entries a valid single-byte UTF-8 code +// point. This means that a slice of such valid entries is valid UTF-8. +const SCHEME_CHARS: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3x + 0, 0, 0, b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', 0, // 5x + 0, 0, 0, 0, 0, b'A', b'B', b'C', b'D', b'E', // 6x + b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x + b'Z', 0, 0, 0, 0, 0, 0, b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', 0, 0, 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +impl Scheme2 { + // Postcondition: On all Ok() returns, s is valid UTF-8 + fn parse_exact(s: &[u8]) -> Result, InvalidUri> { + match s { + b"http" => Ok(Protocol::Http.into()), + b"https" => Ok(Protocol::Https.into()), + _ => { + if s.len() > MAX_SCHEME_LEN { + return Err(ErrorKind::SchemeTooLong.into()); + } + + // check that each byte in s is a SCHEME_CHARS which implies + // that it is a valid single byte UTF-8 code point. + for &b in s { + match SCHEME_CHARS[b as usize] { + b':' => { + // Don't want :// here + return Err(ErrorKind::InvalidScheme.into()); + } + 0 => { + return Err(ErrorKind::InvalidScheme.into()); + } + _ => {} + } + } + + Ok(Scheme2::Other(())) + } + } + } + + pub(super) fn parse(s: &[u8]) -> Result, InvalidUri> { + if s.len() >= 7 { + // Check for HTTP + if s[..7].eq_ignore_ascii_case(b"http://") { + // Prefix will be striped + return Ok(Protocol::Http.into()); + } + } + + if s.len() >= 8 { + // Check for HTTPs + if s[..8].eq_ignore_ascii_case(b"https://") { + return Ok(Protocol::Https.into()); + } + } + + if s.len() > 3 { + for i in 0..s.len() { + let b = s[i]; + + match SCHEME_CHARS[b as usize] { + b':' => { + // Not enough data remaining + if s.len() < i + 3 { + break; + } + + // Not a scheme + if &s[i + 1..i + 3] != b"//" { + break; + } + + if i > MAX_SCHEME_LEN { + return Err(ErrorKind::SchemeTooLong.into()); + } + + // Return scheme + return Ok(Scheme2::Other(i)); + } + // Invald scheme character, abort + 0 => break, + _ => {} + } + } + } + + Ok(Scheme2::None) + } +} + +impl Protocol { + pub(super) fn len(&self) -> usize { + match *self { + Protocol::Http => 4, + Protocol::Https => 5, + } + } +} + +impl From for Scheme2 { + fn from(src: Protocol) -> Self { + Scheme2::Standard(src) + } +} + +#[doc(hidden)] +impl From for Scheme { + fn from(src: Scheme2) -> Self { + Scheme { inner: src } + } +} + +#[cfg(test)] +mod test { + use super::*; + + 
#[test] + fn scheme_eq_to_str() { + assert_eq!(&scheme("http"), "http"); + assert_eq!(&scheme("https"), "https"); + assert_eq!(&scheme("ftp"), "ftp"); + assert_eq!(&scheme("my+funky+scheme"), "my+funky+scheme"); + } + + #[test] + fn invalid_scheme_is_error() { + Scheme::try_from("my_funky_scheme").expect_err("Unexpectly valid Scheme"); + + // Invalid UTF-8 + Scheme::try_from([0xC0].as_ref()).expect_err("Unexpectly valid Scheme"); + } + + fn scheme(s: &str) -> Scheme { + s.parse().expect(&format!("Invalid scheme: {}", s)) + } +} diff --git a/.cargo-vendor/http-0.2.12/src/uri/tests.rs b/.cargo-vendor/http-0.2.12/src/uri/tests.rs new file mode 100644 index 0000000000..719cb94ee3 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/uri/tests.rs @@ -0,0 +1,519 @@ +use std::str::FromStr; + +use super::{ErrorKind, InvalidUri, Port, Uri, URI_CHARS}; + +#[test] +fn test_char_table() { + for (i, &v) in URI_CHARS.iter().enumerate() { + if v != 0 { + assert_eq!(i, v as usize); + } + } +} + +macro_rules! part { + ($s:expr) => { + Some(&$s.parse().unwrap()) + }; +} + +macro_rules! test_parse { + ( + $test_name:ident, + $str:expr, + $alt:expr, + $($method:ident = $value:expr,)* + ) => ( + #[test] + fn $test_name() { + let orig_str = $str; + let uri = match Uri::from_str(orig_str) { + Ok(uri) => uri, + Err(err) => { + panic!("parse error {:?} from {:?}", err, orig_str); + }, + }; + $( + assert_eq!(uri.$method(), $value, "{}: uri = {:?}", stringify!($method), uri); + )+ + assert_eq!(uri, orig_str, "partial eq to original str"); + assert_eq!(uri, uri.clone(), "clones are equal"); + + let new_str = uri.to_string(); + let new_uri = Uri::from_str(&new_str).expect("to_string output parses again as a Uri"); + assert_eq!(new_uri, orig_str, "round trip still equals original str"); + + const ALT: &'static [&'static str] = &$alt; + + for &alt in ALT.iter() { + let other: Uri = alt.parse().unwrap(); + assert_eq!(uri, *alt); + assert_eq!(uri, other); + } + } + ); +} + +test_parse! 
{ + test_uri_parse_path_and_query, + "/some/path/here?and=then&hello#and-bye", + [], + + scheme = None, + authority = None, + path = "/some/path/here", + query = Some("and=then&hello"), + host = None, +} + +test_parse! { + test_uri_parse_absolute_form, + "http://127.0.0.1:61761/chunks", + [], + + scheme = part!("http"), + authority = part!("127.0.0.1:61761"), + path = "/chunks", + query = None, + host = Some("127.0.0.1"), + port = Port::from_str("61761").ok(), +} + +test_parse! { + test_uri_parse_absolute_form_without_path, + "https://127.0.0.1:61761", + ["https://127.0.0.1:61761/"], + + scheme = part!("https"), + authority = part!("127.0.0.1:61761"), + path = "/", + query = None, + host = Some("127.0.0.1"), + port = Port::from_str("61761").ok(), +} + +test_parse! { + test_uri_parse_asterisk_form, + "*", + [], + + scheme = None, + authority = None, + path = "*", + query = None, + host = None, +} + +test_parse! { + test_uri_parse_authority_no_port, + "localhost", + ["LOCALHOST", "LocaLHOSt"], + + scheme = None, + authority = part!("localhost"), + path = "", + query = None, + port = None, + host = Some("localhost"), +} + +test_parse! { + test_uri_authority_only_one_character_issue_197, + "S", + [], + + scheme = None, + authority = part!("S"), + path = "", + query = None, + port = None, + host = Some("S"), +} + +test_parse! { + test_uri_parse_authority_form, + "localhost:3000", + ["localhosT:3000"], + + scheme = None, + authority = part!("localhost:3000"), + path = "", + query = None, + host = Some("localhost"), + port = Port::from_str("3000").ok(), +} + +test_parse! { + test_uri_parse_absolute_with_default_port_http, + "http://127.0.0.1:80", + ["http://127.0.0.1:80/"], + + scheme = part!("http"), + authority = part!("127.0.0.1:80"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = Port::from_str("80").ok(), +} + +test_parse! 
{ + test_uri_parse_absolute_with_default_port_https, + "https://127.0.0.1:443", + ["https://127.0.0.1:443/"], + + scheme = part!("https"), + authority = part!("127.0.0.1:443"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = Port::from_str("443").ok(), +} + +test_parse! { + test_uri_parse_fragment_questionmark, + "http://127.0.0.1/#?", + [], + + scheme = part!("http"), + authority = part!("127.0.0.1"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_uri_parse_path_with_terminating_questionmark, + "http://127.0.0.1/path?", + [], + + scheme = part!("http"), + authority = part!("127.0.0.1"), + path = "/path", + query = Some(""), + port = None, +} + +test_parse! { + test_uri_parse_absolute_form_with_empty_path_and_nonempty_query, + "http://127.0.0.1?foo=bar", + [], + + scheme = part!("http"), + authority = part!("127.0.0.1"), + path = "/", + query = Some("foo=bar"), + port = None, +} + +test_parse! { + test_uri_parse_absolute_form_with_empty_path_and_fragment_with_slash, + "http://127.0.0.1#foo/bar", + [], + + scheme = part!("http"), + authority = part!("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_uri_parse_absolute_form_with_empty_path_and_fragment_with_questionmark, + "http://127.0.0.1#foo?bar", + [], + + scheme = part!("http"), + authority = part!("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_uri_parse_long_host_with_no_scheme, + "thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost", + [], + + scheme = None, + authority = part!("thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost"), + path = "", + query = None, + port = None, +} + +test_parse! 
{ + test_uri_parse_long_host_with_port_and_no_scheme, + "thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost:1234", + [], + + scheme = None, + authority = part!("thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost:1234"), + path = "", + query = None, + port = Port::from_str("1234").ok(), +} + +test_parse! { + test_userinfo1, + "http://a:b@127.0.0.1:1234/", + [], + + scheme = part!("http"), + authority = part!("a:b@127.0.0.1:1234"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = Port::from_str("1234").ok(), +} + +test_parse! { + test_userinfo2, + "http://a:b@127.0.0.1/", + [], + + scheme = part!("http"), + authority = part!("a:b@127.0.0.1"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_userinfo3, + "http://a@127.0.0.1/", + [], + + scheme = part!("http"), + authority = part!("a@127.0.0.1"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_userinfo_with_port, + "user@localhost:3000", + [], + + scheme = None, + authority = part!("user@localhost:3000"), + path = "", + query = None, + host = Some("localhost"), + port = Port::from_str("3000").ok(), +} + +test_parse! { + test_userinfo_pass_with_port, + "user:pass@localhost:3000", + [], + + scheme = None, + authority = part!("user:pass@localhost:3000"), + path = "", + query = None, + host = Some("localhost"), + port = Port::from_str("3000").ok(), +} + +test_parse! { + test_ipv6, + "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]/", + [], + + scheme = part!("http"), + authority = part!("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"), + host = Some("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_ipv6_shorthand, + "http://[::1]/", + [], + + scheme = part!("http"), + authority = part!("[::1]"), + host = Some("[::1]"), + path = "/", + query = None, + port = None, +} + +test_parse! 
{ + test_ipv6_shorthand2, + "http://[::]/", + [], + + scheme = part!("http"), + authority = part!("[::]"), + host = Some("[::]"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_ipv6_shorthand3, + "http://[2001:db8::2:1]/", + [], + + scheme = part!("http"), + authority = part!("[2001:db8::2:1]"), + host = Some("[2001:db8::2:1]"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_ipv6_with_port, + "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8008/", + [], + + scheme = part!("http"), + authority = part!("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8008"), + host = Some("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"), + path = "/", + query = None, + port = Port::from_str("8008").ok(), +} + +test_parse! { + test_percentage_encoded_path, + "/echo/abcdefgh_i-j%20/abcdefg_i-j%20478", + [], + + scheme = None, + authority = None, + host = None, + path = "/echo/abcdefgh_i-j%20/abcdefg_i-j%20478", + query = None, + port = None, +} + +test_parse! { + test_path_permissive, + "/foo=bar|baz\\^~%", + [], + + path = "/foo=bar|baz\\^~%", +} + +test_parse! 
{ + test_query_permissive, + "/?foo={bar|baz}\\^`", + [], + + query = Some("foo={bar|baz}\\^`"), +} + +#[test] +fn test_uri_parse_error() { + fn err(s: &str) { + Uri::from_str(s).unwrap_err(); + } + + err("http://"); + err("htt:p//host"); + err("hyper.rs/"); + err("hyper.rs?key=val"); + err("?key=val"); + err("localhost/"); + err("localhost?key=val"); + err("\0"); + err("http://[::1"); + err("http://::1]"); + err("localhost:8080:3030"); + err("@"); + err("http://username:password@/wut"); + + // illegal queries + err("/?foo\rbar"); + err("/?foo\nbar"); + err("/?<"); + err("/?>"); +} + +#[test] +fn test_max_uri_len() { + let mut uri = vec![]; + uri.extend(b"http://localhost/"); + uri.extend(vec![b'a'; 70 * 1024]); + + let uri = String::from_utf8(uri).unwrap(); + let res: Result = uri.parse(); + + assert_eq!(res.unwrap_err().0, ErrorKind::TooLong); +} + +#[test] +fn test_overflowing_scheme() { + let mut uri = vec![]; + uri.extend(vec![b'a'; 256]); + uri.extend(b"://localhost/"); + + let uri = String::from_utf8(uri).unwrap(); + let res: Result = uri.parse(); + + assert_eq!(res.unwrap_err().0, ErrorKind::SchemeTooLong); +} + +#[test] +fn test_max_length_scheme() { + let mut uri = vec![]; + uri.extend(vec![b'a'; 64]); + uri.extend(b"://localhost/"); + + let uri = String::from_utf8(uri).unwrap(); + let uri: Uri = uri.parse().unwrap(); + + assert_eq!(uri.scheme_str().unwrap().len(), 64); +} + +#[test] +fn test_uri_to_path_and_query() { + let cases = vec![ + ("/", "/"), + ("/foo?bar", "/foo?bar"), + ("/foo?bar#nope", "/foo?bar"), + ("http://hyper.rs", "/"), + ("http://hyper.rs/", "/"), + ("http://hyper.rs/path", "/path"), + ("http://hyper.rs?query", "/?query"), + ("*", "*"), + ]; + + for case in cases { + let uri = Uri::from_str(case.0).unwrap(); + let s = uri.path_and_query().unwrap().to_string(); + + assert_eq!(s, case.1); + } +} + +#[test] +fn test_authority_uri_parts_round_trip() { + let s = "hyper.rs"; + let uri = Uri::from_str(s).expect("first parse"); + 
assert_eq!(uri, s); + assert_eq!(uri.to_string(), s); + + let parts = uri.into_parts(); + let uri2 = Uri::from_parts(parts).expect("from_parts"); + assert_eq!(uri2, s); + assert_eq!(uri2.to_string(), s); +} + +#[test] +fn test_partial_eq_path_with_terminating_questionmark() { + let a = "/path"; + let uri = Uri::from_str("/path?").expect("first parse"); + + assert_eq!(uri, a); +} diff --git a/.cargo-vendor/http-0.2.12/src/version.rs b/.cargo-vendor/http-0.2.12/src/version.rs new file mode 100644 index 0000000000..d8b713061e --- /dev/null +++ b/.cargo-vendor/http-0.2.12/src/version.rs @@ -0,0 +1,75 @@ +//! HTTP version +//! +//! This module contains a definition of the `Version` type. The `Version` +//! type is intended to be accessed through the root of the crate +//! (`http::Version`) rather than this module. +//! +//! The `Version` type contains constants that represent the various versions +//! of the HTTP protocol. +//! +//! # Examples +//! +//! ``` +//! use http::Version; +//! +//! let http11 = Version::HTTP_11; +//! let http2 = Version::HTTP_2; +//! assert!(http11 != http2); +//! +//! println!("{:?}", http2); +//! ``` + +use std::fmt; + +/// Represents a version of the HTTP spec. 
+#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash)] +pub struct Version(Http); + +impl Version { + /// `HTTP/0.9` + pub const HTTP_09: Version = Version(Http::Http09); + + /// `HTTP/1.0` + pub const HTTP_10: Version = Version(Http::Http10); + + /// `HTTP/1.1` + pub const HTTP_11: Version = Version(Http::Http11); + + /// `HTTP/2.0` + pub const HTTP_2: Version = Version(Http::H2); + + /// `HTTP/3.0` + pub const HTTP_3: Version = Version(Http::H3); +} + +#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash)] +enum Http { + Http09, + Http10, + Http11, + H2, + H3, + __NonExhaustive, +} + +impl Default for Version { + #[inline] + fn default() -> Version { + Version::HTTP_11 + } +} + +impl fmt::Debug for Version { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use self::Http::*; + + f.write_str(match self.0 { + Http09 => "HTTP/0.9", + Http10 => "HTTP/1.0", + Http11 => "HTTP/1.1", + H2 => "HTTP/2.0", + H3 => "HTTP/3.0", + __NonExhaustive => unreachable!(), + }) + } +} diff --git a/.cargo-vendor/http-0.2.12/tests/header_map.rs b/.cargo-vendor/http-0.2.12/tests/header_map.rs new file mode 100644 index 0000000000..f2beba0862 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/tests/header_map.rs @@ -0,0 +1,638 @@ +use http::header::*; +use http::*; + +#[test] +fn smoke() { + let mut headers = HeaderMap::new(); + + assert!(headers.get("hello").is_none()); + + let name: HeaderName = "hello".parse().unwrap(); + + match headers.entry(&name) { + Entry::Vacant(e) => { + e.insert("world".parse().unwrap()); + } + _ => panic!(), + } + + assert!(headers.get("hello").is_some()); + + match headers.entry(&name) { + Entry::Occupied(mut e) => { + assert_eq!(e.get(), &"world"); + + // Push another value + e.append("zomg".parse().unwrap()); + + let mut i = e.iter(); + + assert_eq!(*i.next().unwrap(), "world"); + assert_eq!(*i.next().unwrap(), "zomg"); + assert!(i.next().is_none()); + } + _ => panic!(), + } +} + +#[test] +#[should_panic] +fn reserve_over_capacity() { 
+ // See https://github.com/hyperium/http/issues/352 + let mut headers = HeaderMap::::with_capacity(32); + headers.reserve(50_000); // over MAX_SIZE +} + +#[test] +fn with_capacity_max() { + // The largest capacity such that (cap + cap / 3) < MAX_SIZE. + HeaderMap::::with_capacity(24_576); +} + +#[test] +#[should_panic] +fn with_capacity_overflow() { + HeaderMap::::with_capacity(24_577); +} + +#[test] +#[should_panic] +fn reserve_overflow() { + // See https://github.com/hyperium/http/issues/352 + let mut headers = HeaderMap::::with_capacity(0); + headers.reserve(std::usize::MAX); // next_power_of_two overflows +} + +#[test] +fn drain() { + let mut headers = HeaderMap::new(); + + // Insert a single value + let name: HeaderName = "hello".parse().unwrap(); + headers.insert(name, "world".parse().unwrap()); + + { + let mut iter = headers.drain(); + let (name, value) = iter.next().unwrap(); + assert_eq!(name.unwrap().as_str(), "hello"); + + assert_eq!(value, "world"); + + assert!(iter.next().is_none()); + } + + assert!(headers.is_empty()); + + // Insert two sequential values + headers.insert( + "hello".parse::().unwrap(), + "world".parse().unwrap(), + ); + headers.insert( + "zomg".parse::().unwrap(), + "bar".parse().unwrap(), + ); + headers.append( + "hello".parse::().unwrap(), + "world2".parse().unwrap(), + ); + + // Drain... 
+ { + let mut iter = headers.drain(); + + let (name, value) = iter.next().unwrap(); + assert_eq!(name.unwrap().as_str(), "hello"); + assert_eq!(value, "world"); + + let (name, value) = iter.next().unwrap(); + assert_eq!(name, None); + assert_eq!(value, "world2"); + + let (name, value) = iter.next().unwrap(); + assert_eq!(name.unwrap().as_str(), "zomg"); + assert_eq!(value, "bar"); + + assert!(iter.next().is_none()); + } +} + +#[test] +fn drain_drop_immediately() { + // test mem::forgetting does not double-free + + let mut headers = HeaderMap::new(); + headers.insert("hello", "world".parse().unwrap()); + headers.insert("zomg", "bar".parse().unwrap()); + headers.append("hello", "world2".parse().unwrap()); + + let iter = headers.drain(); + assert_eq!(iter.size_hint(), (2, Some(3))); + // not consuming `iter` +} + +#[test] +fn drain_forget() { + // test mem::forgetting does not double-free + + let mut headers = HeaderMap::::new(); + headers.insert("hello", "world".parse().unwrap()); + headers.insert("zomg", "bar".parse().unwrap()); + + assert_eq!(headers.len(), 2); + + { + let mut iter = headers.drain(); + assert_eq!(iter.size_hint(), (2, Some(2))); + let _ = iter.next().unwrap(); + std::mem::forget(iter); + } + + assert_eq!(headers.len(), 0); +} + +#[test] +fn drain_entry() { + let mut headers = HeaderMap::new(); + + headers.insert( + "hello".parse::().unwrap(), + "world".parse().unwrap(), + ); + headers.insert( + "zomg".parse::().unwrap(), + "foo".parse().unwrap(), + ); + headers.append( + "hello".parse::().unwrap(), + "world2".parse().unwrap(), + ); + headers.insert( + "more".parse::().unwrap(), + "words".parse().unwrap(), + ); + headers.append( + "more".parse::().unwrap(), + "insertions".parse().unwrap(), + ); + assert_eq!(5, headers.len()); + + // Using insert_mult + { + let mut e = match headers.entry("hello") { + Entry::Occupied(e) => e, + _ => panic!(), + }; + + let vals: Vec<_> = e.insert_mult("wat".parse().unwrap()).collect(); + assert_eq!(2, vals.len()); + 
assert_eq!(vals[0], "world"); + assert_eq!(vals[1], "world2"); + } + + assert_eq!(5-2+1, headers.len()); +} + +#[test] +fn eq() { + let mut a = HeaderMap::new(); + let mut b = HeaderMap::new(); + + assert_eq!(a, b); + + a.insert( + "hello".parse::().unwrap(), + "world".parse().unwrap(), + ); + assert_ne!(a, b); + + b.insert( + "hello".parse::().unwrap(), + "world".parse().unwrap(), + ); + assert_eq!(a, b); + + a.insert("foo".parse::().unwrap(), "bar".parse().unwrap()); + a.append("foo".parse::().unwrap(), "baz".parse().unwrap()); + assert_ne!(a, b); + + b.insert("foo".parse::().unwrap(), "bar".parse().unwrap()); + assert_ne!(a, b); + + b.append("foo".parse::().unwrap(), "baz".parse().unwrap()); + assert_eq!(a, b); + + a.append("a".parse::().unwrap(), "a".parse().unwrap()); + a.append("a".parse::().unwrap(), "b".parse().unwrap()); + b.append("a".parse::().unwrap(), "b".parse().unwrap()); + b.append("a".parse::().unwrap(), "a".parse().unwrap()); + + assert_ne!(a, b); +} + +#[test] +fn into_header_name() { + let mut m = HeaderMap::new(); + m.insert(HOST, "localhost".parse().unwrap()); + m.insert(&ACCEPT, "*/*".parse().unwrap()); + m.insert("connection", "keep-alive".parse().unwrap()); + + m.append(LOCATION, "/".parse().unwrap()); + m.append(&VIA, "bob".parse().unwrap()); + m.append("transfer-encoding", "chunked".parse().unwrap()); + + assert_eq!(m.len(), 6); +} + +#[test] +fn as_header_name() { + let mut m = HeaderMap::new(); + let v: HeaderValue = "localhost".parse().unwrap(); + m.insert(HOST, v.clone()); + + let expected = Some(&v); + + assert_eq!(m.get("host"), expected); + assert_eq!(m.get(&HOST), expected); + + let s = String::from("host"); + assert_eq!(m.get(&s), expected); + assert_eq!(m.get(s.as_str()), expected); +} + +#[test] +fn insert_all_std_headers() { + let mut m = HeaderMap::new(); + + for (i, hdr) in STD.iter().enumerate() { + m.insert(hdr.clone(), hdr.as_str().parse().unwrap()); + + for j in 0..(i + 1) { + assert_eq!(m[&STD[j]], STD[j].as_str()); + } 
+ + if i != 0 { + for j in (i + 1)..STD.len() { + assert!( + m.get(&STD[j]).is_none(), + "contained {}; j={}", + STD[j].as_str(), + j + ); + } + } + } +} + +#[test] +fn insert_79_custom_std_headers() { + let mut h = HeaderMap::new(); + let hdrs = custom_std(79); + + for (i, hdr) in hdrs.iter().enumerate() { + h.insert(hdr.clone(), hdr.as_str().parse().unwrap()); + + for j in 0..(i + 1) { + assert_eq!(h[&hdrs[j]], hdrs[j].as_str()); + } + + for j in (i + 1)..hdrs.len() { + assert!(h.get(&hdrs[j]).is_none()); + } + } +} + +#[test] +fn append_multiple_values() { + let mut map = HeaderMap::new(); + + map.append(header::CONTENT_TYPE, "json".parse().unwrap()); + map.append(header::CONTENT_TYPE, "html".parse().unwrap()); + map.append(header::CONTENT_TYPE, "xml".parse().unwrap()); + + let vals = map + .get_all(&header::CONTENT_TYPE) + .iter() + .collect::>(); + + assert_eq!(&vals, &[&"json", &"html", &"xml"]); +} + +fn custom_std(n: usize) -> Vec { + (0..n) + .map(|i| { + let s = format!("{}-{}", STD[i % STD.len()].as_str(), i); + s.parse().unwrap() + }) + .collect() +} + +const STD: &'static [HeaderName] = &[ + ACCEPT, + ACCEPT_CHARSET, + ACCEPT_ENCODING, + ACCEPT_LANGUAGE, + ACCEPT_RANGES, + ACCESS_CONTROL_ALLOW_CREDENTIALS, + ACCESS_CONTROL_ALLOW_HEADERS, + ACCESS_CONTROL_ALLOW_METHODS, + ACCESS_CONTROL_ALLOW_ORIGIN, + ACCESS_CONTROL_EXPOSE_HEADERS, + ACCESS_CONTROL_MAX_AGE, + ACCESS_CONTROL_REQUEST_HEADERS, + ACCESS_CONTROL_REQUEST_METHOD, + AGE, + ALLOW, + ALT_SVC, + AUTHORIZATION, + CACHE_CONTROL, + CACHE_STATUS, + CDN_CACHE_CONTROL, + CONNECTION, + CONTENT_DISPOSITION, + CONTENT_ENCODING, + CONTENT_LANGUAGE, + CONTENT_LENGTH, + CONTENT_LOCATION, + CONTENT_RANGE, + CONTENT_SECURITY_POLICY, + CONTENT_SECURITY_POLICY_REPORT_ONLY, + CONTENT_TYPE, + COOKIE, + DNT, + DATE, + ETAG, + EXPECT, + EXPIRES, + FORWARDED, + FROM, + HOST, + IF_MATCH, + IF_MODIFIED_SINCE, + IF_NONE_MATCH, + IF_RANGE, + IF_UNMODIFIED_SINCE, + LAST_MODIFIED, + LINK, + LOCATION, + MAX_FORWARDS, + 
ORIGIN, + PRAGMA, + PROXY_AUTHENTICATE, + PROXY_AUTHORIZATION, + PUBLIC_KEY_PINS, + PUBLIC_KEY_PINS_REPORT_ONLY, + RANGE, + REFERER, + REFERRER_POLICY, + RETRY_AFTER, + SERVER, + SET_COOKIE, + STRICT_TRANSPORT_SECURITY, + TE, + TRAILER, + TRANSFER_ENCODING, + USER_AGENT, + UPGRADE, + UPGRADE_INSECURE_REQUESTS, + VARY, + VIA, + WARNING, + WWW_AUTHENTICATE, + X_CONTENT_TYPE_OPTIONS, + X_DNS_PREFETCH_CONTROL, + X_FRAME_OPTIONS, + X_XSS_PROTECTION, +]; + +#[test] +fn get_invalid() { + let mut headers = HeaderMap::new(); + headers.insert("foo", "bar".parse().unwrap()); + assert!(headers.get("Evil\r\nKey").is_none()); +} + +#[test] +#[should_panic] +fn insert_invalid() { + let mut headers = HeaderMap::new(); + headers.insert("evil\r\nfoo", "bar".parse().unwrap()); +} + +#[test] +fn value_htab() { + // RFC 7230 Section 3.2: + // > field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] + HeaderValue::from_static("hello\tworld"); + HeaderValue::from_str("hello\tworld").unwrap(); +} + + +#[test] +fn remove_multiple_a() { + let mut headers = HeaderMap::new(); + headers.insert(VIA, "1.1 example.com".parse().unwrap()); + headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_2=value 2".parse().unwrap()); + headers.append(VIA, "1.1 other.com".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_3=value 3".parse().unwrap()); + headers.insert(VARY, "*".parse().unwrap()); + + assert_eq!(headers.len(), 6); + + let cookie = headers.remove(SET_COOKIE); + assert_eq!(cookie, Some("cookie_1=value 1".parse().unwrap())); + assert_eq!(headers.len(), 3); + + let via = headers.remove(VIA); + assert_eq!(via, Some("1.1 example.com".parse().unwrap())); + assert_eq!(headers.len(), 1); + + let vary = headers.remove(VARY); + assert_eq!(vary, Some("*".parse().unwrap())); + assert_eq!(headers.len(), 0); +} + +#[test] +fn remove_multiple_b() { + let mut headers = HeaderMap::new(); + headers.insert(VIA, "1.1 example.com".parse().unwrap()); + 
headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_2=value 2".parse().unwrap()); + headers.append(VIA, "1.1 other.com".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_3=value 3".parse().unwrap()); + headers.insert(VARY, "*".parse().unwrap()); + + assert_eq!(headers.len(), 6); + + let vary = headers.remove(VARY); + assert_eq!(vary, Some("*".parse().unwrap())); + assert_eq!(headers.len(), 5); + + let via = headers.remove(VIA); + assert_eq!(via, Some("1.1 example.com".parse().unwrap())); + assert_eq!(headers.len(), 3); + + let cookie = headers.remove(SET_COOKIE); + assert_eq!(cookie, Some("cookie_1=value 1".parse().unwrap())); + assert_eq!(headers.len(), 0); +} + +#[test] +fn remove_entry_multi_0() { + let mut headers = HeaderMap::new(); + let cookies = remove_all_values(&mut headers, SET_COOKIE); + assert_eq!(cookies.len(), 0); + assert_eq!(headers.len(), 0); +} + +#[test] +fn remove_entry_multi_0_others() { + let mut headers = HeaderMap::new(); + headers.insert(VIA, "1.1 example.com".parse().unwrap()); + headers.append(VIA, "1.1 other.com".parse().unwrap()); + + let cookies = remove_all_values(&mut headers, SET_COOKIE); + assert_eq!(cookies.len(), 0); + assert_eq!(headers.len(), 2); +} + +#[test] +fn remove_entry_multi_1() { + let mut headers = HeaderMap::new(); + headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + + let cookies = remove_all_values(&mut headers, SET_COOKIE); + assert_eq!(cookies.len(), 1); + assert_eq!(headers.len(), 0); +} + +#[test] +fn remove_entry_multi_1_other() { + let mut headers = HeaderMap::new(); + headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + headers.insert(VIA, "1.1 example.com".parse().unwrap()); + + let cookies = remove_all_values(&mut headers, SET_COOKIE); + assert_eq!(cookies.len(), 1); + assert_eq!(headers.len(), 1); + + let vias = remove_all_values(&mut headers, VIA); + assert_eq!(vias.len(), 1); + assert_eq!(headers.len(), 0); +} + +// 
For issue hyperimum/http#446 +#[test] +fn remove_entry_multi_2() { + let mut headers = HeaderMap::new(); + headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_2=value 2".parse().unwrap()); + + let cookies = remove_all_values(&mut headers, SET_COOKIE); + assert_eq!(cookies.len(), 2); + assert_eq!(headers.len(), 0); +} + +#[test] +fn remove_entry_multi_3() { + let mut headers = HeaderMap::new(); + headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_2=value 2".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_3=value 3".parse().unwrap()); + + let cookies = remove_all_values(&mut headers, SET_COOKIE); + assert_eq!(cookies.len(), 3); + assert_eq!(headers.len(), 0); +} + +#[test] +fn remove_entry_multi_3_others() { + let mut headers = HeaderMap::new(); + headers.insert(VIA, "1.1 example.com".parse().unwrap()); + headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_2=value 2".parse().unwrap()); + headers.append(VIA, "1.1 other.com".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_3=value 3".parse().unwrap()); + headers.insert(VARY, "*".parse().unwrap()); + + let cookies = remove_all_values(&mut headers, SET_COOKIE); + assert_eq!(cookies.len(), 3); + assert_eq!(headers.len(), 3); + + let vias = remove_all_values(&mut headers, VIA); + assert_eq!(vias.len(), 2); + assert_eq!(headers.len(), 1); + + let varies = remove_all_values(&mut headers, VARY); + assert_eq!(varies.len(), 1); + assert_eq!(headers.len(), 0); +} + +fn remove_all_values(headers: &mut HeaderMap, key: K) -> Vec + where K: IntoHeaderName +{ + match headers.entry(key) { + Entry::Occupied(e) => e.remove_entry_mult().1.collect(), + Entry::Vacant(_) => vec![], + } +} + +#[test] +fn remove_entry_3_others_a() { + let mut headers = HeaderMap::new(); + headers.insert(VIA, "1.1 example.com".parse().unwrap()); + headers.insert(SET_COOKIE, "cookie_1=value 
1".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_2=value 2".parse().unwrap()); + headers.append(VIA, "1.1 other.com".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_3=value 3".parse().unwrap()); + headers.insert(VARY, "*".parse().unwrap()); + + assert_eq!(headers.len(), 6); + + let cookie = remove_values(&mut headers, SET_COOKIE); + assert_eq!(cookie, Some("cookie_1=value 1".parse().unwrap())); + assert_eq!(headers.len(), 3); + + let via = remove_values(&mut headers, VIA); + assert_eq!(via, Some("1.1 example.com".parse().unwrap())); + assert_eq!(headers.len(), 1); + + let vary = remove_values(&mut headers, VARY); + assert_eq!(vary, Some("*".parse().unwrap())); + assert_eq!(headers.len(), 0); +} + +#[test] +fn remove_entry_3_others_b() { + let mut headers = HeaderMap::new(); + headers.insert(VIA, "1.1 example.com".parse().unwrap()); + headers.insert(SET_COOKIE, "cookie_1=value 1".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_2=value 2".parse().unwrap()); + headers.append(VIA, "1.1 other.com".parse().unwrap()); + headers.append(SET_COOKIE, "cookie_3=value 3".parse().unwrap()); + headers.insert(VARY, "*".parse().unwrap()); + + assert_eq!(headers.len(), 6); + + let vary = remove_values(&mut headers, VARY); + assert_eq!(vary, Some("*".parse().unwrap())); + assert_eq!(headers.len(), 5); + + let via = remove_values(&mut headers, VIA); + assert_eq!(via, Some("1.1 example.com".parse().unwrap())); + assert_eq!(headers.len(), 3); + + let cookie = remove_values(&mut headers, SET_COOKIE); + assert_eq!(cookie, Some("cookie_1=value 1".parse().unwrap())); + assert_eq!(headers.len(), 0); +} + +fn remove_values(headers: &mut HeaderMap, key: K) -> Option + where K: IntoHeaderName +{ + match headers.entry(key) { + Entry::Occupied(e) => Some(e.remove_entry().1), + Entry::Vacant(_) => None, + } +} diff --git a/.cargo-vendor/http-0.2.12/tests/header_map_fuzz.rs b/.cargo-vendor/http-0.2.12/tests/header_map_fuzz.rs new file mode 100644 index 
0000000000..c3af2e52e7 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/tests/header_map_fuzz.rs @@ -0,0 +1,376 @@ +use http::header::*; +use http::*; + +use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; +use rand::rngs::StdRng; +use rand::seq::SliceRandom; +use rand::{Rng, SeedableRng}; + +use std::collections::HashMap; + +#[cfg(not(miri))] +#[test] +fn header_map_fuzz() { + fn prop(fuzz: Fuzz) -> TestResult { + fuzz.run(); + TestResult::from_bool(true) + } + + QuickCheck::new().quickcheck(prop as fn(Fuzz) -> TestResult) +} + +#[derive(Debug, Clone)] +#[allow(dead_code)] +struct Fuzz { + // The magic seed that makes the test case reproducible + seed: [u8; 32], + + // Actions to perform + steps: Vec, + + // Number of steps to drop + reduce: usize, +} + +#[derive(Debug)] +struct Weight { + insert: usize, + remove: usize, + append: usize, +} + +#[derive(Debug, Clone)] +struct Step { + action: Action, + expect: AltMap, +} + +#[derive(Debug, Clone)] +enum Action { + Insert { + name: HeaderName, // Name to insert + val: HeaderValue, // Value to insert + old: Option, // Old value + }, + Append { + name: HeaderName, + val: HeaderValue, + ret: bool, + }, + Remove { + name: HeaderName, // Name to remove + val: Option, // Value to get + }, +} + +// An alternate implementation of HeaderMap backed by HashMap +#[derive(Debug, Clone, Default)] +struct AltMap { + map: HashMap>, +} + +impl Fuzz { + fn new(seed: [u8; 32]) -> Fuzz { + // Seed the RNG + let mut rng = StdRng::from_seed(seed); + + let mut steps = vec![]; + let mut expect = AltMap::default(); + let num = rng.gen_range(5, 500); + + let weight = Weight { + insert: rng.gen_range(1, 10), + remove: rng.gen_range(1, 10), + append: rng.gen_range(1, 10), + }; + + while steps.len() < num { + steps.push(expect.gen_step(&weight, &mut rng)); + } + + Fuzz { + seed: seed, + steps: steps, + reduce: 0, + } + } + + fn run(self) { + // Create a new header map + let mut map = HeaderMap::new(); + + // Number of steps to perform + let 
take = self.steps.len() - self.reduce; + + for step in self.steps.into_iter().take(take) { + step.action.apply(&mut map); + + step.expect.assert_identical(&map); + } + } +} + +impl Arbitrary for Fuzz { + fn arbitrary(g: &mut G) -> Self { + Fuzz::new(Rng::gen(g)) + } +} + +impl AltMap { + fn gen_step(&mut self, weight: &Weight, rng: &mut StdRng) -> Step { + let action = self.gen_action(weight, rng); + + Step { + action: action, + expect: self.clone(), + } + } + + /// This will also apply the action against `self` + fn gen_action(&mut self, weight: &Weight, rng: &mut StdRng) -> Action { + let sum = weight.insert + weight.remove + weight.append; + + let mut num = rng.gen_range(0, sum); + + if num < weight.insert { + return self.gen_insert(rng); + } + + num -= weight.insert; + + if num < weight.remove { + return self.gen_remove(rng); + } + + num -= weight.remove; + + if num < weight.append { + return self.gen_append(rng); + } + + unreachable!(); + } + + fn gen_insert(&mut self, rng: &mut StdRng) -> Action { + let name = self.gen_name(4, rng); + let val = gen_header_value(rng); + let old = self.insert(name.clone(), val.clone()); + + Action::Insert { + name: name, + val: val, + old: old, + } + } + + fn gen_remove(&mut self, rng: &mut StdRng) -> Action { + let name = self.gen_name(-4, rng); + let val = self.remove(&name); + + Action::Remove { + name: name, + val: val, + } + } + + fn gen_append(&mut self, rng: &mut StdRng) -> Action { + let name = self.gen_name(-5, rng); + let val = gen_header_value(rng); + + let vals = self.map.entry(name.clone()).or_insert(vec![]); + + let ret = !vals.is_empty(); + vals.push(val.clone()); + + Action::Append { + name: name, + val: val, + ret: ret, + } + } + + /// Negative numbers weigh finding an existing header higher + fn gen_name(&self, weight: i32, rng: &mut StdRng) -> HeaderName { + let mut existing = rng.gen_ratio(1, weight.abs() as u32); + + if weight < 0 { + existing = !existing; + } + + if existing { + // Existing header + if let 
Some(name) = self.find_random_name(rng) { + name + } else { + gen_header_name(rng) + } + } else { + gen_header_name(rng) + } + } + + fn find_random_name(&self, rng: &mut StdRng) -> Option { + if self.map.is_empty() { + None + } else { + let n = rng.gen_range(0, self.map.len()); + self.map.keys().nth(n).map(Clone::clone) + } + } + + fn insert(&mut self, name: HeaderName, val: HeaderValue) -> Option { + let old = self.map.insert(name, vec![val]); + old.and_then(|v| v.into_iter().next()) + } + + fn remove(&mut self, name: &HeaderName) -> Option { + self.map.remove(name).and_then(|v| v.into_iter().next()) + } + + fn assert_identical(&self, other: &HeaderMap) { + assert_eq!(self.map.len(), other.keys_len()); + + for (key, val) in &self.map { + // Test get + assert_eq!(other.get(key), val.get(0)); + + // Test get_all + let vals = other.get_all(key); + let actual: Vec<_> = vals.iter().collect(); + assert_eq!(&actual[..], &val[..]); + } + } +} + +impl Action { + fn apply(self, map: &mut HeaderMap) { + match self { + Action::Insert { name, val, old } => { + let actual = map.insert(name, val); + assert_eq!(actual, old); + } + Action::Remove { name, val } => { + // Just to help track the state, load all associated values. 
+ let _ = map.get_all(&name).iter().collect::>(); + + let actual = map.remove(&name); + assert_eq!(actual, val); + } + Action::Append { name, val, ret } => { + assert_eq!(ret, map.append(name, val)); + } + } + } +} + +fn gen_header_name(g: &mut StdRng) -> HeaderName { + const STANDARD_HEADERS: &'static [HeaderName] = &[ + header::ACCEPT, + header::ACCEPT_CHARSET, + header::ACCEPT_ENCODING, + header::ACCEPT_LANGUAGE, + header::ACCEPT_RANGES, + header::ACCESS_CONTROL_ALLOW_CREDENTIALS, + header::ACCESS_CONTROL_ALLOW_HEADERS, + header::ACCESS_CONTROL_ALLOW_METHODS, + header::ACCESS_CONTROL_ALLOW_ORIGIN, + header::ACCESS_CONTROL_EXPOSE_HEADERS, + header::ACCESS_CONTROL_MAX_AGE, + header::ACCESS_CONTROL_REQUEST_HEADERS, + header::ACCESS_CONTROL_REQUEST_METHOD, + header::AGE, + header::ALLOW, + header::ALT_SVC, + header::AUTHORIZATION, + header::CACHE_CONTROL, + header::CACHE_STATUS, + header::CDN_CACHE_CONTROL, + header::CONNECTION, + header::CONTENT_DISPOSITION, + header::CONTENT_ENCODING, + header::CONTENT_LANGUAGE, + header::CONTENT_LENGTH, + header::CONTENT_LOCATION, + header::CONTENT_RANGE, + header::CONTENT_SECURITY_POLICY, + header::CONTENT_SECURITY_POLICY_REPORT_ONLY, + header::CONTENT_TYPE, + header::COOKIE, + header::DNT, + header::DATE, + header::ETAG, + header::EXPECT, + header::EXPIRES, + header::FORWARDED, + header::FROM, + header::HOST, + header::IF_MATCH, + header::IF_MODIFIED_SINCE, + header::IF_NONE_MATCH, + header::IF_RANGE, + header::IF_UNMODIFIED_SINCE, + header::LAST_MODIFIED, + header::LINK, + header::LOCATION, + header::MAX_FORWARDS, + header::ORIGIN, + header::PRAGMA, + header::PROXY_AUTHENTICATE, + header::PROXY_AUTHORIZATION, + header::PUBLIC_KEY_PINS, + header::PUBLIC_KEY_PINS_REPORT_ONLY, + header::RANGE, + header::REFERER, + header::REFERRER_POLICY, + header::REFRESH, + header::RETRY_AFTER, + header::SEC_WEBSOCKET_ACCEPT, + header::SEC_WEBSOCKET_EXTENSIONS, + header::SEC_WEBSOCKET_KEY, + header::SEC_WEBSOCKET_PROTOCOL, + 
header::SEC_WEBSOCKET_VERSION, + header::SERVER, + header::SET_COOKIE, + header::STRICT_TRANSPORT_SECURITY, + header::TE, + header::TRAILER, + header::TRANSFER_ENCODING, + header::UPGRADE, + header::UPGRADE_INSECURE_REQUESTS, + header::USER_AGENT, + header::VARY, + header::VIA, + header::WARNING, + header::WWW_AUTHENTICATE, + header::X_CONTENT_TYPE_OPTIONS, + header::X_DNS_PREFETCH_CONTROL, + header::X_FRAME_OPTIONS, + header::X_XSS_PROTECTION, + ]; + + if g.gen_ratio(1, 2) { + STANDARD_HEADERS.choose(g).unwrap().clone() + } else { + let value = gen_string(g, 1, 25); + HeaderName::from_bytes(value.as_bytes()).unwrap() + } +} + +fn gen_header_value(g: &mut StdRng) -> HeaderValue { + let value = gen_string(g, 0, 70); + HeaderValue::from_bytes(value.as_bytes()).unwrap() +} + +fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { + let bytes: Vec<_> = (min..max) + .map(|_| { + // Chars to pick from + b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----" + .choose(g) + .unwrap() + .clone() + }) + .collect(); + + String::from_utf8(bytes).unwrap() +} diff --git a/.cargo-vendor/http-0.2.12/tests/status_code.rs b/.cargo-vendor/http-0.2.12/tests/status_code.rs new file mode 100644 index 0000000000..160df6bad5 --- /dev/null +++ b/.cargo-vendor/http-0.2.12/tests/status_code.rs @@ -0,0 +1,82 @@ +use http::*; + +#[test] +fn from_bytes() { + for ok in &[ + "100", "101", "199", "200", "250", "299", "321", "399", "499", "599", "600", "999" + ] { + assert!(StatusCode::from_bytes(ok.as_bytes()).is_ok()); + } + + for not_ok in &[ + "0", "00", "10", "40", "99", "000", "010", "099", "1000", "1999", + ] { + assert!(StatusCode::from_bytes(not_ok.as_bytes()).is_err()); + } +} + +#[test] +fn equates_with_u16() { + let status = StatusCode::from_u16(200u16).unwrap(); + assert_eq!(200u16, status); + assert_eq!(status, 200u16); +} + +#[test] +fn roundtrip() { + for s in 100..1000 { + let sstr = s.to_string(); + let status = StatusCode::from_bytes(sstr.as_bytes()).unwrap(); + 
assert_eq!(s, u16::from(status)); + assert_eq!(sstr, status.as_str()); + } +} + +#[test] +fn is_informational() { + assert!(status_code(100).is_informational()); + assert!(status_code(199).is_informational()); + + assert!(!status_code(200).is_informational()); +} + +#[test] +fn is_success() { + assert!(status_code(200).is_success()); + assert!(status_code(299).is_success()); + + assert!(!status_code(199).is_success()); + assert!(!status_code(300).is_success()); +} + +#[test] +fn is_redirection() { + assert!(status_code(300).is_redirection()); + assert!(status_code(399).is_redirection()); + + assert!(!status_code(299).is_redirection()); + assert!(!status_code(400).is_redirection()); +} + +#[test] +fn is_client_error() { + assert!(status_code(400).is_client_error()); + assert!(status_code(499).is_client_error()); + + assert!(!status_code(399).is_client_error()); + assert!(!status_code(500).is_client_error()); +} + +#[test] +fn is_server_error() { + assert!(status_code(500).is_server_error()); + assert!(status_code(599).is_server_error()); + + assert!(!status_code(499).is_server_error()); + assert!(!status_code(600).is_server_error()); +} + +/// Helper method for readability +fn status_code(status_code: u16) -> StatusCode { + StatusCode::from_u16(status_code).unwrap() +} diff --git a/.cargo-vendor/http-body-0.4.6/.cargo-checksum.json b/.cargo-vendor/http-body-0.4.6/.cargo-checksum.json new file mode 100644 index 0000000000..a3d29455a1 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"07aa2743e33442fdd69d4c8beeaaba938a3f177b5a48922886c5cd07c2f1a72d","Cargo.toml":"451b4b8125ced2429c8d2c6c33d972fab2dc3b59365bb38fe6e7f4d14613e44e","LICENSE":"0345e2b98685e3807fd802a2478085dcae35023e3da59b5a00f712504314d83a","README.md":"0f90f61ee419eefd4104005ef6900445fafce9a710dd1989463f3cebaf0fafe8","src/collect.rs":"a71ea9be6aa7100df2bb3520507ad8ebaede8ea5339310a63a0cd03081e14122","src/combinators/box_body.rs":"d27dfa9f289c9c8d1fe714415fb5df5bdaafafb80a5cff66fbbe720841e806bf","src/combinators/map_data.rs":"3063f44d1318feeec639eff6544e7fb91ad9abf9a295770af4cc69b48a691796","src/combinators/map_err.rs":"9db485a5904579147673ac7f9f347e322d283d95a421daaf5541d048045eec7e","src/combinators/mod.rs":"c9e32f64ab2f4866d14256fff4256ba61d4c1bcfaf2748754c561de3abe1eccd","src/empty.rs":"3e44cee68410101cb8bf88c0de504885075c084357e83bcd3a6761ba5c7c58d2","src/full.rs":"efcbf6831d32271170e2ed86c328bfb887aec0c93689f1218ab5a18c319b0fa8","src/lib.rs":"2fa07ea03a0afede091ea4bc02a70025754ce1b28a48d7269637a5397ded3df1","src/limited.rs":"485fc1c58bba29d2c7afdb4a032cd0e3c3578979ccd71f7459ddcd67e0f16077","src/next.rs":"d6863067b20c4bb42dced5c17bd954816b1338ce53e8d34ab81dbe240a1601cf","src/size_hint.rs":"017ed58c59b446b93aa4922e35b596490bf8f03af37c631610cc6576f1c21439","tests/is_end_stream.rs":"3a66d80d064f8a447bfa9fd212c2f91855604b1b41f554da3a029bc4a5be3a7e"},"package":"7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"} \ No newline at end of file diff --git a/.cargo-vendor/http-body-0.4.6/CHANGELOG.md b/.cargo-vendor/http-body-0.4.6/CHANGELOG.md new file mode 100644 index 0000000000..bdb3a25158 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/CHANGELOG.md @@ -0,0 +1,62 @@ +# 0.4.6 (December 8, 2023) + +- Add `Collect` combinator (backported from http-body-util). + +# 0.4.5 (May 20, 2022) + +- Add `String` impl for `Body`. +- Add `Limited` body implementation. + +# 0.4.4 (October 22, 2021) + +- Add `UnsyncBoxBody` and `Body::boxed_unsync`. 
+ +# 0.4.3 (August 8, 2021) + +- Implement `Default` for `BoxBody`. + +# 0.4.2 (May 8, 2021) + +- Correctly override `Body::size_hint` and `Body::is_end_stream` for `Empty`. +- Add `Full` which is a body that consists of a single chunk. + +# 0.4.1 (March 18, 2021) + +- Add combinators to `Body`: + - `map_data`: Change the `Data` chunks produced by the body. + - `map_err`: Change the `Error`s produced by the body. + - `boxed`: Convert the `Body` into a boxed trait object. +- Add `Empty`. + +# 0.4.0 (December 23, 2020) + +- Update `bytes` to v1.0. + +# 0.3.1 (December 13, 2019) + +- Implement `Body` for `http::Request` and `http::Response`. + +# 0.3.0 (December 4, 2019) + +- Rename `next` combinator to `data`. + +# 0.2.0 (December 3, 2019) + +- Update `http` to v0.2. +- Update `bytes` to v0.5. + +# 0.2.0-alpha.3 (October 1, 2019) + +- Fix `Body` to be object-safe. + +# 0.2.0-alpha.2 (October 1, 2019) + +- Add `next` and `trailers` combinator methods. + +# 0.2.0-alpha.1 (August 20, 2019) + +- Update to use `Pin` in `poll_data` and `poll_trailers`. + +# 0.1.0 (May 7, 2019) + +- Initial release diff --git a/.cargo-vendor/http-body-0.4.6/Cargo.toml b/.cargo-vendor/http-body-0.4.6/Cargo.toml new file mode 100644 index 0000000000..8a6a94ba68 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/Cargo.toml @@ -0,0 +1,45 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "http-body" +version = "0.4.6" +authors = [ + "Carl Lerche ", + "Lucio Franco ", + "Sean McArthur ", +] +description = """ +Trait representing an asynchronous, streaming, HTTP request or response body. +""" +documentation = "https://docs.rs/http-body" +readme = "README.md" +keywords = ["http"] +categories = ["web-programming"] +license = "MIT" +repository = "https://github.com/hyperium/http-body" + +[dependencies.bytes] +version = "1" + +[dependencies.http] +version = "0.2" + +[dependencies.pin-project-lite] +version = "0.2" + +[dev-dependencies.tokio] +version = "1" +features = [ + "macros", + "rt", +] diff --git a/.cargo-vendor/http-body-0.4.6/LICENSE b/.cargo-vendor/http-body-0.4.6/LICENSE new file mode 100644 index 0000000000..27b08f2874 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 Hyper Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo-vendor/http-body-0.4.6/README.md b/.cargo-vendor/http-body-0.4.6/README.md new file mode 100644 index 0000000000..c82ba29056 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/README.md @@ -0,0 +1,27 @@ +# HTTP Body + +A trait representing asynchronous operations on an HTTP body. + +[![crates.io][crates-badge]][crates-url] +[![documentation][docs-badge]][docs-url] +[![MIT License][mit-badge]][mit-url] +[![CI Status][ci-badge]][ci-url] + +[crates-badge]: https://img.shields.io/crates/v/http-body.svg +[crates-url]: https://crates.io/crates/http-body +[docs-badge]: https://docs.rs/http-body/badge.svg +[docs-url]: https://docs.rs/http-body +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE +[ci-badge]: https://github.com/hyperium/http-body/workflows/CI/badge.svg +[ci-url]: https://github.com/hyperium/http-body/actions?query=workflow%3ACI + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `http-body` by you, shall be licensed as MIT, without any additional +terms or conditions. 
diff --git a/.cargo-vendor/http-body/src/collect.rs b/.cargo-vendor/http-body-0.4.6/src/collect.rs similarity index 100% rename from .cargo-vendor/http-body/src/collect.rs rename to .cargo-vendor/http-body-0.4.6/src/collect.rs diff --git a/.cargo-vendor/http-body/src/combinators/box_body.rs b/.cargo-vendor/http-body-0.4.6/src/combinators/box_body.rs similarity index 100% rename from .cargo-vendor/http-body/src/combinators/box_body.rs rename to .cargo-vendor/http-body-0.4.6/src/combinators/box_body.rs diff --git a/.cargo-vendor/http-body/src/combinators/map_data.rs b/.cargo-vendor/http-body-0.4.6/src/combinators/map_data.rs similarity index 100% rename from .cargo-vendor/http-body/src/combinators/map_data.rs rename to .cargo-vendor/http-body-0.4.6/src/combinators/map_data.rs diff --git a/.cargo-vendor/http-body/src/combinators/map_err.rs b/.cargo-vendor/http-body-0.4.6/src/combinators/map_err.rs similarity index 100% rename from .cargo-vendor/http-body/src/combinators/map_err.rs rename to .cargo-vendor/http-body-0.4.6/src/combinators/map_err.rs diff --git a/.cargo-vendor/http-body/src/combinators/mod.rs b/.cargo-vendor/http-body-0.4.6/src/combinators/mod.rs similarity index 100% rename from .cargo-vendor/http-body/src/combinators/mod.rs rename to .cargo-vendor/http-body-0.4.6/src/combinators/mod.rs diff --git a/.cargo-vendor/http-body/src/empty.rs b/.cargo-vendor/http-body-0.4.6/src/empty.rs similarity index 100% rename from .cargo-vendor/http-body/src/empty.rs rename to .cargo-vendor/http-body-0.4.6/src/empty.rs diff --git a/.cargo-vendor/http-body/src/full.rs b/.cargo-vendor/http-body-0.4.6/src/full.rs similarity index 100% rename from .cargo-vendor/http-body/src/full.rs rename to .cargo-vendor/http-body-0.4.6/src/full.rs diff --git a/.cargo-vendor/http-body-0.4.6/src/lib.rs b/.cargo-vendor/http-body-0.4.6/src/lib.rs new file mode 100644 index 0000000000..2535cda161 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/src/lib.rs @@ -0,0 +1,335 @@ 
+#![doc(html_root_url = "https://docs.rs/http-body/0.4.6")] +#![deny( + missing_debug_implementations, + missing_docs, + unreachable_pub, + broken_intra_doc_links +)] +#![cfg_attr(test, deny(warnings))] + +//! Asynchronous HTTP request or response body. +//! +//! See [`Body`] for more details. +//! +//! [`Body`]: trait.Body.html + +mod collect; +mod empty; +mod full; +mod limited; +mod next; +mod size_hint; + +pub mod combinators; + +pub use self::collect::Collected; +pub use self::empty::Empty; +pub use self::full::Full; +pub use self::limited::{LengthLimitError, Limited}; +pub use self::next::{Data, Trailers}; +pub use self::size_hint::SizeHint; + +use self::combinators::{BoxBody, MapData, MapErr, UnsyncBoxBody}; +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use std::convert::Infallible; +use std::ops; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Trait representing a streaming body of a Request or Response. +/// +/// Data is streamed via the `poll_data` function, which asynchronously yields `T: Buf` values. The +/// `size_hint` function provides insight into the total number of bytes that will be streamed. +/// +/// The `poll_trailers` function returns an optional set of trailers used to finalize the request / +/// response exchange. This is mostly used when using the HTTP/2.0 protocol. +/// +pub trait Body { + /// Values yielded by the `Body`. + type Data: Buf; + + /// The error type this `Body` might generate. + type Error; + + /// Attempt to pull out the next data buffer of this stream. + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>>; + + /// Poll for an optional **single** `HeaderMap` of trailers. + /// + /// This function should only be called once `poll_data` returns `None`. + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>; + + /// Returns `true` when the end of stream has been reached. 
+ /// + /// An end of stream means that both `poll_data` and `poll_trailers` will + /// return `None`. + /// + /// A return value of `false` **does not** guarantee that a value will be + /// returned from `poll_stream` or `poll_trailers`. + fn is_end_stream(&self) -> bool { + false + } + + /// Returns the bounds on the remaining length of the stream. + /// + /// When the **exact** remaining length of the stream is known, the upper bound will be set and + /// will equal the lower bound. + fn size_hint(&self) -> SizeHint { + SizeHint::default() + } + + /// Returns future that resolves to next data chunk, if any. + fn data(&mut self) -> Data<'_, Self> + where + Self: Unpin + Sized, + { + Data(self) + } + + /// Returns future that resolves to trailers, if any. + fn trailers(&mut self) -> Trailers<'_, Self> + where + Self: Unpin + Sized, + { + Trailers(self) + } + + /// Maps this body's data value to a different value. + fn map_data(self, f: F) -> MapData + where + Self: Sized, + F: FnMut(Self::Data) -> B, + B: Buf, + { + MapData::new(self, f) + } + + /// Maps this body's error value to a different value. + fn map_err(self, f: F) -> MapErr + where + Self: Sized, + F: FnMut(Self::Error) -> E, + { + MapErr::new(self, f) + } + + /// Turn this body into [`Collected`] body which will collect all the DATA frames + /// and trailers. + fn collect(self) -> crate::collect::Collect + where + Self: Sized, + { + collect::Collect::new(self) + } + + /// Turn this body into a boxed trait object. + fn boxed(self) -> BoxBody + where + Self: Sized + Send + Sync + 'static, + { + BoxBody::new(self) + } + + /// Turn this body into a boxed trait object that is !Sync. 
+ fn boxed_unsync(self) -> UnsyncBoxBody + where + Self: Sized + Send + 'static, + { + UnsyncBoxBody::new(self) + } +} + +impl Body for &mut T { + type Data = T::Data; + type Error = T::Error; + + fn poll_data( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + Pin::new(&mut **self).poll_data(cx) + } + + fn poll_trailers( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Pin::new(&mut **self).poll_trailers(cx) + } + + fn is_end_stream(&self) -> bool { + Pin::new(&**self).is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + Pin::new(&**self).size_hint() + } +} + +impl

Body for Pin

+where + P: Unpin + ops::DerefMut, + P::Target: Body, +{ + type Data = <

::Target as Body>::Data; + type Error = <

::Target as Body>::Error; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + Pin::get_mut(self).as_mut().poll_data(cx) + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Pin::get_mut(self).as_mut().poll_trailers(cx) + } + + fn is_end_stream(&self) -> bool { + self.as_ref().is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + self.as_ref().size_hint() + } +} + +impl Body for Box { + type Data = T::Data; + type Error = T::Error; + + fn poll_data( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + Pin::new(&mut **self).poll_data(cx) + } + + fn poll_trailers( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Pin::new(&mut **self).poll_trailers(cx) + } + + fn is_end_stream(&self) -> bool { + self.as_ref().is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + self.as_ref().size_hint() + } +} + +impl Body for http::Request { + type Data = B::Data; + type Error = B::Error; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + unsafe { + self.map_unchecked_mut(http::Request::body_mut) + .poll_data(cx) + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + unsafe { + self.map_unchecked_mut(http::Request::body_mut) + .poll_trailers(cx) + } + } + + fn is_end_stream(&self) -> bool { + self.body().is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + self.body().size_hint() + } +} + +impl Body for http::Response { + type Data = B::Data; + type Error = B::Error; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + unsafe { + self.map_unchecked_mut(http::Response::body_mut) + .poll_data(cx) + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + unsafe { + self.map_unchecked_mut(http::Response::body_mut) + .poll_trailers(cx) + } + } + + fn is_end_stream(&self) 
-> bool { + self.body().is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + self.body().size_hint() + } +} + +impl Body for String { + type Data = Bytes; + type Error = Infallible; + + fn poll_data( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + if !self.is_empty() { + let s = std::mem::take(&mut *self); + Poll::Ready(Some(Ok(s.into_bytes().into()))) + } else { + Poll::Ready(None) + } + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) + } + + fn is_end_stream(&self) -> bool { + self.is_empty() + } + + fn size_hint(&self) -> SizeHint { + SizeHint::with_exact(self.len() as u64) + } +} + +#[cfg(test)] +fn _assert_bounds() { + fn can_be_trait_object(_: &dyn Body>, Error = std::io::Error>) {} +} diff --git a/.cargo-vendor/http-body/src/limited.rs b/.cargo-vendor/http-body-0.4.6/src/limited.rs similarity index 100% rename from .cargo-vendor/http-body/src/limited.rs rename to .cargo-vendor/http-body-0.4.6/src/limited.rs diff --git a/.cargo-vendor/http-body/src/next.rs b/.cargo-vendor/http-body-0.4.6/src/next.rs similarity index 100% rename from .cargo-vendor/http-body/src/next.rs rename to .cargo-vendor/http-body-0.4.6/src/next.rs diff --git a/.cargo-vendor/http-body-0.4.6/src/size_hint.rs b/.cargo-vendor/http-body-0.4.6/src/size_hint.rs new file mode 100644 index 0000000000..00a8f19177 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/src/size_hint.rs @@ -0,0 +1,86 @@ +use std::u64; + +/// A `Body` size hint +/// +/// The default implementation returns: +/// +/// * 0 for `lower` +/// * `None` for `upper`. +#[derive(Debug, Default, Clone)] +pub struct SizeHint { + lower: u64, + upper: Option, +} + +impl SizeHint { + /// Returns a new `SizeHint` with default values + #[inline] + pub fn new() -> SizeHint { + SizeHint::default() + } + + /// Returns a new `SizeHint` with both upper and lower bounds set to the + /// given value. 
+ #[inline] + pub fn with_exact(value: u64) -> SizeHint { + SizeHint { + lower: value, + upper: Some(value), + } + } + + /// Returns the lower bound of data that the `Body` will yield before + /// completing. + #[inline] + pub fn lower(&self) -> u64 { + self.lower + } + + /// Set the value of the `lower` hint. + /// + /// # Panics + /// + /// The function panics if `value` is greater than `upper`. + #[inline] + pub fn set_lower(&mut self, value: u64) { + assert!(value <= self.upper.unwrap_or(u64::MAX)); + self.lower = value; + } + + /// Returns the upper bound of data the `Body` will yield before + /// completing, or `None` if the value is unknown. + #[inline] + pub fn upper(&self) -> Option { + self.upper + } + + /// Set the value of the `upper` hint value. + /// + /// # Panics + /// + /// This function panics if `value` is less than `lower`. + #[inline] + pub fn set_upper(&mut self, value: u64) { + assert!(value >= self.lower, "`value` is less than than `lower`"); + + self.upper = Some(value); + } + + /// Returns the exact size of data that will be yielded **if** the + /// `lower` and `upper` bounds are equal. + #[inline] + pub fn exact(&self) -> Option { + if Some(self.lower) == self.upper { + self.upper + } else { + None + } + } + + /// Set the value of the `lower` and `upper` bounds to exactly the same. 
+ #[inline] + pub fn set_exact(&mut self, value: u64) { + self.lower = value; + self.upper = Some(value); + } +} diff --git a/.cargo-vendor/http-body-0.4.6/tests/is_end_stream.rs b/.cargo-vendor/http-body-0.4.6/tests/is_end_stream.rs new file mode 100644 index 0000000000..beaeb0b1a0 --- /dev/null +++ b/.cargo-vendor/http-body-0.4.6/tests/is_end_stream.rs @@ -0,0 +1,79 @@ +use http::HeaderMap; +use http_body::{Body, SizeHint}; +use std::pin::Pin; +use std::task::{Context, Poll}; + +struct Mock { + size_hint: SizeHint, +} + +impl Body for Mock { + type Data = ::std::io::Cursor>; + type Error = (); + + fn poll_data( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(None) + } + + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) + } + + fn size_hint(&self) -> SizeHint { + self.size_hint.clone() + } +} + +#[test] +fn is_end_stream_true() { + let combos = [ + (None, None, false), + (Some(123), None, false), + (Some(0), Some(123), false), + (Some(123), Some(123), false), + (Some(0), Some(0), false), + ]; + + for &(lower, upper, is_end_stream) in &combos { + let mut size_hint = SizeHint::new(); + assert_eq!(0, size_hint.lower()); + assert!(size_hint.upper().is_none()); + + if let Some(lower) = lower { + size_hint.set_lower(lower); + } + + if let Some(upper) = upper { + size_hint.set_upper(upper); + } + + let mut mock = Mock { size_hint }; + + assert_eq!( + is_end_stream, + Pin::new(&mut mock).is_end_stream(), + "size_hint = {:?}", + mock.size_hint.clone() + ); + } +} + +#[test] +fn is_end_stream_default_false() { + let mut mock = Mock { + size_hint: SizeHint::default(), + }; + + assert_eq!( + false, + Pin::new(&mut mock).is_end_stream(), + "size_hint = {:?}", + mock.size_hint.clone() + ); +} diff --git a/.cargo-vendor/http-body-util/.cargo-checksum.json b/.cargo-vendor/http-body-util/.cargo-checksum.json new file mode 100644 index 0000000000..ed74df3db4 --- /dev/null +++ 
b/.cargo-vendor/http-body-util/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"9e4fb34f528b34df0cc8f6f03deb39e3df0791a73db7e7bae52ea132b3ffa756","Cargo.toml":"05e6a10d8a9dc504f59460e8d6ff9862c614b7c2a146edb3411ef66763678f37","LICENSE":"0345e2b98685e3807fd802a2478085dcae35023e3da59b5a00f712504314d83a","README.md":"0f90f61ee419eefd4104005ef6900445fafce9a710dd1989463f3cebaf0fafe8","src/collected.rs":"23d193d5c44cc80b14cc2f7c469a13634acd1fe577b985bcb476010ed7f8c6fb","src/combinators/box_body.rs":"254b6760a47cc166c7e6ebf83267ec721fb0bb998c3d45e3471b062b51408181","src/combinators/collect.rs":"641291a187a6a86220d709f981b5920e11089a201eb65b03b630f5e9ac46d566","src/combinators/frame.rs":"e08cfaa40178e95124b2833cf20b724b5fd567830bf2f1d476478758f74fda9f","src/combinators/map_err.rs":"825954afe9fd843e86afa3b8fa6fb50005291d647ea2413f9538dde9f48bcb19","src/combinators/map_frame.rs":"8eb64fe3a4077c36351ed41c59704209e4b64552fa9b4bcc79ec86c8a929905c","src/combinators/mod.rs":"3d03db85c5d1b1d7a66c13c7a814bd6cb170739d46cb74f454ce341ee20828fc","src/combinators/with_trailers.rs":"7d3e445721e949de618285647c68f2a561858f291f5cb45b67375577a9f58176","src/either.rs":"bb9507a54cc4ef0a7cb02bf4e3a4c03f94fbc41bd02d48f7d3933e95dcb4b435","src/empty.rs":"a0ee1dd7edd2e035cd95e8ab887e8ef000fe972d7465e36f79178060b7a2fb62","src/full.rs":"7b249decc34dbad3c7c5561f7484cc792370eb3946a6f0a178d5170fe629b8fc","src/lib.rs":"db73a5b6717d9e43d0f802b17973e83c794487f64e0ba7b066476e1e8e768804","src/limited.rs":"c12ee827d8e09935a96f7ff9bdc9ccc6a5f0bbdc061faa4a3519fa738c49491e","src/stream.rs":"4996edfbfd475f2d54af1f33ae9e40dba47a26c314ed88b7fbdd70af6409b865","src/util.rs":"d23ff3fb96171fb2ff9bf9db1c176f862fca3f40f5c0b4ca26d0137103ecb780"},"package":"793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"} \ No newline at end of file diff --git a/.cargo-vendor/http-body-util/CHANGELOG.md b/.cargo-vendor/http-body-util/CHANGELOG.md new file mode 100644 index 0000000000..e999e73cd1 --- /dev/null 
+++ b/.cargo-vendor/http-body-util/CHANGELOG.md @@ -0,0 +1,25 @@ +# v0.1.2 + +- Add `BodyDataStream` type to convert a body to a stream of its data. + +# v0.1.1 + +- Add `BodyExt::with_trailers()` combinator. +- Improve performance of `BodyExt::collect().to_bytes()`. + +# v0.1.0 + +- Update `http` to 1.0. +- Update `http-body` to 1.0. + +# v0.1.0-rc.3 + +- Fix `BodyExt::collect()` from panicking on an empty frame. + +# v0.1.0-rc.2 + +- Update to `http-body` rc.2. + +# v0.1.0-rc.1 + +- Initial release, split from http-body 0.4.5. diff --git a/.cargo-vendor/http-body-util/Cargo.toml b/.cargo-vendor/http-body-util/Cargo.toml new file mode 100644 index 0000000000..f9fb74dbf2 --- /dev/null +++ b/.cargo-vendor/http-body-util/Cargo.toml @@ -0,0 +1,64 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.49" +name = "http-body-util" +version = "0.1.2" +authors = [ + "Carl Lerche ", + "Lucio Franco ", + "Sean McArthur ", +] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Combinators and adapters for HTTP request or response bodies. 
+""" +documentation = "https://docs.rs/http-body-util" +readme = "README.md" +keywords = ["http"] +categories = ["web-programming"] +license = "MIT" +repository = "https://github.com/hyperium/http-body" + +[lib] +name = "http_body_util" +path = "src/lib.rs" + +[dependencies.bytes] +version = "1" + +[dependencies.futures-util] +version = "0.3" +default-features = false + +[dependencies.http] +version = "1" + +[dependencies.http-body] +version = "1" + +[dependencies.pin-project-lite] +version = "0.2" + +[dev-dependencies.tokio] +version = "1" +features = [ + "macros", + "rt", + "sync", + "rt-multi-thread", +] diff --git a/.cargo-vendor/http-body-util/LICENSE b/.cargo-vendor/http-body-util/LICENSE new file mode 100644 index 0000000000..27b08f2874 --- /dev/null +++ b/.cargo-vendor/http-body-util/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 Hyper Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo-vendor/http-body-util/README.md b/.cargo-vendor/http-body-util/README.md new file mode 100644 index 0000000000..c82ba29056 --- /dev/null +++ b/.cargo-vendor/http-body-util/README.md @@ -0,0 +1,27 @@ +# HTTP Body + +A trait representing asynchronous operations on an HTTP body. + +[![crates.io][crates-badge]][crates-url] +[![documentation][docs-badge]][docs-url] +[![MIT License][mit-badge]][mit-url] +[![CI Status][ci-badge]][ci-url] + +[crates-badge]: https://img.shields.io/crates/v/http-body.svg +[crates-url]: https://crates.io/crates/http-body +[docs-badge]: https://docs.rs/http-body/badge.svg +[docs-url]: https://docs.rs/http-body +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE +[ci-badge]: https://github.com/hyperium/http-body/workflows/CI/badge.svg +[ci-url]: https://github.com/hyperium/http-body/actions?query=workflow%3ACI + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `http-body` by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/.cargo-vendor/http-body-util/src/collected.rs b/.cargo-vendor/http-body-util/src/collected.rs new file mode 100644 index 0000000000..a78f395de4 --- /dev/null +++ b/.cargo-vendor/http-body-util/src/collected.rs @@ -0,0 +1,178 @@ +use std::{ + convert::Infallible, + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use http_body::{Body, Frame}; + +use crate::util::BufList; + +/// A collected body produced by [`BodyExt::collect`] which collects all the DATA frames +/// and trailers. +/// +/// [`BodyExt::collect`]: crate::BodyExt::collect +#[derive(Debug)] +pub struct Collected { + bufs: BufList, + trailers: Option, +} + +impl Collected { + /// If there is a trailers frame buffered, returns a reference to it. 
+ /// + /// Returns `None` if the body contained no trailers. + pub fn trailers(&self) -> Option<&HeaderMap> { + self.trailers.as_ref() + } + + /// Aggregate this buffered into a [`Buf`]. + pub fn aggregate(self) -> impl Buf { + self.bufs + } + + /// Convert this body into a [`Bytes`]. + pub fn to_bytes(mut self) -> Bytes { + self.bufs.copy_to_bytes(self.bufs.remaining()) + } + + pub(crate) fn push_frame(&mut self, frame: Frame) { + let frame = match frame.into_data() { + Ok(data) => { + // Only push this frame if it has some data in it, to avoid crashing on + // `BufList::push`. + if data.has_remaining() { + self.bufs.push(data); + } + return; + } + Err(frame) => frame, + }; + + if let Ok(trailers) = frame.into_trailers() { + if let Some(current) = &mut self.trailers { + current.extend(trailers); + } else { + self.trailers = Some(trailers); + } + }; + } +} + +impl Body for Collected { + type Data = B; + type Error = Infallible; + + fn poll_frame( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let frame = if let Some(data) = self.bufs.pop() { + Frame::data(data) + } else if let Some(trailers) = self.trailers.take() { + Frame::trailers(trailers) + } else { + return Poll::Ready(None); + }; + + Poll::Ready(Some(Ok(frame))) + } +} + +impl Default for Collected { + fn default() -> Self { + Self { + bufs: BufList::default(), + trailers: None, + } + } +} + +impl Unpin for Collected {} + +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use futures_util::stream; + + use crate::{BodyExt, Full, StreamBody}; + + use super::*; + + #[tokio::test] + async fn full_body() { + let body = Full::new(&b"hello"[..]); + + let buffered = body.collect().await.unwrap(); + + let mut buf = buffered.to_bytes(); + + assert_eq!(&buf.copy_to_bytes(buf.remaining())[..], &b"hello"[..]); + } + + #[tokio::test] + async fn segmented_body() { + let bufs = [&b"hello"[..], &b"world"[..], &b"!"[..]]; + let body = 
StreamBody::new(stream::iter(bufs.map(Frame::data).map(Ok::<_, Infallible>))); + + let buffered = body.collect().await.unwrap(); + + let mut buf = buffered.to_bytes(); + + assert_eq!(&buf.copy_to_bytes(buf.remaining())[..], b"helloworld!"); + } + + #[tokio::test] + async fn delayed_segments() { + let one = stream::once(async { Ok::<_, Infallible>(Frame::data(&b"hello "[..])) }); + let two = stream::once(async { + // a yield just so its not ready immediately + tokio::task::yield_now().await; + Ok::<_, Infallible>(Frame::data(&b"world!"[..])) + }); + let stream = futures_util::StreamExt::chain(one, two); + + let body = StreamBody::new(stream); + + let buffered = body.collect().await.unwrap(); + + let mut buf = buffered.to_bytes(); + + assert_eq!(&buf.copy_to_bytes(buf.remaining())[..], b"hello world!"); + } + + #[tokio::test] + async fn trailers() { + let mut trailers = HeaderMap::new(); + trailers.insert("this", "a trailer".try_into().unwrap()); + let bufs = [ + Frame::data(&b"hello"[..]), + Frame::data(&b"world!"[..]), + Frame::trailers(trailers.clone()), + ]; + + let body = StreamBody::new(stream::iter(bufs.map(Ok::<_, Infallible>))); + + let buffered = body.collect().await.unwrap(); + + assert_eq!(&trailers, buffered.trailers().unwrap()); + + let mut buf = buffered.to_bytes(); + + assert_eq!(&buf.copy_to_bytes(buf.remaining())[..], b"helloworld!"); + } + + /// Test for issue [#88](https://github.com/hyperium/http-body/issues/88). 
+ #[tokio::test] + async fn empty_frame() { + let bufs: [&[u8]; 1] = [&[]]; + + let body = StreamBody::new(stream::iter(bufs.map(Frame::data).map(Ok::<_, Infallible>))); + let buffered = body.collect().await.unwrap(); + + assert_eq!(buffered.to_bytes().len(), 0); + } +} diff --git a/.cargo-vendor/http-body-util/src/combinators/box_body.rs b/.cargo-vendor/http-body-util/src/combinators/box_body.rs new file mode 100644 index 0000000000..2ec801041c --- /dev/null +++ b/.cargo-vendor/http-body-util/src/combinators/box_body.rs @@ -0,0 +1,122 @@ +use crate::BodyExt as _; + +use bytes::Buf; +use http_body::{Body, Frame, SizeHint}; +use std::{ + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +/// A boxed [`Body`] trait object. +pub struct BoxBody { + inner: Pin + Send + Sync + 'static>>, +} + +/// A boxed [`Body`] trait object that is !Sync. +pub struct UnsyncBoxBody { + inner: Pin + Send + 'static>>, +} + +impl BoxBody { + /// Create a new `BoxBody`. + pub fn new(body: B) -> Self + where + B: Body + Send + Sync + 'static, + D: Buf, + { + Self { + inner: Box::pin(body), + } + } +} + +impl fmt::Debug for BoxBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BoxBody").finish() + } +} + +impl Body for BoxBody +where + D: Buf, +{ + type Data = D; + type Error = E; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + self.inner.as_mut().poll_frame(cx) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + self.inner.size_hint() + } +} + +impl Default for BoxBody +where + D: Buf + 'static, +{ + fn default() -> Self { + BoxBody::new(crate::Empty::new().map_err(|err| match err {})) + } +} + +// === UnsyncBoxBody === +impl UnsyncBoxBody { + /// Create a new `BoxBody`. 
+ pub fn new(body: B) -> Self + where + B: Body + Send + 'static, + D: Buf, + { + Self { + inner: Box::pin(body), + } + } +} + +impl fmt::Debug for UnsyncBoxBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("UnsyncBoxBody").finish() + } +} + +impl Body for UnsyncBoxBody +where + D: Buf, +{ + type Data = D; + type Error = E; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + self.inner.as_mut().poll_frame(cx) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + self.inner.size_hint() + } +} + +impl Default for UnsyncBoxBody +where + D: Buf + 'static, +{ + fn default() -> Self { + UnsyncBoxBody::new(crate::Empty::new().map_err(|err| match err {})) + } +} diff --git a/.cargo-vendor/http-body-util/src/combinators/collect.rs b/.cargo-vendor/http-body-util/src/combinators/collect.rs new file mode 100644 index 0000000000..d89e7214cf --- /dev/null +++ b/.cargo-vendor/http-body-util/src/combinators/collect.rs @@ -0,0 +1,43 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use http_body::Body; +use pin_project_lite::pin_project; + +pin_project! { + /// Future that resolves into a [`Collected`]. + /// + /// [`Collected`]: crate::Collected + pub struct Collect + where + T: Body, + T: ?Sized, + { + pub(crate) collected: Option>, + #[pin] + pub(crate) body: T, + } +} + +impl Future for Collect { + type Output = Result, T::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll { + let mut me = self.project(); + + loop { + let frame = futures_util::ready!(me.body.as_mut().poll_frame(cx)); + + let frame = if let Some(frame) = frame { + frame? 
+ } else { + return Poll::Ready(Ok(me.collected.take().expect("polled after complete"))); + }; + + me.collected.as_mut().unwrap().push_frame(frame); + } + } +} diff --git a/.cargo-vendor/http-body-util/src/combinators/frame.rs b/.cargo-vendor/http-body-util/src/combinators/frame.rs new file mode 100644 index 0000000000..211fa08780 --- /dev/null +++ b/.cargo-vendor/http-body-util/src/combinators/frame.rs @@ -0,0 +1,18 @@ +use http_body::Body; + +use core::future::Future; +use core::pin::Pin; +use core::task; + +#[must_use = "futures don't do anything unless polled"] +#[derive(Debug)] +/// Future that resolves to the next frame from a [`Body`]. +pub struct Frame<'a, T: ?Sized>(pub(crate) &'a mut T); + +impl<'a, T: Body + Unpin + ?Sized> Future for Frame<'a, T> { + type Output = Option, T::Error>>; + + fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> task::Poll { + Pin::new(&mut self.0).poll_frame(ctx) + } +} diff --git a/.cargo-vendor/http-body-util/src/combinators/map_err.rs b/.cargo-vendor/http-body-util/src/combinators/map_err.rs new file mode 100644 index 0000000000..384cfc56ba --- /dev/null +++ b/.cargo-vendor/http-body-util/src/combinators/map_err.rs @@ -0,0 +1,89 @@ +use http_body::{Body, Frame, SizeHint}; +use pin_project_lite::pin_project; +use std::{ + any::type_name, + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +pin_project! { + /// Body returned by the [`map_err`] combinator. 
+ /// + /// [`map_err`]: crate::BodyExt::map_err + #[derive(Clone, Copy)] + pub struct MapErr { + #[pin] + inner: B, + f: F + } +} + +impl MapErr { + #[inline] + pub(crate) fn new(body: B, f: F) -> Self { + Self { inner: body, f } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + &self.inner + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + self.project().inner + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + self.inner + } +} + +impl Body for MapErr +where + B: Body, + F: FnMut(B::Error) -> E, +{ + type Data = B::Data; + type Error = E; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + match this.inner.poll_frame(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(None) => Poll::Ready(None), + Poll::Ready(Some(Ok(frame))) => Poll::Ready(Some(Ok(frame))), + Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err((this.f)(err)))), + } + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + self.inner.size_hint() + } +} + +impl fmt::Debug for MapErr +where + B: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("MapErr") + .field("inner", &self.inner) + .field("f", &type_name::()) + .finish() + } +} diff --git a/.cargo-vendor/http-body-util/src/combinators/map_frame.rs b/.cargo-vendor/http-body-util/src/combinators/map_frame.rs new file mode 100644 index 0000000000..44886bdeab --- /dev/null +++ b/.cargo-vendor/http-body-util/src/combinators/map_frame.rs @@ -0,0 +1,87 @@ +use bytes::Buf; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + any::type_name, + fmt, + pin::Pin, + task::{Context, Poll}, +}; + +pin_project! 
{ + /// Body returned by the [`map_frame`] combinator. + /// + /// [`map_frame`]: crate::BodyExt::map_frame + #[derive(Clone, Copy)] + pub struct MapFrame { + #[pin] + inner: B, + f: F + } +} + +impl MapFrame { + #[inline] + pub(crate) fn new(body: B, f: F) -> Self { + Self { inner: body, f } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + &self.inner + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + self.project().inner + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + self.inner + } +} + +impl Body for MapFrame +where + B: Body, + F: FnMut(Frame) -> Frame, + B2: Buf, +{ + type Data = B2; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + match this.inner.poll_frame(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(None) => Poll::Ready(None), + Poll::Ready(Some(Ok(frame))) => Poll::Ready(Some(Ok((this.f)(frame)))), + Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))), + } + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } +} + +impl fmt::Debug for MapFrame +where + B: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("MapFrame") + .field("inner", &self.inner) + .field("f", &type_name::()) + .finish() + } +} diff --git a/.cargo-vendor/http-body-util/src/combinators/mod.rs b/.cargo-vendor/http-body-util/src/combinators/mod.rs new file mode 100644 index 0000000000..38d263788d --- /dev/null +++ b/.cargo-vendor/http-body-util/src/combinators/mod.rs @@ -0,0 +1,17 @@ +//! Combinators for the `Body` trait. 
+ +mod box_body; +mod collect; +mod frame; +mod map_err; +mod map_frame; +mod with_trailers; + +pub use self::{ + box_body::{BoxBody, UnsyncBoxBody}, + collect::Collect, + frame::Frame, + map_err::MapErr, + map_frame::MapFrame, + with_trailers::WithTrailers, +}; diff --git a/.cargo-vendor/http-body-util/src/combinators/with_trailers.rs b/.cargo-vendor/http-body-util/src/combinators/with_trailers.rs new file mode 100644 index 0000000000..92f466a9d6 --- /dev/null +++ b/.cargo-vendor/http-body-util/src/combinators/with_trailers.rs @@ -0,0 +1,213 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use futures_util::ready; +use http::HeaderMap; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; + +pin_project! { + /// Adds trailers to a body. + /// + /// See [`BodyExt::with_trailers`] for more details. + pub struct WithTrailers { + #[pin] + state: State, + } +} + +impl WithTrailers { + pub(crate) fn new(body: T, trailers: F) -> Self { + Self { + state: State::PollBody { + body, + trailers: Some(trailers), + }, + } + } +} + +pin_project! { + #[project = StateProj] + enum State { + PollBody { + #[pin] + body: T, + trailers: Option, + }, + PollTrailers { + #[pin] + trailers: F, + prev_trailers: Option, + }, + Done, + } +} + +impl Body for WithTrailers +where + T: Body, + F: Future>>, +{ + type Data = T::Data; + type Error = T::Error; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + loop { + let mut this = self.as_mut().project(); + + match this.state.as_mut().project() { + StateProj::PollBody { body, trailers } => match ready!(body.poll_frame(cx)?) 
{ + Some(frame) => match frame.into_trailers() { + Ok(prev_trailers) => { + let trailers = trailers.take().unwrap(); + this.state.set(State::PollTrailers { + trailers, + prev_trailers: Some(prev_trailers), + }); + } + Err(frame) => { + return Poll::Ready(Some(Ok(frame))); + } + }, + None => { + let trailers = trailers.take().unwrap(); + this.state.set(State::PollTrailers { + trailers, + prev_trailers: None, + }); + } + }, + StateProj::PollTrailers { + trailers, + prev_trailers, + } => { + let trailers = ready!(trailers.poll(cx)?); + match (trailers, prev_trailers.take()) { + (None, None) => return Poll::Ready(None), + (None, Some(trailers)) | (Some(trailers), None) => { + this.state.set(State::Done); + return Poll::Ready(Some(Ok(Frame::trailers(trailers)))); + } + (Some(new_trailers), Some(mut prev_trailers)) => { + prev_trailers.extend(new_trailers); + this.state.set(State::Done); + return Poll::Ready(Some(Ok(Frame::trailers(prev_trailers)))); + } + } + } + StateProj::Done => { + return Poll::Ready(None); + } + } + } + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + match &self.state { + State::PollBody { body, .. } => body.size_hint(), + State::PollTrailers { .. 
} | State::Done => Default::default(), + } + } +} + +#[cfg(test)] +mod tests { + use std::convert::Infallible; + + use bytes::Bytes; + use http::{HeaderName, HeaderValue}; + + use crate::{BodyExt, Empty, Full}; + + #[allow(unused_imports)] + use super::*; + + #[tokio::test] + async fn works() { + let mut trailers = HeaderMap::new(); + trailers.insert( + HeaderName::from_static("foo"), + HeaderValue::from_static("bar"), + ); + + let body = + Full::::from("hello").with_trailers(std::future::ready(Some( + Ok::<_, Infallible>(trailers.clone()), + ))); + + futures_util::pin_mut!(body); + let waker = futures_util::task::noop_waker(); + let mut cx = Context::from_waker(&waker); + + let data = unwrap_ready(body.as_mut().poll_frame(&mut cx)) + .unwrap() + .unwrap() + .into_data() + .unwrap(); + assert_eq!(data, "hello"); + + let body_trailers = unwrap_ready(body.as_mut().poll_frame(&mut cx)) + .unwrap() + .unwrap() + .into_trailers() + .unwrap(); + assert_eq!(body_trailers, trailers); + + assert!(unwrap_ready(body.as_mut().poll_frame(&mut cx)).is_none()); + } + + #[tokio::test] + async fn merges_trailers() { + let mut trailers_1 = HeaderMap::new(); + trailers_1.insert( + HeaderName::from_static("foo"), + HeaderValue::from_static("bar"), + ); + + let mut trailers_2 = HeaderMap::new(); + trailers_2.insert( + HeaderName::from_static("baz"), + HeaderValue::from_static("qux"), + ); + + let body = Empty::::new() + .with_trailers(std::future::ready(Some(Ok::<_, Infallible>( + trailers_1.clone(), + )))) + .with_trailers(std::future::ready(Some(Ok::<_, Infallible>( + trailers_2.clone(), + )))); + + futures_util::pin_mut!(body); + let waker = futures_util::task::noop_waker(); + let mut cx = Context::from_waker(&waker); + + let body_trailers = unwrap_ready(body.as_mut().poll_frame(&mut cx)) + .unwrap() + .unwrap() + .into_trailers() + .unwrap(); + + let mut all_trailers = HeaderMap::new(); + all_trailers.extend(trailers_1); + all_trailers.extend(trailers_2); + 
assert_eq!(body_trailers, all_trailers); + + assert!(unwrap_ready(body.as_mut().poll_frame(&mut cx)).is_none()); + } + + fn unwrap_ready(poll: Poll) -> T { + match poll { + Poll::Ready(t) => t, + Poll::Pending => panic!("pending"), + } + } +} diff --git a/.cargo-vendor/http-body-util/src/either.rs b/.cargo-vendor/http-body-util/src/either.rs new file mode 100644 index 0000000000..9e0cc4329e --- /dev/null +++ b/.cargo-vendor/http-body-util/src/either.rs @@ -0,0 +1,186 @@ +use std::error::Error; +use std::fmt::Debug; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use bytes::Buf; +use http_body::{Body, Frame, SizeHint}; +use proj::EitherProj; + +/// Sum type with two cases: [`Left`] and [`Right`], used if a body can be one of +/// two distinct types. +/// +/// [`Left`]: Either::Left +/// [`Right`]: Either::Right +#[derive(Debug, Clone, Copy)] +pub enum Either { + /// A value of type `L` + Left(L), + /// A value of type `R` + Right(R), +} + +impl Either { + /// This function is part of the generated code from `pin-project-lite`, + /// for a more in depth explanation and the rest of the generated code refer + /// to the [`proj`] module. + pub(crate) fn project(self: Pin<&mut Self>) -> EitherProj { + unsafe { + match self.get_unchecked_mut() { + Self::Left(left) => EitherProj::Left(Pin::new_unchecked(left)), + Self::Right(right) => EitherProj::Right(Pin::new_unchecked(right)), + } + } + } +} + +impl Either { + /// Convert [`Either`] into the inner type, if both `Left` and `Right` are + /// of the same type. 
+ pub fn into_inner(self) -> L { + match self { + Either::Left(left) => left, + Either::Right(right) => right, + } + } +} + +impl Body for Either +where + L: Body, + R: Body, + L::Error: Into>, + R::Error: Into>, + Data: Buf, +{ + type Data = Data; + type Error = Box; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project() { + EitherProj::Left(left) => left + .poll_frame(cx) + .map(|poll| poll.map(|opt| opt.map_err(Into::into))), + EitherProj::Right(right) => right + .poll_frame(cx) + .map(|poll| poll.map(|opt| opt.map_err(Into::into))), + } + } + + fn is_end_stream(&self) -> bool { + match self { + Either::Left(left) => left.is_end_stream(), + Either::Right(right) => right.is_end_stream(), + } + } + + fn size_hint(&self) -> SizeHint { + match self { + Either::Left(left) => left.size_hint(), + Either::Right(right) => right.size_hint(), + } + } +} + +pub(crate) mod proj { + //! This code is the (cleaned output) generated by [pin-project-lite], as it + //! does not support tuple variants. + //! + //! This is the altered expansion from the following snippet, expanded by + //! `cargo-expand`: + //! + //! ```rust + //! use pin_project_lite::pin_project; + //! + //! pin_project! { + //! #[project = EitherProj] + //! pub enum Either { + //! Left {#[pin] left: L}, + //! Right {#[pin] right: R} + //! } + //! } + //! ``` + //! + //! 
[pin-project-lite]: https://docs.rs/pin-project-lite/latest/pin_project_lite/ + use std::marker::PhantomData; + use std::pin::Pin; + + use super::Either; + + #[allow(dead_code)] + #[allow(single_use_lifetimes)] + #[allow(unknown_lints)] + #[allow(clippy::mut_mut)] + #[allow(clippy::redundant_pub_crate)] + #[allow(clippy::ref_option_ref)] + #[allow(clippy::type_repetition_in_bounds)] + pub(crate) enum EitherProj<'__pin, L, R> + where + Either: '__pin, + { + Left(Pin<&'__pin mut L>), + Right(Pin<&'__pin mut R>), + } + + #[allow(single_use_lifetimes)] + #[allow(unknown_lints)] + #[allow(clippy::used_underscore_binding)] + #[allow(missing_debug_implementations)] + const _: () = { + #[allow(non_snake_case)] + pub struct __Origin<'__pin, L, R> { + __dummy_lifetime: PhantomData<&'__pin ()>, + _Left: L, + _Right: R, + } + impl<'__pin, L, R> Unpin for Either where __Origin<'__pin, L, R>: Unpin {} + + trait MustNotImplDrop {} + #[allow(drop_bounds)] + impl MustNotImplDrop for T {} + impl MustNotImplDrop for Either {} + }; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{BodyExt, Empty, Full}; + + #[tokio::test] + async fn data_left() { + let full = Full::new(&b"hello"[..]); + + let mut value: Either<_, Empty<&[u8]>> = Either::Left(full); + + assert_eq!(value.size_hint().exact(), Some(b"hello".len() as u64)); + assert_eq!( + value.frame().await.unwrap().unwrap().into_data().unwrap(), + &b"hello"[..] + ); + assert!(value.frame().await.is_none()); + } + + #[tokio::test] + async fn data_right() { + let full = Full::new(&b"hello!"[..]); + + let mut value: Either, _> = Either::Right(full); + + assert_eq!(value.size_hint().exact(), Some(b"hello!".len() as u64)); + assert_eq!( + value.frame().await.unwrap().unwrap().into_data().unwrap(), + &b"hello!"[..] 
+ ); + assert!(value.frame().await.is_none()); + } + + #[test] + fn into_inner() { + let a = Either::::Left(2); + assert_eq!(a.into_inner(), 2) + } +} diff --git a/.cargo-vendor/http-body-util/src/empty.rs b/.cargo-vendor/http-body-util/src/empty.rs new file mode 100644 index 0000000000..d1445b41c9 --- /dev/null +++ b/.cargo-vendor/http-body-util/src/empty.rs @@ -0,0 +1,64 @@ +use bytes::Buf; +use http_body::{Body, Frame, SizeHint}; +use std::{ + convert::Infallible, + fmt, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; + +/// A body that is always empty. +pub struct Empty { + _marker: PhantomData D>, +} + +impl Empty { + /// Create a new `Empty`. + pub fn new() -> Self { + Self::default() + } +} + +impl Body for Empty { + type Data = D; + type Error = Infallible; + + #[inline] + fn poll_frame( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Poll::Ready(None) + } + + fn is_end_stream(&self) -> bool { + true + } + + fn size_hint(&self) -> SizeHint { + SizeHint::with_exact(0) + } +} + +impl fmt::Debug for Empty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Empty").finish() + } +} + +impl Default for Empty { + fn default() -> Self { + Self { + _marker: PhantomData, + } + } +} + +impl Clone for Empty { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for Empty {} diff --git a/.cargo-vendor/http-body-util/src/full.rs b/.cargo-vendor/http-body-util/src/full.rs new file mode 100644 index 0000000000..30f0c6a11d --- /dev/null +++ b/.cargo-vendor/http-body-util/src/full.rs @@ -0,0 +1,147 @@ +use bytes::{Buf, Bytes}; +use http_body::{Body, Frame, SizeHint}; +use pin_project_lite::pin_project; +use std::borrow::Cow; +use std::convert::{Infallible, TryFrom}; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// A body that consists of a single chunk. 
+ #[derive(Clone, Copy, Debug)] + pub struct Full { + data: Option, + } +} + +impl Full +where + D: Buf, +{ + /// Create a new `Full`. + pub fn new(data: D) -> Self { + let data = if data.has_remaining() { + Some(data) + } else { + None + }; + Full { data } + } +} + +impl Body for Full +where + D: Buf, +{ + type Data = D; + type Error = Infallible; + + fn poll_frame( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Poll::Ready(self.data.take().map(|d| Ok(Frame::data(d)))) + } + + fn is_end_stream(&self) -> bool { + self.data.is_none() + } + + fn size_hint(&self) -> SizeHint { + self.data + .as_ref() + .map(|data| SizeHint::with_exact(u64::try_from(data.remaining()).unwrap())) + .unwrap_or_else(|| SizeHint::with_exact(0)) + } +} + +impl Default for Full +where + D: Buf, +{ + /// Create an empty `Full`. + fn default() -> Self { + Full { data: None } + } +} + +impl From for Full +where + D: Buf + From, +{ + fn from(bytes: Bytes) -> Self { + Full::new(D::from(bytes)) + } +} + +impl From> for Full +where + D: Buf + From>, +{ + fn from(vec: Vec) -> Self { + Full::new(D::from(vec)) + } +} + +impl From<&'static [u8]> for Full +where + D: Buf + From<&'static [u8]>, +{ + fn from(slice: &'static [u8]) -> Self { + Full::new(D::from(slice)) + } +} + +impl From> for Full +where + D: Buf + From<&'static B> + From, + B: ToOwned + ?Sized, +{ + fn from(cow: Cow<'static, B>) -> Self { + match cow { + Cow::Borrowed(b) => Full::new(D::from(b)), + Cow::Owned(o) => Full::new(D::from(o)), + } + } +} + +impl From for Full +where + D: Buf + From, +{ + fn from(s: String) -> Self { + Full::new(D::from(s)) + } +} + +impl From<&'static str> for Full +where + D: Buf + From<&'static str>, +{ + fn from(slice: &'static str) -> Self { + Full::new(D::from(slice)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::BodyExt; + + #[tokio::test] + async fn full_returns_some() { + let mut full = Full::new(&b"hello"[..]); + 
assert_eq!(full.size_hint().exact(), Some(b"hello".len() as u64)); + assert_eq!( + full.frame().await.unwrap().unwrap().into_data().unwrap(), + &b"hello"[..] + ); + assert!(full.frame().await.is_none()); + } + + #[tokio::test] + async fn empty_full_returns_none() { + assert!(Full::<&[u8]>::default().frame().await.is_none()); + assert!(Full::new(&b""[..]).frame().await.is_none()); + } +} diff --git a/.cargo-vendor/http-body-util/src/lib.rs b/.cargo-vendor/http-body-util/src/lib.rs new file mode 100644 index 0000000000..dee852cf49 --- /dev/null +++ b/.cargo-vendor/http-body-util/src/lib.rs @@ -0,0 +1,141 @@ +#![deny(missing_debug_implementations, missing_docs, unreachable_pub)] +#![cfg_attr(test, deny(warnings))] + +//! Utilities for [`http_body::Body`]. +//! +//! [`BodyExt`] adds extensions to the common trait. +//! +//! [`Empty`] and [`Full`] provide simple implementations. + +mod collected; +pub mod combinators; +mod either; +mod empty; +mod full; +mod limited; +mod stream; + +mod util; + +use self::combinators::{BoxBody, MapErr, MapFrame, UnsyncBoxBody}; + +pub use self::collected::Collected; +pub use self::either::Either; +pub use self::empty::Empty; +pub use self::full::Full; +pub use self::limited::{LengthLimitError, Limited}; +pub use self::stream::{BodyDataStream, BodyStream, StreamBody}; + +/// An extension trait for [`http_body::Body`] adding various combinators and adapters +pub trait BodyExt: http_body::Body { + /// Returns a future that resolves to the next [`Frame`], if any. + /// + /// [`Frame`]: combinators::Frame + fn frame(&mut self) -> combinators::Frame<'_, Self> + where + Self: Unpin, + { + combinators::Frame(self) + } + + /// Maps this body's frame to a different kind. + fn map_frame(self, f: F) -> MapFrame + where + Self: Sized, + F: FnMut(http_body::Frame) -> http_body::Frame, + B: bytes::Buf, + { + MapFrame::new(self, f) + } + + /// Maps this body's error value to a different value. 
+ fn map_err(self, f: F) -> MapErr + where + Self: Sized, + F: FnMut(Self::Error) -> E, + { + MapErr::new(self, f) + } + + /// Turn this body into a boxed trait object. + fn boxed(self) -> BoxBody + where + Self: Sized + Send + Sync + 'static, + { + BoxBody::new(self) + } + + /// Turn this body into a boxed trait object that is !Sync. + fn boxed_unsync(self) -> UnsyncBoxBody + where + Self: Sized + Send + 'static, + { + UnsyncBoxBody::new(self) + } + + /// Turn this body into [`Collected`] body which will collect all the DATA frames + /// and trailers. + fn collect(self) -> combinators::Collect + where + Self: Sized, + { + combinators::Collect { + body: self, + collected: Some(crate::Collected::default()), + } + } + + /// Add trailers to the body. + /// + /// The trailers will be sent when all previous frames have been sent and the `trailers` future + /// resolves. + /// + /// # Example + /// + /// ``` + /// use http::HeaderMap; + /// use http_body_util::{Full, BodyExt}; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// async fn main() { + /// let (tx, rx) = tokio::sync::oneshot::channel::(); + /// + /// let body = Full::::from("Hello, World!") + /// // add trailers via a future + /// .with_trailers(async move { + /// match rx.await { + /// Ok(trailers) => Some(Ok(trailers)), + /// Err(_err) => None, + /// } + /// }); + /// + /// // compute the trailers in the background + /// tokio::spawn(async move { + /// let _ = tx.send(compute_trailers().await); + /// }); + /// + /// async fn compute_trailers() -> HeaderMap { + /// // ... + /// # unimplemented!() + /// } + /// # } + /// ``` + fn with_trailers(self, trailers: F) -> combinators::WithTrailers + where + Self: Sized, + F: std::future::Future>>, + { + combinators::WithTrailers::new(self, trailers) + } + + /// Turn this body into [`BodyDataStream`]. 
+ fn into_data_stream(self) -> BodyDataStream + where + Self: Sized, + { + BodyDataStream::new(self) + } +} + +impl BodyExt for T where T: http_body::Body {} diff --git a/.cargo-vendor/http-body-util/src/limited.rs b/.cargo-vendor/http-body-util/src/limited.rs new file mode 100644 index 0000000000..c4c5c8bcc0 --- /dev/null +++ b/.cargo-vendor/http-body-util/src/limited.rs @@ -0,0 +1,265 @@ +use bytes::Buf; +use http_body::{Body, Frame, SizeHint}; +use pin_project_lite::pin_project; +use std::error::Error; +use std::fmt; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// A length limited body. + /// + /// This body will return an error if more than the configured number + /// of bytes are returned on polling the wrapped body. + #[derive(Clone, Copy, Debug)] + pub struct Limited { + remaining: usize, + #[pin] + inner: B, + } +} + +impl Limited { + /// Create a new `Limited`. + pub fn new(inner: B, limit: usize) -> Self { + Self { + remaining: limit, + inner, + } + } +} + +impl Body for Limited +where + B: Body, + B::Error: Into>, +{ + type Data = B::Data; + type Error = Box; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + let res = match this.inner.poll_frame(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => None, + Poll::Ready(Some(Ok(frame))) => { + if let Some(data) = frame.data_ref() { + if data.remaining() > *this.remaining { + *this.remaining = 0; + Some(Err(LengthLimitError.into())) + } else { + *this.remaining -= data.remaining(); + Some(Ok(frame)) + } + } else { + Some(Ok(frame)) + } + } + Poll::Ready(Some(Err(err))) => Some(Err(err.into())), + }; + + Poll::Ready(res) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> SizeHint { + use std::convert::TryFrom; + match u64::try_from(self.remaining) { + Ok(n) => { + let mut hint = self.inner.size_hint(); + if hint.lower() >= n { + 
hint.set_exact(n) + } else if let Some(max) = hint.upper() { + hint.set_upper(n.min(max)) + } else { + hint.set_upper(n) + } + hint + } + Err(_) => self.inner.size_hint(), + } + } +} + +/// An error returned when body length exceeds the configured limit. +#[derive(Debug)] +#[non_exhaustive] +pub struct LengthLimitError; + +impl fmt::Display for LengthLimitError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("length limit exceeded") + } +} + +impl Error for LengthLimitError {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{BodyExt, Full, StreamBody}; + use bytes::Bytes; + use std::convert::Infallible; + + #[tokio::test] + async fn read_for_body_under_limit_returns_data() { + const DATA: &[u8] = b"testing"; + let inner = Full::new(Bytes::from(DATA)); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(7); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = body.frame().await.unwrap().unwrap().into_data().unwrap(); + assert_eq!(data, DATA); + hint.set_upper(0); + assert_eq!(body.size_hint().upper(), hint.upper()); + + assert!(body.frame().await.is_none()); + } + + #[tokio::test] + async fn read_for_body_over_limit_returns_error() { + const DATA: &[u8] = b"testing a string that is too long"; + let inner = Full::new(Bytes::from(DATA)); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let error = body.frame().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); + } + + fn body_from_iter(into_iter: I) -> impl Body + where + I: IntoIterator, + I::Item: Into + 'static, + I::IntoIter: Send + 'static, + { + let iter = into_iter + .into_iter() + .map(|it| Frame::data(it.into())) + .map(Ok::<_, Infallible>); + + StreamBody::new(futures_util::stream::iter(iter)) + } + + #[tokio::test] + async fn 
read_for_chunked_body_around_limit_returns_first_chunk_but_returns_error_on_over_limit_chunk( + ) { + const DATA: [&[u8]; 2] = [b"testing ", b"a string that is too long"]; + let inner = body_from_iter(DATA); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = body.frame().await.unwrap().unwrap().into_data().unwrap(); + assert_eq!(data, DATA[0]); + hint.set_upper(0); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let error = body.frame().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); + } + + #[tokio::test] + async fn read_for_chunked_body_over_limit_on_first_chunk_returns_error() { + const DATA: [&[u8]; 2] = [b"testing a string", b" that is too long"]; + let inner = body_from_iter(DATA); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let error = body.frame().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); + } + + #[tokio::test] + async fn read_for_chunked_body_under_limit_is_okay() { + const DATA: [&[u8]; 2] = [b"test", b"ing!"]; + let inner = body_from_iter(DATA); + let body = &mut Limited::new(inner, 8); + + let mut hint = SizeHint::new(); + hint.set_upper(8); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = body.frame().await.unwrap().unwrap().into_data().unwrap(); + assert_eq!(data, DATA[0]); + hint.set_upper(4); + assert_eq!(body.size_hint().upper(), hint.upper()); + + let data = body.frame().await.unwrap().unwrap().into_data().unwrap(); + assert_eq!(data, DATA[1]); + hint.set_upper(0); + assert_eq!(body.size_hint().upper(), hint.upper()); + + assert!(body.frame().await.is_none()); + } + + struct SomeTrailers; + + impl Body for SomeTrailers { + type Data = Bytes; + type Error = Infallible; + + fn poll_frame( + 
self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Poll::Ready(Some(Ok(Frame::trailers(http::HeaderMap::new())))) + } + } + + #[tokio::test] + async fn read_for_trailers_propagates_inner_trailers() { + let body = &mut Limited::new(SomeTrailers, 8); + let frame = body.frame().await.unwrap().unwrap(); + assert!(frame.is_trailers()); + } + + #[derive(Debug)] + struct ErrorBodyError; + + impl fmt::Display for ErrorBodyError { + fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } + } + + impl Error for ErrorBodyError {} + + struct ErrorBody; + + impl Body for ErrorBody { + type Data = &'static [u8]; + type Error = ErrorBodyError; + + fn poll_frame( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Poll::Ready(Some(Err(ErrorBodyError))) + } + } + + #[tokio::test] + async fn read_for_body_returning_error_propagates_error() { + let body = &mut Limited::new(ErrorBody, 8); + let error = body.frame().await.unwrap().unwrap_err(); + assert!(matches!(error.downcast_ref(), Some(ErrorBodyError))); + } +} diff --git a/.cargo-vendor/http-body-util/src/stream.rs b/.cargo-vendor/http-body-util/src/stream.rs new file mode 100644 index 0000000000..7eeafad764 --- /dev/null +++ b/.cargo-vendor/http-body-util/src/stream.rs @@ -0,0 +1,240 @@ +use bytes::Buf; +use futures_util::{ready, stream::Stream}; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +pin_project! { + /// A body created from a [`Stream`]. + #[derive(Clone, Copy, Debug)] + pub struct StreamBody { + #[pin] + stream: S, + } +} + +impl StreamBody { + /// Create a new `StreamBody`. 
+ pub fn new(stream: S) -> Self { + Self { stream } + } +} + +impl Body for StreamBody +where + S: Stream, E>>, + D: Buf, +{ + type Data = D; + type Error = E; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project().stream.poll_next(cx) { + Poll::Ready(Some(result)) => Poll::Ready(Some(result)), + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, + } + } +} + +impl Stream for StreamBody { + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().stream.poll_next(cx) + } + + fn size_hint(&self) -> (usize, Option) { + self.stream.size_hint() + } +} + +pin_project! { + /// A stream created from a [`Body`]. + #[derive(Clone, Copy, Debug)] + pub struct BodyStream { + #[pin] + body: B, + } +} + +impl BodyStream { + /// Create a new `BodyStream`. + pub fn new(body: B) -> Self { + Self { body } + } +} + +impl Body for BodyStream +where + B: Body, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + self.project().body.poll_frame(cx) + } +} + +impl Stream for BodyStream +where + B: Body, +{ + type Item = Result, B::Error>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.project().body.poll_frame(cx) { + Poll::Ready(Some(frame)) => Poll::Ready(Some(frame)), + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, + } + } +} + +pin_project! { + /// A data stream created from a [`Body`]. 
+ #[derive(Clone, Copy, Debug)] + pub struct BodyDataStream { + #[pin] + body: B, + } +} + +impl BodyDataStream { + /// Create a new `BodyDataStream` + pub fn new(body: B) -> Self { + Self { body } + } +} + +impl Stream for BodyDataStream +where + B: Body, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + return match ready!(self.as_mut().project().body.poll_frame(cx)) { + Some(Ok(frame)) => match frame.into_data() { + Ok(bytes) => Poll::Ready(Some(Ok(bytes))), + Err(_) => continue, + }, + Some(Err(err)) => Poll::Ready(Some(Err(err))), + None => Poll::Ready(None), + }; + } + } +} + +#[cfg(test)] +mod tests { + use crate::{BodyExt, BodyStream, StreamBody}; + use bytes::Bytes; + use futures_util::StreamExt; + use http_body::Frame; + use std::convert::Infallible; + + #[tokio::test] + async fn body_from_stream() { + let chunks: Vec> = vec![ + Ok(Frame::data(Bytes::from(vec![1]))), + Ok(Frame::data(Bytes::from(vec![2]))), + Ok(Frame::data(Bytes::from(vec![3]))), + ]; + let stream = futures_util::stream::iter(chunks); + let mut body = StreamBody::new(stream); + + assert_eq!( + body.frame() + .await + .unwrap() + .unwrap() + .into_data() + .unwrap() + .as_ref(), + [1] + ); + assert_eq!( + body.frame() + .await + .unwrap() + .unwrap() + .into_data() + .unwrap() + .as_ref(), + [2] + ); + assert_eq!( + body.frame() + .await + .unwrap() + .unwrap() + .into_data() + .unwrap() + .as_ref(), + [3] + ); + + assert!(body.frame().await.is_none()); + } + + #[tokio::test] + async fn stream_from_body() { + let chunks: Vec> = vec![ + Ok(Frame::data(Bytes::from(vec![1]))), + Ok(Frame::data(Bytes::from(vec![2]))), + Ok(Frame::data(Bytes::from(vec![3]))), + ]; + let stream = futures_util::stream::iter(chunks); + let body = StreamBody::new(stream); + + let mut stream = BodyStream::new(body); + + assert_eq!( + stream + .next() + .await + .unwrap() + .unwrap() + .into_data() + .unwrap() + .as_ref(), + [1] + ); + assert_eq!( + 
stream + .next() + .await + .unwrap() + .unwrap() + .into_data() + .unwrap() + .as_ref(), + [2] + ); + assert_eq!( + stream + .next() + .await + .unwrap() + .unwrap() + .into_data() + .unwrap() + .as_ref(), + [3] + ); + + assert!(stream.next().await.is_none()); + } +} diff --git a/.cargo-vendor/http-body-util/src/util.rs b/.cargo-vendor/http-body-util/src/util.rs new file mode 100644 index 0000000000..50fa17560f --- /dev/null +++ b/.cargo-vendor/http-body-util/src/util.rs @@ -0,0 +1,164 @@ +use std::collections::VecDeque; +use std::io::IoSlice; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; + +#[derive(Debug)] +pub(crate) struct BufList { + bufs: VecDeque, +} + +impl BufList { + #[inline] + pub(crate) fn push(&mut self, buf: T) { + debug_assert!(buf.has_remaining()); + self.bufs.push_back(buf); + } + + #[inline] + pub(crate) fn pop(&mut self) -> Option { + self.bufs.pop_front() + } +} + +impl Buf for BufList { + #[inline] + fn remaining(&self) -> usize { + self.bufs.iter().map(|buf| buf.remaining()).sum() + } + + #[inline] + fn has_remaining(&self) -> bool { + self.bufs.iter().any(|buf| buf.has_remaining()) + } + + #[inline] + fn chunk(&self) -> &[u8] { + self.bufs.front().map(Buf::chunk).unwrap_or_default() + } + + #[inline] + fn advance(&mut self, mut cnt: usize) { + while cnt > 0 { + { + let front = &mut self.bufs[0]; + let rem = front.remaining(); + if rem > cnt { + front.advance(cnt); + return; + } else { + front.advance(rem); + cnt -= rem; + } + } + self.bufs.pop_front(); + } + } + + #[inline] + fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { + if dst.is_empty() { + return 0; + } + let mut vecs = 0; + for buf in &self.bufs { + vecs += buf.chunks_vectored(&mut dst[vecs..]); + if vecs == dst.len() { + break; + } + } + vecs + } + + #[inline] + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + // Our inner buffer may have an optimized version of copy_to_bytes, and if the whole + // request can be fulfilled by the front buffer, we can take 
advantage. + match self.bufs.front_mut() { + Some(front) if front.remaining() == len => { + let b = front.copy_to_bytes(len); + self.bufs.pop_front(); + b + } + Some(front) if front.remaining() > len => front.copy_to_bytes(len), + _ => { + let rem = self.remaining(); + assert!(len <= rem, "`len` greater than remaining"); + let mut bm = BytesMut::with_capacity(len); + if rem == len { + // .take() costs a lot more, so skip it if we don't need it + bm.put(self); + } else { + bm.put(self.take(len)); + } + bm.freeze() + } + } + } +} + +impl Default for BufList { + fn default() -> Self { + BufList { + bufs: VecDeque::new(), + } + } +} + +#[cfg(test)] +mod tests { + use std::ptr; + + use super::*; + + fn hello_world_buf() -> BufList { + BufList { + bufs: vec![Bytes::from("Hello"), Bytes::from(" "), Bytes::from("World")].into(), + } + } + + #[test] + fn to_bytes_shorter() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(4); + assert_eq!(start, "Hell"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b"o"); + assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr())); + assert_eq!(bufs.remaining(), 7); + } + + #[test] + fn to_bytes_eq() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(5); + assert_eq!(start, "Hello"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b" "); + assert_eq!(bufs.remaining(), 6); + } + + #[test] + fn to_bytes_longer() { + let mut bufs = hello_world_buf(); + let start = bufs.copy_to_bytes(7); + assert_eq!(start, "Hello W"); + assert_eq!(bufs.remaining(), 4); + } + + #[test] + fn one_long_buf_to_bytes() { + let mut buf = BufList::default(); + buf.push(b"Hello World" as &[_]); + assert_eq!(buf.copy_to_bytes(5), "Hello"); + assert_eq!(buf.chunk(), b" World"); + } + + #[test] + #[should_panic(expected = "`len` greater than remaining")] + fn buf_to_bytes_too_many() { + 
hello_world_buf().copy_to_bytes(42); + } +} diff --git a/.cargo-vendor/http-body/.cargo-checksum.json b/.cargo-vendor/http-body/.cargo-checksum.json index a3d29455a1..9abba0ff61 100644 --- a/.cargo-vendor/http-body/.cargo-checksum.json +++ b/.cargo-vendor/http-body/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"07aa2743e33442fdd69d4c8beeaaba938a3f177b5a48922886c5cd07c2f1a72d","Cargo.toml":"451b4b8125ced2429c8d2c6c33d972fab2dc3b59365bb38fe6e7f4d14613e44e","LICENSE":"0345e2b98685e3807fd802a2478085dcae35023e3da59b5a00f712504314d83a","README.md":"0f90f61ee419eefd4104005ef6900445fafce9a710dd1989463f3cebaf0fafe8","src/collect.rs":"a71ea9be6aa7100df2bb3520507ad8ebaede8ea5339310a63a0cd03081e14122","src/combinators/box_body.rs":"d27dfa9f289c9c8d1fe714415fb5df5bdaafafb80a5cff66fbbe720841e806bf","src/combinators/map_data.rs":"3063f44d1318feeec639eff6544e7fb91ad9abf9a295770af4cc69b48a691796","src/combinators/map_err.rs":"9db485a5904579147673ac7f9f347e322d283d95a421daaf5541d048045eec7e","src/combinators/mod.rs":"c9e32f64ab2f4866d14256fff4256ba61d4c1bcfaf2748754c561de3abe1eccd","src/empty.rs":"3e44cee68410101cb8bf88c0de504885075c084357e83bcd3a6761ba5c7c58d2","src/full.rs":"efcbf6831d32271170e2ed86c328bfb887aec0c93689f1218ab5a18c319b0fa8","src/lib.rs":"2fa07ea03a0afede091ea4bc02a70025754ce1b28a48d7269637a5397ded3df1","src/limited.rs":"485fc1c58bba29d2c7afdb4a032cd0e3c3578979ccd71f7459ddcd67e0f16077","src/next.rs":"d6863067b20c4bb42dced5c17bd954816b1338ce53e8d34ab81dbe240a1601cf","src/size_hint.rs":"017ed58c59b446b93aa4922e35b596490bf8f03af37c631610cc6576f1c21439","tests/is_end_stream.rs":"3a66d80d064f8a447bfa9fd212c2f91855604b1b41f554da3a029bc4a5be3a7e"},"package":"7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"3a34963d9921a37f74ae4206a8f917f01a97a1d025261c77db882ebc339ef63b","Cargo.toml":"7b78266ec726606a0c655becd3fa68b3c6155b2e680c6ef2d06bda034e239206","LICENSE":"cddabf8adc6ccd6c3e68f5d71eac9fae3094116623cf23a46af0a5fd6b8ee813","README.md":"0f90f61ee419eefd4104005ef6900445fafce9a710dd1989463f3cebaf0fafe8","src/frame.rs":"0be0bd92430fcd69fd95fe38037ecd087427ddfac33c400caf6f49cc9ffa91ac","src/lib.rs":"bed3a9204261003af33b7ab907fdfbd9184c16cd0fa4977d832adc1c9ab2813b","src/size_hint.rs":"0168c1d6d326f43cef68c23f987cb81629a684aa43e8c15ce9505c23f16b8836","tests/is_end_stream.rs":"fd6ce2dea2d127d17af87ca5a5574cb5cd66975aba32440ab59c5af3c920f987"},"package":"1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"} \ No newline at end of file diff --git a/.cargo-vendor/http-body/CHANGELOG.md b/.cargo-vendor/http-body/CHANGELOG.md index bdb3a25158..a9e7fb3011 100644 --- a/.cargo-vendor/http-body/CHANGELOG.md +++ b/.cargo-vendor/http-body/CHANGELOG.md @@ -1,6 +1,25 @@ -# 0.4.6 (December 8, 2023) +# 1.0.1 (July 12, 2024) -- Add `Collect` combinator (backported from http-body-util). +- Include LICENSE file. + +# 1.0.0 (November 15, 2023) + +- Update `http` to 1.0. +- Add `Frame::map_data()`. + +# 1.0.0-rc.2 (Dec 28, 2022) + +- Change return type of `Frame::into_data()` and `Frame::into_trailers()` methods to return `Result` instead of `Option`. + +# 1.0.0-rc1 (Oct 25, 2022) + +- Body trait forward-compat redesign (#67). + - `poll_data`/`poll_trailers` removed in favor of `poll_frame`. + - New `Frame` type that represents http frames such as DATA and trailers, as + well as unknown frames for future implementations like h3. + - For more information on this change the proposal can be found + [here](https://github.com/hyperium/hyper/issues/2840). +- Move adatpers and other utilities to `http-body-util`. 
# 0.4.5 (May 20, 2022) diff --git a/.cargo-vendor/http-body/Cargo.toml b/.cargo-vendor/http-body/Cargo.toml index 8a6a94ba68..04fa7f4251 100644 --- a/.cargo-vendor/http-body/Cargo.toml +++ b/.cargo-vendor/http-body/Cargo.toml @@ -11,13 +11,19 @@ [package] edition = "2018" +rust-version = "1.49" name = "http-body" -version = "0.4.6" +version = "1.0.1" authors = [ "Carl Lerche ", "Lucio Franco ", "Sean McArthur ", ] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false description = """ Trait representing an asynchronous, streaming, HTTP request or response body. """ @@ -28,18 +34,16 @@ categories = ["web-programming"] license = "MIT" repository = "https://github.com/hyperium/http-body" +[lib] +name = "http_body" +path = "src/lib.rs" + +[[test]] +name = "is_end_stream" +path = "tests/is_end_stream.rs" + [dependencies.bytes] version = "1" [dependencies.http] -version = "0.2" - -[dependencies.pin-project-lite] -version = "0.2" - -[dev-dependencies.tokio] version = "1" -features = [ - "macros", - "rt", -] diff --git a/.cargo-vendor/http-body/LICENSE b/.cargo-vendor/http-body/LICENSE index 27b08f2874..47773c5557 100644 --- a/.cargo-vendor/http-body/LICENSE +++ b/.cargo-vendor/http-body/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 Hyper Contributors +Copyright (c) 2019-2024 Sean McArthur & Hyper Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/.cargo-vendor/http-body/src/frame.rs b/.cargo-vendor/http-body/src/frame.rs new file mode 100644 index 0000000000..395e9fdd73 --- /dev/null +++ b/.cargo-vendor/http-body/src/frame.rs @@ -0,0 +1,120 @@ +use http::HeaderMap; + +/// A frame of any kind related to an HTTP stream (body). +#[derive(Debug)] +pub struct Frame { + kind: Kind, +} + +#[derive(Debug)] +enum Kind { + // The first two variants are "inlined" since they are undoubtedly + // the most common. 
This saves us from having to allocate a + // boxed trait object for them. + Data(T), + Trailers(HeaderMap), + //Unknown(Box), +} + +impl Frame { + /// Create a DATA frame with the provided `Buf`. + pub fn data(buf: T) -> Self { + Self { + kind: Kind::Data(buf), + } + } + + /// Create a trailers frame. + pub fn trailers(map: HeaderMap) -> Self { + Self { + kind: Kind::Trailers(map), + } + } + + /// Maps this frame's data to a different type. + pub fn map_data(self, f: F) -> Frame + where + F: FnOnce(T) -> D, + { + match self.kind { + Kind::Data(data) => Frame { + kind: Kind::Data(f(data)), + }, + Kind::Trailers(trailers) => Frame { + kind: Kind::Trailers(trailers), + }, + } + } + + /// Returns whether this is a DATA frame. + pub fn is_data(&self) -> bool { + matches!(self.kind, Kind::Data(..)) + } + + /// Consumes self into the buf of the DATA frame. + /// + /// Returns an [`Err`] containing the original [`Frame`] when frame is not a DATA frame. + /// `Frame::is_data` can also be used to determine if the frame is a DATA frame. + pub fn into_data(self) -> Result { + match self.kind { + Kind::Data(data) => Ok(data), + _ => Err(self), + } + } + + /// If this is a DATA frame, returns a reference to it. + /// + /// Returns `None` if not a DATA frame. + pub fn data_ref(&self) -> Option<&T> { + match self.kind { + Kind::Data(ref data) => Some(data), + _ => None, + } + } + + /// If this is a DATA frame, returns a mutable reference to it. + /// + /// Returns `None` if not a DATA frame. + pub fn data_mut(&mut self) -> Option<&mut T> { + match self.kind { + Kind::Data(ref mut data) => Some(data), + _ => None, + } + } + + /// Returns whether this is a trailers frame. + pub fn is_trailers(&self) -> bool { + matches!(self.kind, Kind::Trailers(..)) + } + + /// Consumes self into the buf of the trailers frame. + /// + /// Returns an [`Err`] containing the original [`Frame`] when frame is not a trailers frame. 
+ /// `Frame::is_trailers` can also be used to determine if the frame is a trailers frame. + pub fn into_trailers(self) -> Result { + match self.kind { + Kind::Trailers(trailers) => Ok(trailers), + _ => Err(self), + } + } + + /// If this is a trailers frame, returns a reference to it. + /// + /// Returns `None` if not a trailers frame. + pub fn trailers_ref(&self) -> Option<&HeaderMap> { + match self.kind { + Kind::Trailers(ref trailers) => Some(trailers), + _ => None, + } + } + + /// If this is a trailers frame, returns a mutable reference to it. + /// + /// Returns `None` if not a trailers frame. + pub fn trailers_mut(&mut self) -> Option<&mut HeaderMap> { + match self.kind { + Kind::Trailers(ref mut trailers) => Some(trailers), + _ => None, + } + } +} diff --git a/.cargo-vendor/http-body/src/lib.rs b/.cargo-vendor/http-body/src/lib.rs index 2535cda161..70e7f36bac 100644 --- a/.cargo-vendor/http-body/src/lib.rs +++ b/.cargo-vendor/http-body/src/lib.rs @@ -1,9 +1,9 @@ -#![doc(html_root_url = "https://docs.rs/http-body/0.4.6")] #![deny( missing_debug_implementations, missing_docs, unreachable_pub, - broken_intra_doc_links + clippy::missing_safety_doc, + clippy::undocumented_unsafe_blocks )] #![cfg_attr(test, deny(warnings))] @@ -13,25 +13,13 @@ //! //! [`Body`]: trait.Body.html -mod collect; -mod empty; -mod full; -mod limited; -mod next; +mod frame; mod size_hint; -pub mod combinators; - -pub use self::collect::Collected; -pub use self::empty::Empty; -pub use self::full::Full; -pub use self::limited::{LengthLimitError, Limited}; -pub use self::next::{Data, Trailers}; +pub use self::frame::Frame; pub use self::size_hint::SizeHint; -use self::combinators::{BoxBody, MapData, MapErr, UnsyncBoxBody}; use bytes::{Buf, Bytes}; -use http::HeaderMap; use std::convert::Infallible; use std::ops; use std::pin::Pin; @@ -39,12 +27,14 @@ use std::task::{Context, Poll}; /// Trait representing a streaming body of a Request or Response. 
/// -/// Data is streamed via the `poll_data` function, which asynchronously yields `T: Buf` values. The -/// `size_hint` function provides insight into the total number of bytes that will be streamed. +/// Individual frames are streamed via the `poll_frame` function, which asynchronously yields +/// instances of [`Frame`]. /// -/// The `poll_trailers` function returns an optional set of trailers used to finalize the request / -/// response exchange. This is mostly used when using the HTTP/2.0 protocol. +/// Frames can contain a data buffer of type `Self::Data`. Frames can also contain an optional +/// set of trailers used to finalize the request/response exchange. This is mostly used when using +/// the HTTP/2.0 protocol. /// +/// The `size_hint` function provides insight into the total number of bytes that will be streamed. pub trait Body { /// Values yielded by the `Body`. type Data: Buf; @@ -52,27 +42,19 @@ pub trait Body { /// The error type this `Body` might generate. type Error; + #[allow(clippy::type_complexity)] /// Attempt to pull out the next data buffer of this stream. - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>>; - - /// Poll for an optional **single** `HeaderMap` of trailers. - /// - /// This function should only be called once `poll_data` returns `None`. - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>>; + ) -> Poll, Self::Error>>>; /// Returns `true` when the end of stream has been reached. /// - /// An end of stream means that both `poll_data` and `poll_trailers` will - /// return `None`. + /// An end of stream means that `poll_frame` will return `None`. /// /// A return value of `false` **does not** guarantee that a value will be - /// returned from `poll_stream` or `poll_trailers`. + /// returned from `poll_frame`. 
fn is_end_stream(&self) -> bool { false } @@ -84,84 +66,17 @@ pub trait Body { fn size_hint(&self) -> SizeHint { SizeHint::default() } - - /// Returns future that resolves to next data chunk, if any. - fn data(&mut self) -> Data<'_, Self> - where - Self: Unpin + Sized, - { - Data(self) - } - - /// Returns future that resolves to trailers, if any. - fn trailers(&mut self) -> Trailers<'_, Self> - where - Self: Unpin + Sized, - { - Trailers(self) - } - - /// Maps this body's data value to a different value. - fn map_data(self, f: F) -> MapData - where - Self: Sized, - F: FnMut(Self::Data) -> B, - B: Buf, - { - MapData::new(self, f) - } - - /// Maps this body's error value to a different value. - fn map_err(self, f: F) -> MapErr - where - Self: Sized, - F: FnMut(Self::Error) -> E, - { - MapErr::new(self, f) - } - - /// Turn this body into [`Collected`] body which will collect all the DATA frames - /// and trailers. - fn collect(self) -> crate::collect::Collect - where - Self: Sized, - { - collect::Collect::new(self) - } - - /// Turn this body into a boxed trait object. - fn boxed(self) -> BoxBody - where - Self: Sized + Send + Sync + 'static, - { - BoxBody::new(self) - } - - /// Turn this body into a boxed trait object that is !Sync. - fn boxed_unsync(self) -> UnsyncBoxBody - where - Self: Sized + Send + 'static, - { - UnsyncBoxBody::new(self) - } } impl Body for &mut T { type Data = T::Data; type Error = T::Error; - fn poll_data( + fn poll_frame( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(&mut **self).poll_data(cx) - } - - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::new(&mut **self).poll_trailers(cx) + ) -> Poll, Self::Error>>> { + Pin::new(&mut **self).poll_frame(cx) } fn is_end_stream(&self) -> bool { @@ -181,18 +96,11 @@ where type Data = <

::Target as Body>::Data; type Error = <

::Target as Body>::Error; - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - Pin::get_mut(self).as_mut().poll_data(cx) - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::get_mut(self).as_mut().poll_trailers(cx) + ) -> Poll, Self::Error>>> { + Pin::get_mut(self).as_mut().poll_frame(cx) } fn is_end_stream(&self) -> bool { @@ -208,18 +116,11 @@ impl Body for Box { type Data = T::Data; type Error = T::Error; - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(&mut **self).poll_data(cx) - } - - fn poll_trailers( + fn poll_frame( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::new(&mut **self).poll_trailers(cx) + ) -> Poll, Self::Error>>> { + Pin::new(&mut **self).poll_frame(cx) } fn is_end_stream(&self) -> bool { @@ -235,23 +136,15 @@ impl Body for http::Request { type Data = B::Data; type Error = B::Error; - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - unsafe { - self.map_unchecked_mut(http::Request::body_mut) - .poll_data(cx) - } - } - - fn poll_trailers( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { + ) -> Poll, Self::Error>>> { + // SAFETY: + // A pin projection. unsafe { self.map_unchecked_mut(http::Request::body_mut) - .poll_trailers(cx) + .poll_frame(cx) } } @@ -268,23 +161,15 @@ impl Body for http::Response { type Data = B::Data; type Error = B::Error; - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - unsafe { - self.map_unchecked_mut(http::Response::body_mut) - .poll_data(cx) - } - } - - fn poll_trailers( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { + ) -> Poll, Self::Error>>> { + // SAFETY: + // A pin projection. 
unsafe { self.map_unchecked_mut(http::Response::body_mut) - .poll_trailers(cx) + .poll_frame(cx) } } @@ -301,25 +186,18 @@ impl Body for String { type Data = Bytes; type Error = Infallible; - fn poll_data( + fn poll_frame( mut self: Pin<&mut Self>, _cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { if !self.is_empty() { let s = std::mem::take(&mut *self); - Poll::Ready(Some(Ok(s.into_bytes().into()))) + Poll::Ready(Some(Ok(Frame::data(s.into_bytes().into())))) } else { Poll::Ready(None) } } - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(None)) - } - fn is_end_stream(&self) -> bool { self.is_empty() } diff --git a/.cargo-vendor/http-body/src/size_hint.rs b/.cargo-vendor/http-body/src/size_hint.rs index 00a8f19177..2cad79d2eb 100644 --- a/.cargo-vendor/http-body/src/size_hint.rs +++ b/.cargo-vendor/http-body/src/size_hint.rs @@ -1,5 +1,3 @@ -use std::u64; - /// A `Body` size hint /// /// The default implementation returns: diff --git a/.cargo-vendor/http-body/tests/is_end_stream.rs b/.cargo-vendor/http-body/tests/is_end_stream.rs index beaeb0b1a0..94b7c3dd7d 100644 --- a/.cargo-vendor/http-body/tests/is_end_stream.rs +++ b/.cargo-vendor/http-body/tests/is_end_stream.rs @@ -1,5 +1,4 @@ -use http::HeaderMap; -use http_body::{Body, SizeHint}; +use http_body::{Body, Frame, SizeHint}; use std::pin::Pin; use std::task::{Context, Poll}; @@ -11,20 +10,13 @@ impl Body for Mock { type Data = ::std::io::Cursor>; type Error = (); - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, _cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { Poll::Ready(None) } - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(None)) - } - fn size_hint(&self) -> SizeHint { self.size_hint.clone() } @@ -70,9 +62,8 @@ fn is_end_stream_default_false() { size_hint: SizeHint::default(), }; - assert_eq!( - false, - Pin::new(&mut 
mock).is_end_stream(), + assert!( + !Pin::new(&mut mock).is_end_stream(), "size_hint = {:?}", mock.size_hint.clone() ); diff --git a/.cargo-vendor/http/.cargo-checksum.json b/.cargo-vendor/http/.cargo-checksum.json index 6cff05060b..f4f2b4adcd 100644 --- a/.cargo-vendor/http/.cargo-checksum.json +++ b/.cargo-vendor/http/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"2df3a57e1185cbfc8eab81f0ea8b9eab3e28854e1c5f5e712ed5db24427f5054","Cargo.toml":"de90ca352de60a54cf5dfedeb4589b7a6ef6e523a2a90fea1a0f956a7e2f7caf","LICENSE-APACHE":"8bb1b50b0e5c9399ae33bd35fab2769010fa6c14e8860c729a52295d84896b7a","LICENSE-MIT":"dc91f8200e4b2a1f9261035d4c18c33c246911a6c0f7b543d75347e61b249cff","README.md":"2b08369b8ce261843a84103642fd4d8b1ab556af6d6397dbc78c19f7025d255a","src/byte_str.rs":"e1131683d8a3234f6b0983ad1f18a46794961ce401d590378370e58c60f4fbdc","src/convert.rs":"a31a4351cd3ee36a58ff4f5b30ce2c8967cde8486faea2d2673a8f8cb74b3204","src/error.rs":"e9a0c5c2af9eb98a23f967d9ff416095c80f6998fbd14a63acebeeca9e8aedac","src/extensions.rs":"5f85c3e1eef53d0fcbd4a24a6c13828790dac74ad60f71cad365e14d39b196a6","src/header/map.rs":"8f4f9022c899abf723294f6017348238f116df939abf54f891bcc74a95cbfe71","src/header/mod.rs":"aa07991ab517f1cef50bd8ebbec6ea76b95d8faedeaa61d3e410b8bf7f737da9","src/header/name.rs":"9554b80b81ea7cfd807c1d6ff52801a07ca675c8d0dffb0eee77c22a3a3a1a26","src/header/value.rs":"ffea8236f38178fa3dd600b893d1eb8b698e3a052aaad2dbdda4a14e1b3c7108","src/lib.rs":"d4bbd2761bc5fb93f71e037c838853c2460ac43e8e176c9e4b7739ece97c4060","src/method.rs":"a40a8219cdbe1071cd448bc154dbe88e78a29d755bca2bde095190bcd595f7dd","src/request.rs":"4bf726a91d5776f11f2d29b270090550838b1cebf812ef5acdd62d00878325fc","src/response.rs":"137adc01d53225ce07c06f8f64cd082af437bcbf297dce20a3a5907e3f2544fe","src/status.rs":"fd9d1c1670bde5f94934ff2a9fa9c7f2db5bbe32a750e4e202bf2775b5c5cac3","src/uri/authority.rs":"605ab42eed3ed6692746a846f845c8f2ba7e34c4738e929e5683714f17c7a162","src/uri/builder.rs":"875506b3a603
a6e35557548ed0cf3beb7de0a4d1c898316e7293f3bc2ffb05c5","src/uri/mod.rs":"fd083d2bb380268a2c1c6236aed6f312d469a55cd259fd55b20a801e72e6c8b1","src/uri/path.rs":"1a87eaedf4ce65a0af9020eff5ca4e78d1eaba0a3d05a0a99ed2cc8912054f64","src/uri/port.rs":"a30793678abc96e833d026d96f060244183ab631e19eafbbad8e4643c7bb9d86","src/uri/scheme.rs":"59e6f12d3e1e1ee982e68a4a6556f25e94073ca3d77c372b6d8d71daf8f62f2a","src/uri/tests.rs":"61f88b73490c2442ec12cb0829aa1ddd28f1bce874b4fc6dd7a544c80280aeb1","src/version.rs":"623ef60a450203b051f3457e2f095508b66aaaa799b1447fb1b34d92cb2e7d62","tests/header_map.rs":"749ef0461bff58a01d96b5072268da7b36105f60d0db585e0c616e7e440f1601","tests/header_map_fuzz.rs":"7f8be3f097ceb9e0c5c4b44ef6ae1ee209cd7b6d1ea4b4be45356142792190de","tests/status_code.rs":"4c1bd08baffa6265aad5e837b189c269a3bef9031984b37980c24a8c671ac22c"},"package":"601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"} \ No newline at end of file +{"files":{"CHANGELOG.md":"1da6b6201ce97bfdddbed898a3a48def191dc28a48be1699708e314f33f2f21a","Cargo.toml":"2b6b868d24df9fcb28bc60fab248ea9bfc0d2260a88126e6dd397f8c736512ad","LICENSE-APACHE":"8bb1b50b0e5c9399ae33bd35fab2769010fa6c14e8860c729a52295d84896b7a","LICENSE-MIT":"dc91f8200e4b2a1f9261035d4c18c33c246911a6c0f7b543d75347e61b249cff","README.md":"cc532272a1fd874d3c3aada992457a98edd03f08a4f3becdd059d3a77c7ccf7a","src/byte_str.rs":"f044ef419cb44aeeafd6a9c4f508dce702a2adfe6854306f4ead7755f8477af9","src/convert.rs":"648ef74bc21184c960ee21e43033584390d1900d107cdae8c1d9a4a35f9a3fc7","src/error.rs":"e9a0c5c2af9eb98a23f967d9ff416095c80f6998fbd14a63acebeeca9e8aedac","src/extensions.rs":"a85e4d0740bde5a8cfefbaf83dd5da5d1b65f6014a4d842e0cd998943b176c24","src/header/map.rs":"a9de9e1dc5b5351f30284ea43ab748b8ddf5d0905f513ffb3ccf316469eca2b6","src/header/mod.rs":"e2662325ca13a63cbbf587b751a749dccdf4896748990abd3bc8003f81d2845f","src/header/name.rs":"168b745a0edd29b0d9241c66d1a3b8035fc69a8b8f1dc1a8f65a1aea63ef5343","src/header/value.rs":"b6aa8368e6568de
e57185aef11768a3916a0843b6f2dc5cacafe78442f37586f","src/lib.rs":"57e93842eb578c536b0a9405308c30a028d6b9086c9cb22fea88b603655a4a10","src/method.rs":"3deb7feb7a9abce28142e0079c170c9c33eccd50b4a87d725d5997335062793f","src/request.rs":"2fa42d467fa9c51c61300a66acec698bef11bb43e20d7a80f3a911548490c4de","src/response.rs":"06a79aea7df311b3348a6fe4c16ab1ee175c16887cf67b4481696bacfc0624b1","src/status.rs":"339297f54054584682e17b6069628227aa207025d9dcb0973081583fefa93e9f","src/uri/authority.rs":"45b1b515ef9c86961ed341eded05a4270a27661ec4129d3356d3e2c4440698b0","src/uri/builder.rs":"a63abf45eef89e29498b85e6216319d760fd6d83c8549e315285e1ace67fb328","src/uri/mod.rs":"8d76c2350ed19b96b7218d62f538a1973d59d28cafc556b17c72f924e817ad0f","src/uri/path.rs":"a73668e6131a9b4fffadcfef7ea803086784d88d9a540404647ab7ea874926ca","src/uri/port.rs":"a30793678abc96e833d026d96f060244183ab631e19eafbbad8e4643c7bb9d86","src/uri/scheme.rs":"25332dea742ef623d10d0e3fd04d97bcf6a1d63c3b1e7f9f7eaab45787ec42de","src/uri/tests.rs":"61f88b73490c2442ec12cb0829aa1ddd28f1bce874b4fc6dd7a544c80280aeb1","src/version.rs":"623ef60a450203b051f3457e2f095508b66aaaa799b1447fb1b34d92cb2e7d62","tests/header_map.rs":"ce60f833128ca9da3eac6ace25459f89e2f9e1571fa64f0d2d4e1b4bcf012e5b","tests/header_map_fuzz.rs":"f5690d242d045aa9ad139941e3f3384f220c496c39e845fbe07a2ef533ec2d50","tests/status_code.rs":"165cfa22544de42c9ce708be0921a445cd2b75f042192b7d677501fff79cb641"},"package":"21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"} \ No newline at end of file diff --git a/.cargo-vendor/http/CHANGELOG.md b/.cargo-vendor/http/CHANGELOG.md index 40e0476adf..e189d618de 100644 --- a/.cargo-vendor/http/CHANGELOG.md +++ b/.cargo-vendor/http/CHANGELOG.md @@ -1,11 +1,16 @@ -# 0.2.12 (March 4, 2024) +# 1.1.0 (March 4, 2024) * Add methods to allow trying to allocate in the `HeaderMap`, returning an error if oversize instead of panicking. +* Add `Extensions::get_or_insert()` method. +* Implement `From` for `uri::Builder`. 
* Fix `HeaderName::from_lowercase` that could allow NUL bytes in some cases. -# 0.2.11 (November 13, 2023) +# 1.0.0 (November 15, 2023) -* Fix MIRI error in `header::Iter`. +- Implement `Clone` for `Request`, `Response`, and `Extensions`. This breaking change requires + that all extensions now implement `Clone`. +- Add a default-on `std` feature. Disabling it currently is not supported. +- Fix MIRI warnings in `HeaderMap::iter()`. # 0.2.10 (November 10, 2023) diff --git a/.cargo-vendor/http/Cargo.toml b/.cargo-vendor/http/Cargo.toml index d83b0e9cde..037980d382 100644 --- a/.cargo-vendor/http/Cargo.toml +++ b/.cargo-vendor/http/Cargo.toml @@ -13,7 +13,7 @@ edition = "2018" rust-version = "1.49.0" name = "http" -version = "0.2.12" +version = "1.1.0" authors = [ "Alex Crichton ", "Carl Lerche ", @@ -41,20 +41,18 @@ version = "1" [dev-dependencies.doc-comment] version = "0.3" -[dev-dependencies.indexmap] -version = "<=1.8" - [dev-dependencies.quickcheck] -version = "0.9.0" +version = "1" [dev-dependencies.rand] -version = "0.7.0" - -[dev-dependencies.seahash] -version = "3.0.5" +version = "0.8.0" [dev-dependencies.serde] version = "1.0" [dev-dependencies.serde_json] version = "1.0" + +[features] +default = ["std"] +std = [] diff --git a/.cargo-vendor/http/README.md b/.cargo-vendor/http/README.md index 2ae8d56cd3..a009003286 100644 --- a/.cargo-vendor/http/README.md +++ b/.cargo-vendor/http/README.md @@ -17,7 +17,7 @@ To use `http`, first add this to your `Cargo.toml`: ```toml [dependencies] -http = "0.2" +http = "1.0" ``` Next, add this to your crate: diff --git a/.cargo-vendor/http/src/byte_str.rs b/.cargo-vendor/http/src/byte_str.rs index dec222b5f4..181ced9e77 100644 --- a/.cargo-vendor/http/src/byte_str.rs +++ b/.cargo-vendor/http/src/byte_str.rs @@ -43,7 +43,7 @@ impl ByteStr { } } // Invariant: assumed by the safety requirements of this function. 
- ByteStr { bytes: bytes } + ByteStr { bytes } } } diff --git a/.cargo-vendor/http/src/convert.rs b/.cargo-vendor/http/src/convert.rs index aeee2219a9..682e0ed5a8 100644 --- a/.cargo-vendor/http/src/convert.rs +++ b/.cargo-vendor/http/src/convert.rs @@ -1,5 +1,5 @@ macro_rules! if_downcast_into { - ($in_ty:ty, $out_ty:ty, $val:ident, $body:expr) => ({ + ($in_ty:ty, $out_ty:ty, $val:ident, $body:expr) => {{ if std::any::TypeId::of::<$in_ty>() == std::any::TypeId::of::<$out_ty>() { // Store the value in an `Option` so we can `take` // it after casting to `&mut dyn Any`. @@ -13,5 +13,5 @@ macro_rules! if_downcast_into { // Run the $body in scope of the replaced val. $body } - }) + }}; } diff --git a/.cargo-vendor/http/src/extensions.rs b/.cargo-vendor/http/src/extensions.rs index 7e815df772..3764a558a6 100644 --- a/.cargo-vendor/http/src/extensions.rs +++ b/.cargo-vendor/http/src/extensions.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use std::fmt; use std::hash::{BuildHasherDefault, Hasher}; -type AnyMap = HashMap, BuildHasherDefault>; +type AnyMap = HashMap, BuildHasherDefault>; // With TypeIds as keys, there's no need to hash them. They are already hashes // themselves, coming from the compiler. The IdHasher just holds the u64 of @@ -31,7 +31,7 @@ impl Hasher for IdHasher { /// /// `Extensions` can be used by `Request` and `Response` to store /// extra data derived from the underlying protocol. -#[derive(Default)] +#[derive(Clone, Default)] pub struct Extensions { // If extensions are never used, no need to carry around an empty HashMap. // That's 3 words. Instead, this is only 1 word. 
@@ -59,16 +59,11 @@ impl Extensions { /// assert!(ext.insert(4u8).is_none()); /// assert_eq!(ext.insert(9i32), Some(5i32)); /// ``` - pub fn insert(&mut self, val: T) -> Option { + pub fn insert(&mut self, val: T) -> Option { self.map - .get_or_insert_with(|| Box::new(HashMap::default())) + .get_or_insert_with(Box::default) .insert(TypeId::of::(), Box::new(val)) - .and_then(|boxed| { - (boxed as Box) - .downcast() - .ok() - .map(|boxed| *boxed) - }) + .and_then(|boxed| boxed.into_any().downcast().ok().map(|boxed| *boxed)) } /// Get a reference to a type previously inserted on this `Extensions`. @@ -87,7 +82,7 @@ impl Extensions { self.map .as_ref() .and_then(|map| map.get(&TypeId::of::())) - .and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref()) + .and_then(|boxed| (**boxed).as_any().downcast_ref()) } /// Get a mutable reference to a type previously inserted on this `Extensions`. @@ -106,7 +101,63 @@ impl Extensions { self.map .as_mut() .and_then(|map| map.get_mut(&TypeId::of::())) - .and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut()) + .and_then(|boxed| (**boxed).as_any_mut().downcast_mut()) + } + + /// Get a mutable reference to a type, inserting `value` if not already present on this + /// `Extensions`. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// *ext.get_or_insert(1i32) += 2; + /// + /// assert_eq!(*ext.get::().unwrap(), 3); + /// ``` + pub fn get_or_insert(&mut self, value: T) -> &mut T { + self.get_or_insert_with(|| value) + } + + /// Get a mutable reference to a type, inserting the value created by `f` if not already present + /// on this `Extensions`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// *ext.get_or_insert_with(|| 1i32) += 2; + /// + /// assert_eq!(*ext.get::().unwrap(), 3); + /// ``` + pub fn get_or_insert_with T>( + &mut self, + f: F, + ) -> &mut T { + let out = self + .map + .get_or_insert_with(Box::default) + .entry(TypeId::of::()) + .or_insert_with(|| Box::new(f())); + (**out).as_any_mut().downcast_mut().unwrap() + } + + /// Get a mutable reference to a type, inserting the type's default value if not already present + /// on this `Extensions`. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// *ext.get_or_insert_default::() += 2; + /// + /// assert_eq!(*ext.get::().unwrap(), 2); + /// ``` + pub fn get_or_insert_default(&mut self) -> &mut T { + self.get_or_insert_with(T::default) } /// Remove a type from this `Extensions`. @@ -126,12 +177,7 @@ impl Extensions { self.map .as_mut() .and_then(|map| map.remove(&TypeId::of::())) - .and_then(|boxed| { - (boxed as Box) - .downcast() - .ok() - .map(|boxed| *boxed) - }) + .and_then(|boxed| boxed.into_any().downcast().ok().map(|boxed| *boxed)) } /// Clear the `Extensions` of all inserted extensions. @@ -166,9 +212,7 @@ impl Extensions { /// ``` #[inline] pub fn is_empty(&self) -> bool { - self.map - .as_ref() - .map_or(true, |map| map.is_empty()) + self.map.as_ref().map_or(true, |map| map.is_empty()) } /// Get the numer of extensions available. @@ -184,28 +228,26 @@ impl Extensions { /// ``` #[inline] pub fn len(&self) -> usize { - self.map - .as_ref() - .map_or(0, |map| map.len()) + self.map.as_ref().map_or(0, |map| map.len()) } /// Extends `self` with another `Extensions`. /// /// If an instance of a specific type exists in both, the one in `self` is overwritten with the /// one from `other`. 
- /// + /// /// # Example - /// + /// /// ``` /// # use http::Extensions; /// let mut ext_a = Extensions::new(); /// ext_a.insert(8u8); /// ext_a.insert(16u16); - /// + /// /// let mut ext_b = Extensions::new(); /// ext_b.insert(4u8); /// ext_b.insert("hello"); - /// + /// /// ext_a.extend(ext_b); /// assert_eq!(ext_a.len(), 3); /// assert_eq!(ext_a.get::(), Some(&4u8)); @@ -229,9 +271,40 @@ impl fmt::Debug for Extensions { } } +trait AnyClone: Any { + fn clone_box(&self) -> Box; + fn as_any(&self) -> &dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; + fn into_any(self: Box) -> Box; +} + +impl AnyClone for T { + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } + + fn into_any(self: Box) -> Box { + self + } +} + +impl Clone for Box { + fn clone(&self) -> Self { + (**self).clone_box() + } +} + #[test] fn test_extensions() { - #[derive(Debug, PartialEq)] + #[derive(Clone, Debug, PartialEq)] struct MyType(i32); let mut extensions = Extensions::new(); @@ -242,9 +315,15 @@ fn test_extensions() { assert_eq!(extensions.get(), Some(&5i32)); assert_eq!(extensions.get_mut(), Some(&mut 5i32)); + let ext2 = extensions.clone(); + assert_eq!(extensions.remove::(), Some(5i32)); assert!(extensions.get::().is_none()); + // clone still has it + assert_eq!(ext2.get(), Some(&5i32)); + assert_eq!(ext2.get(), Some(&MyType(10))); + assert_eq!(extensions.get::(), None); assert_eq!(extensions.get(), Some(&MyType(10))); } diff --git a/.cargo-vendor/http/src/header/map.rs b/.cargo-vendor/http/src/header/map.rs index 36f1c92654..e1960a0c7e 100644 --- a/.cargo-vendor/http/src/header/map.rs +++ b/.cargo-vendor/http/src/header/map.rs @@ -1,5 +1,5 @@ -use std::collections::HashMap; use std::collections::hash_map::RandomState; +use std::collections::HashMap; use std::convert::TryFrom; use std::hash::{BuildHasher, Hash, Hasher}; use std::iter::{FromIterator, FusedIterator}; @@ -8,15 
+8,15 @@ use std::{fmt, mem, ops, ptr, vec}; use crate::Error; -use super::HeaderValue; use super::name::{HdrName, HeaderName, InvalidHeaderName}; +use super::HeaderValue; pub use self::as_header_name::AsHeaderName; pub use self::into_header_name::IntoHeaderName; /// A set of HTTP headers /// -/// `HeaderMap` is an multimap of [`HeaderName`] to values. +/// `HeaderMap` is a multimap of [`HeaderName`] to values. /// /// [`HeaderName`]: struct.HeaderName.html /// @@ -459,7 +459,7 @@ impl HeaderMap { /// allocations before `capacity` headers are stored in the map. /// /// More capacity than requested may be allocated. - /// + /// /// # Panics /// /// This method panics if capacity exceeds max `HeaderMap` capacity. @@ -715,7 +715,7 @@ impl HeaderMap { return Err(MaxSizeReached::new()); } - if self.entries.len() == 0 { + if self.entries.is_empty() { self.mask = cap as Size - 1; self.indices = vec![Pos::none(); cap].into_boxed_slice(); self.entries = Vec::with_capacity(usable_capacity(cap)); @@ -1033,7 +1033,9 @@ impl HeaderMap { let entries = &mut self.entries[..] 
as *mut _; let extra_values = &mut self.extra_values as *mut _; let len = self.entries.len(); - unsafe { self.entries.set_len(0); } + unsafe { + self.entries.set_len(0); + } Drain { idx: 0, @@ -1170,22 +1172,22 @@ impl HeaderMap { danger, Entry::Vacant(VacantEntry { map: self, - hash: hash, + hash, key: key.into(), - probe: probe, - danger: danger, + probe, + danger, }), Entry::Occupied(OccupiedEntry { map: self, index: pos, - probe: probe, + probe, }), Entry::Vacant(VacantEntry { map: self, - hash: hash, + hash, key: key.into(), - probe: probe, - danger: danger, + probe, + danger, }) )) } @@ -1324,14 +1326,12 @@ impl HeaderMap { let raw_links = self.raw_links(); let extra_values = &mut self.extra_values; - let next = links.map(|l| { - drain_all_extra_values(raw_links, extra_values, l.next) - .into_iter() - }); + let next = + links.map(|l| drain_all_extra_values(raw_links, extra_values, l.next).into_iter()); ValueDrain { first: Some(old), - next: next, + next, lt: PhantomData, } } @@ -1497,7 +1497,7 @@ impl HeaderMap { if danger || num_displaced >= DISPLACEMENT_THRESHOLD { // Increase danger level - self.danger.to_yellow(); + self.danger.set_yellow(); } Ok(index) @@ -1579,7 +1579,7 @@ impl HeaderMap { // backward shift deletion in self.indices // after probe, shift all non-ideally placed indices backward - if self.entries.len() > 0 { + if !self.entries.is_empty() { let mut last_probe = probe; let mut probe = probe + 1; @@ -1633,9 +1633,9 @@ impl HeaderMap { } self.entries.push(Bucket { - hash: hash, - key: key, - value: value, + hash, + key, + value, links: None, }); @@ -1697,7 +1697,7 @@ impl HeaderMap { if load_factor >= LOAD_FACTOR_THRESHOLD { // Transition back to green danger level - self.danger.to_green(); + self.danger.set_green(); // Double the capacity let new_cap = self.indices.len() * 2; @@ -1705,7 +1705,7 @@ impl HeaderMap { // Grow the capacity self.try_grow(new_cap)?; } else { - self.danger.to_red(); + self.danger.set_red(); // Rebuild hash table for 
index in self.indices.iter_mut() { @@ -1780,9 +1780,8 @@ impl HeaderMap { fn remove_extra_value( mut raw_links: RawLinks, extra_values: &mut Vec>, - idx: usize) - -> ExtraValue -{ + idx: usize, +) -> ExtraValue { let prev; let next; @@ -1803,8 +1802,7 @@ fn remove_extra_value( (Link::Entry(prev), Link::Extra(next)) => { debug_assert!(raw_links[prev].is_some()); - raw_links[prev].as_mut().unwrap() - .next = next; + raw_links[prev].as_mut().unwrap().next = next; debug_assert!(extra_values.len() > next); extra_values[next].prev = Link::Entry(prev); @@ -1812,8 +1810,7 @@ fn remove_extra_value( (Link::Extra(prev), Link::Entry(next)) => { debug_assert!(raw_links[next].is_some()); - raw_links[next].as_mut().unwrap() - .tail = prev; + raw_links[next].as_mut().unwrap().tail = prev; debug_assert!(extra_values.len() > prev); extra_values[prev].next = Link::Entry(next); @@ -1901,9 +1898,8 @@ fn remove_extra_value( fn drain_all_extra_values( raw_links: RawLinks, extra_values: &mut Vec>, - mut head: usize) - -> Vec -{ + mut head: usize, +) -> Vec { let mut vec = Vec::new(); loop { let extra = remove_extra_value(raw_links, extra_values, head); @@ -2025,17 +2021,17 @@ impl FromIterator<(HeaderName, T)> for HeaderMap { /// assert_eq!(headers["X-Custom-Header"], "my value"); /// ``` impl<'a, K, V, T> TryFrom<&'a HashMap> for HeaderMap - where - K: Eq + Hash, - HeaderName: TryFrom<&'a K>, - >::Error: Into, - T: TryFrom<&'a V>, - T::Error: Into, +where + K: Eq + Hash, + HeaderName: TryFrom<&'a K>, + >::Error: Into, + T: TryFrom<&'a V>, + T::Error: Into, { type Error = Error; fn try_from(c: &'a HashMap) -> Result { - c.into_iter() + c.iter() .map(|(k, v)| -> crate::Result<(HeaderName, T)> { let name = TryFrom::try_from(k).map_err(Into::into)?; let value = TryFrom::try_from(v).map_err(Into::into)?; @@ -2172,7 +2168,7 @@ impl Default for HeaderMap { } } -impl<'a, K, T> ops::Index for HeaderMap +impl ops::Index for HeaderMap where K: AsHeaderName, { @@ -2222,7 +2218,7 @@ fn append_value( 
Some(links) => { let idx = extra.len(); extra.push(ExtraValue { - value: value, + value, prev: Link::Extra(links.tail), next: Link::Entry(entry_idx), }); @@ -2234,7 +2230,7 @@ fn append_value( None => { let idx = extra.len(); extra.push(ExtraValue { - value: value, + value, prev: Link::Entry(entry_idx), next: Link::Entry(entry_idx), }); @@ -2376,6 +2372,18 @@ impl<'a, T> Iterator for Keys<'a, T> { fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } + + fn nth(&mut self, n: usize) -> Option { + self.inner.nth(n).map(|b| &b.key) + } + + fn count(self) -> usize { + self.inner.count() + } + + fn last(self) -> Option { + self.inner.last().map(|b| &b.key) + } } impl<'a, T> ExactSizeIterator for Keys<'a, T> {} @@ -2423,9 +2431,7 @@ impl<'a, T> Iterator for Drain<'a, T> { // Remove the extra value let raw_links = RawLinks(self.entries); - let extra = unsafe { - remove_extra_value(raw_links, &mut *self.extra_values, next) - }; + let extra = unsafe { remove_extra_value(raw_links, &mut *self.extra_values, next) }; match extra.next { Link::Extra(idx) => self.next = Some(idx), @@ -2794,7 +2800,7 @@ impl<'a, T> VacantEntry<'a, T> { Ok(OccupiedEntry { map: self.map, - index: index, + index, probe: self.probe, }) } @@ -3200,7 +3206,7 @@ impl<'a, T> OccupiedEntry<'a, T> { /// assert_eq!("earth", map["host"]); /// ``` pub fn insert(&mut self, value: T) -> T { - self.map.insert_occupied(self.index, value.into()) + self.map.insert_occupied(self.index, value) } /// Sets the value of the entry. @@ -3226,7 +3232,7 @@ impl<'a, T> OccupiedEntry<'a, T> { /// assert_eq!("earth", map["host"]); /// ``` pub fn insert_mult(&mut self, value: T) -> ValueDrain<'_, T> { - self.map.insert_occupied_mult(self.index, value.into()) + self.map.insert_occupied_mult(self.index, value) } /// Insert the value into the entry. 
@@ -3253,7 +3259,7 @@ impl<'a, T> OccupiedEntry<'a, T> { pub fn append(&mut self, value: T) { let idx = self.index; let entry = &mut self.map.entries[idx]; - append_value(idx, entry, &mut self.map.extra_values, value.into()); + append_value(idx, entry, &mut self.map.extra_values, value); } /// Remove the entry from the map. @@ -3318,10 +3324,9 @@ impl<'a, T> OccupiedEntry<'a, T> { let raw_links = self.map.raw_links(); let extra_values = &mut self.map.extra_values; - let next = self.map.entries[self.index].links.map(|l| { - drain_all_extra_values(raw_links, extra_values, l.next) - .into_iter() - }); + let next = self.map.entries[self.index] + .links + .map(|l| drain_all_extra_values(raw_links, extra_values, l.next).into_iter()); let entry = self.map.remove_found(self.probe, self.index); @@ -3432,12 +3437,12 @@ impl<'a, T> Iterator for ValueDrain<'a, T> { // Exactly 1 (&Some(_), &None) => (1, Some(1)), // 1 + extras - (&Some(_), &Some(ref extras)) => { + (&Some(_), Some(extras)) => { let (l, u) = extras.size_hint(); (l + 1, u.map(|u| u + 1)) - }, + } // Extras only - (&None, &Some(ref extras)) => extras.size_hint(), + (&None, Some(extras)) => extras.size_hint(), // No more (&None, &None) => (0, Some(0)), } @@ -3448,7 +3453,7 @@ impl<'a, T> FusedIterator for ValueDrain<'a, T> {} impl<'a, T> Drop for ValueDrain<'a, T> { fn drop(&mut self) { - while let Some(_) = self.next() {} + for _ in self.by_ref() {} } } @@ -3469,17 +3474,13 @@ impl ops::Index for RawLinks { type Output = Option; fn index(&self, idx: usize) -> &Self::Output { - unsafe { - &(*self.0)[idx].links - } + unsafe { &(*self.0)[idx].links } } } impl ops::IndexMut for RawLinks { fn index_mut(&mut self, idx: usize) -> &mut Self::Output { - unsafe { - &mut (*self.0)[idx].links - } + unsafe { &mut (*self.0)[idx].links } } } @@ -3491,7 +3492,7 @@ impl Pos { debug_assert!(index < MAX_SIZE); Pos { index: index as Size, - hash: hash, + hash, } } @@ -3525,34 +3526,25 @@ impl Pos { impl Danger { fn is_red(&self) -> 
bool { - match *self { - Danger::Red(_) => true, - _ => false, - } + matches!(*self, Danger::Red(_)) } - fn to_red(&mut self) { + fn set_red(&mut self) { debug_assert!(self.is_yellow()); *self = Danger::Red(RandomState::new()); } fn is_yellow(&self) -> bool { - match *self { - Danger::Yellow => true, - _ => false, - } + matches!(*self, Danger::Yellow) } - fn to_yellow(&mut self) { - match *self { - Danger::Green => { - *self = Danger::Yellow; - } - _ => {} + fn set_yellow(&mut self) { + if let Danger::Green = *self { + *self = Danger::Yellow; } } - fn to_green(&mut self) { + fn set_green(&mut self) { debug_assert!(self.is_yellow()); *self = Danger::Green; } @@ -3817,7 +3809,7 @@ mod as_header_name { } fn as_str(&self) -> &str { - ::as_str(*self) + ::as_str(self) } } @@ -3873,7 +3865,7 @@ mod as_header_name { } fn as_str(&self) -> &str { - *self + self } } diff --git a/.cargo-vendor/http/src/header/mod.rs b/.cargo-vendor/http/src/header/mod.rs index 4995541209..5d405767fb 100644 --- a/.cargo-vendor/http/src/header/mod.rs +++ b/.cargo-vendor/http/src/header/mod.rs @@ -83,6 +83,7 @@ pub use self::name::{HeaderName, InvalidHeaderName}; pub use self::value::{HeaderValue, InvalidHeaderValue, ToStrError}; // Use header name constants +#[rustfmt::skip] pub use self::name::{ ACCEPT, ACCEPT_CHARSET, diff --git a/.cargo-vendor/http/src/header/name.rs b/.cargo-vendor/http/src/header/name.rs index e51bc355f7..64a276aeef 100644 --- a/.cargo-vendor/http/src/header/name.rs +++ b/.cargo-vendor/http/src/header/name.rs @@ -2,12 +2,12 @@ use crate::byte_str::ByteStr; use bytes::{Bytes, BytesMut}; use std::borrow::Borrow; +use std::convert::TryFrom; use std::error::Error; -use std::convert::{TryFrom}; +use std::fmt; use std::hash::{Hash, Hasher}; use std::mem::MaybeUninit; use std::str::FromStr; -use std::fmt; /// Represents an HTTP header field name /// @@ -1002,6 +1002,7 @@ standard_headers! 
{ /// ``` // HEADER_CHARS maps every byte that is 128 or larger to 0 so everything that is // mapped by HEADER_CHARS, maps to a valid single-byte UTF-8 codepoint. +#[rustfmt::skip] const HEADER_CHARS: [u8; 256] = [ // 0 1 2 3 4 5 6 7 8 9 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x @@ -1035,6 +1036,7 @@ const HEADER_CHARS: [u8; 256] = [ /// Valid header name characters for HTTP/2.0 and HTTP/3.0 // HEADER_CHARS_H2 maps every byte that is 128 or larger to 0 so everything that is // mapped by HEADER_CHARS_H2, maps to a valid single-byte UTF-8 codepoint. +#[rustfmt::skip] const HEADER_CHARS_H2: [u8; 256] = [ // 0 1 2 3 4 5 6 7 8 9 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x @@ -1095,11 +1097,11 @@ fn parse_hdr<'a>( } } - - impl<'a> From for HdrName<'a> { fn from(hdr: StandardHeader) -> HdrName<'a> { - HdrName { inner: Repr::Standard(hdr) } + HdrName { + inner: Repr::Standard(hdr), + } } } @@ -1119,7 +1121,7 @@ impl HeaderName { Ok(Custom(val).into()) } Repr::Custom(MaybeLower { buf, lower: false }) => { - use bytes::{BufMut}; + use bytes::BufMut; let mut dst = BytesMut::with_capacity(buf.len()); for b in buf.iter() { @@ -1251,12 +1253,12 @@ impl HeaderName { pub const fn from_static(src: &'static str) -> HeaderName { let name_bytes = src.as_bytes(); if let Some(standard) = StandardHeader::from_bytes(name_bytes) { - return HeaderName{ + return HeaderName { inner: Repr::Standard(standard), }; } - if name_bytes.len() == 0 || name_bytes.len() > super::MAX_HEADER_NAME_LEN || { + if name_bytes.is_empty() || name_bytes.len() > super::MAX_HEADER_NAME_LEN || { let mut i = 0; loop { if i >= name_bytes.len() { @@ -1267,11 +1269,17 @@ impl HeaderName { i += 1; } } { + // TODO: When msrv is bumped to larger than 1.57, this should be + // replaced with `panic!` macro. + // https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html#panic-in-const-contexts + // + // See the panics section of this method's document for details. 
+ #[allow(clippy::no_effect)] ([] as [u8; 0])[0]; // Invalid header name } HeaderName { - inner: Repr::Custom(Custom(ByteStr::from_static(src))) + inner: Repr::Custom(Custom(ByteStr::from_static(src))), } } @@ -1282,7 +1290,7 @@ impl HeaderName { pub fn as_str(&self) -> &str { match self.inner { Repr::Standard(v) => v.as_str(), - Repr::Custom(ref v) => &*v.0, + Repr::Custom(ref v) => &v.0, } } @@ -1514,15 +1522,13 @@ impl<'a> HdrName<'a> { fn custom(buf: &'a [u8], lower: bool) -> HdrName<'a> { HdrName { // Invariant (on MaybeLower): follows from the precondition - inner: Repr::Custom(MaybeLower { - buf: buf, - lower: lower, - }), + inner: Repr::Custom(MaybeLower { buf, lower }), } } pub fn from_bytes(hdr: &[u8], f: F) -> Result - where F: FnOnce(HdrName<'_>) -> U, + where + F: FnOnce(HdrName<'_>) -> U, { let mut buf = uninit_u8_array(); // Precondition: HEADER_CHARS is a valid table for parse_hdr(). @@ -1551,7 +1557,7 @@ impl<'a> From> for HeaderName { }, Repr::Custom(maybe_lower) => { if maybe_lower.lower { - let buf = Bytes::copy_from_slice(&maybe_lower.buf[..]); + let buf = Bytes::copy_from_slice(maybe_lower.buf); // Safety: the invariant on MaybeLower ensures buf is valid UTF-8. let byte_str = unsafe { ByteStr::from_utf8_unchecked(buf) }; @@ -1636,9 +1642,10 @@ fn eq_ignore_ascii_case(lower: &[u8], s: &[u8]) -> bool { return false; } - lower.iter().zip(s).all(|(a, b)| { - *a == HEADER_CHARS[*b as usize] - }) + lower + .iter() + .zip(s) + .all(|(a, b)| *a == HEADER_CHARS[*b as usize]) } // Utility functions for MaybeUninit<>. 
These are drawn from unstable API's on @@ -1663,8 +1670,8 @@ unsafe fn slice_assume_init(slice: &[MaybeUninit]) -> &[T] { #[cfg(test)] mod tests { - use super::*; use self::StandardHeader::Vary; + use super::*; #[test] fn test_bounds() { @@ -1676,11 +1683,15 @@ mod tests { fn test_parse_invalid_headers() { for i in 0..128 { let hdr = vec![1u8; i]; - assert!(HeaderName::from_bytes(&hdr).is_err(), "{} invalid header chars did not fail", i); + assert!( + HeaderName::from_bytes(&hdr).is_err(), + "{} invalid header chars did not fail", + i + ); } } - const ONE_TOO_LONG: &[u8] = &[b'a'; super::super::MAX_HEADER_NAME_LEN+1]; + const ONE_TOO_LONG: &[u8] = &[b'a'; super::super::MAX_HEADER_NAME_LEN + 1]; #[test] fn test_invalid_name_lengths() { @@ -1728,7 +1739,10 @@ mod tests { }), }); - assert_eq!(name.inner, Repr::Custom(Custom(ByteStr::from_static("hello-world")))); + assert_eq!( + name.inner, + Repr::Custom(Custom(ByteStr::from_static("hello-world"))) + ); let name = HeaderName::from(HdrName { inner: Repr::Custom(MaybeLower { @@ -1737,49 +1751,68 @@ mod tests { }), }); - assert_eq!(name.inner, Repr::Custom(Custom(ByteStr::from_static("hello-world")))); + assert_eq!( + name.inner, + Repr::Custom(Custom(ByteStr::from_static("hello-world"))) + ); } #[test] fn test_eq_hdr_name() { use self::StandardHeader::Vary; - let a = HeaderName { inner: Repr::Standard(Vary) }; - let b = HdrName { inner: Repr::Standard(Vary) }; + let a = HeaderName { + inner: Repr::Standard(Vary), + }; + let b = HdrName { + inner: Repr::Standard(Vary), + }; assert_eq!(a, b); - let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("vaary"))) }; + let a = HeaderName { + inner: Repr::Custom(Custom(ByteStr::from_static("vaary"))), + }; assert_ne!(a, b); - let b = HdrName { inner: Repr::Custom(MaybeLower { - buf: b"vaary", - lower: true, - })}; + let b = HdrName { + inner: Repr::Custom(MaybeLower { + buf: b"vaary", + lower: true, + }), + }; assert_eq!(a, b); - let b = HdrName { inner: 
Repr::Custom(MaybeLower { - buf: b"vaary", - lower: false, - })}; + let b = HdrName { + inner: Repr::Custom(MaybeLower { + buf: b"vaary", + lower: false, + }), + }; assert_eq!(a, b); - let b = HdrName { inner: Repr::Custom(MaybeLower { - buf: b"VAARY", - lower: false, - })}; + let b = HdrName { + inner: Repr::Custom(MaybeLower { + buf: b"VAARY", + lower: false, + }), + }; assert_eq!(a, b); - let a = HeaderName { inner: Repr::Standard(Vary) }; + let a = HeaderName { + inner: Repr::Standard(Vary), + }; assert_ne!(a, b); } #[test] fn test_from_static_std() { - let a = HeaderName { inner: Repr::Standard(Vary) }; + let a = HeaderName { + inner: Repr::Standard(Vary), + }; let b = HeaderName::from_static("vary"); assert_eq!(a, b); @@ -1803,7 +1836,9 @@ mod tests { // MaybeLower { lower: true } #[test] fn test_from_static_custom_short() { - let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("customheader"))) }; + let a = HeaderName { + inner: Repr::Custom(Custom(ByteStr::from_static("customheader"))), + }; let b = HeaderName::from_static("customheader"); assert_eq!(a, b); } @@ -1823,11 +1858,13 @@ mod tests { // MaybeLower { lower: false } #[test] fn test_from_static_custom_long() { - let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static( - "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" - ))) }; + let a = HeaderName { + inner: Repr::Custom(Custom(ByteStr::from_static( + "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent", + ))), + }; let b = HeaderName::from_static( - "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" + "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent", ); assert_eq!(a, b); } @@ -1836,7 +1873,7 @@ mod tests { #[should_panic] fn test_from_static_custom_long_uppercase() { HeaderName::from_static( - "Longer-Than-63--ThisHeaderIsLongerThanSixtyThreeCharactersAndThusHandledDifferent" + 
"Longer-Than-63--ThisHeaderIsLongerThanSixtyThreeCharactersAndThusHandledDifferent", ); } @@ -1850,7 +1887,9 @@ mod tests { #[test] fn test_from_static_custom_single_char() { - let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("a"))) }; + let a = HeaderName { + inner: Repr::Custom(Custom(ByteStr::from_static("a"))), + }; let b = HeaderName::from_static("a"); assert_eq!(a, b); } diff --git a/.cargo-vendor/http/src/header/value.rs b/.cargo-vendor/http/src/header/value.rs index bf05f16f4e..b7978caca2 100644 --- a/.cargo-vendor/http/src/header/value.rs +++ b/.cargo-vendor/http/src/header/value.rs @@ -3,6 +3,7 @@ use bytes::{Bytes, BytesMut}; use std::convert::TryFrom; use std::error::Error; use std::fmt::Write; +use std::hash::{Hash, Hasher}; use std::str::FromStr; use std::{cmp, fmt, mem, str}; @@ -17,7 +18,7 @@ use crate::header::name::HeaderName; /// To handle this, the `HeaderValue` is useable as a type and can be compared /// with strings and implements `Debug`. A `to_str` fn is provided that returns /// an `Err` if the header value contains non visible ascii characters. -#[derive(Clone, Hash)] +#[derive(Clone)] pub struct HeaderValue { inner: Bytes, is_sensitive: bool, @@ -85,6 +86,12 @@ impl HeaderValue { let mut i = 0; while i < bytes.len() { if !is_visible_ascii(bytes[i]) { + // TODO: When msrv is bumped to larger than 1.57, this should be + // replaced with `panic!` macro. + // https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html#panic-in-const-contexts + // + // See the panics section of this method's document for details. 
+ #[allow(clippy::no_effect)] ([] as [u8; 0])[0]; // Invalid header value } i += 1; @@ -122,6 +129,7 @@ impl HeaderValue { /// assert!(val.is_err()); /// ``` #[inline] + #[allow(clippy::should_implement_trait)] pub fn from_str(src: &str) -> Result { HeaderValue::try_from_generic(src, |s| Bytes::copy_from_slice(s.as_bytes())) } @@ -191,6 +199,13 @@ impl HeaderValue { /// /// This function does NOT validate that illegal bytes are not contained /// within the buffer. + /// + /// ## Panics + /// In a debug build this will panic if `src` is not valid UTF-8. + /// + /// ## Safety + /// `src` must contain valid UTF-8. In a release build it is undefined + /// behaviour to call this with `src` that is not valid UTF-8. pub unsafe fn from_maybe_shared_unchecked(src: T) -> HeaderValue where T: AsRef<[u8]> + 'static, @@ -203,7 +218,6 @@ impl HeaderValue { } } } else { - if_downcast_into!(T, Bytes, src, { return HeaderValue { inner: src, @@ -223,7 +237,10 @@ impl HeaderValue { HeaderValue::try_from_generic(src, std::convert::identity) } - fn try_from_generic, F: FnOnce(T) -> Bytes>(src: T, into: F) -> Result { + fn try_from_generic, F: FnOnce(T) -> Bytes>( + src: T, + into: F, + ) -> Result { for &b in src.as_ref() { if !is_valid(b) { return Err(InvalidHeaderValue { _priv: () }); @@ -615,6 +632,12 @@ impl Error for ToStrError {} // ===== PartialEq / PartialOrd ===== +impl Hash for HeaderValue { + fn hash(&self, state: &mut H) { + self.inner.hash(state); + } +} + impl PartialEq for HeaderValue { #[inline] fn eq(&self, other: &HeaderValue) -> bool { @@ -627,7 +650,7 @@ impl Eq for HeaderValue {} impl PartialOrd for HeaderValue { #[inline] fn partial_cmp(&self, other: &HeaderValue) -> Option { - self.inner.partial_cmp(&other.inner) + Some(self.cmp(other)) } } @@ -697,7 +720,7 @@ impl PartialOrd for [u8] { impl PartialEq for HeaderValue { #[inline] fn eq(&self, other: &String) -> bool { - *self == &other[..] + *self == other[..] 
} } diff --git a/.cargo-vendor/http/src/lib.rs b/.cargo-vendor/http/src/lib.rs index d5b3e2d0b3..3e8fd87615 100644 --- a/.cargo-vendor/http/src/lib.rs +++ b/.cargo-vendor/http/src/lib.rs @@ -1,5 +1,3 @@ -#![doc(html_root_url = "https://docs.rs/http/0.2.11")] - //! A general purpose library of common HTTP types //! //! This crate is a general purpose library for common types found when working @@ -158,7 +156,11 @@ //! assert_eq!(uri.query(), None); //! ``` -#![deny(missing_docs, missing_debug_implementations)] +#![deny(warnings, missing_docs, missing_debug_implementations)] + +//#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(not(feature = "std"))] +compile_error!("`std` feature currently required, support for `no_std` may be added later"); #[cfg(test)] #[macro_use] @@ -193,19 +195,19 @@ pub use crate::status::StatusCode; pub use crate::uri::Uri; pub use crate::version::Version; -fn _assert_types() { - fn assert_send() {} - fn assert_sync() {} +#[cfg(test)] +mod tests { + use super::*; - assert_send::>(); - assert_send::>(); + fn assert_send_sync() {} - assert_sync::>(); - assert_sync::>(); -} + #[test] + fn request_satisfies_send_sync() { + assert_send_sync::>(); + } -mod sealed { - /// Private trait to this crate to prevent traits from being implemented in - /// downstream crates. - pub trait Sealed {} + #[test] + fn response_satisfies_send_sync() { + assert_send_sync::>(); + } } diff --git a/.cargo-vendor/http/src/method.rs b/.cargo-vendor/http/src/method.rs index 04261a37ab..94e4d4ecaf 100644 --- a/.cargo-vendor/http/src/method.rs +++ b/.cargo-vendor/http/src/method.rs @@ -15,13 +15,12 @@ //! assert_eq!(Method::POST.as_str(), "POST"); //! 
``` +use self::extension::{AllocatedExtension, InlineExtension}; use self::Inner::*; -use self::extension::{InlineExtension, AllocatedExtension}; -use std::convert::AsRef; +use std::convert::TryFrom; use std::error::Error; use std::str::FromStr; -use std::convert::TryFrom; use std::{fmt, str}; /// The Request Method (VERB) @@ -67,7 +66,6 @@ enum Inner { ExtensionAllocated(AllocatedExtension), } - impl Method { /// GET pub const GET: Method = Method(Get); @@ -148,10 +146,7 @@ impl Method { /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.1) /// for more words. pub fn is_safe(&self) -> bool { - match self.0 { - Get | Head | Options | Trace => true, - _ => false, - } + matches!(self.0, Get | Head | Options | Trace) } /// Whether a method is considered "idempotent", meaning the request has @@ -339,7 +334,7 @@ mod extension { let InlineExtension(ref data, len) = self; // Safety: the invariant of InlineExtension ensures that the first // len bytes of data contain valid UTF-8. - unsafe {str::from_utf8_unchecked(&data[..*len as usize])} + unsafe { str::from_utf8_unchecked(&data[..*len as usize]) } } } @@ -357,7 +352,7 @@ mod extension { pub fn as_str(&self) -> &str { // Safety: the invariant of AllocatedExtension ensures that self.0 // contains valid UTF-8. - unsafe {str::from_utf8_unchecked(&self.0)} + unsafe { str::from_utf8_unchecked(&self.0) } } } @@ -376,6 +371,7 @@ mod extension { // Note that this definition means that any &[u8] that consists solely of valid // characters is also valid UTF-8 because the valid method characters are a // subset of the valid 1 byte UTF-8 encoding. 
+ #[rustfmt::skip] const METHOD_CHARS: [u8; 256] = [ // 0 1 2 3 4 5 6 7 8 9 b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // x @@ -468,6 +464,6 @@ mod test { assert_eq!(Method::from_str("wOw!!").unwrap(), "wOw!!"); let long_method = "This_is_a_very_long_method.It_is_valid_but_unlikely."; - assert_eq!(Method::from_str(&long_method).unwrap(), long_method); + assert_eq!(Method::from_str(long_method).unwrap(), long_method); } } diff --git a/.cargo-vendor/http/src/request.rs b/.cargo-vendor/http/src/request.rs index 9940ae0824..d4c5bf5406 100644 --- a/.cargo-vendor/http/src/request.rs +++ b/.cargo-vendor/http/src/request.rs @@ -53,7 +53,7 @@ //! ``` use std::any::Any; -use std::convert::{TryFrom}; +use std::convert::TryFrom; use std::fmt; use crate::header::{HeaderMap, HeaderName, HeaderValue}; @@ -154,6 +154,7 @@ use crate::{Extensions, Result, Uri}; /// # /// # fn main() {} /// ``` +#[derive(Clone)] pub struct Request { head: Parts, body: T, @@ -163,6 +164,7 @@ pub struct Request { /// /// The HTTP request head consists of a method, uri, version, and a set of /// header fields. 
+#[derive(Clone)] pub struct Parts { /// The request's method pub method: Method, @@ -231,7 +233,6 @@ impl Request<()> { where Uri: TryFrom, >::Error: Into, - { Builder::new().method(Method::GET).uri(uri) } @@ -254,7 +255,6 @@ impl Request<()> { where Uri: TryFrom, >::Error: Into, - { Builder::new().method(Method::PUT).uri(uri) } @@ -277,7 +277,6 @@ impl Request<()> { where Uri: TryFrom, >::Error: Into, - { Builder::new().method(Method::POST).uri(uri) } @@ -300,7 +299,6 @@ impl Request<()> { where Uri: TryFrom, >::Error: Into, - { Builder::new().method(Method::DELETE).uri(uri) } @@ -324,7 +322,6 @@ impl Request<()> { where Uri: TryFrom, >::Error: Into, - { Builder::new().method(Method::OPTIONS).uri(uri) } @@ -347,7 +344,6 @@ impl Request<()> { where Uri: TryFrom, >::Error: Into, - { Builder::new().method(Method::HEAD).uri(uri) } @@ -370,7 +366,6 @@ impl Request<()> { where Uri: TryFrom, >::Error: Into, - { Builder::new().method(Method::CONNECT).uri(uri) } @@ -439,7 +434,7 @@ impl Request { pub fn new(body: T) -> Request { Request { head: Parts::new(), - body: body, + body, } } @@ -457,10 +452,7 @@ impl Request { /// ``` #[inline] pub fn from_parts(parts: Parts, body: T) -> Request { - Request { - head: parts, - body: body, - } + Request { head: parts, body } } /// Returns a reference to the associated HTTP method. 
@@ -978,7 +970,7 @@ impl Builder { /// ``` pub fn extension(self, extension: T) -> Builder where - T: Any + Send + Sync + 'static, + T: Clone + Any + Send + Sync + 'static, { self.and_then(move |mut head| { head.extensions.insert(extension); @@ -1042,19 +1034,14 @@ impl Builder { /// .unwrap(); /// ``` pub fn body(self, body: T) -> Result> { - self.inner.map(move |head| { - Request { - head, - body, - } - }) + self.inner.map(move |head| Request { head, body }) } // private fn and_then(self, func: F) -> Self where - F: FnOnce(Parts) -> Result + F: FnOnce(Parts) -> Result, { Builder { inner: self.inner.and_then(func), diff --git a/.cargo-vendor/http/src/response.rs b/.cargo-vendor/http/src/response.rs index 1e88a3e55b..312cc5f854 100644 --- a/.cargo-vendor/http/src/response.rs +++ b/.cargo-vendor/http/src/response.rs @@ -176,6 +176,7 @@ use crate::{Extensions, Result}; /// # /// # fn main() {} /// ``` +#[derive(Clone)] pub struct Response { head: Parts, body: T, @@ -185,6 +186,7 @@ pub struct Response { /// /// The HTTP response head consists of a status, version, and a set of /// header fields. +#[derive(Clone)] pub struct Parts { /// The response's status pub status: StatusCode, @@ -235,7 +237,7 @@ impl Response<()> { impl Response { /// Creates a new blank `Response` with the body /// - /// The component ports of this response will be set to their default, e.g. + /// The component parts of this response will be set to their default, e.g. /// the ok status, no headers, etc. /// /// # Examples @@ -251,7 +253,7 @@ impl Response { pub fn new(body: T) -> Response { Response { head: Parts::new(), - body: body, + body, } } @@ -272,10 +274,7 @@ impl Response { /// ``` #[inline] pub fn from_parts(parts: Parts, body: T) -> Response { - Response { - head: parts, - body: body, - } + Response { head: parts, body } } /// Returns the `StatusCode`. 
@@ -684,7 +683,7 @@ impl Builder { /// ``` pub fn extension(self, extension: T) -> Builder where - T: Any + Send + Sync + 'static, + T: Clone + Any + Send + Sync + 'static, { self.and_then(move |mut head| { head.extensions.insert(extension); @@ -748,19 +747,14 @@ impl Builder { /// .unwrap(); /// ``` pub fn body(self, body: T) -> Result> { - self.inner.map(move |head| { - Response { - head, - body, - } - }) + self.inner.map(move |head| Response { head, body }) } // private fn and_then(self, func: F) -> Self where - F: FnOnce(Parts) -> Result + F: FnOnce(Parts) -> Result, { Builder { inner: self.inner.and_then(func), diff --git a/.cargo-vendor/http/src/status.rs b/.cargo-vendor/http/src/status.rs index d98d24c3d9..1f619ee17d 100644 --- a/.cargo-vendor/http/src/status.rs +++ b/.cargo-vendor/http/src/status.rs @@ -15,9 +15,9 @@ //! ``` use std::convert::TryFrom; -use std::num::NonZeroU16; use std::error::Error; use std::fmt; +use std::num::NonZeroU16; use std::str::FromStr; /// An HTTP status code (`status-code` in RFC 7230 et al.). @@ -71,7 +71,7 @@ impl StatusCode { /// ``` #[inline] pub fn from_u16(src: u16) -> Result { - if src < 100 || src >= 1000 { + if !(100..1000).contains(&src) { return Err(InvalidStatusCode::new()); } @@ -140,10 +140,14 @@ impl StatusCode { // ASCII-only, of length 900 * 3 = 2700 bytes #[cfg(debug_assertions)] - { &CODE_DIGITS[offset..offset+3] } + { + &CODE_DIGITS[offset..offset + 3] + } #[cfg(not(debug_assertions))] - unsafe { CODE_DIGITS.get_unchecked(offset..offset+3) } + unsafe { + CODE_DIGITS.get_unchecked(offset..offset + 3) + } } /// Get the standardised `reason-phrase` for this status code. @@ -263,7 +267,7 @@ impl FromStr for StatusCode { impl<'a> From<&'a StatusCode> for StatusCode { #[inline] fn from(t: &'a StatusCode) -> Self { - t.clone() + t.to_owned() } } @@ -447,7 +451,7 @@ status_codes! 
{ (418, IM_A_TEAPOT, "I'm a teapot"); /// 421 Misdirected Request - /// [RFC7540, Section 9.1.2](http://tools.ietf.org/html/rfc7540#section-9.1.2) + /// [RFC7540, Section 9.1.2](https://tools.ietf.org/html/rfc7540#section-9.1.2) (421, MISDIRECTED_REQUEST, "Misdirected Request"); /// 422 Unprocessable Entity /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] @@ -475,7 +479,7 @@ status_codes! { (431, REQUEST_HEADER_FIELDS_TOO_LARGE, "Request Header Fields Too Large"); /// 451 Unavailable For Legal Reasons - /// [[RFC7725](http://tools.ietf.org/html/rfc7725)] + /// [[RFC7725](https://tools.ietf.org/html/rfc7725)] (451, UNAVAILABLE_FOR_LEGAL_REASONS, "Unavailable For Legal Reasons"); /// 500 Internal Server Error @@ -516,9 +520,7 @@ status_codes! { impl InvalidStatusCode { fn new() -> InvalidStatusCode { - InvalidStatusCode { - _priv: (), - } + InvalidStatusCode { _priv: () } } } @@ -540,7 +542,7 @@ impl Error for InvalidStatusCode {} // A string of packed 3-ASCII-digit status code values for the supported range // of [100, 999] (900 codes, 2700 bytes). 
-const CODE_DIGITS: &'static str = "\ +const CODE_DIGITS: &str = "\ 100101102103104105106107108109110111112113114115116117118119\ 120121122123124125126127128129130131132133134135136137138139\ 140141142143144145146147148149150151152153154155156157158159\ diff --git a/.cargo-vendor/http/src/uri/authority.rs b/.cargo-vendor/http/src/uri/authority.rs index f41ddd19cb..dab6dcd0e3 100644 --- a/.cargo-vendor/http/src/uri/authority.rs +++ b/.cargo-vendor/http/src/uri/authority.rs @@ -238,7 +238,7 @@ impl Authority { pub fn port(&self) -> Option> { let bytes = self.as_str(); bytes - .rfind(":") + .rfind(':') .and_then(|i| Port::from_str(&bytes[i + 1..]).ok()) } @@ -253,7 +253,7 @@ impl Authority { /// assert_eq!(authority.port_u16(), Some(80)); /// ``` pub fn port_u16(&self) -> Option { - self.port().and_then(|p| Some(p.as_u16())) + self.port().map(|p| p.as_u16()) } /// Return a str representation of the authority @@ -434,7 +434,7 @@ impl<'a> TryFrom<&'a [u8]> for Authority { // Preconditon on create_authority: copy_from_slice() copies all of // bytes from the [u8] parameter into a new Bytes - create_authority(s, |s| Bytes::copy_from_slice(s)) + create_authority(s, Bytes::copy_from_slice) } } @@ -486,7 +486,7 @@ impl fmt::Display for Authority { fn host(auth: &str) -> &str { let host_port = auth - .rsplitn(2, '@') + .rsplit('@') .next() .expect("split always has at least 1 item"); diff --git a/.cargo-vendor/http/src/uri/builder.rs b/.cargo-vendor/http/src/uri/builder.rs index 825c0fafcc..9964d38985 100644 --- a/.cargo-vendor/http/src/uri/builder.rs +++ b/.cargo-vendor/http/src/uri/builder.rs @@ -137,7 +137,6 @@ impl Builder { where F: FnOnce(Parts) -> Result, { - Builder { parts: self.parts.and_then(func), } @@ -153,6 +152,14 @@ impl Default for Builder { } } +impl From for Builder { + fn from(uri: Uri) -> Self { + Self { + parts: Ok(uri.into_parts()), + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -194,4 +201,11 @@ mod tests { assert_eq!(uri.query(), 
Some(expected_query.as_str())); } } + + #[test] + fn build_from_uri() { + let original_uri = Uri::default(); + let uri = Builder::from(original_uri.clone()).build().unwrap(); + assert_eq!(original_uri, uri); + } } diff --git a/.cargo-vendor/http/src/uri/mod.rs b/.cargo-vendor/http/src/uri/mod.rs index 5ebd47b6f5..6ef2dc8eda 100644 --- a/.cargo-vendor/http/src/uri/mod.rs +++ b/.cargo-vendor/http/src/uri/mod.rs @@ -149,6 +149,7 @@ const MAX_LEN: usize = (u16::MAX - 1) as usize; // of this table is that all entries above 127 are invalid. This makes all of the // valid entries a valid single-byte UTF-8 code point. This means that a slice // of such valid entries is valid UTF-8. +#[rustfmt::skip] const URI_CHARS: [u8; 256] = [ // 0 1 2 3 4 5 6 7 8 9 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x @@ -244,10 +245,8 @@ impl Uri { if src.path_and_query.is_none() { return Err(ErrorKind::PathAndQueryMissing.into()); } - } else { - if src.authority.is_some() && src.path_and_query.is_some() { - return Err(ErrorKind::SchemeMissing.into()); - } + } else if src.authority.is_some() && src.path_and_query.is_some() { + return Err(ErrorKind::SchemeMissing.into()); } let scheme = match src.scheme { @@ -268,9 +267,9 @@ impl Uri { }; Ok(Uri { - scheme: scheme, - authority: authority, - path_and_query: path_and_query, + scheme, + authority, + path_and_query, }) } @@ -321,7 +320,7 @@ impl Uri { return Ok(Uri { scheme: Scheme::empty(), - authority: authority, + authority, path_and_query: PathAndQuery::empty(), }); } @@ -650,7 +649,7 @@ impl Uri { /// assert_eq!(uri.port_u16(), Some(80)); /// ``` pub fn port_u16(&self) -> Option { - self.port().and_then(|p| Some(p.as_u16())) + self.port().map(|p| p.as_u16()) } /// Get the query string of this `Uri`, starting after the `?`. 
@@ -742,7 +741,7 @@ impl TryFrom for Uri { } } -impl<'a> TryFrom> for Uri { +impl TryFrom> for Uri { type Error = InvalidUri; #[inline] @@ -812,9 +811,9 @@ impl From for Parts { }; Parts { - scheme: scheme, - authority: authority, - path_and_query: path_and_query, + scheme, + authority, + path_and_query, _priv: (), } } @@ -858,7 +857,7 @@ fn parse_full(mut s: Bytes) -> Result { return Ok(Uri { scheme: scheme.into(), - authority: authority, + authority, path_and_query: PathAndQuery::empty(), }); } @@ -875,7 +874,7 @@ fn parse_full(mut s: Bytes) -> Result { Ok(Uri { scheme: scheme.into(), - authority: authority, + authority, path_and_query: PathAndQuery::from_shared(s)?, }) } @@ -965,8 +964,8 @@ impl PartialEq for Uri { } if let Some(query) = self.query() { - if other.len() == 0 { - return query.len() == 0; + if other.is_empty() { + return query.is_empty(); } if other[0] != b'?' { diff --git a/.cargo-vendor/http/src/uri/path.rs b/.cargo-vendor/http/src/uri/path.rs index be2cb65c1b..341ba2e61f 100644 --- a/.cargo-vendor/http/src/uri/path.rs +++ b/.cargo-vendor/http/src/uri/path.rs @@ -43,13 +43,14 @@ impl PathAndQuery { // This is the range of bytes that don't need to be // percent-encoded in the path. If it should have been // percent-encoded, then error. + #[rustfmt::skip] 0x21 | 0x24..=0x3B | 0x3D | 0x40..=0x5F | 0x61..=0x7A | 0x7C | - 0x7E => {}, + 0x7E => {} // These are code points that are supposed to be // percent-encoded in the path but there are clients @@ -60,8 +61,9 @@ impl PathAndQuery { // For reference, those are code points that are used // to send requests with JSON directly embedded in // the URI path. Yes, those things happen for real. 
+ #[rustfmt::skip] b'"' | - b'{' | b'}' => {}, + b'{' | b'}' => {} _ => return Err(ErrorKind::InvalidUriChar.into()), } @@ -76,10 +78,11 @@ impl PathAndQuery { // See https://url.spec.whatwg.org/#query-state // // Allowed: 0x21 / 0x24 - 0x3B / 0x3D / 0x3F - 0x7E + #[rustfmt::skip] 0x21 | 0x24..=0x3B | 0x3D | - 0x3F..=0x7E => {}, + 0x3F..=0x7E => {} b'#' => { fragment = Some(i); @@ -98,7 +101,7 @@ impl PathAndQuery { Ok(PathAndQuery { data: unsafe { ByteStr::from_utf8_unchecked(src) }, - query: query, + query, }) } @@ -291,7 +294,7 @@ impl<'a> TryFrom<&'a str> for PathAndQuery { } } -impl<'a> TryFrom> for PathAndQuery { +impl TryFrom> for PathAndQuery { type Error = InvalidUri; #[inline] fn try_from(vec: Vec) -> Result { @@ -555,7 +558,10 @@ mod tests { #[test] fn json_is_fine() { - assert_eq!(r#"/{"bread":"baguette"}"#, pq(r#"/{"bread":"baguette"}"#).path()); + assert_eq!( + r#"/{"bread":"baguette"}"#, + pq(r#"/{"bread":"baguette"}"#).path() + ); } fn pq(s: &str) -> PathAndQuery { diff --git a/.cargo-vendor/http/src/uri/scheme.rs b/.cargo-vendor/http/src/uri/scheme.rs index 682b11eeea..c33ec41a61 100644 --- a/.cargo-vendor/http/src/uri/scheme.rs +++ b/.cargo-vendor/http/src/uri/scheme.rs @@ -132,7 +132,7 @@ impl PartialEq for Scheme { match (&self.inner, &other.inner) { (&Standard(Http), &Standard(Http)) => true, (&Standard(Https), &Standard(Https)) => true, - (&Other(ref a), &Other(ref b)) => a.eq_ignore_ascii_case(b), + (Other(a), Other(b)) => a.eq_ignore_ascii_case(b), (&None, _) | (_, &None) => unreachable!(), _ => false, } @@ -185,10 +185,7 @@ impl Hash for Scheme { impl Scheme2 { pub(super) fn is_none(&self) -> bool { - match *self { - Scheme2::None => true, - _ => false, - } + matches!(*self, Scheme2::None) } } @@ -204,6 +201,7 @@ const MAX_SCHEME_LEN: usize = 64; // important characteristic of this table is that all entries above 127 are // invalid. This makes all of the valid entries a valid single-byte UTF-8 code // point. 
This means that a slice of such valid entries is valid UTF-8. +#[rustfmt::skip] const SCHEME_CHARS: [u8; 256] = [ // 0 1 2 3 4 5 6 7 8 9 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x diff --git a/.cargo-vendor/http/tests/header_map.rs b/.cargo-vendor/http/tests/header_map.rs index f2beba0862..9859b0a832 100644 --- a/.cargo-vendor/http/tests/header_map.rs +++ b/.cargo-vendor/http/tests/header_map.rs @@ -190,7 +190,7 @@ fn drain_entry() { assert_eq!(vals[1], "world2"); } - assert_eq!(5-2+1, headers.len()); + assert_eq!(5 - 2 + 1, headers.len()); } #[test] @@ -427,7 +427,6 @@ fn value_htab() { HeaderValue::from_str("hello\tworld").unwrap(); } - #[test] fn remove_multiple_a() { let mut headers = HeaderMap::new(); @@ -570,7 +569,8 @@ fn remove_entry_multi_3_others() { } fn remove_all_values(headers: &mut HeaderMap, key: K) -> Vec - where K: IntoHeaderName +where + K: IntoHeaderName, { match headers.entry(key) { Entry::Occupied(e) => e.remove_entry_mult().1.collect(), @@ -629,10 +629,22 @@ fn remove_entry_3_others_b() { } fn remove_values(headers: &mut HeaderMap, key: K) -> Option - where K: IntoHeaderName +where + K: IntoHeaderName, { match headers.entry(key) { Entry::Occupied(e) => Some(e.remove_entry().1), Entry::Vacant(_) => None, } } + +#[test] +fn ensure_miri_sharedreadonly_not_violated() { + let mut headers = HeaderMap::new(); + headers.insert( + HeaderName::from_static("chunky-trailer"), + HeaderValue::from_static("header data"), + ); + + let _foo = &headers.iter().next(); +} diff --git a/.cargo-vendor/http/tests/header_map_fuzz.rs b/.cargo-vendor/http/tests/header_map_fuzz.rs index c3af2e52e7..40db0494b6 100644 --- a/.cargo-vendor/http/tests/header_map_fuzz.rs +++ b/.cargo-vendor/http/tests/header_map_fuzz.rs @@ -76,12 +76,12 @@ impl Fuzz { let mut steps = vec![]; let mut expect = AltMap::default(); - let num = rng.gen_range(5, 500); + let num = rng.gen_range(5..500); let weight = Weight { - insert: rng.gen_range(1, 10), - remove: rng.gen_range(1, 10), - append: 
rng.gen_range(1, 10), + insert: rng.gen_range(1..10), + remove: rng.gen_range(1..10), + append: rng.gen_range(1..10), }; while steps.len() < num { @@ -89,8 +89,8 @@ impl Fuzz { } Fuzz { - seed: seed, - steps: steps, + seed, + steps, reduce: 0, } } @@ -111,8 +111,8 @@ impl Fuzz { } impl Arbitrary for Fuzz { - fn arbitrary(g: &mut G) -> Self { - Fuzz::new(Rng::gen(g)) + fn arbitrary(_: &mut Gen) -> Self { + Self::new(rand::thread_rng().gen()) } } @@ -121,7 +121,7 @@ impl AltMap { let action = self.gen_action(weight, rng); Step { - action: action, + action, expect: self.clone(), } } @@ -130,7 +130,7 @@ impl AltMap { fn gen_action(&mut self, weight: &Weight, rng: &mut StdRng) -> Action { let sum = weight.insert + weight.remove + weight.append; - let mut num = rng.gen_range(0, sum); + let mut num = rng.gen_range(0..sum); if num < weight.insert { return self.gen_insert(rng); @@ -156,21 +156,14 @@ impl AltMap { let val = gen_header_value(rng); let old = self.insert(name.clone(), val.clone()); - Action::Insert { - name: name, - val: val, - old: old, - } + Action::Insert { name, val, old } } fn gen_remove(&mut self, rng: &mut StdRng) -> Action { let name = self.gen_name(-4, rng); let val = self.remove(&name); - Action::Remove { - name: name, - val: val, - } + Action::Remove { name, val } } fn gen_append(&mut self, rng: &mut StdRng) -> Action { @@ -182,11 +175,7 @@ impl AltMap { let ret = !vals.is_empty(); vals.push(val.clone()); - Action::Append { - name: name, - val: val, - ret: ret, - } + Action::Append { name, val, ret } } /// Negative numbers weigh finding an existing header higher @@ -213,7 +202,7 @@ impl AltMap { if self.map.is_empty() { None } else { - let n = rng.gen_range(0, self.map.len()); + let n = rng.gen_range(0..self.map.len()); self.map.keys().nth(n).map(Clone::clone) } } diff --git a/.cargo-vendor/http/tests/status_code.rs b/.cargo-vendor/http/tests/status_code.rs index 160df6bad5..0a96239804 100644 --- a/.cargo-vendor/http/tests/status_code.rs +++ 
b/.cargo-vendor/http/tests/status_code.rs @@ -3,7 +3,7 @@ use http::*; #[test] fn from_bytes() { for ok in &[ - "100", "101", "199", "200", "250", "299", "321", "399", "499", "599", "600", "999" + "100", "101", "199", "200", "250", "299", "321", "399", "499", "599", "600", "999", ] { assert!(StatusCode::from_bytes(ok.as_bytes()).is_ok()); } diff --git a/.cargo-vendor/hyper-0.14.30/.cargo-checksum.json b/.cargo-vendor/hyper-0.14.30/.cargo-checksum.json new file mode 100644 index 0000000000..5c8b448d2f --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"5287a86e0a967152d6578b885bcfdf76786f09f7fa2d478f295e79e22c9af437","LICENSE":"25dfd9ec24ebbee73dc93e687526cd4c26deae17bc2179ea0fe3e5dc96105b9b","src/body/aggregate.rs":"a4b05d775a7ef7807ce4eb3ccd0592f542398c7e14c876cb07298dc792b076e3","src/body/body.rs":"1fafdc91f9a48c19ec3eaeede79adc1b7c0162bca03efb40f85e9d7f7ed9eb3f","src/body/length.rs":"2d186f50f05b0562db92d3e1d272521c60c9ad259222ccb1a9310b1ff380a64e","src/body/mod.rs":"8098a367c239f05ba443a9b5394449142c6f144ad187f87f8d9f9cde74405b44","src/body/to_bytes.rs":"517077ed79a987c96a89f70a09eba776db5e8b867768da2ca291d28c2c0d70d2","src/cfg.rs":"de5fee5bba45a982c10e8f09fc24677be5494bf6da0b39a3a132f0f6eb3fe41e","src/client/client.rs":"00df0ef99e35d7b3b60c2da6aabee3418f2d35c45a0127ce64893331db040972","src/client/conn.rs":"1358a7b7b43588bd9863f2243ac78381d20018e530e3f8efc58fe4d7df239d04","src/client/conn/http1.rs":"9e4a9f2565f5a4bb8201d5a7321b89b6597b0a05f899da1326b72a9668639e15","src/client/conn/http2.rs":"95f2ae02b8297d48560049a5fd557d374fa6a325ac563996e88e2ae5fff0b4ec","src/client/connect/dns.rs":"98830a9163eae71cbf8d240c6e531ae98d6b51653c0d158fc1c5bddad4f7321e","src/client/connect/http.rs":"6d5b6e0d4f83ecfe66e79d1d4eb72597588af0ba70576faab2e658e3b781c09e","src/client/connect/mod.rs":"83ef7a4d8e8376bfd69321d4646ac439158f3d2c228acbc74dba208bfa0ae938","src/client/dispatch.rs":"39cac7daaf4b473c030d338e48c6fd4e4133742a06a00
47ea7153e56def3cbdc","src/client/mod.rs":"d5580cda5e7dc9c5240c72a9ea0005d34b5412c28067ab3fa2301ba4cf49d9fa","src/client/pool.rs":"584f473408059b230adc1a74e80795d27b402e5de16224017157ed06694f7eab","src/client/service.rs":"e61baf9c206da67265c4fefe48fc037d65d93eebfecca68e3dc2215896ef4bd0","src/client/tests.rs":"f7eb2d1dba18e1bd69990f59e61b73a0477db5cc4de68fd64bd7cd36a9406072","src/common/buf.rs":"c762dc2688828ffc88f0549ceddeef65e57d404c245118bcacf3dd8d402bc9cc","src/common/date.rs":"f9a1a63aa7e8f5d6f5800cd56e7e6182cf07e781089930afb529263c1d8adf14","src/common/drain.rs":"058bbcf26dfeb96d7646c69e03b5a5f363b3bcee9afe0a9fe30ea52a9eb995ff","src/common/exec.rs":"c9e24d6c624b9c237bcdea7c59c60c9b8539510ac053cad50e861567cea3e17b","src/common/io/mod.rs":"6f8e4518df7f24d81fc59b46a2deb61557e8d92700bdc62539fe0f034066fc89","src/common/io/rewind.rs":"a708a373d96e7b1444d8497d57f0fe1902456fda8eb3dc42ade3a8f305880625","src/common/lazy.rs":"5cee776d65f28282e556567b433bddb908947df6601218872496ba088c2a7d12","src/common/mod.rs":"363cbf3853ffe6c4258010b17e67babdb8f518fc3cad39dc6588e7ba75587405","src/common/sync_wrapper.rs":"76206c2a52eeb62cdba279d620a4aef52134c6ac782a9f61e741edc8b653cb50","src/common/task.rs":"5a7c2b9255ab023cceedb8e42bd26db7ba8f11498d2f14d4b23a798618cbc920","src/common/watch.rs":"eb6db13fbb266ec11200ffa1f5e7a9a132d7e6555373895efde1e2daea428e03","src/error.rs":"d3f3c8e2303c964904e84a4bf8b93ff6b036c8918bac2bd66edac5fd5967c7e3","src/ext.rs":"19a65a25be9b821ad4088583f712d4d7f388096ec9d69b33f40129c9945afe2c","src/ext/h1_reason_phrase.rs":"e1d18088f942b52dbee766d81e11a9caeadaf368ff12b58d53b97fc62d08876c","src/ffi/body.rs":"941fb8b79097e5a4eec0c611a1cd5db24bed3479f1a14cf754e33d19f6d25854","src/ffi/client.rs":"6b35700e9dec4a9cb40ad3155327bd6fe11165e0cef1874a3916cf96d8b0c7a6","src/ffi/error.rs":"de3d8c1eb3818b438ed28a9dea800dfdac47bf2dd21a7c3e5fc10cb331b6e39f","src/ffi/http_types.rs":"ae25e0fd07ec80e90d5b4f488ce972fe7858f009261fdf16a35b2bd0b1bbdad3","src/ffi/io.rs":"ab176e866c179a58
9d22e3aa7410612615e60117019d1e589f9732c36a2282da","src/ffi/macros.rs":"8e1fe58244295db1d19aceeb0e9a777fe484ccc20194fae88b54208e0cbeb515","src/ffi/mod.rs":"0e52ae3586c6a960ae68e862561aabcee690a1e23c6d5d1045fcdc3c74b7fc96","src/ffi/task.rs":"f348cdbe1f1d4e26b339cd9382bb739b0f37aaceb2aa85627b7fda0c6326de56","src/headers.rs":"4d76596bfc90f88fe8b48bb8d0552a215a20c452197ea982b37ba30fa496e007","src/lib.rs":"cff8e513cb2d9611ba30a7a7787fe5220b848819e9f46267a8fe2acaf465ec28","src/mock.rs":"8b455312be74af6c4a27b3beba85a9493c86d43097947c5aad8a5e9dc0dcdbb3","src/proto/h1/conn.rs":"ba7d5bb4875dbd11f622986034cab8eaa2a751235324bf7cf03bea20c66f9f00","src/proto/h1/decode.rs":"ac06e4fb3b0bf07907581949ad361e4ba05559fd8add253b90bd183cfb09e34f","src/proto/h1/dispatch.rs":"da3a986e8e0d255bedac48109a31921b3faf575c6821d5f0f60dd06a24900f75","src/proto/h1/encode.rs":"3a360356431ff3f011226cf81fb4eeb63cfe9ca25140e41e2e3907f6d64725f9","src/proto/h1/io.rs":"321d845a497eb4761dbd4eedb994ae9d6e5aca7faabf3b05e83eb35cb4ebf214","src/proto/h1/mod.rs":"61ec22d19567116aadc365ca424c958744b058b55d2f064b9a74ee88b126c7be","src/proto/h1/role.rs":"f672ed78abda4605cd27cc390ff16ce715093af0c4edba9bb221ea0aedddcfe4","src/proto/h2/client.rs":"5862ca7bc2847f58ed5f57464f8eb74abe3fe89afe4bd632e575a3c51b8a8744","src/proto/h2/mod.rs":"1f3f157aaef6d91b7af8abea7f76ab1c49ee2038b71027c83f83a2648786fafc","src/proto/h2/ping.rs":"1ea4daea2317a72958879a89baecdea02fb7ab238083606461400ed9e3df0c83","src/proto/h2/server.rs":"705f8ecea99dbf5fe74188ba4f5fa2ea22c252fc443eed51171a89f845dc729d","src/proto/mod.rs":"1a935a3da830131f848a6a64c049c559ce07e6b0012fd6e4002bb365f562ebeb","src/rt.rs":"1ef7d4bb3ad6637c6f37189e30d740c061a3c98ca008d266a99f865130909427","src/server/accept.rs":"07b9b520fbf7d8f472455412f359afdd7713fb408f88dbc050585249023fc075","src/server/conn.rs":"25e1b5dfbd74fd62b41800c49a9492f0f74bc20285c3783a6a2f8a1a9ffbb4cd","src/server/conn/http1.rs":"af364abcd92aa78e05af83c9c606de66447170dc73276896294c4257e903a047","src/server/co
nn/http2.rs":"73bb19450798d804bf1b76e8ac495f9cdbad3453c009756b66f06c1e47f2f664","src/server/mod.rs":"2375370854ac8ae5b80378aa1e29bc781c50aad1d1150c32515393a3316b745c","src/server/server.rs":"5294facdd9abae7a2bc9a7eb7ce1521437780a0f6505fee44e8aa8a2dd909e5e","src/server/server_stub.rs":"ab443f51ede637e0b0c08f36fbc143a34935102af2921edcc257660eeaad4537","src/server/shutdown.rs":"45bf03fc9314873572775fb4ea336230340108239c88f2cd2b435759ad8c693c","src/server/tcp.rs":"8ed09df1ccfb59d0c9ff2561acd0f3d5e2a03929c960f6923e208e08fb5f1806","src/service/http.rs":"ac930efc71bcecc904fa65a44af254501ce8abd6f7d36e591b907eee45e77979","src/service/make.rs":"ee11adc469796427c7b694551d0bbda69f732536079aee53e3ef7f9be4385b2d","src/service/mod.rs":"92c05f08a175fb847868a02e7aca96176df1237458d40a17a7a6aa377476df90","src/service/oneshot.rs":"3ac3f0c7c20fcc3790cef868ca7a70c87a36687ae28c44e0c9978d2e514e4b22","src/service/util.rs":"67f5b4373a4d705a7277cda717b5f3a3ebd00365446e2beb60c1d6193d071a85","src/upgrade.rs":"d6c68680ad74ebbd6ff74ea28b52704c14d4547c67520048f7f84cfe03d11f94"},"package":"a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9"} \ No newline at end of file diff --git a/.cargo-vendor/hyper-0.14.30/Cargo.toml b/.cargo-vendor/hyper-0.14.30/Cargo.toml new file mode 100644 index 0000000000..95bca19bb3 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/Cargo.toml @@ -0,0 +1,220 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "hyper" +version = "0.14.30" +authors = ["Sean McArthur "] +build = false +include = [ + "Cargo.toml", + "LICENSE", + "src/**/*", +] +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A fast and correct HTTP library." +homepage = "https://hyper.rs" +documentation = "https://docs.rs/hyper" +readme = "README.md" +keywords = [ + "http", + "hyper", + "hyperium", +] +categories = [ + "network-programming", + "web-programming::http-client", + "web-programming::http-server", +] +license = "MIT" +repository = "https://github.com/hyperium/hyper" + +[package.metadata.docs.rs] +features = [ + "ffi", + "full", +] +rustdoc-args = [ + "--cfg", + "docsrs", + "--cfg", + "hyper_unstable_ffi", +] + +[package.metadata.playground] +features = ["full"] + +[profile.bench] +codegen-units = 1 +incremental = false + +[profile.release] +codegen-units = 1 +incremental = false + +[lib] +name = "hyper" +path = "src/lib.rs" + +[dependencies.bytes] +version = "1" + +[dependencies.futures-channel] +version = "0.3" + +[dependencies.futures-core] +version = "0.3" +default-features = false + +[dependencies.futures-util] +version = "0.3" +default-features = false + +[dependencies.h2] +version = "0.3.24" +optional = true + +[dependencies.http] +version = "0.2" + +[dependencies.http-body] +version = "0.4" + +[dependencies.httparse] +version = "1.8" + +[dependencies.httpdate] +version = "1.0" + +[dependencies.itoa] +version = "1" + +[dependencies.libc] +version = "0.2" +optional = true + +[dependencies.pin-project-lite] +version = "0.2.4" + +[dependencies.socket2] +version = ">=0.4.7, <0.6.0" +features = ["all"] +optional = true + +[dependencies.tokio] +version = "1.27" +features = ["sync"] + +[dependencies.tower-service] +version = "0.3" + +[dependencies.tracing] +version = "0.1" +features = ["std"] +default-features = false + +[dependencies.want] +version = "0.3" + +[dev-dependencies.futures-util] +version = "0.3" 
+features = ["alloc"] +default-features = false + +[dev-dependencies.matches] +version = "0.1" + +[dev-dependencies.num_cpus] +version = "1.0" + +[dev-dependencies.pretty_env_logger] +version = "0.4" + +[dev-dependencies.serde] +version = "1.0" +features = ["derive"] + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.spmc] +version = "0.3" + +[dev-dependencies.tokio] +version = "1.27" +features = [ + "fs", + "macros", + "io-std", + "io-util", + "rt", + "rt-multi-thread", + "sync", + "time", + "test-util", +] + +[dev-dependencies.tokio-test] +version = "0.4" + +[dev-dependencies.tokio-util] +version = "0.7" +features = ["codec"] + +[dev-dependencies.tower] +version = "0.4" +features = [ + "make", + "util", +] +default-features = false + +[dev-dependencies.url] +version = "2.2" + +[features] +__internal_happy_eyeballs_tests = [] +backports = [] +client = [] +default = [] +deprecated = [] +ffi = ["libc"] +full = [ + "client", + "http1", + "http2", + "server", + "stream", + "runtime", +] +http1 = [] +http2 = ["h2"] +nightly = [] +runtime = [ + "tcp", + "tokio/rt", + "tokio/time", +] +server = [] +stream = [] +tcp = [ + "socket2", + "tokio/net", + "tokio/rt", + "tokio/time", +] + +[target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies.pnet_datalink] +version = "0.27.2" diff --git a/.cargo-vendor/hyper-0.14.30/LICENSE b/.cargo-vendor/hyper-0.14.30/LICENSE new file mode 100644 index 0000000000..bc1e966ed9 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014-2021 Sean McArthur + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the 
following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.cargo-vendor/hyper/src/body/aggregate.rs b/.cargo-vendor/hyper-0.14.30/src/body/aggregate.rs similarity index 100% rename from .cargo-vendor/hyper/src/body/aggregate.rs rename to .cargo-vendor/hyper-0.14.30/src/body/aggregate.rs diff --git a/.cargo-vendor/hyper/src/body/body.rs b/.cargo-vendor/hyper-0.14.30/src/body/body.rs similarity index 100% rename from .cargo-vendor/hyper/src/body/body.rs rename to .cargo-vendor/hyper-0.14.30/src/body/body.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/body/length.rs b/.cargo-vendor/hyper-0.14.30/src/body/length.rs new file mode 100644 index 0000000000..e2bbee8039 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/body/length.rs @@ -0,0 +1,123 @@ +use std::fmt; + +#[derive(Clone, Copy, PartialEq, Eq)] +pub(crate) struct DecodedLength(u64); + +#[cfg(any(feature = "http1", feature = "http2"))] +impl From> for DecodedLength { + fn from(len: Option) -> Self { + len.and_then(|len| { + // If the length is u64::MAX, oh well, just reported chunked. 
+ Self::checked_new(len).ok() + }) + .unwrap_or(DecodedLength::CHUNKED) + } +} + +#[cfg(any(feature = "http1", feature = "http2", test))] +const MAX_LEN: u64 = std::u64::MAX - 2; + +impl DecodedLength { + pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(::std::u64::MAX); + pub(crate) const CHUNKED: DecodedLength = DecodedLength(::std::u64::MAX - 1); + pub(crate) const ZERO: DecodedLength = DecodedLength(0); + + #[cfg(test)] + pub(crate) fn new(len: u64) -> Self { + debug_assert!(len <= MAX_LEN); + DecodedLength(len) + } + + /// Takes the length as a content-length without other checks. + /// + /// Should only be called if previously confirmed this isn't + /// CLOSE_DELIMITED or CHUNKED. + #[inline] + #[cfg(feature = "http1")] + pub(crate) fn danger_len(self) -> u64 { + debug_assert!(self.0 < Self::CHUNKED.0); + self.0 + } + + /// Converts to an Option representing a Known or Unknown length. + pub(crate) fn into_opt(self) -> Option { + match self { + DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None, + DecodedLength(known) => Some(known), + } + } + + /// Checks the `u64` is within the maximum allowed for content-length. + #[cfg(any(feature = "http1", feature = "http2"))] + pub(crate) fn checked_new(len: u64) -> Result { + use tracing::warn; + + if len <= MAX_LEN { + Ok(DecodedLength(len)) + } else { + warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN); + Err(crate::error::Parse::TooLarge) + } + } + + pub(crate) fn sub_if(&mut self, amt: u64) { + match *self { + DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (), + DecodedLength(ref mut known) => { + *known -= amt; + } + } + } + + /// Returns whether this represents an exact length. + /// + /// This includes 0, which of course is an exact known length. + /// + /// It would return false if "chunked" or otherwise size-unknown. 
+ #[cfg(feature = "http2")] + pub(crate) fn is_exact(&self) -> bool { + self.0 <= MAX_LEN + } +} + +impl fmt::Debug for DecodedLength { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"), + DecodedLength::CHUNKED => f.write_str("CHUNKED"), + DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(), + } + } +} + +impl fmt::Display for DecodedLength { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"), + DecodedLength::CHUNKED => f.write_str("chunked encoding"), + DecodedLength::ZERO => f.write_str("empty"), + DecodedLength(n) => write!(f, "content-length ({} bytes)", n), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sub_if_known() { + let mut len = DecodedLength::new(30); + len.sub_if(20); + + assert_eq!(len.0, 10); + } + + #[test] + fn sub_if_chunked() { + let mut len = DecodedLength::CHUNKED; + len.sub_if(20); + + assert_eq!(len, DecodedLength::CHUNKED); + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/body/mod.rs b/.cargo-vendor/hyper-0.14.30/src/body/mod.rs new file mode 100644 index 0000000000..109b1e6b72 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/body/mod.rs @@ -0,0 +1,67 @@ +//! Streaming bodies for Requests and Responses +//! +//! For both [Clients](crate::client) and [Servers](crate::server), requests and +//! responses use streaming bodies, instead of complete buffering. This +//! allows applications to not use memory they don't need, and allows exerting +//! back-pressure on connections by only reading when asked. +//! +//! There are two pieces to this in hyper: +//! +//! - **The [`HttpBody`](HttpBody) trait** describes all possible bodies. +//! hyper allows any body type that implements `HttpBody`, allowing +//! applications to have fine-grained control over their streaming. +//! 
- **The [`Body`](Body) concrete type**, which is an implementation of +//! `HttpBody`, and returned by hyper as a "receive stream" (so, for server +//! requests and client responses). It is also a decent default implementation +//! if you don't have very custom needs of your send streams. + +pub use bytes::{Buf, Bytes}; +pub use http_body::Body as HttpBody; +pub use http_body::SizeHint; + +#[cfg_attr(feature = "deprecated", allow(deprecated))] +pub use self::aggregate::aggregate; +pub use self::body::{Body, Sender}; +pub(crate) use self::length::DecodedLength; +#[cfg_attr(feature = "deprecated", allow(deprecated))] +pub use self::to_bytes::to_bytes; + +mod aggregate; +mod body; +mod length; +mod to_bytes; + +/// An optimization to try to take a full body if immediately available. +/// +/// This is currently limited to *only* `hyper::Body`s. +#[cfg(feature = "http1")] +pub(crate) fn take_full_data(body: &mut T) -> Option { + use std::any::{Any, TypeId}; + + // This static type check can be optimized at compile-time. + if TypeId::of::() == TypeId::of::() { + let mut full = (body as &mut dyn Any) + .downcast_mut::() + .expect("must be Body") + .take_full_data(); + // This second cast is required to make the type system happy. + // Without it, the compiler cannot reason that the type is actually + // `T::Data`. Oh wells. + // + // It's still a measurable win! 
+ (&mut full as &mut dyn Any) + .downcast_mut::>() + .expect("must be T::Data") + .take() + } else { + None + } +} + +fn _assert_send_sync() { + fn _assert_send() {} + fn _assert_sync() {} + + _assert_send::(); + _assert_sync::(); +} diff --git a/.cargo-vendor/hyper/src/body/to_bytes.rs b/.cargo-vendor/hyper-0.14.30/src/body/to_bytes.rs similarity index 100% rename from .cargo-vendor/hyper/src/body/to_bytes.rs rename to .cargo-vendor/hyper-0.14.30/src/body/to_bytes.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/cfg.rs b/.cargo-vendor/hyper-0.14.30/src/cfg.rs new file mode 100644 index 0000000000..71a5351d21 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/cfg.rs @@ -0,0 +1,44 @@ +macro_rules! cfg_feature { + ( + #![$meta:meta] + $($item:item)* + ) => { + $( + #[cfg($meta)] + #[cfg_attr(docsrs, doc(cfg($meta)))] + $item + )* + } +} + +macro_rules! cfg_proto { + ($($item:item)*) => { + cfg_feature! { + #![all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server"), + )] + $($item)* + } + } +} + +cfg_proto! { + macro_rules! cfg_client { + ($($item:item)*) => { + cfg_feature! { + #![feature = "client"] + $($item)* + } + } + } + + macro_rules! cfg_server { + ($($item:item)*) => { + cfg_feature! 
{ + #![feature = "server"] + $($item)* + } + } + } +} diff --git a/.cargo-vendor/hyper/src/client/client.rs b/.cargo-vendor/hyper-0.14.30/src/client/client.rs similarity index 100% rename from .cargo-vendor/hyper/src/client/client.rs rename to .cargo-vendor/hyper-0.14.30/src/client/client.rs diff --git a/.cargo-vendor/hyper/src/client/conn.rs b/.cargo-vendor/hyper-0.14.30/src/client/conn.rs similarity index 100% rename from .cargo-vendor/hyper/src/client/conn.rs rename to .cargo-vendor/hyper-0.14.30/src/client/conn.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/client/conn/http1.rs b/.cargo-vendor/hyper-0.14.30/src/client/conn/http1.rs new file mode 100644 index 0000000000..37eda04067 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/client/conn/http1.rs @@ -0,0 +1,541 @@ +//! HTTP/1 client connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use bytes::Bytes; +use http::{Request, Response}; +use httparse::ParserConfig; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::super::dispatch; +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::proto; +use crate::upgrade::Upgraded; + +type Dispatcher = + proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; + +/// The sender side of an established connection. +pub struct SendRequest { + dispatch: dispatch::Sender, Response>, +} + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. 
+ /// + /// For instance, if the `Connection` is used for an HTTP upgrade request, + /// it is possible the server sent back the first bytes of the new protocol + /// along with the response upgrade. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + _inner: (), +} + +/// A future that processes all HTTP state for the IO object. +/// +/// In most cases, this should just be spawned into an executor, so that it +/// can process incoming and outgoing messages, notice hangups, and the like. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Body + 'static, +{ + inner: Option>, +} + +impl Connection +where + T: AsyncRead + AsyncWrite + Send + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, +{ + /// Return the inner IO object, and additional information. + /// + /// Only works for HTTP/1 connections. HTTP/2 connections will panic. + pub fn into_parts(self) -> Parts { + let (io, read_buf, _) = self.inner.expect("already upgraded").into_inner(); + Parts { + io, + read_buf, + _inner: (), + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actually shutdown the IO object. Instead you + /// would take it back using `into_parts`. + /// + /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) + /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) + /// to work with this function; or use the `without_shutdown` wrapper. 
+ pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner + .as_mut() + .expect("algready upgraded") + .poll_without_shutdown(cx) + } + + /// Prevent shutdown of the underlying IO object at the end of service the request, + /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. + pub fn without_shutdown(self) -> impl Future>> { + let mut conn = Some(self); + futures_util::future::poll_fn(move |cx| -> Poll>> { + ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; + Poll::Ready(Ok(conn.take().unwrap().into_parts())) + }) + } +} + +/// A builder to configure an HTTP connection. +/// +/// After setting options, the builder is used to create a handshake future. +#[derive(Clone, Debug)] +pub struct Builder { + h09_responses: bool, + h1_parser_config: ParserConfig, + h1_writev: Option, + h1_title_case_headers: bool, + h1_preserve_header_case: bool, + #[cfg(feature = "ffi")] + h1_preserve_header_order: bool, + h1_read_buf_exact_size: Option, + h1_max_buf_size: Option, +} + +/// Returns a handshake future over some IO. +/// +/// This is a shortcut for `Builder::new().handshake(io)`. +/// See [`client::conn`](crate::client::conn) for more. +pub async fn handshake(io: T) -> crate::Result<(SendRequest, Connection)> +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, +{ + Builder::new().handshake(io).await +} + +// ===== impl SendRequest + +impl SendRequest { + /// Polls to determine whether this sender can be used yet for a request. + /// + /// If the associated connection is closed, this returns an Error. + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.dispatch.poll_ready(cx) + } + + /// Waits until the dispatcher is ready + /// + /// If the associated connection is closed, this returns an Error. 
+ pub async fn ready(&mut self) -> crate::Result<()> { + futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await + } + + /* + pub(super) async fn when_ready(self) -> crate::Result { + let mut me = Some(self); + future::poll_fn(move |cx| { + ready!(me.as_mut().unwrap().poll_ready(cx))?; + Poll::Ready(Ok(me.take().unwrap())) + }) + .await + } + + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } + */ +} + +impl SendRequest +where + B: Body + 'static, +{ + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Note + /// + /// There are some key differences in what automatic things the `Client` + /// does for you that will not be done here: + /// + /// - `Client` requires absolute-form `Uri`s, since the scheme and + /// authority are needed to connect. They aren't required here. + /// - Since the `Client` requires absolute-form `Uri`s, it can add + /// the `Host` header based on it. You must add a `Host` header yourself + /// before calling this method. + /// - Since absolute-form `Uri`s are not required, if received, they will + /// be serialized as-is. + pub fn send_request( + &mut self, + req: Request, + ) -> impl Future>> { + let sent = self.dispatch.send(req); + + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(resp)) => Ok(resp), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! 
+ Err(_canceled) => panic!("dispatch dropped without returning error"), + }, + Err(_req) => { + tracing::debug!("connection was not ready"); + + Err(crate::Error::new_canceled().with("connection was not ready")) + } + } + } + } + + /* + pub(super) fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + Unpin + where + B: Send, + { + match self.dispatch.try_send(req) { + Ok(rx) => { + Either::Left(rx.then(move |res| { + match res { + Ok(Ok(res)) => future::ok(res), + Ok(Err(err)) => future::err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_) => panic!("dispatch dropped without returning error"), + } + })) + } + Err(req) => { + tracing::debug!("connection was not ready"); + let err = crate::Error::new_canceled().with("connection was not ready"); + Either::Right(future::err((err, Some(req)))) + } + } + } + */ +} + +impl fmt::Debug for SendRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SendRequest").finish() + } +} + +// ===== impl Connection + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + B: Body + 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? 
{ + proto::Dispatched::Shutdown => Poll::Ready(Ok(())), + proto::Dispatched::Upgrade(pending) => match self.inner.take() { + Some(h1) => { + let (io, buf, _) = h1.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + Poll::Ready(Ok(())) + } + _ => { + drop(pending); + unreachable!("Upgraded twice"); + } + }, + } + } +} + +// ===== impl Builder + +impl Builder { + /// Creates a new connection builder. + #[inline] + pub fn new() -> Builder { + Builder { + h09_responses: false, + h1_writev: None, + h1_read_buf_exact_size: None, + h1_parser_config: Default::default(), + h1_title_case_headers: false, + h1_preserve_header_case: false, + #[cfg(feature = "ffi")] + h1_preserve_header_order: false, + h1_max_buf_size: None, + } + } + + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder { + self.h09_responses = enabled; + self + } + + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. 
+ /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn allow_spaces_after_header_name_in_responses(&mut self, enabled: bool) -> &mut Builder { + self.h1_parser_config + .allow_spaces_after_header_name_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections will accept obsolete line folding for + /// header values. + /// + /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when + /// parsing. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > A server that receives an obs-fold in a request message that is not + /// > within a message/http container MUST either reject the message by + /// > sending a 400 (Bad Request), preferably with a representation + /// > explaining that obsolete line folding is unacceptable, or replace + /// > each received obs-fold with one or more SP octets prior to + /// > interpreting the field value or forwarding the message downstream. + /// + /// > A proxy or gateway that receives an obs-fold in a response message + /// > that is not within a message/http container MUST either discard the + /// > message and replace it with a 502 (Bad Gateway) response, preferably + /// > with a representation explaining that unacceptable line folding was + /// > received, or replace each received obs-fold with one or more SP + /// > octets prior to interpreting the field value or forwarding the + /// > message downstream. + /// + /// > A user agent that receives an obs-fold in a response message that is + /// > not within a message/http container MUST replace each received + /// > obs-fold with one or more SP octets prior to interpreting the field + /// > value. + /// + /// Default is false. 
+ /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn allow_obsolete_multiline_headers_in_responses(&mut self, enabled: bool) -> &mut Builder { + self.h1_parser_config + .allow_obsolete_multiline_headers_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections will silently ignored malformed header lines. + /// + /// If this is enabled and and a header line does not start with a valid header + /// name, or does not include a colon at all, the line will be silently ignored + /// and no error will be reported. + /// + /// Default is false. + pub fn ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder { + self.h1_parser_config + .ignore_invalid_headers_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn writev(&mut self, enabled: bool) -> &mut Builder { + self.h1_writev = Some(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Default is false. + pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder { + self.h1_title_case_headers = enabled; + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. 
+ /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Default is false. + pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_case = enabled; + self + } + + /// Set whether to support preserving original header order. + /// + /// Currently, this will record the order in which headers are received, and store this + /// ordering in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Default is false. + #[cfg(feature = "ffi")] + pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_order = enabled; + self + } + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `max_buf_size` option. + /// + /// Default is an adaptive read buffer. + pub fn read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { + self.h1_read_buf_exact_size = sz; + self.h1_max_buf_size = None; + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + assert!( + max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, + "the max_buf_size cannot be smaller than the minimum that h1 specifies." + ); + + self.h1_max_buf_size = Some(max); + self.h1_read_buf_exact_size = None; + self + } + + /// Constructs a connection with the configured options and IO. + /// See [`client::conn`](crate::client::conn) for more. + /// + /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will + /// do nothing. 
+ pub fn handshake( + &self, + io: T, + ) -> impl Future, Connection)>> + where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, + { + let opts = self.clone(); + + async move { + tracing::trace!("client handshake HTTP/1"); + + let (tx, rx) = dispatch::channel(); + let mut conn = proto::Conn::new(io); + conn.set_h1_parser_config(opts.h1_parser_config); + if let Some(writev) = opts.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } + } + if opts.h1_title_case_headers { + conn.set_title_case_headers(); + } + if opts.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + #[cfg(feature = "ffi")] + if opts.h1_preserve_header_order { + conn.set_preserve_header_order(); + } + + if opts.h09_responses { + conn.set_h09_responses(); + } + + if let Some(sz) = opts.h1_read_buf_exact_size { + conn.set_read_buf_exact_size(sz); + } + if let Some(max) = opts.h1_max_buf_size { + conn.set_max_buf_size(max); + } + let cd = proto::h1::dispatch::Client::new(rx); + let proto = proto::h1::Dispatcher::new(cd, conn); + + Ok(( + SendRequest { dispatch: tx }, + Connection { inner: Some(proto) }, + )) + } + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/client/conn/http2.rs b/.cargo-vendor/hyper-0.14.30/src/client/conn/http2.rs new file mode 100644 index 0000000000..5697e9ee47 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/client/conn/http2.rs @@ -0,0 +1,420 @@ +//! 
HTTP/2 client connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::PhantomData; +use std::marker::Unpin; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; + +use http::{Request, Response}; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::super::dispatch; +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::common::exec::{BoxSendFuture, Exec}; +use crate::proto; +use crate::rt::Executor; + +/// The sender side of an established connection. +pub struct SendRequest { + dispatch: dispatch::UnboundedSender, Response>, +} + +impl Clone for SendRequest { + fn clone(&self) -> SendRequest { + SendRequest { + dispatch: self.dispatch.clone(), + } + } +} + +/// A future that processes all HTTP state for the IO object. +/// +/// In most cases, this should just be spawned into an executor, so that it +/// can process incoming and outgoing messages, notice hangups, and the like. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Body + 'static, +{ + inner: (PhantomData, proto::h2::ClientTask), +} + +/// A builder to configure an HTTP connection. +/// +/// After setting options, the builder is used to create a handshake future. +#[derive(Clone, Debug)] +pub struct Builder { + pub(super) exec: Exec, + h2_builder: proto::h2::client::Config, +} + +/// Returns a handshake future over some IO. +/// +/// This is a shortcut for `Builder::new().handshake(io)`. +/// See [`client::conn`](crate::client::conn) for more. 
+pub async fn handshake(exec: E, io: T) -> crate::Result<(SendRequest, Connection)> +where + E: Executor + Send + Sync + 'static, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, +{ + Builder::new(exec).handshake(io).await +} + +// ===== impl SendRequest + +impl SendRequest { + /// Polls to determine whether this sender can be used yet for a request. + /// + /// If the associated connection is closed, this returns an Error. + pub fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + if self.is_closed() { + Poll::Ready(Err(crate::Error::new_closed())) + } else { + Poll::Ready(Ok(())) + } + } + + /// Waits until the dispatcher is ready + /// + /// If the associated connection is closed, this returns an Error. + pub async fn ready(&mut self) -> crate::Result<()> { + futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await + } + + /* + pub(super) async fn when_ready(self) -> crate::Result { + let mut me = Some(self); + future::poll_fn(move |cx| { + ready!(me.as_mut().unwrap().poll_ready(cx))?; + Poll::Ready(Ok(me.take().unwrap())) + }) + .await + } + + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + */ + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } +} + +impl SendRequest +where + B: Body + 'static, +{ + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Note + /// + /// There are some key differences in what automatic things the `Client` + /// does for you that will not be done here: + /// + /// - `Client` requires absolute-form `Uri`s, since the scheme and + /// authority are needed to connect. They aren't required here. + /// - Since the `Client` requires absolute-form `Uri`s, it can add + /// the `Host` header based on it. You must add a `Host` header yourself + /// before calling this method. 
+ /// - Since absolute-form `Uri`s are not required, if received, they will + /// be serialized as-is. + pub fn send_request( + &mut self, + req: Request, + ) -> impl Future>> { + let sent = self.dispatch.send(req); + + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(resp)) => Ok(resp), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_canceled) => panic!("dispatch dropped without returning error"), + }, + Err(_req) => { + tracing::debug!("connection was not ready"); + + Err(crate::Error::new_canceled().with("connection was not ready")) + } + } + } + } + + /* + pub(super) fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + Unpin + where + B: Send, + { + match self.dispatch.try_send(req) { + Ok(rx) => { + Either::Left(rx.then(move |res| { + match res { + Ok(Ok(res)) => future::ok(res), + Ok(Err(err)) => future::err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_) => panic!("dispatch dropped without returning error"), + } + })) + } + Err(req) => { + tracing::debug!("connection was not ready"); + let err = crate::Error::new_canceled().with("connection was not ready"); + Either::Right(future::err((err, Some(req)))) + } + } + } + */ +} + +impl fmt::Debug for SendRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SendRequest").finish() + } +} + +// ===== impl Connection + +impl Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Unpin + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + /// Returns whether the [extended CONNECT protocol][1] is enabled or not. + /// + /// This setting is configured by the server peer by sending the + /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value received from the + /// remote. 
+ /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 + pub fn is_extended_connect_protocol_enabled(&self) -> bool { + self.inner.1.is_extended_connect_protocol_enabled() + } +} + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + B: Body + 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.inner.1).poll(cx))? { + proto::Dispatched::Shutdown => Poll::Ready(Ok(())), + #[cfg(feature = "http1")] + proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"), + } + } +} + +// ===== impl Builder + +impl Builder { + /// Creates a new connection builder. + #[inline] + pub fn new(exec: E) -> Builder + where + E: Executor + Send + Sync + 'static, + { + use std::sync::Arc; + Builder { + exec: Exec::Executor(Arc::new(exec)), + h2_builder: Default::default(), + } + } + + /// Provide an executor to execute background HTTP2 tasks. + pub fn executor(&mut self, exec: E) -> &mut Builder + where + E: Executor + Send + Sync + 'static, + { + self.exec = Exec::Executor(Arc::new(exec)); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. 
+ /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_stream_window_size = sz; + } + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_conn_window_size = sz; + } + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `initial_stream_window_size` and + /// `initial_connection_window_size`. + pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { + use proto::h2::SPEC_WINDOW_SIZE; + + self.h2_builder.adaptive_window = enabled; + if enabled { + self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; + self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; + } + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.max_frame_size = sz; + } + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + #[cfg(feature = "runtime")] + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { + self.h2_builder.keep_alive_interval = interval.into(); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. 
+ /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + #[cfg(feature = "runtime")] + pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout = timeout; + self + } + + /// Sets whether HTTP2 keep-alive should apply while the connection is idle. + /// + /// If disabled, keep-alive pings are only sent while there are open + /// request/responses streams. If enabled, pings are also sent when no + /// streams are active. Does nothing if `keep_alive_interval` is + /// disabled. + /// + /// Default is `false`. + #[cfg(feature = "runtime")] + pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { + self.h2_builder.keep_alive_while_idle = enabled; + self + } + + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. + /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_concurrent_reset_streams = Some(max); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently 1MB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + + /// Constructs a connection with the configured options and IO. + /// See [`client::conn`](crate::client::conn) for more. + /// + /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will + /// do nothing. 
+ pub fn handshake( + &self, + io: T, + ) -> impl Future, Connection)>> + where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, + { + let opts = self.clone(); + + async move { + tracing::trace!("client handshake HTTP/1"); + + let (tx, rx) = dispatch::channel(); + let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec).await?; + Ok(( + SendRequest { + dispatch: tx.unbound(), + }, + Connection { + inner: (PhantomData, h2), + }, + )) + } + } +} diff --git a/.cargo-vendor/hyper/src/client/connect/dns.rs b/.cargo-vendor/hyper-0.14.30/src/client/connect/dns.rs similarity index 100% rename from .cargo-vendor/hyper/src/client/connect/dns.rs rename to .cargo-vendor/hyper-0.14.30/src/client/connect/dns.rs diff --git a/.cargo-vendor/hyper/src/client/connect/http.rs b/.cargo-vendor/hyper-0.14.30/src/client/connect/http.rs similarity index 100% rename from .cargo-vendor/hyper/src/client/connect/http.rs rename to .cargo-vendor/hyper-0.14.30/src/client/connect/http.rs diff --git a/.cargo-vendor/hyper/src/client/connect/mod.rs b/.cargo-vendor/hyper-0.14.30/src/client/connect/mod.rs similarity index 100% rename from .cargo-vendor/hyper/src/client/connect/mod.rs rename to .cargo-vendor/hyper-0.14.30/src/client/connect/mod.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/client/dispatch.rs b/.cargo-vendor/hyper-0.14.30/src/client/dispatch.rs new file mode 100644 index 0000000000..a1a93ea964 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/client/dispatch.rs @@ -0,0 +1,442 @@ +#[cfg(feature = "http2")] +use std::future::Future; +use std::marker::Unpin; +#[cfg(feature = "http2")] +use std::pin::Pin; +use std::task::{Context, Poll}; + +use futures_util::FutureExt; +use tokio::sync::{mpsc, oneshot}; + +pub(crate) type RetryPromise = oneshot::Receiver)>>; +pub(crate) type Promise = oneshot::Receiver>; + +pub(crate) fn channel() -> (Sender, Receiver) { + let (tx, rx) = mpsc::unbounded_channel(); + let 
(giver, taker) = want::new(); + let tx = Sender { + buffered_once: false, + giver, + inner: tx, + }; + let rx = Receiver { inner: rx, taker }; + (tx, rx) +} + +/// A bounded sender of requests and callbacks for when responses are ready. +/// +/// While the inner sender is unbounded, the Giver is used to determine +/// if the Receiver is ready for another request. +pub(crate) struct Sender { + /// One message is always allowed, even if the Receiver hasn't asked + /// for it yet. This boolean keeps track of whether we've sent one + /// without notice. + buffered_once: bool, + /// The Giver helps watch that the the Receiver side has been polled + /// when the queue is empty. This helps us know when a request and + /// response have been fully processed, and a connection is ready + /// for more. + giver: want::Giver, + /// Actually bounded by the Giver, plus `buffered_once`. + inner: mpsc::UnboundedSender>, +} + +/// An unbounded version. +/// +/// Cannot poll the Giver, but can still use it to determine if the Receiver +/// has been dropped. However, this version can be cloned. +#[cfg(feature = "http2")] +pub(crate) struct UnboundedSender { + /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked. + giver: want::SharedGiver, + inner: mpsc::UnboundedSender>, +} + +impl Sender { + pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.giver + .poll_want(cx) + .map_err(|_| crate::Error::new_closed()) + } + + pub(crate) fn is_ready(&self) -> bool { + self.giver.is_wanting() + } + + pub(crate) fn is_closed(&self) -> bool { + self.giver.is_canceled() + } + + fn can_send(&mut self) -> bool { + if self.giver.give() || !self.buffered_once { + // If the receiver is ready *now*, then of course we can send. + // + // If the receiver isn't ready yet, but we don't have anything + // in the channel yet, then allow one message. 
+ self.buffered_once = true; + true + } else { + false + } + } + + pub(crate) fn try_send(&mut self, val: T) -> Result, T> { + if !self.can_send() { + return Err(val); + } + let (tx, rx) = oneshot::channel(); + self.inner + .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) + .map(move |_| rx) + .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) + } + + pub(crate) fn send(&mut self, val: T) -> Result, T> { + if !self.can_send() { + return Err(val); + } + let (tx, rx) = oneshot::channel(); + self.inner + .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) + .map(move |_| rx) + .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) + } + + #[cfg(feature = "http2")] + pub(crate) fn unbound(self) -> UnboundedSender { + UnboundedSender { + giver: self.giver.shared(), + inner: self.inner, + } + } +} + +#[cfg(feature = "http2")] +impl UnboundedSender { + pub(crate) fn is_ready(&self) -> bool { + !self.giver.is_canceled() + } + + pub(crate) fn is_closed(&self) -> bool { + self.giver.is_canceled() + } + + pub(crate) fn try_send(&mut self, val: T) -> Result, T> { + let (tx, rx) = oneshot::channel(); + self.inner + .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) + .map(move |_| rx) + .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) + } + + #[cfg(all(feature = "backports", feature = "http2"))] + pub(crate) fn send(&mut self, val: T) -> Result, T> { + let (tx, rx) = oneshot::channel(); + self.inner + .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) + .map(move |_| rx) + .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) + } +} + +#[cfg(feature = "http2")] +impl Clone for UnboundedSender { + fn clone(&self) -> Self { + UnboundedSender { + giver: self.giver.clone(), + inner: self.inner.clone(), + } + } +} + +pub(crate) struct Receiver { + inner: mpsc::UnboundedReceiver>, + taker: want::Taker, +} + +impl Receiver { + pub(crate) fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll)>> { + match 
self.inner.poll_recv(cx) { + Poll::Ready(item) => { + Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped"))) + } + Poll::Pending => { + self.taker.want(); + Poll::Pending + } + } + } + + #[cfg(feature = "http1")] + pub(crate) fn close(&mut self) { + self.taker.cancel(); + self.inner.close(); + } + + #[cfg(feature = "http1")] + pub(crate) fn try_recv(&mut self) -> Option<(T, Callback)> { + match self.inner.recv().now_or_never() { + Some(Some(mut env)) => env.0.take(), + _ => None, + } + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + // Notify the giver about the closure first, before dropping + // the mpsc::Receiver. + self.taker.cancel(); + } +} + +struct Envelope(Option<(T, Callback)>); + +impl Drop for Envelope { + fn drop(&mut self) { + if let Some((val, cb)) = self.0.take() { + cb.send(Err(( + crate::Error::new_canceled().with("connection closed"), + Some(val), + ))); + } + } +} + +pub(crate) enum Callback { + Retry(Option)>>>), + NoRetry(Option>>), +} + +impl Drop for Callback { + fn drop(&mut self) { + // FIXME(nox): What errors do we want here? 
+ let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() { + "user code panicked" + } else { + "runtime dropped the dispatch task" + }); + + match self { + Callback::Retry(tx) => { + if let Some(tx) = tx.take() { + let _ = tx.send(Err((error, None))); + } + } + Callback::NoRetry(tx) => { + if let Some(tx) = tx.take() { + let _ = tx.send(Err(error)); + } + } + } + } +} + +impl Callback { + #[cfg(feature = "http2")] + pub(crate) fn is_canceled(&self) -> bool { + match *self { + Callback::Retry(Some(ref tx)) => tx.is_closed(), + Callback::NoRetry(Some(ref tx)) => tx.is_closed(), + _ => unreachable!(), + } + } + + pub(crate) fn poll_canceled(&mut self, cx: &mut Context<'_>) -> Poll<()> { + match *self { + Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx), + Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx), + _ => unreachable!(), + } + } + + pub(crate) fn send(mut self, val: Result)>) { + match self { + Callback::Retry(ref mut tx) => { + let _ = tx.take().unwrap().send(val); + } + Callback::NoRetry(ref mut tx) => { + let _ = tx.take().unwrap().send(val.map_err(|e| e.0)); + } + } + } + + #[cfg(feature = "http2")] + pub(crate) async fn send_when( + self, + mut when: impl Future)>> + Unpin, + ) { + use futures_util::future; + use tracing::trace; + + let mut cb = Some(self); + + // "select" on this callback being canceled, and the future completing + future::poll_fn(move |cx| { + match Pin::new(&mut when).poll(cx) { + Poll::Ready(Ok(res)) => { + cb.take().expect("polled after complete").send(Ok(res)); + Poll::Ready(()) + } + Poll::Pending => { + // check if the callback is canceled + ready!(cb.as_mut().unwrap().poll_canceled(cx)); + trace!("send_when canceled"); + Poll::Ready(()) + } + Poll::Ready(Err(err)) => { + cb.take().expect("polled after complete").send(Err(err)); + Poll::Ready(()) + } + } + }) + .await + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "nightly")] + extern crate test; + + use std::future::Future; + use 
std::pin::Pin; + use std::task::{Context, Poll}; + + use super::{channel, Callback, Receiver}; + + #[derive(Debug)] + struct Custom(i32); + + impl Future for Receiver { + type Output = Option<(T, Callback)>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.poll_recv(cx) + } + } + + /// Helper to check if the future is ready after polling once. + struct PollOnce<'a, F>(&'a mut F); + + impl Future for PollOnce<'_, F> + where + F: Future + Unpin, + { + type Output = Option<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match Pin::new(&mut self.0).poll(cx) { + Poll::Ready(_) => Poll::Ready(Some(())), + Poll::Pending => Poll::Ready(None), + } + } + } + + #[tokio::test] + async fn drop_receiver_sends_cancel_errors() { + let _ = pretty_env_logger::try_init(); + + let (mut tx, mut rx) = channel::(); + + // must poll once for try_send to succeed + assert!(PollOnce(&mut rx).await.is_none(), "rx empty"); + + let promise = tx.try_send(Custom(43)).unwrap(); + drop(rx); + + let fulfilled = promise.await; + let err = fulfilled + .expect("fulfilled") + .expect_err("promise should error"); + match (err.0.kind(), err.1) { + (&crate::error::Kind::Canceled, Some(_)) => (), + e => panic!("expected Error::Cancel(_), found {:?}", e), + } + } + + #[tokio::test] + async fn sender_checks_for_want_on_send() { + let (mut tx, mut rx) = channel::(); + + // one is allowed to buffer, second is rejected + let _ = tx.try_send(Custom(1)).expect("1 buffered"); + tx.try_send(Custom(2)).expect_err("2 not ready"); + + assert!(PollOnce(&mut rx).await.is_some(), "rx once"); + + // Even though 1 has been popped, only 1 could be buffered for the + // lifetime of the channel. 
+ tx.try_send(Custom(2)).expect_err("2 still not ready"); + + assert!(PollOnce(&mut rx).await.is_none(), "rx empty"); + + let _ = tx.try_send(Custom(2)).expect("2 ready"); + } + + #[cfg(feature = "http2")] + #[test] + fn unbounded_sender_doesnt_bound_on_want() { + let (tx, rx) = channel::(); + let mut tx = tx.unbound(); + + let _ = tx.try_send(Custom(1)).unwrap(); + let _ = tx.try_send(Custom(2)).unwrap(); + let _ = tx.try_send(Custom(3)).unwrap(); + + drop(rx); + + let _ = tx.try_send(Custom(4)).unwrap_err(); + } + + #[cfg(feature = "nightly")] + #[bench] + fn giver_queue_throughput(b: &mut test::Bencher) { + use crate::{Body, Request, Response}; + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + let (mut tx, mut rx) = channel::, Response>(); + + b.iter(move || { + let _ = tx.send(Request::default()).unwrap(); + rt.block_on(async { + loop { + let poll_once = PollOnce(&mut rx); + let opt = poll_once.await; + if opt.is_none() { + break; + } + } + }); + }) + } + + #[cfg(feature = "nightly")] + #[bench] + fn giver_queue_not_ready(b: &mut test::Bencher) { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + let (_tx, mut rx) = channel::(); + b.iter(move || { + rt.block_on(async { + let poll_once = PollOnce(&mut rx); + assert!(poll_once.await.is_none()); + }); + }) + } + + #[cfg(feature = "nightly")] + #[bench] + fn giver_queue_cancel(b: &mut test::Bencher) { + let (_tx, mut rx) = channel::(); + + b.iter(move || { + rx.taker.cancel(); + }) + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/client/mod.rs b/.cargo-vendor/hyper-0.14.30/src/client/mod.rs new file mode 100644 index 0000000000..734bda8819 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/client/mod.rs @@ -0,0 +1,68 @@ +//! HTTP Client +//! +//! There are two levels of APIs provided for construct HTTP clients: +//! +//! - The higher-level [`Client`](Client) type. +//! - The lower-level [`conn`](conn) module. +//! 
+//! # Client +//! +//! The [`Client`](Client) is the main way to send HTTP requests to a server. +//! The default `Client` provides these things on top of the lower-level API: +//! +//! - A default **connector**, able to resolve hostnames and connect to +//! destinations over plain-text TCP. +//! - A **pool** of existing connections, allowing better performance when +//! making multiple requests to the same hostname. +//! - Automatic setting of the `Host` header, based on the request `Uri`. +//! - Automatic request **retries** when a pooled connection is closed by the +//! server before any bytes have been written. +//! +//! Many of these features can configured, by making use of +//! [`Client::builder`](Client::builder). +//! +//! ## Example +//! +//! For a small example program simply fetching a URL, take a look at the +//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs). +//! +//! ``` +//! # #[cfg(all(feature = "tcp", feature = "client", any(feature = "http1", feature = "http2")))] +//! # async fn fetch_httpbin() -> hyper::Result<()> { +//! use hyper::{body::HttpBody as _, Client, Uri}; +//! +//! let client = Client::new(); +//! +//! // Make a GET /ip to 'http://httpbin.org' +//! let res = client.get(Uri::from_static("http://httpbin.org/ip")).await?; +//! +//! // And then, if the request gets a response... +//! println!("status: {}", res.status()); +//! +//! // Concatenate the body stream into a single buffer... +//! let buf = hyper::body::to_bytes(res).await?; +//! +//! println!("body: {:?}", buf); +//! # Ok(()) +//! # } +//! # fn main () {} +//! ``` + +#[cfg(feature = "tcp")] +pub use self::connect::HttpConnector; + +pub mod connect; +#[cfg(all(test, feature = "runtime"))] +mod tests; + +cfg_feature! 
{ + #![any(feature = "http1", feature = "http2")] + + pub use self::client::{Builder, Client, ResponseFuture}; + + mod client; + pub mod conn; + pub(super) mod dispatch; + mod pool; + pub mod service; +} diff --git a/.cargo-vendor/hyper/src/client/pool.rs b/.cargo-vendor/hyper-0.14.30/src/client/pool.rs similarity index 100% rename from .cargo-vendor/hyper/src/client/pool.rs rename to .cargo-vendor/hyper-0.14.30/src/client/pool.rs diff --git a/.cargo-vendor/hyper/src/client/service.rs b/.cargo-vendor/hyper-0.14.30/src/client/service.rs similarity index 100% rename from .cargo-vendor/hyper/src/client/service.rs rename to .cargo-vendor/hyper-0.14.30/src/client/service.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/client/tests.rs b/.cargo-vendor/hyper-0.14.30/src/client/tests.rs new file mode 100644 index 0000000000..0a281a637d --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/client/tests.rs @@ -0,0 +1,286 @@ +use std::io; + +use futures_util::future; +use tokio::net::TcpStream; + +use super::Client; + +#[tokio::test] +async fn client_connect_uri_argument() { + let connector = tower::service_fn(|dst: http::Uri| { + assert_eq!(dst.scheme(), Some(&http::uri::Scheme::HTTP)); + assert_eq!(dst.host(), Some("example.local")); + assert_eq!(dst.port(), None); + assert_eq!(dst.path(), "/", "path should be removed"); + + future::err::(io::Error::new(io::ErrorKind::Other, "expect me")) + }); + + let client = Client::builder().build::<_, crate::Body>(connector); + let _ = client + .get("http://example.local/and/a/path".parse().unwrap()) + .await + .expect_err("response should fail"); +} + +/* +// FIXME: re-implement tests with `async/await` +#[test] +fn retryable_request() { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + let sock1 = connector.mock("http://mock.local"); + let sock2 = connector.mock("http://mock.local"); + + let client = Client::builder() + .build::<_, 
crate::Body>(connector); + + client.pool.no_timer(); + + { + + let req = Request::builder() + .uri("http://mock.local/a") + .body(Default::default()) + .unwrap(); + let res1 = client.request(req); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); + rt.block_on(res1.join(srv1)).expect("res1"); + } + drop(sock1); + + let req = Request::builder() + .uri("http://mock.local/b") + .body(Default::default()) + .unwrap(); + let res2 = client.request(req) + .map(|res| { + assert_eq!(res.status().as_u16(), 222); + }); + let srv2 = poll_fn(|| { + try_ready!(sock2.read(&mut [0u8; 512])); + try_ready!(sock2.write(b"HTTP/1.1 222 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e)); + + rt.block_on(res2.join(srv2)).expect("res2"); +} + +#[test] +fn conn_reset_after_write() { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + let sock1 = connector.mock("http://mock.local"); + + let client = Client::builder() + .build::<_, crate::Body>(connector); + + client.pool.no_timer(); + + { + let req = Request::builder() + .uri("http://mock.local/a") + .body(Default::default()) + .unwrap(); + let res1 = client.request(req); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); + rt.block_on(res1.join(srv1)).expect("res1"); + } + + let req = Request::builder() + .uri("http://mock.local/a") + .body(Default::default()) + .unwrap(); + let res2 = client.request(req); + let mut sock1 = Some(sock1); + let srv2 = poll_fn(|| { + // We purposefully keep the socket open until 
the client + // has written the second request, and THEN disconnect. + // + // Not because we expect servers to be jerks, but to trigger + // state where we write on an assumedly good connection, and + // only reset the close AFTER we wrote bytes. + try_ready!(sock1.as_mut().unwrap().read(&mut [0u8; 512])); + sock1.take(); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e)); + let err = rt.block_on(res2.join(srv2)).expect_err("res2"); + assert!(err.is_incomplete_message(), "{:?}", err); +} + +#[test] +fn checkout_win_allows_connect_future_to_be_pooled() { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + + let (tx, rx) = oneshot::channel::<()>(); + let sock1 = connector.mock("http://mock.local"); + let sock2 = connector.mock_fut("http://mock.local", rx); + + let client = Client::builder() + .build::<_, crate::Body>(connector); + + client.pool.no_timer(); + + let uri = "http://mock.local/a".parse::().expect("uri parse"); + + // First request just sets us up to have a connection able to be put + // back in the pool. *However*, it doesn't insert immediately. The + // body has 1 pending byte, and we will only drain in request 2, once + // the connect future has been started. + let mut body = { + let res1 = client.get(uri.clone()) + .map(|res| res.into_body().concat2()); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + // Chunked is used so as to force 2 body reads. + try_ready!(sock1.write(b"\ + HTTP/1.1 200 OK\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + 1\r\nx\r\n\ + 0\r\n\r\n\ + ")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); + + rt.block_on(res1.join(srv1)).expect("res1").0 + }; + + + // The second request triggers the only mocked connect future, but then + // the drained body allows the first socket to go back to the pool, + // "winning" the checkout race. 
+ { + let res2 = client.get(uri.clone()); + let drain = poll_fn(move || { + body.poll() + }); + let srv2 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nx")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e)); + + rt.block_on(res2.join(drain).join(srv2)).expect("res2"); + } + + // "Release" the mocked connect future, and let the runtime spin once so + // it's all setup... + { + let mut tx = Some(tx); + let client = &client; + let key = client.pool.h1_key("http://mock.local"); + let mut tick_cnt = 0; + let fut = poll_fn(move || { + tx.take(); + + if client.pool.idle_count(&key) == 0 { + tick_cnt += 1; + assert!(tick_cnt < 10, "ticked too many times waiting for idle"); + trace!("no idle yet; tick count: {}", tick_cnt); + ::futures::task::current().notify(); + Ok(Async::NotReady) + } else { + Ok::<_, ()>(Async::Ready(())) + } + }); + rt.block_on(fut).unwrap(); + } + + // Third request just tests out that the "loser" connection was pooled. If + // it isn't, this will panic since the MockConnector doesn't have any more + // mocks to give out. 
+ { + let res3 = client.get(uri); + let srv3 = poll_fn(|| { + try_ready!(sock2.read(&mut [0u8; 512])); + try_ready!(sock2.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv3 poll_fn error: {}", e)); + + rt.block_on(res3.join(srv3)).expect("res3"); + } +} + +#[cfg(feature = "nightly")] +#[bench] +fn bench_http1_get_0b(b: &mut test::Bencher) { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + + let client = Client::builder() + .build::<_, crate::Body>(connector.clone()); + + client.pool.no_timer(); + + let uri = Uri::from_static("http://mock.local/a"); + + b.iter(move || { + let sock1 = connector.mock("http://mock.local"); + let res1 = client + .get(uri.clone()) + .and_then(|res| { + res.into_body().for_each(|_| Ok(())) + }); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); + rt.block_on(res1.join(srv1)).expect("res1"); + }); +} + +#[cfg(feature = "nightly")] +#[bench] +fn bench_http1_get_10b(b: &mut test::Bencher) { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + + let client = Client::builder() + .build::<_, crate::Body>(connector.clone()); + + client.pool.no_timer(); + + let uri = Uri::from_static("http://mock.local/a"); + + b.iter(move || { + let sock1 = connector.mock("http://mock.local"); + let res1 = client + .get(uri.clone()) + .and_then(|res| { + res.into_body().for_each(|_| Ok(())) + }); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n0123456789")); + Ok(Async::Ready(())) + }).map_err(|e: std::io::Error| panic!("srv1 poll_fn 
error: {}", e)); + rt.block_on(res1.join(srv1)).expect("res1"); + }); +} +*/ diff --git a/.cargo-vendor/hyper-0.14.30/src/common/buf.rs b/.cargo-vendor/hyper-0.14.30/src/common/buf.rs new file mode 100644 index 0000000000..64e9333ead --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/common/buf.rs @@ -0,0 +1,151 @@ +use std::collections::VecDeque; +use std::io::IoSlice; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; + +pub(crate) struct BufList { + bufs: VecDeque, +} + +impl BufList { + pub(crate) fn new() -> BufList { + BufList { + bufs: VecDeque::new(), + } + } + + #[inline] + pub(crate) fn push(&mut self, buf: T) { + debug_assert!(buf.has_remaining()); + self.bufs.push_back(buf); + } + + #[inline] + #[cfg(feature = "http1")] + pub(crate) fn bufs_cnt(&self) -> usize { + self.bufs.len() + } +} + +impl Buf for BufList { + #[inline] + fn remaining(&self) -> usize { + self.bufs.iter().map(|buf| buf.remaining()).sum() + } + + #[inline] + fn chunk(&self) -> &[u8] { + self.bufs.front().map(Buf::chunk).unwrap_or_default() + } + + #[inline] + fn advance(&mut self, mut cnt: usize) { + while cnt > 0 { + { + let front = &mut self.bufs[0]; + let rem = front.remaining(); + if rem > cnt { + front.advance(cnt); + return; + } else { + front.advance(rem); + cnt -= rem; + } + } + self.bufs.pop_front(); + } + } + + #[inline] + fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { + if dst.is_empty() { + return 0; + } + let mut vecs = 0; + for buf in &self.bufs { + vecs += buf.chunks_vectored(&mut dst[vecs..]); + if vecs == dst.len() { + break; + } + } + vecs + } + + #[inline] + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + // Our inner buffer may have an optimized version of copy_to_bytes, and if the whole + // request can be fulfilled by the front buffer, we can take advantage. 
+ match self.bufs.front_mut() { + Some(front) if front.remaining() == len => { + let b = front.copy_to_bytes(len); + self.bufs.pop_front(); + b + } + Some(front) if front.remaining() > len => front.copy_to_bytes(len), + _ => { + assert!(len <= self.remaining(), "`len` greater than remaining"); + let mut bm = BytesMut::with_capacity(len); + bm.put(self.take(len)); + bm.freeze() + } + } + } +} + +#[cfg(test)] +mod tests { + use std::ptr; + + use super::*; + + fn hello_world_buf() -> BufList { + BufList { + bufs: vec![Bytes::from("Hello"), Bytes::from(" "), Bytes::from("World")].into(), + } + } + + #[test] + fn to_bytes_shorter() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(4); + assert_eq!(start, "Hell"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b"o"); + assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr())); + assert_eq!(bufs.remaining(), 7); + } + + #[test] + fn to_bytes_eq() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(5); + assert_eq!(start, "Hello"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b" "); + assert_eq!(bufs.remaining(), 6); + } + + #[test] + fn to_bytes_longer() { + let mut bufs = hello_world_buf(); + let start = bufs.copy_to_bytes(7); + assert_eq!(start, "Hello W"); + assert_eq!(bufs.remaining(), 4); + } + + #[test] + fn one_long_buf_to_bytes() { + let mut buf = BufList::new(); + buf.push(b"Hello World" as &[_]); + assert_eq!(buf.copy_to_bytes(5), "Hello"); + assert_eq!(buf.chunk(), b" World"); + } + + #[test] + #[should_panic(expected = "`len` greater than remaining")] + fn buf_to_bytes_too_many() { + hello_world_buf().copy_to_bytes(42); + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/common/date.rs b/.cargo-vendor/hyper-0.14.30/src/common/date.rs new file mode 100644 index 0000000000..a436fc07c0 --- /dev/null +++ 
b/.cargo-vendor/hyper-0.14.30/src/common/date.rs @@ -0,0 +1,124 @@ +use std::cell::RefCell; +use std::fmt::{self, Write}; +use std::str; +use std::time::{Duration, SystemTime}; + +#[cfg(feature = "http2")] +use http::header::HeaderValue; +use httpdate::HttpDate; + +// "Sun, 06 Nov 1994 08:49:37 GMT".len() +pub(crate) const DATE_VALUE_LENGTH: usize = 29; + +#[cfg(feature = "http1")] +pub(crate) fn extend(dst: &mut Vec) { + CACHED.with(|cache| { + dst.extend_from_slice(cache.borrow().buffer()); + }) +} + +#[cfg(feature = "http1")] +pub(crate) fn update() { + CACHED.with(|cache| { + cache.borrow_mut().check(); + }) +} + +#[cfg(feature = "http2")] +pub(crate) fn update_and_header_value() -> HeaderValue { + CACHED.with(|cache| { + let mut cache = cache.borrow_mut(); + cache.check(); + HeaderValue::from_bytes(cache.buffer()).expect("Date format should be valid HeaderValue") + }) +} + +struct CachedDate { + bytes: [u8; DATE_VALUE_LENGTH], + pos: usize, + next_update: SystemTime, +} + +thread_local!(static CACHED: RefCell = RefCell::new(CachedDate::new())); + +impl CachedDate { + fn new() -> Self { + let mut cache = CachedDate { + bytes: [0; DATE_VALUE_LENGTH], + pos: 0, + next_update: SystemTime::now(), + }; + cache.update(cache.next_update); + cache + } + + fn buffer(&self) -> &[u8] { + &self.bytes[..] 
+ } + + fn check(&mut self) { + let now = SystemTime::now(); + if now > self.next_update { + self.update(now); + } + } + + fn update(&mut self, now: SystemTime) { + self.render(now); + self.next_update = now + Duration::new(1, 0); + } + + fn render(&mut self, now: SystemTime) { + self.pos = 0; + let _ = write!(self, "{}", HttpDate::from(now)); + debug_assert!(self.pos == DATE_VALUE_LENGTH); + } +} + +impl fmt::Write for CachedDate { + fn write_str(&mut self, s: &str) -> fmt::Result { + let len = s.len(); + self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes()); + self.pos += len; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(feature = "nightly")] + use test::Bencher; + + #[test] + fn test_date_len() { + assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len()); + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_date_check(b: &mut Bencher) { + let mut date = CachedDate::new(); + // cache the first update + date.check(); + + b.iter(|| { + date.check(); + }); + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_date_render(b: &mut Bencher) { + let mut date = CachedDate::new(); + let now = SystemTime::now(); + date.render(now); + b.bytes = date.buffer().len() as u64; + + b.iter(|| { + date.render(now); + test::black_box(&date); + }); + } +} diff --git a/.cargo-vendor/hyper/src/common/drain.rs b/.cargo-vendor/hyper-0.14.30/src/common/drain.rs similarity index 100% rename from .cargo-vendor/hyper/src/common/drain.rs rename to .cargo-vendor/hyper-0.14.30/src/common/drain.rs diff --git a/.cargo-vendor/hyper/src/common/exec.rs b/.cargo-vendor/hyper-0.14.30/src/common/exec.rs similarity index 100% rename from .cargo-vendor/hyper/src/common/exec.rs rename to .cargo-vendor/hyper-0.14.30/src/common/exec.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/common/io/mod.rs b/.cargo-vendor/hyper-0.14.30/src/common/io/mod.rs new file mode 100644 index 0000000000..2e6d506153 --- /dev/null +++ 
b/.cargo-vendor/hyper-0.14.30/src/common/io/mod.rs @@ -0,0 +1,3 @@ +mod rewind; + +pub(crate) use self::rewind::Rewind; diff --git a/.cargo-vendor/hyper-0.14.30/src/common/io/rewind.rs b/.cargo-vendor/hyper-0.14.30/src/common/io/rewind.rs new file mode 100644 index 0000000000..9ed7c42fea --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/common/io/rewind.rs @@ -0,0 +1,155 @@ +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{cmp, io}; + +use bytes::{Buf, Bytes}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; + +/// Combine a buffer with an IO, rewinding reads to use the buffer. +#[derive(Debug)] +pub(crate) struct Rewind { + pre: Option, + inner: T, +} + +impl Rewind { + #[cfg(any(all(feature = "http2", feature = "server"), test))] + pub(crate) fn new(io: T) -> Self { + Rewind { + pre: None, + inner: io, + } + } + + pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self { + Rewind { + pre: Some(buf), + inner: io, + } + } + + #[cfg(any(all(feature = "http1", feature = "http2", feature = "server"), test))] + pub(crate) fn rewind(&mut self, bs: Bytes) { + debug_assert!(self.pre.is_none()); + self.pre = Some(bs); + } + + pub(crate) fn into_inner(self) -> (T, Bytes) { + (self.inner, self.pre.unwrap_or_else(Bytes::new)) + } + + // pub(crate) fn get_mut(&mut self) -> &mut T { + // &mut self.inner + // } +} + +impl AsyncRead for Rewind +where + T: AsyncRead + Unpin, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + if let Some(mut prefix) = self.pre.take() { + // If there are no remaining bytes, let the bytes get dropped. + if !prefix.is_empty() { + let copy_len = cmp::min(prefix.len(), buf.remaining()); + // TODO: There should be a way to do following two lines cleaner... 
+ buf.put_slice(&prefix[..copy_len]); + prefix.advance(copy_len); + // Put back what's left + if !prefix.is_empty() { + self.pre = Some(prefix); + } + + return Poll::Ready(Ok(())); + } + } + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for Rewind +where + T: AsyncWrite + Unpin, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write(cx, buf) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_shutdown(cx) + } + + fn is_write_vectored(&self) -> bool { + self.inner.is_write_vectored() + } +} + +#[cfg(test)] +mod tests { + // FIXME: re-implement tests with `async/await`, this import should + // trigger a warning to remind us + use super::Rewind; + use bytes::Bytes; + use tokio::io::AsyncReadExt; + + #[tokio::test] + async fn partial_rewind() { + let underlying = [104, 101, 108, 108, 111]; + + let mock = tokio_test::io::Builder::new().read(&underlying).build(); + + let mut stream = Rewind::new(mock); + + // Read off some bytes, ensure we filled o1 + let mut buf = [0; 2]; + stream.read_exact(&mut buf).await.expect("read1"); + + // Rewind the stream so that it is as if we never read in the first place. 
+ stream.rewind(Bytes::copy_from_slice(&buf[..])); + + let mut buf = [0; 5]; + stream.read_exact(&mut buf).await.expect("read1"); + + // At this point we should have read everything that was in the MockStream + assert_eq!(&buf, &underlying); + } + + #[tokio::test] + async fn full_rewind() { + let underlying = [104, 101, 108, 108, 111]; + + let mock = tokio_test::io::Builder::new().read(&underlying).build(); + + let mut stream = Rewind::new(mock); + + let mut buf = [0; 5]; + stream.read_exact(&mut buf).await.expect("read1"); + + // Rewind the stream so that it is as if we never read in the first place. + stream.rewind(Bytes::copy_from_slice(&buf[..])); + + let mut buf = [0; 5]; + stream.read_exact(&mut buf).await.expect("read1"); + } +} diff --git a/.cargo-vendor/hyper/src/common/lazy.rs b/.cargo-vendor/hyper-0.14.30/src/common/lazy.rs similarity index 100% rename from .cargo-vendor/hyper/src/common/lazy.rs rename to .cargo-vendor/hyper-0.14.30/src/common/lazy.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/common/mod.rs b/.cargo-vendor/hyper-0.14.30/src/common/mod.rs new file mode 100644 index 0000000000..3d83946243 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/common/mod.rs @@ -0,0 +1,30 @@ +macro_rules! 
ready { + ($e:expr) => { + match $e { + std::task::Poll::Ready(v) => v, + std::task::Poll::Pending => return std::task::Poll::Pending, + } + }; +} + +pub(crate) mod buf; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] +pub(crate) mod date; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] +pub(crate) mod drain; +#[cfg(any(feature = "http1", feature = "http2", feature = "server"))] +pub(crate) mod exec; +pub(crate) mod io; +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +mod lazy; +#[cfg(any( + feature = "stream", + all(feature = "client", any(feature = "http1", feature = "http2")) +))] +pub(crate) mod sync_wrapper; +#[cfg(feature = "http1")] +pub(crate) mod task; +pub(crate) mod watch; + +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +pub(crate) use self::lazy::{lazy, Started as Lazy}; diff --git a/.cargo-vendor/hyper/src/common/sync_wrapper.rs b/.cargo-vendor/hyper-0.14.30/src/common/sync_wrapper.rs similarity index 100% rename from .cargo-vendor/hyper/src/common/sync_wrapper.rs rename to .cargo-vendor/hyper-0.14.30/src/common/sync_wrapper.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/common/task.rs b/.cargo-vendor/hyper-0.14.30/src/common/task.rs new file mode 100644 index 0000000000..0ac047a462 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/common/task.rs @@ -0,0 +1,12 @@ +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; + +/// A function to help "yield" a future, such that it is re-scheduled immediately. +/// +/// Useful for spin counts, so a future doesn't hog too much time. +pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { + cx.waker().wake_by_ref(); + Poll::Pending +} diff --git a/.cargo-vendor/hyper-0.14.30/src/common/watch.rs b/.cargo-vendor/hyper-0.14.30/src/common/watch.rs new file mode 100644 index 0000000000..ba17d551cb --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/common/watch.rs @@ -0,0 +1,73 @@ +//! 
An SPSC broadcast channel. +//! +//! - The value can only be a `usize`. +//! - The consumer is only notified if the value is different. +//! - The value `0` is reserved for closed. + +use futures_util::task::AtomicWaker; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; +use std::task; + +type Value = usize; + +pub(crate) const CLOSED: usize = 0; + +pub(crate) fn channel(initial: Value) -> (Sender, Receiver) { + debug_assert!( + initial != CLOSED, + "watch::channel initial state of 0 is reserved" + ); + + let shared = Arc::new(Shared { + value: AtomicUsize::new(initial), + waker: AtomicWaker::new(), + }); + + ( + Sender { + shared: shared.clone(), + }, + Receiver { shared }, + ) +} + +pub(crate) struct Sender { + shared: Arc, +} + +pub(crate) struct Receiver { + shared: Arc, +} + +struct Shared { + value: AtomicUsize, + waker: AtomicWaker, +} + +impl Sender { + pub(crate) fn send(&mut self, value: Value) { + if self.shared.value.swap(value, Ordering::SeqCst) != value { + self.shared.waker.wake(); + } + } +} + +impl Drop for Sender { + fn drop(&mut self) { + self.send(CLOSED); + } +} + +impl Receiver { + pub(crate) fn load(&mut self, cx: &mut task::Context<'_>) -> Value { + self.shared.waker.register(cx.waker()); + self.shared.value.load(Ordering::SeqCst) + } + + pub(crate) fn peek(&self) -> Value { + self.shared.value.load(Ordering::Relaxed) + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/error.rs b/.cargo-vendor/hyper-0.14.30/src/error.rs new file mode 100644 index 0000000000..5beedeb8b2 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/error.rs @@ -0,0 +1,663 @@ +//! Error and Result module. + +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +use crate::client::connect::Connected; +use std::error::Error as StdError; +use std::fmt; + +/// Result type often returned from methods that can have hyper `Error`s. 
+pub type Result = std::result::Result; + +type Cause = Box; + +/// Represents errors that can occur handling HTTP streams. +pub struct Error { + inner: Box, +} + +struct ErrorImpl { + kind: Kind, + cause: Option, + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + connect_info: Option, +} + +#[derive(Debug)] +pub(super) enum Kind { + Parse(Parse), + User(User), + /// A message reached EOF, but is not complete. + #[allow(unused)] + IncompleteMessage, + /// A connection received a message (or bytes) when not waiting for one. + #[cfg(feature = "http1")] + UnexpectedMessage, + /// A pending item was dropped before ever being processed. + Canceled, + /// Indicates a channel (client or body sender) is closed. + ChannelClosed, + /// An `io::Error` that occurred while trying to read or write to a network stream. + #[cfg(any(feature = "http1", feature = "http2"))] + Io, + /// Error occurred while connecting. + #[allow(unused)] + Connect, + /// Error creating a TcpListener. + #[cfg(all(feature = "tcp", feature = "server"))] + Listen, + /// Error accepting on an Incoming stream. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + Accept, + /// User took too long to send headers + #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + HeaderTimeout, + /// Error while reading a body from connection. + #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] + Body, + /// Error while writing a body to connection. + #[cfg(any(feature = "http1", feature = "http2"))] + BodyWrite, + /// Error calling AsyncWrite::shutdown() + #[cfg(feature = "http1")] + Shutdown, + + /// A general error from h2. 
+ #[cfg(feature = "http2")] + Http2, +} + +#[derive(Debug)] +pub(super) enum Parse { + Method, + Version, + #[cfg(feature = "http1")] + VersionH2, + Uri, + #[cfg_attr(not(all(feature = "http1", feature = "server")), allow(unused))] + UriTooLong, + Header(Header), + TooLarge, + Status, + #[cfg_attr(debug_assertions, allow(unused))] + Internal, +} + +#[derive(Debug)] +pub(super) enum Header { + Token, + #[cfg(feature = "http1")] + ContentLengthInvalid, + #[cfg(all(feature = "http1", feature = "server"))] + TransferEncodingInvalid, + #[cfg(feature = "http1")] + TransferEncodingUnexpected, +} + +#[derive(Debug)] +pub(super) enum User { + /// Error calling user's HttpBody::poll_data(). + #[cfg(any(feature = "http1", feature = "http2"))] + Body, + /// The user aborted writing of the outgoing body. + BodyWriteAborted, + /// Error calling user's MakeService. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + MakeService, + /// Error from future of user's Service. + #[cfg(any(feature = "http1", feature = "http2"))] + Service, + /// User tried to send a certain header in an unexpected context. + /// + /// For example, sending both `content-length` and `transfer-encoding`. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + UnexpectedHeader, + /// User tried to create a Request with bad version. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + UnsupportedVersion, + /// User tried to create a CONNECT Request with the Client. + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + UnsupportedRequestMethod, + /// User tried to respond with a 1xx (not 101) response code. + #[cfg(feature = "http1")] + #[cfg(feature = "server")] + UnsupportedStatusCode, + /// User tried to send a Request with Client with non-absolute URI. 
+ #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + AbsoluteUriRequired, + + /// User tried polling for an upgrade that doesn't exist. + NoUpgrade, + + /// User polled for an upgrade, but low-level API is not using upgrades. + #[cfg(feature = "http1")] + ManualUpgrade, + + /// User called `server::Connection::without_shutdown()` on an HTTP/2 conn. + #[cfg(feature = "server")] + WithoutShutdownNonHttp1, + + /// The dispatch task is gone. + #[cfg(feature = "client")] + DispatchGone, + + /// User aborted in an FFI callback. + #[cfg(feature = "ffi")] + AbortedByCallback, +} + +// Sentinel type to indicate the error was caused by a timeout. +#[derive(Debug)] +pub(super) struct TimedOut; + +impl Error { + /// Returns true if this was an HTTP parse error. + pub fn is_parse(&self) -> bool { + matches!(self.inner.kind, Kind::Parse(_)) + } + + /// Returns true if this was an HTTP parse error caused by a message that was too large. + pub fn is_parse_too_large(&self) -> bool { + matches!( + self.inner.kind, + Kind::Parse(Parse::TooLarge) | Kind::Parse(Parse::UriTooLong) + ) + } + + /// Returns true if this was an HTTP parse error caused by an invalid response status code or + /// reason phrase. + pub fn is_parse_status(&self) -> bool { + matches!(self.inner.kind, Kind::Parse(Parse::Status)) + } + + /// Returns true if this error was caused by user code. + pub fn is_user(&self) -> bool { + matches!(self.inner.kind, Kind::User(_)) + } + + /// Returns true if this was about a `Request` that was canceled. + pub fn is_canceled(&self) -> bool { + matches!(self.inner.kind, Kind::Canceled) + } + + /// Returns true if a sender's channel is closed. + pub fn is_closed(&self) -> bool { + matches!(self.inner.kind, Kind::ChannelClosed) + } + + /// Returns true if this was an error from `Connect`. + pub fn is_connect(&self) -> bool { + matches!(self.inner.kind, Kind::Connect) + } + + /// Returns true if the connection closed before a message could complete. 
+ pub fn is_incomplete_message(&self) -> bool { + matches!(self.inner.kind, Kind::IncompleteMessage) + } + + /// Returns true if the body write was aborted. + pub fn is_body_write_aborted(&self) -> bool { + matches!(self.inner.kind, Kind::User(User::BodyWriteAborted)) + } + + /// Returns true if the error was caused by a timeout. + pub fn is_timeout(&self) -> bool { + self.find_source::().is_some() + } + + /// Consumes the error, returning its cause. + pub fn into_cause(self) -> Option> { + self.inner.cause + } + + /// Returns the info of the client connection on which this error occurred. + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + pub fn client_connect_info(&self) -> Option<&Connected> { + self.inner.connect_info.as_ref() + } + + pub(super) fn new(kind: Kind) -> Error { + Error { + inner: Box::new(ErrorImpl { + kind, + cause: None, + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + connect_info: None, + }), + } + } + + pub(super) fn with>(mut self, cause: C) -> Error { + self.inner.cause = Some(cause.into()); + self + } + + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + pub(super) fn with_client_connect_info(mut self, connect_info: Connected) -> Error { + self.inner.connect_info = Some(connect_info); + self + } + + #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))] + pub(super) fn kind(&self) -> &Kind { + &self.inner.kind + } + + pub(crate) fn find_source(&self) -> Option<&E> { + let mut cause = self.source(); + while let Some(err) = cause { + if let Some(ref typed) = err.downcast_ref() { + return Some(typed); + } + cause = err.source(); + } + + // else + None + } + + #[cfg(feature = "http2")] + pub(super) fn h2_reason(&self) -> h2::Reason { + // Find an h2::Reason somewhere in the cause stack, if it exists, + // otherwise assume an INTERNAL_ERROR. 
+ self.find_source::() + .and_then(|h2_err| h2_err.reason()) + .unwrap_or(h2::Reason::INTERNAL_ERROR) + } + + pub(super) fn new_canceled() -> Error { + Error::new(Kind::Canceled) + } + + #[cfg(feature = "http1")] + pub(super) fn new_incomplete() -> Error { + Error::new(Kind::IncompleteMessage) + } + + #[cfg(feature = "http1")] + pub(super) fn new_too_large() -> Error { + Error::new(Kind::Parse(Parse::TooLarge)) + } + + #[cfg(feature = "http1")] + pub(super) fn new_version_h2() -> Error { + Error::new(Kind::Parse(Parse::VersionH2)) + } + + #[cfg(feature = "http1")] + pub(super) fn new_unexpected_message() -> Error { + Error::new(Kind::UnexpectedMessage) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_io(cause: std::io::Error) -> Error { + Error::new(Kind::Io).with(cause) + } + + #[cfg(all(feature = "server", feature = "tcp"))] + pub(super) fn new_listen>(cause: E) -> Error { + Error::new(Kind::Listen).with(cause) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + pub(super) fn new_accept>(cause: E) -> Error { + Error::new(Kind::Accept).with(cause) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_connect>(cause: E) -> Error { + Error::new(Kind::Connect).with(cause) + } + + pub(super) fn new_closed() -> Error { + Error::new(Kind::ChannelClosed) + } + + #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] + pub(super) fn new_body>(cause: E) -> Error { + Error::new(Kind::Body).with(cause) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_body_write>(cause: E) -> Error { + Error::new(Kind::BodyWrite).with(cause) + } + + pub(super) fn new_body_write_aborted() -> Error { + Error::new(Kind::User(User::BodyWriteAborted)) + } + + fn new_user(user: User) -> Error { + Error::new(Kind::User(user)) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + pub(super) fn 
new_user_header() -> Error { + Error::new_user(User::UnexpectedHeader) + } + + #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + pub(super) fn new_header_timeout() -> Error { + Error::new(Kind::HeaderTimeout) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_user_unsupported_version() -> Error { + Error::new_user(User::UnsupportedVersion) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_user_unsupported_request_method() -> Error { + Error::new_user(User::UnsupportedRequestMethod) + } + + #[cfg(feature = "http1")] + #[cfg(feature = "server")] + pub(super) fn new_user_unsupported_status_code() -> Error { + Error::new_user(User::UnsupportedStatusCode) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + pub(super) fn new_user_absolute_uri_required() -> Error { + Error::new_user(User::AbsoluteUriRequired) + } + + pub(super) fn new_user_no_upgrade() -> Error { + Error::new_user(User::NoUpgrade) + } + + #[cfg(feature = "http1")] + pub(super) fn new_user_manual_upgrade() -> Error { + Error::new_user(User::ManualUpgrade) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + pub(super) fn new_user_make_service>(cause: E) -> Error { + Error::new_user(User::MakeService).with(cause) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_user_service>(cause: E) -> Error { + Error::new_user(User::Service).with(cause) + } + + #[cfg(any(feature = "http1", feature = "http2"))] + pub(super) fn new_user_body>(cause: E) -> Error { + Error::new_user(User::Body).with(cause) + } + + #[cfg(feature = "server")] + pub(super) fn new_without_shutdown_not_h1() -> Error { + Error::new(Kind::User(User::WithoutShutdownNonHttp1)) + } + + #[cfg(feature = "http1")] + pub(super) fn new_shutdown(cause: std::io::Error) -> Error { + Error::new(Kind::Shutdown).with(cause) + } 
+ + #[cfg(feature = "ffi")] + pub(super) fn new_user_aborted_by_callback() -> Error { + Error::new_user(User::AbortedByCallback) + } + + #[cfg(feature = "client")] + pub(super) fn new_user_dispatch_gone() -> Error { + Error::new(Kind::User(User::DispatchGone)) + } + + #[cfg(feature = "http2")] + pub(super) fn new_h2(cause: ::h2::Error) -> Error { + if cause.is_io() { + Error::new_io(cause.into_io().expect("h2::Error::is_io")) + } else { + Error::new(Kind::Http2).with(cause) + } + } + + /// The error's standalone message, without the message from the source. + pub fn message(&self) -> impl fmt::Display + '_ { + self.description() + } + + fn description(&self) -> &str { + match self.inner.kind { + Kind::Parse(Parse::Method) => "invalid HTTP method parsed", + Kind::Parse(Parse::Version) => "invalid HTTP version parsed", + #[cfg(feature = "http1")] + Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)", + Kind::Parse(Parse::Uri) => "invalid URI", + Kind::Parse(Parse::UriTooLong) => "URI too long", + Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed", + #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => { + "invalid content-length parsed" + } + #[cfg(all(feature = "http1", feature = "server"))] + Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => { + "invalid transfer-encoding parsed" + } + #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => { + "unexpected transfer-encoding parsed" + } + Kind::Parse(Parse::TooLarge) => "message head is too large", + Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed", + Kind::Parse(Parse::Internal) => { + "internal error inside Hyper and/or its dependencies, please report" + } + Kind::IncompleteMessage => "connection closed before message completed", + #[cfg(feature = "http1")] + Kind::UnexpectedMessage => "received unexpected message from connection", + Kind::ChannelClosed => 
"channel closed", + Kind::Connect => "error trying to connect", + Kind::Canceled => "operation was canceled", + #[cfg(all(feature = "server", feature = "tcp"))] + Kind::Listen => "error creating server listener", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + Kind::Accept => "error accepting connection", + #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + Kind::HeaderTimeout => "read header from client timeout", + #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] + Kind::Body => "error reading a body from connection", + #[cfg(any(feature = "http1", feature = "http2"))] + Kind::BodyWrite => "error writing a body to connection", + #[cfg(feature = "http1")] + Kind::Shutdown => "error shutting down connection", + #[cfg(feature = "http2")] + Kind::Http2 => "http2 error", + #[cfg(any(feature = "http1", feature = "http2"))] + Kind::Io => "connection error", + + #[cfg(any(feature = "http1", feature = "http2"))] + Kind::User(User::Body) => "error from user's HttpBody stream", + Kind::User(User::BodyWriteAborted) => "user body write aborted", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + Kind::User(User::MakeService) => "error from user's MakeService", + #[cfg(any(feature = "http1", feature = "http2"))] + Kind::User(User::Service) => "error from user's Service", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "server")] + Kind::User(User::UnexpectedHeader) => "user sent unexpected header", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + Kind::User(User::UnsupportedVersion) => "request has unsupported HTTP version", + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + Kind::User(User::UnsupportedRequestMethod) => "request has unsupported HTTP method", + #[cfg(feature = "http1")] + #[cfg(feature = "server")] + Kind::User(User::UnsupportedStatusCode) => { + "response has 1xx 
status code, not supported by server" + } + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(feature = "client")] + Kind::User(User::AbsoluteUriRequired) => "client requires absolute-form URIs", + Kind::User(User::NoUpgrade) => "no upgrade available", + #[cfg(feature = "http1")] + Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use", + #[cfg(feature = "server")] + Kind::User(User::WithoutShutdownNonHttp1) => { + "without_shutdown() called on a non-HTTP/1 connection" + } + #[cfg(feature = "client")] + Kind::User(User::DispatchGone) => "dispatch task is gone", + #[cfg(feature = "ffi")] + Kind::User(User::AbortedByCallback) => "operation aborted by an application callback", + } + } +} + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut f = f.debug_tuple("hyper::Error"); + f.field(&self.inner.kind); + if let Some(ref cause) = self.inner.cause { + f.field(cause); + } + f.finish() + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ref cause) = self.inner.cause { + write!(f, "{}: {}", self.description(), cause) + } else { + f.write_str(self.description()) + } + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + self.inner + .cause + .as_ref() + .map(|cause| &**cause as &(dyn StdError + 'static)) + } +} + +#[doc(hidden)] +impl From for Error { + fn from(err: Parse) -> Error { + Error::new(Kind::Parse(err)) + } +} + +#[cfg(feature = "http1")] +impl Parse { + pub(crate) fn content_length_invalid() -> Self { + Parse::Header(Header::ContentLengthInvalid) + } + + #[cfg(all(feature = "http1", feature = "server"))] + pub(crate) fn transfer_encoding_invalid() -> Self { + Parse::Header(Header::TransferEncodingInvalid) + } + + pub(crate) fn transfer_encoding_unexpected() -> Self { + Parse::Header(Header::TransferEncodingUnexpected) + } +} + +impl From for Parse { + fn from(err: httparse::Error) 
-> Parse { + match err { + httparse::Error::HeaderName + | httparse::Error::HeaderValue + | httparse::Error::NewLine + | httparse::Error::Token => Parse::Header(Header::Token), + httparse::Error::Status => Parse::Status, + httparse::Error::TooManyHeaders => Parse::TooLarge, + httparse::Error::Version => Parse::Version, + } + } +} + +impl From for Parse { + fn from(_: http::method::InvalidMethod) -> Parse { + Parse::Method + } +} + +impl From for Parse { + fn from(_: http::status::InvalidStatusCode) -> Parse { + Parse::Status + } +} + +impl From for Parse { + fn from(_: http::uri::InvalidUri) -> Parse { + Parse::Uri + } +} + +impl From for Parse { + fn from(_: http::uri::InvalidUriParts) -> Parse { + Parse::Uri + } +} + +#[doc(hidden)] +trait AssertSendSync: Send + Sync + 'static {} +#[doc(hidden)] +impl AssertSendSync for Error {} + +// ===== impl TimedOut ==== + +impl fmt::Display for TimedOut { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("operation timed out") + } +} + +impl StdError for TimedOut {} + +#[cfg(test)] +mod tests { + use super::*; + use std::mem; + + #[test] + fn error_size_of() { + assert_eq!(mem::size_of::(), mem::size_of::()); + } + + #[cfg(feature = "http2")] + #[test] + fn h2_reason_unknown() { + let closed = Error::new_closed(); + assert_eq!(closed.h2_reason(), h2::Reason::INTERNAL_ERROR); + } + + #[cfg(feature = "http2")] + #[test] + fn h2_reason_one_level() { + let body_err = Error::new_user_body(h2::Error::from(h2::Reason::ENHANCE_YOUR_CALM)); + assert_eq!(body_err.h2_reason(), h2::Reason::ENHANCE_YOUR_CALM); + } + + #[cfg(feature = "http2")] + #[test] + fn h2_reason_nested() { + let recvd = Error::new_h2(h2::Error::from(h2::Reason::HTTP_1_1_REQUIRED)); + // Suppose a user were proxying the received error + let svc_err = Error::new_user_service(recvd); + assert_eq!(svc_err.h2_reason(), h2::Reason::HTTP_1_1_REQUIRED); + } +} diff --git a/.cargo-vendor/hyper/src/ext.rs b/.cargo-vendor/hyper-0.14.30/src/ext.rs 
similarity index 100% rename from .cargo-vendor/hyper/src/ext.rs rename to .cargo-vendor/hyper-0.14.30/src/ext.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/ext/h1_reason_phrase.rs b/.cargo-vendor/hyper-0.14.30/src/ext/h1_reason_phrase.rs new file mode 100644 index 0000000000..021b632b6d --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ext/h1_reason_phrase.rs @@ -0,0 +1,221 @@ +use std::convert::TryFrom; + +use bytes::Bytes; + +/// A reason phrase in an HTTP/1 response. +/// +/// # Clients +/// +/// For clients, a `ReasonPhrase` will be present in the extensions of the `http::Response` returned +/// for a request if the reason phrase is different from the canonical reason phrase for the +/// response's status code. For example, if a server returns `HTTP/1.1 200 Awesome`, the +/// `ReasonPhrase` will be present and contain `Awesome`, but if a server returns `HTTP/1.1 200 OK`, +/// the response will not contain a `ReasonPhrase`. +/// +/// ```no_run +/// # #[cfg(all(feature = "tcp", feature = "client", feature = "http1"))] +/// # async fn fake_fetch() -> hyper::Result<()> { +/// use hyper::{Client, Uri}; +/// use hyper::ext::ReasonPhrase; +/// +/// let res = Client::new().get(Uri::from_static("http://example.com/non_canonical_reason")).await?; +/// +/// // Print out the non-canonical reason phrase, if it has one... +/// if let Some(reason) = res.extensions().get::() { +/// println!("non-canonical reason: {}", std::str::from_utf8(reason.as_bytes()).unwrap()); +/// } +/// # Ok(()) +/// # } +/// ``` +/// +/// # Servers +/// +/// When a `ReasonPhrase` is present in the extensions of the `http::Response` written by a server, +/// its contents will be written in place of the canonical reason phrase when responding via HTTP/1. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ReasonPhrase(Bytes); + +impl ReasonPhrase { + /// Gets the reason phrase as bytes. 
+ pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Converts a static byte slice to a reason phrase. + pub fn from_static(reason: &'static [u8]) -> Self { + // TODO: this can be made const once MSRV is >= 1.57.0 + if find_invalid_byte(reason).is_some() { + panic!("invalid byte in static reason phrase"); + } + Self(Bytes::from_static(reason)) + } + + /// Converts a `Bytes` directly into a `ReasonPhrase` without validating. + /// + /// Use with care; invalid bytes in a reason phrase can cause serious security problems if + /// emitted in a response. + pub unsafe fn from_bytes_unchecked(reason: Bytes) -> Self { + Self(reason) + } +} + +impl TryFrom<&[u8]> for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: &[u8]) -> Result { + if let Some(bad_byte) = find_invalid_byte(reason) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(Bytes::copy_from_slice(reason))) + } + } +} + +impl TryFrom> for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: Vec) -> Result { + if let Some(bad_byte) = find_invalid_byte(&reason) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(Bytes::from(reason))) + } + } +} + +impl TryFrom for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: String) -> Result { + if let Some(bad_byte) = find_invalid_byte(reason.as_bytes()) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(Bytes::from(reason))) + } + } +} + +impl TryFrom for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: Bytes) -> Result { + if let Some(bad_byte) = find_invalid_byte(&reason) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(reason)) + } + } +} + +impl Into for ReasonPhrase { + fn into(self) -> Bytes { + self.0 + } +} + +impl AsRef<[u8]> for ReasonPhrase { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// Error indicating an invalid byte when constructing a `ReasonPhrase`. 
+/// +/// See [the spec][spec] for details on allowed bytes. +/// +/// [spec]: https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7 +#[derive(Debug)] +pub struct InvalidReasonPhrase { + bad_byte: u8, +} + +impl std::fmt::Display for InvalidReasonPhrase { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Invalid byte in reason phrase: {}", self.bad_byte) + } +} + +impl std::error::Error for InvalidReasonPhrase {} + +const fn is_valid_byte(b: u8) -> bool { + // See https://www.rfc-editor.org/rfc/rfc5234.html#appendix-B.1 + const fn is_vchar(b: u8) -> bool { + 0x21 <= b && b <= 0x7E + } + + // See https://httpwg.org/http-core/draft-ietf-httpbis-semantics-latest.html#fields.values + // + // The 0xFF comparison is technically redundant, but it matches the text of the spec more + // clearly and will be optimized away. + #[allow(unused_comparisons)] + const fn is_obs_text(b: u8) -> bool { + 0x80 <= b && b <= 0xFF + } + + // See https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7 + b == b'\t' || b == b' ' || is_vchar(b) || is_obs_text(b) +} + +const fn find_invalid_byte(bytes: &[u8]) -> Option { + let mut i = 0; + while i < bytes.len() { + let b = bytes[i]; + if !is_valid_byte(b) { + return Some(b); + } + i += 1; + } + None +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn basic_valid() { + const PHRASE: &'static [u8] = b"OK"; + assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); + assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); + } + + #[test] + fn empty_valid() { + const PHRASE: &'static [u8] = b""; + assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); + assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); + } + + #[test] + fn obs_text_valid() { + const PHRASE: &'static [u8] = b"hyp\xe9r"; + assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); + 
assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); + } + + const NEWLINE_PHRASE: &'static [u8] = b"hyp\ner"; + + #[test] + #[should_panic] + fn newline_invalid_panic() { + ReasonPhrase::from_static(NEWLINE_PHRASE); + } + + #[test] + fn newline_invalid_err() { + assert!(ReasonPhrase::try_from(NEWLINE_PHRASE).is_err()); + } + + const CR_PHRASE: &'static [u8] = b"hyp\rer"; + + #[test] + #[should_panic] + fn cr_invalid_panic() { + ReasonPhrase::from_static(CR_PHRASE); + } + + #[test] + fn cr_invalid_err() { + assert!(ReasonPhrase::try_from(CR_PHRASE).is_err()); + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/body.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/body.rs new file mode 100644 index 0000000000..39ba5beffb --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/body.rs @@ -0,0 +1,229 @@ +use std::ffi::c_void; +use std::mem::ManuallyDrop; +use std::ptr; +use std::task::{Context, Poll}; + +use http::HeaderMap; +use libc::{c_int, size_t}; + +use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType}; +use super::{UserDataPointer, HYPER_ITER_CONTINUE}; +use crate::body::{Body, Bytes, HttpBody as _}; + +/// A streaming HTTP body. +pub struct hyper_body(pub(super) Body); + +/// A buffer of bytes that is sent or received on a `hyper_body`. +pub struct hyper_buf(pub(crate) Bytes); + +pub(crate) struct UserBody { + data_func: hyper_body_data_callback, + userdata: *mut c_void, +} + +// ===== Body ===== + +type hyper_body_foreach_callback = extern "C" fn(*mut c_void, *const hyper_buf) -> c_int; + +type hyper_body_data_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut *mut hyper_buf) -> c_int; + +ffi_fn! { + /// Create a new "empty" body. + /// + /// If not configured, this body acts as an empty payload. + fn hyper_body_new() -> *mut hyper_body { + Box::into_raw(Box::new(hyper_body(Body::empty()))) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Free a `hyper_body *`. 
+ fn hyper_body_free(body: *mut hyper_body) { + drop(non_null!(Box::from_raw(body) ?= ())); + } +} + +ffi_fn! { + /// Return a task that will poll the body for the next buffer of data. + /// + /// The task value may have different types depending on the outcome: + /// + /// - `HYPER_TASK_BUF`: Success, and more data was received. + /// - `HYPER_TASK_ERROR`: An error retrieving the data. + /// - `HYPER_TASK_EMPTY`: The body has finished streaming data. + /// + /// This does not consume the `hyper_body *`, so it may be used to again. + /// However, it MUST NOT be used or freed until the related task completes. + fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task { + // This doesn't take ownership of the Body, so don't allow destructor + let mut body = ManuallyDrop::new(non_null!(Box::from_raw(body) ?= ptr::null_mut())); + + Box::into_raw(hyper_task::boxed(async move { + body.0.data().await.map(|res| res.map(hyper_buf)) + })) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Return a task that will poll the body and execute the callback with each + /// body chunk that is received. + /// + /// The `hyper_buf` pointer is only a borrowed reference, it cannot live outside + /// the execution of the callback. You must make a copy to retain it. + /// + /// The callback should return `HYPER_ITER_CONTINUE` to continue iterating + /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. + /// + /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it. 
+ fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut hyper_task { + let mut body = non_null!(Box::from_raw(body) ?= ptr::null_mut()); + let userdata = UserDataPointer(userdata); + + Box::into_raw(hyper_task::boxed(async move { + while let Some(item) = body.0.data().await { + let chunk = item?; + if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) { + return Err(crate::Error::new_user_aborted_by_callback()); + } + } + Ok(()) + })) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Set userdata on this body, which will be passed to callback functions. + fn hyper_body_set_userdata(body: *mut hyper_body, userdata: *mut c_void) { + let b = non_null!(&mut *body ?= ()); + b.0.as_ffi_mut().userdata = userdata; + } +} + +ffi_fn! { + /// Set the data callback for this body. + /// + /// The callback is called each time hyper needs to send more data for the + /// body. It is passed the value from `hyper_body_set_userdata`. + /// + /// If there is data available, the `hyper_buf **` argument should be set + /// to a `hyper_buf *` containing the data, and `HYPER_POLL_READY` should + /// be returned. + /// + /// Returning `HYPER_POLL_READY` while the `hyper_buf **` argument points + /// to `NULL` will indicate the body has completed all data. + /// + /// If there is more data to send, but it isn't yet available, a + /// `hyper_waker` should be saved from the `hyper_context *` argument, and + /// `HYPER_POLL_PENDING` should be returned. You must wake the saved waker + /// to signal the task when data is available. + /// + /// If some error has occurred, you can return `HYPER_POLL_ERROR` to abort + /// the body. 
+ fn hyper_body_set_data_func(body: *mut hyper_body, func: hyper_body_data_callback) { + let b = non_null!{ &mut *body ?= () }; + b.0.as_ffi_mut().data_func = func; + } +} + +// ===== impl UserBody ===== + +impl UserBody { + pub(crate) fn new() -> UserBody { + UserBody { + data_func: data_noop, + userdata: std::ptr::null_mut(), + } + } + + pub(crate) fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { + let mut out = std::ptr::null_mut(); + match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) { + super::task::HYPER_POLL_READY => { + if out.is_null() { + Poll::Ready(None) + } else { + let buf = unsafe { Box::from_raw(out) }; + Poll::Ready(Some(Ok(buf.0))) + } + } + super::task::HYPER_POLL_PENDING => Poll::Pending, + super::task::HYPER_POLL_ERROR => { + Poll::Ready(Some(Err(crate::Error::new_body_write_aborted()))) + } + unexpected => Poll::Ready(Some(Err(crate::Error::new_body_write(format!( + "unexpected hyper_body_data_func return code {}", + unexpected + ))))), + } + } + + pub(crate) fn poll_trailers( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(Ok(None)) + } +} + +/// cbindgen:ignore +extern "C" fn data_noop( + _userdata: *mut c_void, + _: *mut hyper_context<'_>, + _: *mut *mut hyper_buf, +) -> c_int { + super::task::HYPER_POLL_READY +} + +unsafe impl Send for UserBody {} +unsafe impl Sync for UserBody {} + +// ===== Bytes ===== + +ffi_fn! { + /// Create a new `hyper_buf *` by copying the provided bytes. + /// + /// This makes an owned copy of the bytes, so the `buf` argument can be + /// freed or changed afterwards. + /// + /// This returns `NULL` if allocating a new buffer fails. + fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf { + let slice = unsafe { + std::slice::from_raw_parts(buf, len) + }; + Box::into_raw(Box::new(hyper_buf(Bytes::copy_from_slice(slice)))) + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Get a pointer to the bytes in this buffer. 
+ /// + /// This should be used in conjunction with `hyper_buf_len` to get the length + /// of the bytes data. + /// + /// This pointer is borrowed data, and not valid once the `hyper_buf` is + /// consumed/freed. + fn hyper_buf_bytes(buf: *const hyper_buf) -> *const u8 { + unsafe { (*buf).0.as_ptr() } + } ?= ptr::null() +} + +ffi_fn! { + /// Get the length of the bytes this buffer contains. + fn hyper_buf_len(buf: *const hyper_buf) -> size_t { + unsafe { (*buf).0.len() } + } +} + +ffi_fn! { + /// Free this buffer. + fn hyper_buf_free(buf: *mut hyper_buf) { + drop(unsafe { Box::from_raw(buf) }); + } +} + +unsafe impl AsTaskType for hyper_buf { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_BUF + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/client.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/client.rs new file mode 100644 index 0000000000..670f77d141 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/client.rs @@ -0,0 +1,182 @@ +use std::ptr; +use std::sync::Arc; + +use libc::c_int; + +use crate::client::conn; +use crate::rt::Executor as _; + +use super::error::hyper_code; +use super::http_types::{hyper_request, hyper_response}; +use super::io::hyper_io; +use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType, WeakExec}; + +/// An options builder to configure an HTTP client connection. +pub struct hyper_clientconn_options { + builder: conn::Builder, + /// Use a `Weak` to prevent cycles. + exec: WeakExec, +} + +/// An HTTP client connection handle. +/// +/// These are used to send a request on a single connection. It's possible to +/// send multiple requests on a single connection, such as when HTTP/1 +/// keep-alive or HTTP/2 is used. +pub struct hyper_clientconn { + tx: conn::SendRequest, +} + +// ===== impl hyper_clientconn ===== + +ffi_fn! { + /// Starts an HTTP client connection handshake using the provided IO transport + /// and options. 
+ /// + /// Both the `io` and the `options` are consumed in this function call. + /// + /// The returned `hyper_task *` must be polled with an executor until the + /// handshake completes, at which point the value can be taken. + fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task { + let options = non_null! { Box::from_raw(options) ?= ptr::null_mut() }; + let io = non_null! { Box::from_raw(io) ?= ptr::null_mut() }; + + Box::into_raw(hyper_task::boxed(async move { + options.builder.handshake::<_, crate::Body>(io) + .await + .map(|(tx, conn)| { + options.exec.execute(Box::pin(async move { + let _ = conn.await; + })); + hyper_clientconn { tx } + }) + })) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Send a request on the client connection. + /// + /// Returns a task that needs to be polled until it is ready. When ready, the + /// task yields a `hyper_response *`. + fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task { + let mut req = non_null! { Box::from_raw(req) ?= ptr::null_mut() }; + + // Update request with original-case map of headers + req.finalize_request(); + + let fut = non_null! { &mut *conn ?= ptr::null_mut() }.tx.send_request(req.0); + + let fut = async move { + fut.await.map(hyper_response::wrap) + }; + + Box::into_raw(hyper_task::boxed(fut)) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Free a `hyper_clientconn *`. + fn hyper_clientconn_free(conn: *mut hyper_clientconn) { + drop(non_null! { Box::from_raw(conn) ?= () }); + } +} + +unsafe impl AsTaskType for hyper_clientconn { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_CLIENTCONN + } +} + +// ===== impl hyper_clientconn_options ===== + +ffi_fn! { + /// Creates a new set of HTTP clientconn options to be used in a handshake. 
+ fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { + #[allow(deprecated)] + let builder = conn::Builder::new(); + + Box::into_raw(Box::new(hyper_clientconn_options { + builder, + exec: WeakExec::new(), + })) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Set the whether or not header case is preserved. + /// + /// Pass `0` to allow lowercase normalization (default), `1` to retain original case. + fn hyper_clientconn_options_set_preserve_header_case(opts: *mut hyper_clientconn_options, enabled: c_int) { + let opts = non_null! { &mut *opts ?= () }; + opts.builder.http1_preserve_header_case(enabled != 0); + } +} + +ffi_fn! { + /// Set the whether or not header order is preserved. + /// + /// Pass `0` to allow reordering (default), `1` to retain original ordering. + fn hyper_clientconn_options_set_preserve_header_order(opts: *mut hyper_clientconn_options, enabled: c_int) { + let opts = non_null! { &mut *opts ?= () }; + opts.builder.http1_preserve_header_order(enabled != 0); + } +} + +ffi_fn! { + /// Free a `hyper_clientconn_options *`. + fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { + drop(non_null! { Box::from_raw(opts) ?= () }); + } +} + +ffi_fn! { + /// Set the client background task executor. + /// + /// This does not consume the `options` or the `exec`. + fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const hyper_executor) { + let opts = non_null! { &mut *opts ?= () }; + + let exec = non_null! { Arc::from_raw(exec) ?= () }; + let weak_exec = hyper_executor::downgrade(&exec); + std::mem::forget(exec); + + opts.builder.executor(weak_exec.clone()); + opts.exec = weak_exec; + } +} + +ffi_fn! { + /// Set the whether to use HTTP2. + /// + /// Pass `0` to disable, `1` to enable. + fn hyper_clientconn_options_http2(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { + #[cfg(feature = "http2")] + { + let opts = non_null! 
{ &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; + opts.builder.http2_only(enabled != 0); + hyper_code::HYPERE_OK + } + + #[cfg(not(feature = "http2"))] + { + drop(opts); + drop(enabled); + hyper_code::HYPERE_FEATURE_NOT_ENABLED + } + } +} + +ffi_fn! { + /// Set the whether to include a copy of the raw headers in responses + /// received on this connection. + /// + /// Pass `0` to disable, `1` to enable. + /// + /// If enabled, see `hyper_response_headers_raw()` for usage. + fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { + let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; + opts.builder.http1_headers_raw(enabled != 0); + hyper_code::HYPERE_OK + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/error.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/error.rs new file mode 100644 index 0000000000..015e595aee --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/error.rs @@ -0,0 +1,85 @@ +use libc::size_t; + +/// A more detailed error object returned by some hyper functions. +pub struct hyper_error(crate::Error); + +/// A return code for many of hyper's methods. +#[repr(C)] +pub enum hyper_code { + /// All is well. + HYPERE_OK, + /// General error, details in the `hyper_error *`. + HYPERE_ERROR, + /// A function argument was invalid. + HYPERE_INVALID_ARG, + /// The IO transport returned an EOF when one wasn't expected. + /// + /// This typically means an HTTP request or response was expected, but the + /// connection closed cleanly without sending (all of) it. + HYPERE_UNEXPECTED_EOF, + /// Aborted by a user supplied callback. + HYPERE_ABORTED_BY_CALLBACK, + /// An optional hyper feature was not enabled. + #[cfg_attr(feature = "http2", allow(unused))] + HYPERE_FEATURE_NOT_ENABLED, + /// The peer sent an HTTP message that could not be parsed. 
+ HYPERE_INVALID_PEER_MESSAGE, +} + +// ===== impl hyper_error ===== + +impl hyper_error { + fn code(&self) -> hyper_code { + use crate::error::Kind as ErrorKind; + use crate::error::User; + + match self.0.kind() { + ErrorKind::Parse(_) => hyper_code::HYPERE_INVALID_PEER_MESSAGE, + ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF, + ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK, + // TODO: add more variants + _ => hyper_code::HYPERE_ERROR, + } + } + + fn print_to(&self, dst: &mut [u8]) -> usize { + use std::io::Write; + + let mut dst = std::io::Cursor::new(dst); + + // A write! error doesn't matter. As much as possible will have been + // written, and the Cursor position will know how far that is (even + // if that is zero). + let _ = write!(dst, "{}", &self.0); + dst.position() as usize + } +} + +ffi_fn! { + /// Frees a `hyper_error`. + fn hyper_error_free(err: *mut hyper_error) { + drop(non_null!(Box::from_raw(err) ?= ())); + } +} + +ffi_fn! { + /// Get an equivalent `hyper_code` from this error. + fn hyper_error_code(err: *const hyper_error) -> hyper_code { + non_null!(&*err ?= hyper_code::HYPERE_INVALID_ARG).code() + } +} + +ffi_fn! { + /// Print the details of this error to a buffer. + /// + /// The `dst_len` value must be the maximum length that the buffer can + /// store. + /// + /// The return value is number of bytes that were written to `dst`. 
+ fn hyper_error_print(err: *const hyper_error, dst: *mut u8, dst_len: size_t) -> size_t { + let dst = unsafe { + std::slice::from_raw_parts_mut(dst, dst_len) + }; + non_null!(&*err ?= 0).print_to(dst) + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/http_types.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/http_types.rs new file mode 100644 index 0000000000..ea10f139cb --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/http_types.rs @@ -0,0 +1,657 @@ +use bytes::Bytes; +use libc::{c_int, size_t}; +use std::ffi::c_void; + +use super::body::{hyper_body, hyper_buf}; +use super::error::hyper_code; +use super::task::{hyper_task_return_type, AsTaskType}; +use super::{UserDataPointer, HYPER_ITER_CONTINUE}; +use crate::ext::{HeaderCaseMap, OriginalHeaderOrder, ReasonPhrase}; +use crate::header::{HeaderName, HeaderValue}; +use crate::{Body, HeaderMap, Method, Request, Response, Uri}; + +/// An HTTP request. +pub struct hyper_request(pub(super) Request); + +/// An HTTP response. +pub struct hyper_response(pub(super) Response); + +/// An HTTP header map. +/// +/// These can be part of a request or response. +pub struct hyper_headers { + pub(super) headers: HeaderMap, + orig_casing: HeaderCaseMap, + orig_order: OriginalHeaderOrder, +} + +pub(crate) struct RawHeaders(pub(crate) hyper_buf); + +pub(crate) struct OnInformational { + func: hyper_request_on_informational_callback, + data: UserDataPointer, +} + +type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut hyper_response); + +// ===== impl hyper_request ===== + +ffi_fn! { + /// Construct a new HTTP request. + fn hyper_request_new() -> *mut hyper_request { + Box::into_raw(Box::new(hyper_request(Request::new(Body::empty())))) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Free an HTTP request if not going to send it on a client. + fn hyper_request_free(req: *mut hyper_request) { + drop(non_null!(Box::from_raw(req) ?= ())); + } +} + +ffi_fn! { + /// Set the HTTP Method of the request. 
+ fn hyper_request_set_method(req: *mut hyper_request, method: *const u8, method_len: size_t) -> hyper_code { + let bytes = unsafe { + std::slice::from_raw_parts(method, method_len as usize) + }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + match Method::from_bytes(bytes) { + Ok(m) => { + *req.0.method_mut() = m; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the URI of the request. + /// + /// The request's URI is best described as the `request-target` from the RFCs. So in HTTP/1, + /// whatever is set will get sent as-is in the first line (GET $uri HTTP/1.1). It + /// supports the 4 defined variants, origin-form, absolute-form, authority-form, and + /// asterisk-form. + /// + /// The underlying type was built to efficiently support HTTP/2 where the request-target is + /// split over :scheme, :authority, and :path. As such, each part can be set explicitly, or the + /// type can parse a single contiguous string and if a scheme is found, that slot is "set". If + /// the string just starts with a path, only the path portion is set. All pseudo headers that + /// have been parsed/set are sent when the connection type is HTTP/2. + /// + /// To set each slot explicitly, use `hyper_request_set_uri_parts`. + fn hyper_request_set_uri(req: *mut hyper_request, uri: *const u8, uri_len: size_t) -> hyper_code { + let bytes = unsafe { + std::slice::from_raw_parts(uri, uri_len as usize) + }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + match Uri::from_maybe_shared(bytes) { + Ok(u) => { + *req.0.uri_mut() = u; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the URI of the request with separate scheme, authority, and + /// path/query strings. 
+ /// + /// Each of `scheme`, `authority`, and `path_and_query` should either be + /// null, to skip providing a component, or point to a UTF-8 encoded + /// string. If any string pointer argument is non-null, its corresponding + /// `len` parameter must be set to the string's length. + fn hyper_request_set_uri_parts( + req: *mut hyper_request, + scheme: *const u8, + scheme_len: size_t, + authority: *const u8, + authority_len: size_t, + path_and_query: *const u8, + path_and_query_len: size_t + ) -> hyper_code { + let mut builder = Uri::builder(); + if !scheme.is_null() { + let scheme_bytes = unsafe { + std::slice::from_raw_parts(scheme, scheme_len as usize) + }; + builder = builder.scheme(scheme_bytes); + } + if !authority.is_null() { + let authority_bytes = unsafe { + std::slice::from_raw_parts(authority, authority_len as usize) + }; + builder = builder.authority(authority_bytes); + } + if !path_and_query.is_null() { + let path_and_query_bytes = unsafe { + std::slice::from_raw_parts(path_and_query, path_and_query_len as usize) + }; + builder = builder.path_and_query(path_and_query_bytes); + } + match builder.build() { + Ok(u) => { + *unsafe { &mut *req }.0.uri_mut() = u; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the preferred HTTP version of the request. + /// + /// The version value should be one of the `HYPER_HTTP_VERSION_` constants. + /// + /// Note that this won't change the major HTTP version of the connection, + /// since that is determined at the handshake step. 
+ fn hyper_request_set_version(req: *mut hyper_request, version: c_int) -> hyper_code { + use http::Version; + + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + *req.0.version_mut() = match version { + super::HYPER_HTTP_VERSION_NONE => Version::HTTP_11, + super::HYPER_HTTP_VERSION_1_0 => Version::HTTP_10, + super::HYPER_HTTP_VERSION_1_1 => Version::HTTP_11, + super::HYPER_HTTP_VERSION_2 => Version::HTTP_2, + _ => { + // We don't know this version + return hyper_code::HYPERE_INVALID_ARG; + } + }; + hyper_code::HYPERE_OK + } +} + +ffi_fn! { + /// Gets a reference to the HTTP headers of this request + /// + /// This is not an owned reference, so it should not be accessed after the + /// `hyper_request` has been consumed. + fn hyper_request_headers(req: *mut hyper_request) -> *mut hyper_headers { + hyper_headers::get_or_default(unsafe { &mut *req }.0.extensions_mut()) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Set the body of the request. + /// + /// The default is an empty body. + /// + /// This takes ownership of the `hyper_body *`, you must not use it or + /// free it after setting it on the request. + fn hyper_request_set_body(req: *mut hyper_request, body: *mut hyper_body) -> hyper_code { + let body = non_null!(Box::from_raw(body) ?= hyper_code::HYPERE_INVALID_ARG); + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + *req.0.body_mut() = body.0; + hyper_code::HYPERE_OK + } +} + +ffi_fn! { + /// Set an informational (1xx) response callback. + /// + /// The callback is called each time hyper receives an informational (1xx) + /// response for this request. + /// + /// The third argument is an opaque user data pointer, which is passed to + /// the callback each time. + /// + /// The callback is passed the `void *` data pointer, and a + /// `hyper_response *` which can be inspected as any other response. The + /// body of the response will always be empty. 
+ /// + /// NOTE: The `hyper_response *` is just borrowed data, and will not + /// be valid after the callback finishes. You must copy any data you wish + /// to persist. + fn hyper_request_on_informational(req: *mut hyper_request, callback: hyper_request_on_informational_callback, data: *mut c_void) -> hyper_code { + let ext = OnInformational { + func: callback, + data: UserDataPointer(data), + }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + req.0.extensions_mut().insert(ext); + hyper_code::HYPERE_OK + } +} + +impl hyper_request { + pub(super) fn finalize_request(&mut self) { + if let Some(headers) = self.0.extensions_mut().remove::() { + *self.0.headers_mut() = headers.headers; + self.0.extensions_mut().insert(headers.orig_casing); + self.0.extensions_mut().insert(headers.orig_order); + } + } +} + +// ===== impl hyper_response ===== + +ffi_fn! { + /// Free an HTTP response after using it. + fn hyper_response_free(resp: *mut hyper_response) { + drop(non_null!(Box::from_raw(resp) ?= ())); + } +} + +ffi_fn! { + /// Get the HTTP-Status code of this response. + /// + /// It will always be within the range of 100-599. + fn hyper_response_status(resp: *const hyper_response) -> u16 { + non_null!(&*resp ?= 0).0.status().as_u16() + } +} + +ffi_fn! { + /// Get a pointer to the reason-phrase of this response. + /// + /// This buffer is not null-terminated. + /// + /// This buffer is owned by the response, and should not be used after + /// the response has been freed. + /// + /// Use `hyper_response_reason_phrase_len()` to get the length of this + /// buffer. + fn hyper_response_reason_phrase(resp: *const hyper_response) -> *const u8 { + non_null!(&*resp ?= std::ptr::null()).reason_phrase().as_ptr() + } ?= std::ptr::null() +} + +ffi_fn! { + /// Get the length of the reason-phrase of this response. + /// + /// Use `hyper_response_reason_phrase()` to get the buffer pointer. 
+ fn hyper_response_reason_phrase_len(resp: *const hyper_response) -> size_t { + non_null!(&*resp ?= 0).reason_phrase().len() + } +} + +ffi_fn! { + /// Get a reference to the full raw headers of this response. + /// + /// You must have enabled `hyper_clientconn_options_headers_raw()`, or this + /// will return NULL. + /// + /// The returned `hyper_buf *` is just a reference, owned by the response. + /// You need to make a copy if you wish to use it after freeing the + /// response. + /// + /// The buffer is not null-terminated, see the `hyper_buf` functions for + /// getting the bytes and length. + fn hyper_response_headers_raw(resp: *const hyper_response) -> *const hyper_buf { + let resp = non_null!(&*resp ?= std::ptr::null()); + match resp.0.extensions().get::() { + Some(raw) => &raw.0, + None => std::ptr::null(), + } + } ?= std::ptr::null() +} + +ffi_fn! { + /// Get the HTTP version used by this response. + /// + /// The returned value could be: + /// + /// - `HYPER_HTTP_VERSION_1_0` + /// - `HYPER_HTTP_VERSION_1_1` + /// - `HYPER_HTTP_VERSION_2` + /// - `HYPER_HTTP_VERSION_NONE` if newer (or older). + fn hyper_response_version(resp: *const hyper_response) -> c_int { + use http::Version; + + match non_null!(&*resp ?= 0).0.version() { + Version::HTTP_10 => super::HYPER_HTTP_VERSION_1_0, + Version::HTTP_11 => super::HYPER_HTTP_VERSION_1_1, + Version::HTTP_2 => super::HYPER_HTTP_VERSION_2, + _ => super::HYPER_HTTP_VERSION_NONE, + } + } +} + +ffi_fn! { + /// Gets a reference to the HTTP headers of this response. + /// + /// This is not an owned reference, so it should not be accessed after the + /// `hyper_response` has been freed. + fn hyper_response_headers(resp: *mut hyper_response) -> *mut hyper_headers { + hyper_headers::get_or_default(unsafe { &mut *resp }.0.extensions_mut()) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Take ownership of the body of this response. + /// + /// It is safe to free the response even after taking ownership of its body. 
+ fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { + let body = std::mem::take(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut()); + Box::into_raw(Box::new(hyper_body(body))) + } ?= std::ptr::null_mut() +} + +impl hyper_response { + pub(super) fn wrap(mut resp: Response) -> hyper_response { + let headers = std::mem::take(resp.headers_mut()); + let orig_casing = resp + .extensions_mut() + .remove::() + .unwrap_or_else(HeaderCaseMap::default); + let orig_order = resp + .extensions_mut() + .remove::() + .unwrap_or_else(OriginalHeaderOrder::default); + resp.extensions_mut().insert(hyper_headers { + headers, + orig_casing, + orig_order, + }); + + hyper_response(resp) + } + + fn reason_phrase(&self) -> &[u8] { + if let Some(reason) = self.0.extensions().get::() { + return reason.as_bytes(); + } + + if let Some(reason) = self.0.status().canonical_reason() { + return reason.as_bytes(); + } + + &[] + } +} + +unsafe impl AsTaskType for hyper_response { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_RESPONSE + } +} + +// ===== impl Headers ===== + +type hyper_headers_foreach_callback = + extern "C" fn(*mut c_void, *const u8, size_t, *const u8, size_t) -> c_int; + +impl hyper_headers { + pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers { + if let None = ext.get_mut::() { + ext.insert(hyper_headers::default()); + } + + ext.get_mut::().unwrap() + } +} + +ffi_fn! { + /// Iterates the headers passing each name and value pair to the callback. + /// + /// The `userdata` pointer is also passed to the callback. + /// + /// The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or + /// `HYPER_ITER_BREAK` to stop. 
+ fn hyper_headers_foreach(headers: *const hyper_headers, func: hyper_headers_foreach_callback, userdata: *mut c_void) { + let headers = non_null!(&*headers ?= ()); + // For each header name/value pair, there may be a value in the casemap + // that corresponds to the HeaderValue. So, we iterator all the keys, + // and for each one, try to pair the originally cased name with the value. + // + // TODO: consider adding http::HeaderMap::entries() iterator + let mut ordered_iter = headers.orig_order.get_in_order().peekable(); + if ordered_iter.peek().is_some() { + for (name, idx) in ordered_iter { + let (name_ptr, name_len) = if let Some(orig_name) = headers.orig_casing.get_all(name).nth(*idx) { + (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) + } else { + ( + name.as_str().as_bytes().as_ptr(), + name.as_str().as_bytes().len(), + ) + }; + + let val_ptr; + let val_len; + if let Some(value) = headers.headers.get_all(name).iter().nth(*idx) { + val_ptr = value.as_bytes().as_ptr(); + val_len = value.as_bytes().len(); + } else { + // Stop iterating, something has gone wrong. + return; + } + + if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { + return; + } + } + } else { + for name in headers.headers.keys() { + let mut names = headers.orig_casing.get_all(name); + + for value in headers.headers.get_all(name) { + let (name_ptr, name_len) = if let Some(orig_name) = names.next() { + (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) + } else { + ( + name.as_str().as_bytes().as_ptr(), + name.as_str().as_bytes().len(), + ) + }; + + let val_ptr = value.as_bytes().as_ptr(); + let val_len = value.as_bytes().len(); + + if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { + return; + } + } + } + } + } +} + +ffi_fn! { + /// Sets the header with the provided name to the provided value. + /// + /// This overwrites any previous value set for the header. 
+ fn hyper_headers_set(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { + let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); + match unsafe { raw_name_value(name, name_len, value, value_len) } { + Ok((name, value, orig_name)) => { + headers.headers.insert(&name, value); + headers.orig_casing.insert(name.clone(), orig_name.clone()); + headers.orig_order.insert(name); + hyper_code::HYPERE_OK + } + Err(code) => code, + } + } +} + +ffi_fn! { + /// Adds the provided value to the list of the provided name. + /// + /// If there were already existing values for the name, this will append the + /// new value to the internal list. + fn hyper_headers_add(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { + let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); + + match unsafe { raw_name_value(name, name_len, value, value_len) } { + Ok((name, value, orig_name)) => { + headers.headers.append(&name, value); + headers.orig_casing.append(&name, orig_name.clone()); + headers.orig_order.append(name); + hyper_code::HYPERE_OK + } + Err(code) => code, + } + } +} + +impl Default for hyper_headers { + fn default() -> Self { + Self { + headers: Default::default(), + orig_casing: HeaderCaseMap::default(), + orig_order: OriginalHeaderOrder::default(), + } + } +} + +unsafe fn raw_name_value( + name: *const u8, + name_len: size_t, + value: *const u8, + value_len: size_t, +) -> Result<(HeaderName, HeaderValue, Bytes), hyper_code> { + let name = std::slice::from_raw_parts(name, name_len); + let orig_name = Bytes::copy_from_slice(name); + let name = match HeaderName::from_bytes(name) { + Ok(name) => name, + Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), + }; + let value = std::slice::from_raw_parts(value, value_len); + let value = match HeaderValue::from_bytes(value) { + Ok(val) => val, + Err(_) => return 
Err(hyper_code::HYPERE_INVALID_ARG), + }; + + Ok((name, value, orig_name)) +} + +// ===== impl OnInformational ===== + +impl OnInformational { + pub(crate) fn call(&mut self, resp: Response) { + let mut resp = hyper_response::wrap(resp); + (self.func)(self.data.0, &mut resp); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_headers_foreach_cases_preserved() { + let mut headers = hyper_headers::default(); + + let name1 = b"Set-CookiE"; + let value1 = b"a=b"; + hyper_headers_add( + &mut headers, + name1.as_ptr(), + name1.len(), + value1.as_ptr(), + value1.len(), + ); + + let name2 = b"SET-COOKIE"; + let value2 = b"c=d"; + hyper_headers_add( + &mut headers, + name2.as_ptr(), + name2.len(), + value2.as_ptr(), + value2.len(), + ); + + let mut vec = Vec::::new(); + hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void); + + assert_eq!(vec, b"Set-CookiE: a=b\r\nSET-COOKIE: c=d\r\n"); + + extern "C" fn concat( + vec: *mut c_void, + name: *const u8, + name_len: usize, + value: *const u8, + value_len: usize, + ) -> c_int { + unsafe { + let vec = &mut *(vec as *mut Vec); + let name = std::slice::from_raw_parts(name, name_len); + let value = std::slice::from_raw_parts(value, value_len); + vec.extend(name); + vec.extend(b": "); + vec.extend(value); + vec.extend(b"\r\n"); + } + HYPER_ITER_CONTINUE + } + } + + #[cfg(all(feature = "http1", feature = "ffi"))] + #[test] + fn test_headers_foreach_order_preserved() { + let mut headers = hyper_headers::default(); + + let name1 = b"Set-CookiE"; + let value1 = b"a=b"; + hyper_headers_add( + &mut headers, + name1.as_ptr(), + name1.len(), + value1.as_ptr(), + value1.len(), + ); + + let name2 = b"Content-Encoding"; + let value2 = b"gzip"; + hyper_headers_add( + &mut headers, + name2.as_ptr(), + name2.len(), + value2.as_ptr(), + value2.len(), + ); + + let name3 = b"SET-COOKIE"; + let value3 = b"c=d"; + hyper_headers_add( + &mut headers, + name3.as_ptr(), + name3.len(), + value3.as_ptr(), + 
value3.len(), + ); + + let mut vec = Vec::::new(); + hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void); + + println!("{}", std::str::from_utf8(&vec).unwrap()); + assert_eq!( + vec, + b"Set-CookiE: a=b\r\nContent-Encoding: gzip\r\nSET-COOKIE: c=d\r\n" + ); + + extern "C" fn concat( + vec: *mut c_void, + name: *const u8, + name_len: usize, + value: *const u8, + value_len: usize, + ) -> c_int { + unsafe { + let vec = &mut *(vec as *mut Vec); + let name = std::slice::from_raw_parts(name, name_len); + let value = std::slice::from_raw_parts(value, value_len); + vec.extend(name); + vec.extend(b": "); + vec.extend(value); + vec.extend(b"\r\n"); + } + HYPER_ITER_CONTINUE + } + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/io.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/io.rs new file mode 100644 index 0000000000..bff666dbcf --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/io.rs @@ -0,0 +1,178 @@ +use std::ffi::c_void; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use libc::size_t; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::task::hyper_context; + +/// Sentinel value to return from a read or write callback that the operation +/// is pending. +pub const HYPER_IO_PENDING: size_t = 0xFFFFFFFF; +/// Sentinel value to return from a read or write callback that the operation +/// has errored. +pub const HYPER_IO_ERROR: size_t = 0xFFFFFFFE; + +type hyper_io_read_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut u8, size_t) -> size_t; +type hyper_io_write_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t; + +/// An IO object used to represent a socket or similar concept. +pub struct hyper_io { + read: hyper_io_read_callback, + write: hyper_io_write_callback, + userdata: *mut c_void, +} + +ffi_fn! { + /// Create a new IO type used to represent a transport. 
+ /// + /// The read and write functions of this transport should be set with + /// `hyper_io_set_read` and `hyper_io_set_write`. + fn hyper_io_new() -> *mut hyper_io { + Box::into_raw(Box::new(hyper_io { + read: read_noop, + write: write_noop, + userdata: std::ptr::null_mut(), + })) + } ?= std::ptr::null_mut() +} + +ffi_fn! { + /// Free an unused `hyper_io *`. + /// + /// This is typically only useful if you aren't going to pass ownership + /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. + fn hyper_io_free(io: *mut hyper_io) { + drop(non_null!(Box::from_raw(io) ?= ())); + } +} + +ffi_fn! { + /// Set the user data pointer for this IO to some value. + /// + /// This value is passed as an argument to the read and write callbacks. + fn hyper_io_set_userdata(io: *mut hyper_io, data: *mut c_void) { + non_null!(&mut *io ?= ()).userdata = data; + } +} + +ffi_fn! { + /// Set the read function for this IO transport. + /// + /// Data that is read from the transport should be put in the `buf` pointer, + /// up to `buf_len` bytes. The number of bytes read should be the return value. + /// + /// It is undefined behavior to try to access the bytes in the `buf` pointer, + /// unless you have already written them yourself. It is also undefined behavior + /// to return that more bytes have been written than actually set on the `buf`. + /// + /// If there is no data currently available, a waker should be claimed from + /// the `ctx` and registered with whatever polling mechanism is used to signal + /// when data is available later on. The return value should be + /// `HYPER_IO_PENDING`. + /// + /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` + /// should be the return value. + fn hyper_io_set_read(io: *mut hyper_io, func: hyper_io_read_callback) { + non_null!(&mut *io ?= ()).read = func; + } +} + +ffi_fn! { + /// Set the write function for this IO transport. 
+ /// + /// Data from the `buf` pointer should be written to the transport, up to + /// `buf_len` bytes. The number of bytes written should be the return value. + /// + /// If no data can currently be written, the `waker` should be cloned and + /// registered with whatever polling mechanism is used to signal when data + /// is available later on. The return value should be `HYPER_IO_PENDING`. + /// + /// Yeet. + /// + /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` + /// should be the return value. + fn hyper_io_set_write(io: *mut hyper_io, func: hyper_io_write_callback) { + non_null!(&mut *io ?= ()).write = func; + } +} + +/// cbindgen:ignore +extern "C" fn read_noop( + _userdata: *mut c_void, + _: *mut hyper_context<'_>, + _buf: *mut u8, + _buf_len: size_t, +) -> size_t { + 0 +} + +/// cbindgen:ignore +extern "C" fn write_noop( + _userdata: *mut c_void, + _: *mut hyper_context<'_>, + _buf: *const u8, + _buf_len: size_t, +) -> size_t { + 0 +} + +impl AsyncRead for hyper_io { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8; + let buf_len = buf.remaining(); + + match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) { + HYPER_IO_PENDING => Poll::Pending, + HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new( + std::io::ErrorKind::Other, + "io error", + ))), + ok => { + // We have to trust that the user's read callback actually + // filled in that many bytes... 
:( + unsafe { buf.assume_init(ok) }; + buf.advance(ok); + Poll::Ready(Ok(())) + } + } + } +} + +impl AsyncWrite for hyper_io { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let buf_ptr = buf.as_ptr(); + let buf_len = buf.len(); + + match (self.write)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) { + HYPER_IO_PENDING => Poll::Pending, + HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new( + std::io::ErrorKind::Other, + "io error", + ))), + ok => Poll::Ready(Ok(ok)), + } + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} + +unsafe impl Send for hyper_io {} +unsafe impl Sync for hyper_io {} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/macros.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/macros.rs new file mode 100644 index 0000000000..022711baaa --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/macros.rs @@ -0,0 +1,53 @@ +macro_rules! 
ffi_fn { + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block ?= $default:expr) => { + $(#[$doc])* + #[no_mangle] + pub extern fn $name($($arg: $arg_ty),*) -> $ret { + use std::panic::{self, AssertUnwindSafe}; + + match panic::catch_unwind(AssertUnwindSafe(move || $body)) { + Ok(v) => v, + Err(_) => { + $default + } + } + } + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> $ret $body ?= { + eprintln!("panic unwind caught, aborting"); + std::process::abort() + }); + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block ?= $default:expr) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body ?= $default); + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body); + }; +} + +macro_rules! non_null { + ($ptr:ident, $eval:expr, $err:expr) => {{ + debug_assert!(!$ptr.is_null(), "{:?} must not be null", stringify!($ptr)); + if $ptr.is_null() { + return $err; + } + unsafe { $eval } + }}; + (&*$ptr:ident ?= $err:expr) => {{ + non_null!($ptr, &*$ptr, $err) + }}; + (&mut *$ptr:ident ?= $err:expr) => {{ + non_null!($ptr, &mut *$ptr, $err) + }}; + (Box::from_raw($ptr:ident) ?= $err:expr) => {{ + non_null!($ptr, Box::from_raw($ptr), $err) + }}; + (Arc::from_raw($ptr:ident) ?= $err:expr) => {{ + non_null!($ptr, Arc::from_raw($ptr), $err) + }}; +} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/mod.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/mod.rs new file mode 100644 index 0000000000..fd67a880a6 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/mod.rs @@ -0,0 +1,94 @@ +// We have a lot of c-types in here, stop warning about their names! 
+#![allow(non_camel_case_types)] +// fmt::Debug isn't helpful on FFI types +#![allow(missing_debug_implementations)] +// unreachable_pub warns `#[no_mangle] pub extern fn` in private mod. +#![allow(unreachable_pub)] + +//! # hyper C API +//! +//! This part of the documentation describes the C API for hyper. That is, how +//! to *use* the hyper library in C code. This is **not** a regular Rust +//! module, and thus it is not accessible in Rust. +//! +//! ## Unstable +//! +//! The C API of hyper is currently **unstable**, which means it's not part of +//! the semver contract as the rest of the Rust API is. Because of that, it's +//! only accessible if `--cfg hyper_unstable_ffi` is passed to `rustc` when +//! compiling. The easiest way to do that is setting the `RUSTFLAGS` +//! environment variable. +//! +//! ## Building +//! +//! The C API is part of the Rust library, but isn't compiled by default. Using +//! `cargo`, it can be compiled with the following command: +//! +//! ```notrust +//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo build --features client,http1,http2,ffi +//! ``` + +// We may eventually allow the FFI to be enabled without `client` or `http1`, +// that is why we don't auto enable them as `ffi = ["client", "http1"]` in +// the `Cargo.toml`. +// +// But for now, give a clear message that this compile error is expected. 
+#[cfg(not(all(feature = "client", feature = "http1")))] +compile_error!("The `ffi` feature currently requires the `client` and `http1` features."); + +#[cfg(not(hyper_unstable_ffi))] +compile_error!( + "\ + The `ffi` feature is unstable, and requires the \ + `RUSTFLAGS='--cfg hyper_unstable_ffi'` environment variable to be set.\ +" +); + +#[macro_use] +mod macros; + +mod body; +mod client; +mod error; +mod http_types; +mod io; +mod task; + +pub use self::body::*; +pub use self::client::*; +pub use self::error::*; +pub use self::http_types::*; +pub use self::io::*; +pub use self::task::*; + +/// Return in iter functions to continue iterating. +pub const HYPER_ITER_CONTINUE: libc::c_int = 0; +/// Return in iter functions to stop iterating. +#[allow(unused)] +pub const HYPER_ITER_BREAK: libc::c_int = 1; + +/// An HTTP Version that is unspecified. +pub const HYPER_HTTP_VERSION_NONE: libc::c_int = 0; +/// The HTTP/1.0 version. +pub const HYPER_HTTP_VERSION_1_0: libc::c_int = 10; +/// The HTTP/1.1 version. +pub const HYPER_HTTP_VERSION_1_1: libc::c_int = 11; +/// The HTTP/2 version. +pub const HYPER_HTTP_VERSION_2: libc::c_int = 20; + +struct UserDataPointer(*mut std::ffi::c_void); + +// We don't actually know anything about this pointer, it's up to the user +// to do the right thing. +unsafe impl Send for UserDataPointer {} +unsafe impl Sync for UserDataPointer {} + +/// cbindgen:ignore +static VERSION_CSTR: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + +ffi_fn! { + /// Returns a static ASCII (null terminated) string of the hyper version. 
+ fn hyper_version() -> *const libc::c_char { + VERSION_CSTR.as_ptr() as _ + } ?= std::ptr::null() +} diff --git a/.cargo-vendor/hyper-0.14.30/src/ffi/task.rs b/.cargo-vendor/hyper-0.14.30/src/ffi/task.rs new file mode 100644 index 0000000000..ef54fe408f --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/ffi/task.rs @@ -0,0 +1,411 @@ +use std::ffi::c_void; +use std::future::Future; +use std::pin::Pin; +use std::ptr; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, Weak, +}; +use std::task::{Context, Poll}; + +use futures_util::stream::{FuturesUnordered, Stream}; +use libc::c_int; + +use super::error::hyper_code; +use super::UserDataPointer; + +type BoxFuture = Pin + Send>>; +type BoxAny = Box; + +/// Return in a poll function to indicate it was ready. +pub const HYPER_POLL_READY: c_int = 0; +/// Return in a poll function to indicate it is still pending. +/// +/// The passed in `hyper_waker` should be registered to wake up the task at +/// some later point. +pub const HYPER_POLL_PENDING: c_int = 1; +/// Return in a poll function indicate an error. +pub const HYPER_POLL_ERROR: c_int = 3; + +/// A task executor for `hyper_task`s. +pub struct hyper_executor { + /// The executor of all task futures. + /// + /// There should never be contention on the mutex, as it is only locked + /// to drive the futures. However, we cannot guarantee proper usage from + /// `hyper_executor_poll()`, which in C could potentially be called inside + /// one of the stored futures. The mutex isn't re-entrant, so doing so + /// would result in a deadlock, but that's better than data corruption. + driver: Mutex>, + + /// The queue of futures that need to be pushed into the `driver`. + /// + /// This is has a separate mutex since `spawn` could be called from inside + /// a future, which would mean the driver's mutex is already locked. + spawn_queue: Mutex>, + + /// This is used to track when a future calls `wake` while we are within + /// `hyper_executor::poll_next`. 
+ is_woken: Arc, +} + +#[derive(Clone)] +pub(crate) struct WeakExec(Weak); + +struct ExecWaker(AtomicBool); + +/// An async task. +pub struct hyper_task { + future: BoxFuture, + output: Option, + userdata: UserDataPointer, +} + +struct TaskFuture { + task: Option>, +} + +/// An async context for a task that contains the related waker. +pub struct hyper_context<'a>(Context<'a>); + +/// A waker that is saved and used to waken a pending task. +pub struct hyper_waker { + waker: std::task::Waker, +} + +/// A descriptor for what type a `hyper_task` value is. +#[repr(C)] +pub enum hyper_task_return_type { + /// The value of this task is null (does not imply an error). + HYPER_TASK_EMPTY, + /// The value of this task is `hyper_error *`. + HYPER_TASK_ERROR, + /// The value of this task is `hyper_clientconn *`. + HYPER_TASK_CLIENTCONN, + /// The value of this task is `hyper_response *`. + HYPER_TASK_RESPONSE, + /// The value of this task is `hyper_buf *`. + HYPER_TASK_BUF, +} + +pub(crate) unsafe trait AsTaskType { + fn as_task_type(&self) -> hyper_task_return_type; +} + +pub(crate) trait IntoDynTaskType { + fn into_dyn_task_type(self) -> BoxAny; +} + +// ===== impl hyper_executor ===== + +impl hyper_executor { + fn new() -> Arc { + Arc::new(hyper_executor { + driver: Mutex::new(FuturesUnordered::new()), + spawn_queue: Mutex::new(Vec::new()), + is_woken: Arc::new(ExecWaker(AtomicBool::new(false))), + }) + } + + pub(crate) fn downgrade(exec: &Arc) -> WeakExec { + WeakExec(Arc::downgrade(exec)) + } + + fn spawn(&self, task: Box) { + self.spawn_queue + .lock() + .unwrap() + .push(TaskFuture { task: Some(task) }); + } + + fn poll_next(&self) -> Option> { + // Drain the queue first. 
+ self.drain_queue(); + + let waker = futures_util::task::waker_ref(&self.is_woken); + let mut cx = Context::from_waker(&waker); + + loop { + match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) { + Poll::Ready(val) => return val, + Poll::Pending => { + // Check if any of the pending tasks tried to spawn + // some new tasks. If so, drain into the driver and loop. + if self.drain_queue() { + continue; + } + + // If the driver called `wake` while we were polling, + // we should poll again immediately! + if self.is_woken.0.swap(false, Ordering::SeqCst) { + continue; + } + + return None; + } + } + } + } + + fn drain_queue(&self) -> bool { + let mut queue = self.spawn_queue.lock().unwrap(); + if queue.is_empty() { + return false; + } + + let driver = self.driver.lock().unwrap(); + + for task in queue.drain(..) { + driver.push(task); + } + + true + } +} + +impl futures_util::task::ArcWake for ExecWaker { + fn wake_by_ref(me: &Arc) { + me.0.store(true, Ordering::SeqCst); + } +} + +// ===== impl WeakExec ===== + +impl WeakExec { + pub(crate) fn new() -> Self { + WeakExec(Weak::new()) + } +} + +impl crate::rt::Executor> for WeakExec { + fn execute(&self, fut: BoxFuture<()>) { + if let Some(exec) = self.0.upgrade() { + exec.spawn(hyper_task::boxed(fut)); + } + } +} + +ffi_fn! { + /// Creates a new task executor. + fn hyper_executor_new() -> *const hyper_executor { + Arc::into_raw(hyper_executor::new()) + } ?= ptr::null() +} + +ffi_fn! { + /// Frees an executor and any incomplete tasks still part of it. + fn hyper_executor_free(exec: *const hyper_executor) { + drop(non_null!(Arc::from_raw(exec) ?= ())); + } +} + +ffi_fn! { + /// Push a task onto the executor. + /// + /// The executor takes ownership of the task, it should not be accessed + /// again unless returned back to the user with `hyper_executor_poll`. 
+ fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code { + let exec = non_null!(&*exec ?= hyper_code::HYPERE_INVALID_ARG); + let task = non_null!(Box::from_raw(task) ?= hyper_code::HYPERE_INVALID_ARG); + exec.spawn(task); + hyper_code::HYPERE_OK + } +} + +ffi_fn! { + /// Polls the executor, trying to make progress on any tasks that have notified + /// that they are ready again. + /// + /// If ready, returns a task from the executor that has completed. + /// + /// If there are no ready tasks, this returns `NULL`. + fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task { + let exec = non_null!(&*exec ?= ptr::null_mut()); + match exec.poll_next() { + Some(task) => Box::into_raw(task), + None => ptr::null_mut(), + } + } ?= ptr::null_mut() +} + +// ===== impl hyper_task ===== + +impl hyper_task { + pub(crate) fn boxed(fut: F) -> Box + where + F: Future + Send + 'static, + F::Output: IntoDynTaskType + Send + Sync + 'static, + { + Box::new(hyper_task { + future: Box::pin(async move { fut.await.into_dyn_task_type() }), + output: None, + userdata: UserDataPointer(ptr::null_mut()), + }) + } + + fn output_type(&self) -> hyper_task_return_type { + match self.output { + None => hyper_task_return_type::HYPER_TASK_EMPTY, + Some(ref val) => val.as_task_type(), + } + } +} + +impl Future for TaskFuture { + type Output = Box; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) { + Poll::Ready(val) => { + let mut task = self.task.take().unwrap(); + task.output = Some(val); + Poll::Ready(task) + } + Poll::Pending => Poll::Pending, + } + } +} + +ffi_fn! { + /// Free a task. + fn hyper_task_free(task: *mut hyper_task) { + drop(non_null!(Box::from_raw(task) ?= ())); + } +} + +ffi_fn! { + /// Takes the output value of this task. + /// + /// This must only be called once polling the task on an executor has finished + /// this task. 
+ /// + /// Use `hyper_task_type` to determine the type of the `void *` return value. + fn hyper_task_value(task: *mut hyper_task) -> *mut c_void { + let task = non_null!(&mut *task ?= ptr::null_mut()); + + if let Some(val) = task.output.take() { + let p = Box::into_raw(val) as *mut c_void; + // protect from returning fake pointers to empty types + if p == std::ptr::NonNull::::dangling().as_ptr() { + ptr::null_mut() + } else { + p + } + } else { + ptr::null_mut() + } + } ?= ptr::null_mut() +} + +ffi_fn! { + /// Query the return type of this task. + fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type { + // instead of blowing up spectacularly, just say this null task + // doesn't have a value to retrieve. + non_null!(&*task ?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type() + } +} + +ffi_fn! { + /// Set a user data pointer to be associated with this task. + /// + /// This value will be passed to task callbacks, and can be checked later + /// with `hyper_task_userdata`. + fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) { + if task.is_null() { + return; + } + + unsafe { (*task).userdata = UserDataPointer(userdata) }; + } +} + +ffi_fn! { + /// Retrieve the userdata that has been set via `hyper_task_set_userdata`. 
+ fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void { + non_null!(&*task ?= ptr::null_mut()).userdata.0 + } ?= ptr::null_mut() +} + +// ===== impl AsTaskType ===== + +unsafe impl AsTaskType for () { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_EMPTY + } +} + +unsafe impl AsTaskType for crate::Error { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_ERROR + } +} + +impl IntoDynTaskType for T +where + T: AsTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + Box::new(self) + } +} + +impl IntoDynTaskType for crate::Result +where + T: IntoDynTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + match self { + Ok(val) => val.into_dyn_task_type(), + Err(err) => Box::new(err), + } + } +} + +impl IntoDynTaskType for Option +where + T: IntoDynTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + match self { + Some(val) => val.into_dyn_task_type(), + None => ().into_dyn_task_type(), + } + } +} + +// ===== impl hyper_context ===== + +impl hyper_context<'_> { + pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> { + // A struct with only one field has the same layout as that field. + unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) } + } +} + +ffi_fn! { + /// Copies a waker out of the task context. + fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { + let waker = non_null!(&mut *cx ?= ptr::null_mut()).0.waker().clone(); + Box::into_raw(Box::new(hyper_waker { waker })) + } ?= ptr::null_mut() +} + +// ===== impl hyper_waker ===== + +ffi_fn! { + /// Free a waker that hasn't been woken. + fn hyper_waker_free(waker: *mut hyper_waker) { + drop(non_null!(Box::from_raw(waker) ?= ())); + } +} + +ffi_fn! { + /// Wake up the task associated with a waker. + /// + /// NOTE: This consumes the waker. 
You should not use or free the waker afterwards. + fn hyper_waker_wake(waker: *mut hyper_waker) { + let waker = non_null!(Box::from_raw(waker) ?= ()); + waker.waker.wake(); + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/headers.rs b/.cargo-vendor/hyper-0.14.30/src/headers.rs new file mode 100644 index 0000000000..2e5e5db0f2 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/headers.rs @@ -0,0 +1,154 @@ +#[cfg(feature = "http1")] +use bytes::BytesMut; +use http::header::CONTENT_LENGTH; +use http::header::{HeaderValue, ValueIter}; +use http::HeaderMap; +#[cfg(all(feature = "http2", feature = "client"))] +use http::Method; + +#[cfg(feature = "http1")] +pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool { + connection_has(value, "keep-alive") +} + +#[cfg(feature = "http1")] +pub(super) fn connection_close(value: &HeaderValue) -> bool { + connection_has(value, "close") +} + +#[cfg(feature = "http1")] +fn connection_has(value: &HeaderValue, needle: &str) -> bool { + if let Ok(s) = value.to_str() { + for val in s.split(',') { + if val.trim().eq_ignore_ascii_case(needle) { + return true; + } + } + } + false +} + +#[cfg(all(feature = "http1", feature = "server"))] +pub(super) fn content_length_parse(value: &HeaderValue) -> Option { + from_digits(value.as_bytes()) +} + +pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option { + content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter()) +} + +pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { + // If multiple Content-Length headers were sent, everything can still + // be alright if they all contain the same value, and all parse + // correctly. If not, then it's an error. 
+ + let mut content_length: Option = None; + for h in values { + if let Ok(line) = h.to_str() { + for v in line.split(',') { + if let Some(n) = from_digits(v.trim().as_bytes()) { + if content_length.is_none() { + content_length = Some(n) + } else if content_length != Some(n) { + return None; + } + } else { + return None; + } + } + } else { + return None; + } + } + + content_length +} + +fn from_digits(bytes: &[u8]) -> Option { + // cannot use FromStr for u64, since it allows a signed prefix + let mut result = 0u64; + const RADIX: u64 = 10; + + if bytes.is_empty() { + return None; + } + + for &b in bytes { + // can't use char::to_digit, since we haven't verified these bytes + // are utf-8. + match b { + b'0'..=b'9' => { + result = result.checked_mul(RADIX)?; + result = result.checked_add((b - b'0') as u64)?; + } + _ => { + // not a DIGIT, get outta here! + return None; + } + } + } + + Some(result) +} + +#[cfg(all(feature = "http2", feature = "client"))] +pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { + match *method { + Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false, + _ => true, + } +} + +#[cfg(feature = "http2")] +pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { + headers + .entry(CONTENT_LENGTH) + .or_insert_with(|| HeaderValue::from(len)); +} + +#[cfg(feature = "http1")] +pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { + is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter()) +} + +#[cfg(feature = "http1")] +pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { + // chunked must always be the last encoding, according to spec + if let Some(line) = encodings.next_back() { + return is_chunked_(line); + } + + false +} + +#[cfg(feature = "http1")] +pub(super) fn is_chunked_(value: &HeaderValue) -> bool { + // chunked must always be the last encoding, according to spec + if let Ok(s) = value.to_str() { + if let 
Some(encoding) = s.rsplit(',').next() { + return encoding.trim().eq_ignore_ascii_case("chunked"); + } + } + + false +} + +#[cfg(feature = "http1")] +pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) { + const CHUNKED: &str = "chunked"; + + if let Some(line) = entry.iter_mut().next_back() { + // + 2 for ", " + let new_cap = line.as_bytes().len() + CHUNKED.len() + 2; + let mut buf = BytesMut::with_capacity(new_cap); + buf.extend_from_slice(line.as_bytes()); + buf.extend_from_slice(b", "); + buf.extend_from_slice(CHUNKED.as_bytes()); + + *line = HeaderValue::from_maybe_shared(buf.freeze()) + .expect("original header value plus ascii is valid"); + return; + } + + entry.insert(HeaderValue::from_static(CHUNKED)); +} diff --git a/.cargo-vendor/hyper-0.14.30/src/lib.rs b/.cargo-vendor/hyper-0.14.30/src/lib.rs new file mode 100644 index 0000000000..064a18ec30 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/lib.rs @@ -0,0 +1,112 @@ +#![deny(missing_docs)] +#![deny(missing_debug_implementations)] +#![cfg_attr(test, deny(rust_2018_idioms))] +#![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))] +// 0.14.x is not actively developed, new warnings just get in the way. +//#![cfg_attr(all(test, feature = "full", not(feature = "nightly")), deny(warnings))] +#![cfg_attr(all(test, feature = "nightly"), feature(test))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +//! # hyper +//! +//! hyper is a **fast** and **correct** HTTP implementation written in and for Rust. +//! +//! ## Features +//! +//! - HTTP/1 and HTTP/2 +//! - Asynchronous design +//! - Leading in performance +//! - Tested and **correct** +//! - Extensive production use +//! - [Client](client/index.html) and [Server](server/index.html) APIs +//! +//! If just starting out, **check out the [Guides](https://hyper.rs/guides) +//! first.** +//! +//! ## "Low-level" +//! +//! hyper is a lower-level HTTP library, meant to be a building block +//! for libraries and applications. +//! 
+//! If looking for just a convenient HTTP client, consider the +//! [reqwest](https://crates.io/crates/reqwest) crate. +//! +//! # Optional Features +//! +//! hyper uses a set of [feature flags] to reduce the amount of compiled code. +//! It is possible to just enable certain features over others. By default, +//! hyper does not enable any features but allows one to enable a subset for +//! their use case. Below is a list of the available feature flags. You may +//! also notice above each function, struct and trait there is listed one or +//! more feature flags that are required for that item to be used. +//! +//! If you are new to hyper it is possible to enable the `full` feature flag +//! which will enable all public APIs. Beware though that this will pull in +//! many extra dependencies that you may not need. +//! +//! The following optional features are available: +//! +//! - `http1`: Enables HTTP/1 support. +//! - `http2`: Enables HTTP/2 support. +//! - `client`: Enables the HTTP `client`. +//! - `server`: Enables the HTTP `server`. +//! - `runtime`: Enables convenient integration with `tokio`, providing +//! connectors and acceptors for TCP, and a default executor. +//! - `tcp`: Enables convenient implementations over TCP (using tokio). +//! - `stream`: Provides `futures::Stream` capabilities. +//! - `backports`: 1.0 functionality backported to 0.14. +//! - `deprecated`: opt-in to deprecation warnings to prepare you for 1.0. +//! +//! 
[feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section + +#[doc(hidden)] +pub use http; + +#[cfg(all(test, feature = "nightly"))] +extern crate test; + +pub use crate::http::{header, Method, Request, Response, StatusCode, Uri, Version}; + +#[doc(no_inline)] +pub use crate::http::HeaderMap; + +pub use crate::body::Body; +pub use crate::error::{Error, Result}; + +#[macro_use] +mod cfg; +#[macro_use] +mod common; +pub mod body; +mod error; +pub mod ext; +#[cfg(test)] +mod mock; +pub mod rt; +pub mod service; +pub mod upgrade; + +#[cfg(feature = "ffi")] +pub mod ffi; + +cfg_proto! { + mod headers; + mod proto; +} + +cfg_feature! { + #![feature = "client"] + + pub mod client; + #[cfg(any(feature = "http1", feature = "http2"))] + #[doc(no_inline)] + pub use crate::client::Client; +} + +cfg_feature! { + #![feature = "server"] + + pub mod server; + #[doc(no_inline)] + pub use crate::server::Server; +} diff --git a/.cargo-vendor/hyper-0.14.30/src/mock.rs b/.cargo-vendor/hyper-0.14.30/src/mock.rs new file mode 100644 index 0000000000..1dd57de319 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/mock.rs @@ -0,0 +1,235 @@ +// FIXME: re-implement tests with `async/await` +/* +#[cfg(feature = "runtime")] +use std::collections::HashMap; +use std::cmp; +use std::io::{self, Read, Write}; +#[cfg(feature = "runtime")] +use std::sync::{Arc, Mutex}; + +use bytes::Buf; +use futures::{Async, Poll}; +#[cfg(feature = "runtime")] +use futures::Future; +use futures::task::{self, Task}; +use tokio_io::{AsyncRead, AsyncWrite}; + +#[cfg(feature = "runtime")] +use crate::client::connect::{Connect, Connected, Destination}; + + + +#[cfg(feature = "runtime")] +pub struct Duplex { + inner: Arc>, +} + +#[cfg(feature = "runtime")] +struct DuplexInner { + handle_read_task: Option, + read: AsyncIo, + write: AsyncIo, +} + +#[cfg(feature = "runtime")] +impl Duplex { + pub(crate) fn channel() -> (Duplex, DuplexHandle) { + let mut inner = DuplexInner { + 
handle_read_task: None, + read: AsyncIo::new_buf(Vec::new(), 0), + write: AsyncIo::new_buf(Vec::new(), std::usize::MAX), + }; + + inner.read.park_tasks(true); + inner.write.park_tasks(true); + + let inner = Arc::new(Mutex::new(inner)); + + let duplex = Duplex { + inner: inner.clone(), + }; + let handle = DuplexHandle { + inner: inner, + }; + + (duplex, handle) + } +} + +#[cfg(feature = "runtime")] +impl Read for Duplex { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.lock().unwrap().read.read(buf) + } +} + +#[cfg(feature = "runtime")] +impl Write for Duplex { + fn write(&mut self, buf: &[u8]) -> io::Result { + let mut inner = self.inner.lock().unwrap(); + let ret = inner.write.write(buf); + if let Some(task) = inner.handle_read_task.take() { + trace!("waking DuplexHandle read"); + task.notify(); + } + ret + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.lock().unwrap().write.flush() + } +} + +#[cfg(feature = "runtime")] +impl AsyncRead for Duplex { +} + +#[cfg(feature = "runtime")] +impl AsyncWrite for Duplex { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } + + fn write_buf(&mut self, buf: &mut B) -> Poll { + let mut inner = self.inner.lock().unwrap(); + if let Some(task) = inner.handle_read_task.take() { + task.notify(); + } + inner.write.write_buf(buf) + } +} + +#[cfg(feature = "runtime")] +pub struct DuplexHandle { + inner: Arc>, +} + +#[cfg(feature = "runtime")] +impl DuplexHandle { + pub fn read(&self, buf: &mut [u8]) -> Poll { + let mut inner = self.inner.lock().unwrap(); + assert!(buf.len() >= inner.write.inner.len()); + if inner.write.inner.is_empty() { + trace!("DuplexHandle read parking"); + inner.handle_read_task = Some(task::current()); + return Ok(Async::NotReady); + } + inner.write.read(buf).map(Async::Ready) + } + + pub fn write(&self, bytes: &[u8]) -> Poll { + let mut inner = self.inner.lock().unwrap(); + assert_eq!(inner.read.inner.pos, 0); + assert_eq!(inner.read.inner.vec.len(), 0, "write but 
read isn't empty"); + inner + .read + .inner + .vec + .extend(bytes); + inner.read.block_in(bytes.len()); + Ok(Async::Ready(bytes.len())) + } +} + +#[cfg(feature = "runtime")] +impl Drop for DuplexHandle { + fn drop(&mut self) { + trace!("mock duplex handle drop"); + if !::std::thread::panicking() { + let mut inner = self.inner.lock().unwrap(); + inner.read.close(); + inner.write.close(); + } + } +} + +#[cfg(feature = "runtime")] +type BoxedConnectFut = Box + Send>; + +#[cfg(feature = "runtime")] +#[derive(Clone)] +pub struct MockConnector { + mocks: Arc>, +} + +#[cfg(feature = "runtime")] +struct MockedConnections(HashMap>); + +#[cfg(feature = "runtime")] +impl MockConnector { + pub fn new() -> MockConnector { + MockConnector { + mocks: Arc::new(Mutex::new(MockedConnections(HashMap::new()))), + } + } + + pub fn mock(&mut self, key: &str) -> DuplexHandle { + use futures::future; + self.mock_fut(key, future::ok::<_, ()>(())) + } + + pub fn mock_fut(&mut self, key: &str, fut: F) -> DuplexHandle + where + F: Future + Send + 'static, + { + self.mock_opts(key, Connected::new(), fut) + } + + pub fn mock_opts(&mut self, key: &str, connected: Connected, fut: F) -> DuplexHandle + where + F: Future + Send + 'static, + { + let key = key.to_owned(); + + let (duplex, handle) = Duplex::channel(); + + let fut = Box::new(fut.then(move |_| { + trace!("MockConnector mocked fut ready"); + Ok((duplex, connected)) + })); + self.mocks.lock().unwrap().0.entry(key) + .or_insert(Vec::new()) + .push(fut); + + handle + } +} + +#[cfg(feature = "runtime")] +impl Connect for MockConnector { + type Transport = Duplex; + type Error = io::Error; + type Future = BoxedConnectFut; + + fn connect(&self, dst: Destination) -> Self::Future { + trace!("mock connect: {:?}", dst); + let key = format!("{}://{}{}", dst.scheme(), dst.host(), if let Some(port) = dst.port() { + format!(":{}", port) + } else { + "".to_owned() + }); + let mut mocks = self.mocks.lock().unwrap(); + let mocks = mocks.0.get_mut(&key) 
+ .expect(&format!("unknown mocks uri: {}", key)); + assert!(!mocks.is_empty(), "no additional mocks for {}", key); + mocks.remove(0) + } +} + + +#[cfg(feature = "runtime")] +impl Drop for MockedConnections { + fn drop(&mut self) { + if !::std::thread::panicking() { + for (key, mocks) in self.0.iter() { + assert_eq!( + mocks.len(), + 0, + "not all mocked connects for {:?} were used", + key, + ); + } + } + } +} +*/ diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h1/conn.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h1/conn.rs new file mode 100644 index 0000000000..5ab72f264e --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h1/conn.rs @@ -0,0 +1,1424 @@ +use std::fmt; +use std::io; +use std::marker::PhantomData; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +#[cfg(all(feature = "server", feature = "runtime"))] +use std::time::Duration; + +use bytes::{Buf, Bytes}; +use http::header::{HeaderValue, CONNECTION}; +use http::{HeaderMap, Method, Version}; +use httparse::ParserConfig; +use tokio::io::{AsyncRead, AsyncWrite}; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Sleep; +use tracing::{debug, error, trace}; + +use super::io::Buffered; +use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants}; +use crate::body::DecodedLength; +use crate::headers::connection_keep_alive; +use crate::proto::{BodyLength, MessageHead}; + +const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + +/// This handles a connection, which will have been established over an +/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple +/// `Transaction`s over HTTP. +/// +/// The connection will determine when a message begins and ends as well as +/// determine if this connection can be kept alive after the message, +/// or if it is complete. 
+pub(crate) struct Conn { + io: Buffered>, + state: State, + _marker: PhantomData, +} + +impl Conn +where + I: AsyncRead + AsyncWrite + Unpin, + B: Buf, + T: Http1Transaction, +{ + pub(crate) fn new(io: I) -> Conn { + Conn { + io: Buffered::new(io), + state: State { + allow_half_close: false, + cached_headers: None, + error: None, + keep_alive: KA::Busy, + method: None, + h1_parser_config: ParserConfig::default(), + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: None, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: None, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + title_case_headers: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: None, + #[cfg(feature = "ffi")] + raw_headers: false, + notify_read: false, + reading: Reading::Init, + writing: Writing::Init, + upgrade: None, + // We assume a modern world where the remote speaks HTTP/1.1. + // If they tell us otherwise, we'll downgrade in `read_head`. 
+ version: Version::HTTP_11, + }, + _marker: PhantomData, + } + } + + #[cfg(feature = "server")] + pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { + self.io.set_flush_pipeline(enabled); + } + + pub(crate) fn set_write_strategy_queue(&mut self) { + self.io.set_write_strategy_queue(); + } + + pub(crate) fn set_max_buf_size(&mut self, max: usize) { + self.io.set_max_buf_size(max); + } + + #[cfg(feature = "client")] + pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { + self.io.set_read_buf_exact_size(sz); + } + + pub(crate) fn set_write_strategy_flatten(&mut self) { + self.io.set_write_strategy_flatten(); + } + + #[cfg(feature = "client")] + pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) { + self.state.h1_parser_config = parser_config; + } + + pub(crate) fn set_title_case_headers(&mut self) { + self.state.title_case_headers = true; + } + + pub(crate) fn set_preserve_header_case(&mut self) { + self.state.preserve_header_case = true; + } + + #[cfg(feature = "ffi")] + pub(crate) fn set_preserve_header_order(&mut self) { + self.state.preserve_header_order = true; + } + + #[cfg(feature = "client")] + pub(crate) fn set_h09_responses(&mut self) { + self.state.h09_responses = true; + } + + #[cfg(all(feature = "server", feature = "runtime"))] + pub(crate) fn set_http1_header_read_timeout(&mut self, val: Duration) { + self.state.h1_header_read_timeout = Some(val); + } + + #[cfg(feature = "server")] + pub(crate) fn set_allow_half_close(&mut self) { + self.state.allow_half_close = true; + } + + #[cfg(feature = "ffi")] + pub(crate) fn set_raw_headers(&mut self, enabled: bool) { + self.state.raw_headers = enabled; + } + + pub(crate) fn into_inner(self) -> (I, Bytes) { + self.io.into_inner() + } + + pub(crate) fn pending_upgrade(&mut self) -> Option { + self.state.upgrade.take() + } + + pub(crate) fn is_read_closed(&self) -> bool { + self.state.is_read_closed() + } + + pub(crate) fn is_write_closed(&self) -> bool { + 
self.state.is_write_closed() + } + + pub(crate) fn can_read_head(&self) -> bool { + if !matches!(self.state.reading, Reading::Init) { + return false; + } + + if T::should_read_first() { + return true; + } + + !matches!(self.state.writing, Writing::Init) + } + + pub(crate) fn can_read_body(&self) -> bool { + match self.state.reading { + Reading::Body(..) | Reading::Continue(..) => true, + _ => false, + } + } + + fn should_error_on_eof(&self) -> bool { + // If we're idle, it's probably just the connection closing gracefully. + T::should_error_on_parse_eof() && !self.state.is_idle() + } + + fn has_h2_prefix(&self) -> bool { + let read_buf = self.io.read_buf(); + read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE + } + + pub(super) fn poll_read_head( + &mut self, + cx: &mut Context<'_>, + ) -> Poll, DecodedLength, Wants)>>> { + debug_assert!(self.can_read_head()); + trace!("Conn::read_head"); + + let msg = match ready!(self.io.parse::( + cx, + ParseContext { + cached_headers: &mut self.state.cached_headers, + req_method: &mut self.state.method, + h1_parser_config: self.state.h1_parser_config.clone(), + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: self.state.h1_header_read_timeout, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running, + preserve_header_case: self.state.preserve_header_case, + #[cfg(feature = "ffi")] + preserve_header_order: self.state.preserve_header_order, + h09_responses: self.state.h09_responses, + #[cfg(feature = "ffi")] + on_informational: &mut self.state.on_informational, + #[cfg(feature = "ffi")] + raw_headers: self.state.raw_headers, + } + )) { + Ok(msg) => msg, + Err(e) => return self.on_read_head_error(e), + }; + + // Note: don't deconstruct `msg` into local variables, it appears + // the optimizer 
doesn't remove the extra copies. + + debug!("incoming body is {}", msg.decode); + + // Prevent accepting HTTP/0.9 responses after the initial one, if any. + self.state.h09_responses = false; + + // Drop any OnInformational callbacks, we're done there! + #[cfg(feature = "ffi")] + { + self.state.on_informational = None; + } + + self.state.busy(); + self.state.keep_alive &= msg.keep_alive; + self.state.version = msg.head.version; + + let mut wants = if msg.wants_upgrade { + Wants::UPGRADE + } else { + Wants::EMPTY + }; + + if msg.decode == DecodedLength::ZERO { + if msg.expect_continue { + debug!("ignoring expect-continue since body is empty"); + } + self.state.reading = Reading::KeepAlive; + if !T::should_read_first() { + self.try_keep_alive(cx); + } + } else if msg.expect_continue { + self.state.reading = Reading::Continue(Decoder::new(msg.decode)); + wants = wants.add(Wants::EXPECT); + } else { + self.state.reading = Reading::Body(Decoder::new(msg.decode)); + } + + Poll::Ready(Some(Ok((msg.head, msg.decode, wants)))) + } + + fn on_read_head_error(&mut self, e: crate::Error) -> Poll>> { + // If we are currently waiting on a message, then an empty + // message should be reported as an error. If not, it is just + // the connection closing gracefully. + let must_error = self.should_error_on_eof(); + self.close_read(); + self.io.consume_leading_lines(); + let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty(); + if was_mid_parse || must_error { + // We check if the buf contains the h2 Preface + debug!( + "parse error ({}) with {} bytes", + e, + self.io.read_buf().len() + ); + match self.on_parse_error(e) { + Ok(()) => Poll::Pending, // XXX: wat? 
+ Err(e) => Poll::Ready(Some(Err(e))), + } + } else { + debug!("read eof"); + self.close_write(); + Poll::Ready(None) + } + } + + pub(crate) fn poll_read_body( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + debug_assert!(self.can_read_body()); + + let (reading, ret) = match self.state.reading { + Reading::Body(ref mut decoder) => { + match ready!(decoder.decode(cx, &mut self.io)) { + Ok(slice) => { + let (reading, chunk) = if decoder.is_eof() { + debug!("incoming body completed"); + ( + Reading::KeepAlive, + if !slice.is_empty() { + Some(Ok(slice)) + } else { + None + }, + ) + } else if slice.is_empty() { + error!("incoming body unexpectedly ended"); + // This should be unreachable, since all 3 decoders + // either set eof=true or return an Err when reading + // an empty slice... + (Reading::Closed, None) + } else { + return Poll::Ready(Some(Ok(slice))); + }; + (reading, Poll::Ready(chunk)) + } + Err(e) => { + debug!("incoming body decode error: {}", e); + (Reading::Closed, Poll::Ready(Some(Err(e)))) + } + } + } + Reading::Continue(ref decoder) => { + // Write the 100 Continue if not already responded... + if let Writing::Init = self.state.writing { + trace!("automatically sending 100 Continue"); + let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; + self.io.headers_buf().extend_from_slice(cont); + } + + // And now recurse once in the Reading::Body state... 
+ self.state.reading = Reading::Body(decoder.clone()); + return self.poll_read_body(cx); + } + _ => unreachable!("poll_read_body invalid state: {:?}", self.state.reading), + }; + + self.state.reading = reading; + self.try_keep_alive(cx); + ret + } + + pub(crate) fn wants_read_again(&mut self) -> bool { + let ret = self.state.notify_read; + self.state.notify_read = false; + ret + } + + pub(crate) fn poll_read_keep_alive(&mut self, cx: &mut Context<'_>) -> Poll> { + debug_assert!(!self.can_read_head() && !self.can_read_body()); + + if self.is_read_closed() { + Poll::Pending + } else if self.is_mid_message() { + self.mid_message_detect_eof(cx) + } else { + self.require_empty_read(cx) + } + } + + fn is_mid_message(&self) -> bool { + !matches!( + (&self.state.reading, &self.state.writing), + (&Reading::Init, &Writing::Init) + ) + } + + // This will check to make sure the io object read is empty. + // + // This should only be called for Clients wanting to enter the idle + // state. + fn require_empty_read(&mut self, cx: &mut Context<'_>) -> Poll> { + debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed()); + debug_assert!(!self.is_mid_message()); + debug_assert!(T::is_client()); + + if !self.io.read_buf().is_empty() { + debug!("received an unexpected {} bytes", self.io.read_buf().len()); + return Poll::Ready(Err(crate::Error::new_unexpected_message())); + } + + let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?; + + if num_read == 0 { + let ret = if self.should_error_on_eof() { + trace!("found unexpected EOF on busy connection: {:?}", self.state); + Poll::Ready(Err(crate::Error::new_incomplete())) + } else { + trace!("found EOF on idle connection, closing"); + Poll::Ready(Ok(())) + }; + + // order is important: should_error needs state BEFORE close_read + self.state.close_read(); + return ret; + } + + debug!( + "received unexpected {} bytes on an idle connection", + num_read + ); + 
Poll::Ready(Err(crate::Error::new_unexpected_message())) + } + + fn mid_message_detect_eof(&mut self, cx: &mut Context<'_>) -> Poll> { + debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed()); + debug_assert!(self.is_mid_message()); + + if self.state.allow_half_close || !self.io.read_buf().is_empty() { + return Poll::Pending; + } + + let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?; + + if num_read == 0 { + trace!("found unexpected EOF on busy connection: {:?}", self.state); + self.state.close_read(); + Poll::Ready(Err(crate::Error::new_incomplete())) + } else { + Poll::Ready(Ok(())) + } + } + + fn force_io_read(&mut self, cx: &mut Context<'_>) -> Poll> { + debug_assert!(!self.state.is_read_closed()); + + let result = ready!(self.io.poll_read_from_io(cx)); + Poll::Ready(result.map_err(|e| { + trace!("force_io_read; io error = {:?}", e); + self.state.close(); + e + })) + } + + fn maybe_notify(&mut self, cx: &mut Context<'_>) { + // its possible that we returned NotReady from poll() without having + // exhausted the underlying Io. We would have done this when we + // determined we couldn't keep reading until we knew how writing + // would finish. + + match self.state.reading { + Reading::Continue(..) | Reading::Body(..) | Reading::KeepAlive | Reading::Closed => { + return + } + Reading::Init => (), + }; + + match self.state.writing { + Writing::Body(..) 
=> return, + Writing::Init | Writing::KeepAlive | Writing::Closed => (), + } + + if !self.io.is_read_blocked() { + if self.io.read_buf().is_empty() { + match self.io.poll_read_from_io(cx) { + Poll::Ready(Ok(n)) => { + if n == 0 { + trace!("maybe_notify; read eof"); + if self.state.is_idle() { + self.state.close(); + } else { + self.close_read() + } + return; + } + } + Poll::Pending => { + trace!("maybe_notify; read_from_io blocked"); + return; + } + Poll::Ready(Err(e)) => { + trace!("maybe_notify; read_from_io error: {}", e); + self.state.close(); + self.state.error = Some(crate::Error::new_io(e)); + } + } + } + self.state.notify_read = true; + } + } + + fn try_keep_alive(&mut self, cx: &mut Context<'_>) { + self.state.try_keep_alive::(); + self.maybe_notify(cx); + } + + pub(crate) fn can_write_head(&self) -> bool { + if !T::should_read_first() && matches!(self.state.reading, Reading::Closed) { + return false; + } + + match self.state.writing { + Writing::Init => self.io.can_headers_buf(), + _ => false, + } + } + + pub(crate) fn can_write_body(&self) -> bool { + match self.state.writing { + Writing::Body(..) => true, + Writing::Init | Writing::KeepAlive | Writing::Closed => false, + } + } + + pub(crate) fn can_buffer_body(&self) -> bool { + self.io.can_buffer() + } + + pub(crate) fn write_head(&mut self, head: MessageHead, body: Option) { + if let Some(encoder) = self.encode_head(head, body) { + self.state.writing = if !encoder.is_eof() { + Writing::Body(encoder) + } else if encoder.is_last() { + Writing::Closed + } else { + Writing::KeepAlive + }; + } + } + + pub(crate) fn write_full_msg(&mut self, head: MessageHead, body: B) { + if let Some(encoder) = + self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64))) + { + let is_last = encoder.is_last(); + // Make sure we don't write a body if we weren't actually allowed + // to do so, like because its a HEAD request. 
+ if !encoder.is_eof() { + encoder.danger_full_buf(body, self.io.write_buf()); + } + self.state.writing = if is_last { + Writing::Closed + } else { + Writing::KeepAlive + } + } + } + + fn encode_head( + &mut self, + mut head: MessageHead, + body: Option, + ) -> Option { + debug_assert!(self.can_write_head()); + + if !T::should_read_first() { + self.state.busy(); + } + + self.enforce_version(&mut head); + + let buf = self.io.headers_buf(); + match super::role::encode_headers::( + Encode { + head: &mut head, + body, + #[cfg(feature = "server")] + keep_alive: self.state.wants_keep_alive(), + req_method: &mut self.state.method, + title_case_headers: self.state.title_case_headers, + }, + buf, + ) { + Ok(encoder) => { + debug_assert!(self.state.cached_headers.is_none()); + debug_assert!(head.headers.is_empty()); + self.state.cached_headers = Some(head.headers); + + #[cfg(feature = "ffi")] + { + self.state.on_informational = + head.extensions.remove::(); + } + + Some(encoder) + } + Err(err) => { + self.state.error = Some(err); + self.state.writing = Writing::Closed; + None + } + } + } + + // Fix keep-alive when Connection: keep-alive header is not present + fn fix_keep_alive(&mut self, head: &mut MessageHead) { + let outgoing_is_keep_alive = head + .headers + .get(CONNECTION) + .map(connection_keep_alive) + .unwrap_or(false); + + if !outgoing_is_keep_alive { + match head.version { + // If response is version 1.0 and keep-alive is not present in the response, + // disable keep-alive so the server closes the connection + Version::HTTP_10 => self.state.disable_keep_alive(), + // If response is version 1.1 and keep-alive is wanted, add + // Connection: keep-alive header when not present + Version::HTTP_11 => { + if self.state.wants_keep_alive() { + head.headers + .insert(CONNECTION, HeaderValue::from_static("keep-alive")); + } + } + _ => (), + } + } + } + + // If we know the remote speaks an older version, we try to fix up any messages + // to work with our older peer. 
+ fn enforce_version(&mut self, head: &mut MessageHead) { + if let Version::HTTP_10 = self.state.version { + // Fixes response or connection when keep-alive header is not present + self.fix_keep_alive(head); + // If the remote only knows HTTP/1.0, we should force ourselves + // to do only speak HTTP/1.0 as well. + head.version = Version::HTTP_10; + } + // If the remote speaks HTTP/1.1, then it *should* be fine with + // both HTTP/1.0 and HTTP/1.1 from us. So again, we just let + // the user's headers be. + } + + pub(crate) fn write_body(&mut self, chunk: B) { + debug_assert!(self.can_write_body() && self.can_buffer_body()); + // empty chunks should be discarded at Dispatcher level + debug_assert!(chunk.remaining() != 0); + + let state = match self.state.writing { + Writing::Body(ref mut encoder) => { + self.io.buffer(encoder.encode(chunk)); + + if !encoder.is_eof() { + return; + } + + if encoder.is_last() { + Writing::Closed + } else { + Writing::KeepAlive + } + } + _ => unreachable!("write_body invalid state: {:?}", self.state.writing), + }; + + self.state.writing = state; + } + + pub(crate) fn write_body_and_end(&mut self, chunk: B) { + debug_assert!(self.can_write_body() && self.can_buffer_body()); + // empty chunks should be discarded at Dispatcher level + debug_assert!(chunk.remaining() != 0); + + let state = match self.state.writing { + Writing::Body(ref encoder) => { + let can_keep_alive = encoder.encode_and_end(chunk, self.io.write_buf()); + if can_keep_alive { + Writing::KeepAlive + } else { + Writing::Closed + } + } + _ => unreachable!("write_body invalid state: {:?}", self.state.writing), + }; + + self.state.writing = state; + } + + pub(crate) fn end_body(&mut self) -> crate::Result<()> { + debug_assert!(self.can_write_body()); + + let encoder = match self.state.writing { + Writing::Body(ref mut enc) => enc, + _ => return Ok(()), + }; + + // end of stream, that means we should try to eof + match encoder.end() { + Ok(end) => { + if let Some(end) = end { + 
self.io.buffer(end); + } + + self.state.writing = if encoder.is_last() || encoder.is_close_delimited() { + Writing::Closed + } else { + Writing::KeepAlive + }; + + Ok(()) + } + Err(not_eof) => { + self.state.writing = Writing::Closed; + Err(crate::Error::new_body_write_aborted().with(not_eof)) + } + } + } + + // When we get a parse error, depending on what side we are, we might be able + // to write a response before closing the connection. + // + // - Client: there is nothing we can do + // - Server: if Response hasn't been written yet, we can send a 4xx response + fn on_parse_error(&mut self, err: crate::Error) -> crate::Result<()> { + if let Writing::Init = self.state.writing { + if self.has_h2_prefix() { + return Err(crate::Error::new_version_h2()); + } + if let Some(msg) = T::on_error(&err) { + // Drop the cached headers so as to not trigger a debug + // assert in `write_head`... + self.state.cached_headers.take(); + self.write_head(msg, None); + self.state.error = Some(err); + return Ok(()); + } + } + + // fallback is pass the error back up + Err(err) + } + + pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + ready!(Pin::new(&mut self.io).poll_flush(cx))?; + self.try_keep_alive(cx); + trace!("flushed({}): {:?}", T::LOG, self.state); + Poll::Ready(Ok(())) + } + + pub(crate) fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { + match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) { + Ok(()) => { + trace!("shut down IO complete"); + Poll::Ready(Ok(())) + } + Err(e) => { + debug!("error shutting down IO: {}", e); + Poll::Ready(Err(e)) + } + } + } + + /// If the read side can be cheaply drained, do so. Otherwise, close. 
+ pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut Context<'_>) { + if let Reading::Continue(ref decoder) = self.state.reading { + // skip sending the 100-continue + // just move forward to a read, in case a tiny body was included + self.state.reading = Reading::Body(decoder.clone()); + } + + let _ = self.poll_read_body(cx); + + // If still in Reading::Body, just give up + match self.state.reading { + Reading::Init | Reading::KeepAlive => trace!("body drained"), + _ => self.close_read(), + } + } + + pub(crate) fn close_read(&mut self) { + self.state.close_read(); + } + + pub(crate) fn close_write(&mut self) { + self.state.close_write(); + } + + #[cfg(feature = "server")] + pub(crate) fn disable_keep_alive(&mut self) { + if self.state.is_idle() { + trace!("disable_keep_alive; closing idle connection"); + self.state.close(); + } else { + trace!("disable_keep_alive; in-progress connection"); + self.state.disable_keep_alive(); + } + } + + pub(crate) fn take_error(&mut self) -> crate::Result<()> { + if let Some(err) = self.state.error.take() { + Err(err) + } else { + Ok(()) + } + } + + pub(super) fn on_upgrade(&mut self) -> crate::upgrade::OnUpgrade { + trace!("{}: prepare possible HTTP upgrade", T::LOG); + self.state.prepare_upgrade() + } +} + +impl fmt::Debug for Conn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Conn") + .field("state", &self.state) + .field("io", &self.io) + .finish() + } +} + +// B and T are never pinned +impl Unpin for Conn {} + +struct State { + allow_half_close: bool, + /// Re-usable HeaderMap to reduce allocating new ones. + cached_headers: Option, + /// If an error occurs when there wasn't a direct way to return it + /// back to the user, this is set. + error: Option, + /// Current keep-alive status. + keep_alive: KA, + /// If mid-message, the HTTP Method that started it. + /// + /// This is used to know things such as if the message can include + /// a body or not. 
+ method: Option, + h1_parser_config: ParserConfig, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: Option, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: Option>>, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: bool, + preserve_header_case: bool, + #[cfg(feature = "ffi")] + preserve_header_order: bool, + title_case_headers: bool, + h09_responses: bool, + /// If set, called with each 1xx informational response received for + /// the current request. MUST be unset after a non-1xx response is + /// received. + #[cfg(feature = "ffi")] + on_informational: Option, + #[cfg(feature = "ffi")] + raw_headers: bool, + /// Set to true when the Dispatcher should poll read operations + /// again. See the `maybe_notify` method for more. + notify_read: bool, + /// State of allowed reads + reading: Reading, + /// State of allowed writes + writing: Writing, + /// An expected pending HTTP upgrade. + upgrade: Option, + /// Either HTTP/1.0 or 1.1 connection + version: Version, +} + +#[derive(Debug)] +enum Reading { + Init, + Continue(Decoder), + Body(Decoder), + KeepAlive, + Closed, +} + +enum Writing { + Init, + Body(Encoder), + KeepAlive, + Closed, +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut builder = f.debug_struct("State"); + builder + .field("reading", &self.reading) + .field("writing", &self.writing) + .field("keep_alive", &self.keep_alive); + + // Only show error field if it's interesting... + if let Some(ref error) = self.error { + builder.field("error", error); + } + + if self.allow_half_close { + builder.field("allow_half_close", &true); + } + + // Purposefully leaving off other fields.. 
+ + builder.finish() + } +} + +impl fmt::Debug for Writing { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Writing::Init => f.write_str("Init"), + Writing::Body(ref enc) => f.debug_tuple("Body").field(enc).finish(), + Writing::KeepAlive => f.write_str("KeepAlive"), + Writing::Closed => f.write_str("Closed"), + } + } +} + +impl std::ops::BitAndAssign for KA { + fn bitand_assign(&mut self, enabled: bool) { + if !enabled { + trace!("remote disabling keep-alive"); + *self = KA::Disabled; + } + } +} + +#[derive(Clone, Copy, Debug)] +enum KA { + Idle, + Busy, + Disabled, +} + +impl Default for KA { + fn default() -> KA { + KA::Busy + } +} + +impl KA { + fn idle(&mut self) { + *self = KA::Idle; + } + + fn busy(&mut self) { + *self = KA::Busy; + } + + fn disable(&mut self) { + *self = KA::Disabled; + } + + fn status(&self) -> KA { + *self + } +} + +impl State { + fn close(&mut self) { + trace!("State::close()"); + self.reading = Reading::Closed; + self.writing = Writing::Closed; + self.keep_alive.disable(); + } + + fn close_read(&mut self) { + trace!("State::close_read()"); + self.reading = Reading::Closed; + self.keep_alive.disable(); + } + + fn close_write(&mut self) { + trace!("State::close_write()"); + self.writing = Writing::Closed; + self.keep_alive.disable(); + } + + fn wants_keep_alive(&self) -> bool { + if let KA::Disabled = self.keep_alive.status() { + false + } else { + true + } + } + + fn try_keep_alive(&mut self) { + match (&self.reading, &self.writing) { + (&Reading::KeepAlive, &Writing::KeepAlive) => { + if let KA::Busy = self.keep_alive.status() { + self.idle::(); + } else { + trace!( + "try_keep_alive({}): could keep-alive, but status = {:?}", + T::LOG, + self.keep_alive + ); + self.close(); + } + } + (&Reading::Closed, &Writing::KeepAlive) | (&Reading::KeepAlive, &Writing::Closed) => { + self.close() + } + _ => (), + } + } + + fn disable_keep_alive(&mut self) { + self.keep_alive.disable() + } + + fn busy(&mut self) { + if 
let KA::Disabled = self.keep_alive.status() { + return; + } + self.keep_alive.busy(); + } + + fn idle(&mut self) { + debug_assert!(!self.is_idle(), "State::idle() called while idle"); + + self.method = None; + self.keep_alive.idle(); + + if !self.is_idle() { + self.close(); + return; + } + + self.reading = Reading::Init; + self.writing = Writing::Init; + + // !T::should_read_first() means Client. + // + // If Client connection has just gone idle, the Dispatcher + // should try the poll loop one more time, so as to poll the + // pending requests stream. + if !T::should_read_first() { + self.notify_read = true; + } + } + + fn is_idle(&self) -> bool { + matches!(self.keep_alive.status(), KA::Idle) + } + + fn is_read_closed(&self) -> bool { + matches!(self.reading, Reading::Closed) + } + + fn is_write_closed(&self) -> bool { + matches!(self.writing, Writing::Closed) + } + + fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade { + let (tx, rx) = crate::upgrade::pending(); + self.upgrade = Some(tx); + rx + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "nightly")] + #[bench] + fn bench_read_head_short(b: &mut ::test::Bencher) { + use super::*; + let s = b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"; + let len = s.len(); + b.bytes = len as u64; + + // an empty IO, we'll be skipping and using the read buffer anyways + let io = tokio_test::io::Builder::new().build(); + let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io); + *conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]); + conn.state.cached_headers = Some(HeaderMap::with_capacity(2)); + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + + b.iter(|| { + rt.block_on(futures_util::future::poll_fn(|cx| { + match conn.poll_read_head(cx) { + Poll::Ready(Some(Ok(x))) => { + ::test::black_box(&x); + let mut headers = x.0.headers; + headers.clear(); + conn.state.cached_headers = Some(headers); + } + f => panic!("expected 
Ready(Some(Ok(..))): {:?}", f), + } + + conn.io.read_buf_mut().reserve(1); + unsafe { + conn.io.read_buf_mut().set_len(len); + } + conn.state.reading = Reading::Init; + Poll::Ready(()) + })); + }); + } + + /* + //TODO: rewrite these using dispatch... someday... + use futures::{Async, Future, Stream, Sink}; + use futures::future; + + use proto::{self, ClientTransaction, MessageHead, ServerTransaction}; + use super::super::Encoder; + use mock::AsyncIo; + + use super::{Conn, Decoder, Reading, Writing}; + use ::uri::Uri; + + use std::str::FromStr; + + #[test] + fn test_conn_init_read() { + let good_message = b"GET / HTTP/1.1\r\n\r\n".to_vec(); + let len = good_message.len(); + let io = AsyncIo::new_buf(good_message, len); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + + match conn.poll().unwrap() { + Async::Ready(Some(Frame::Message { message, body: false })) => { + assert_eq!(message, MessageHead { + subject: ::proto::RequestLine(::Get, Uri::from_str("/").unwrap()), + .. MessageHead::default() + }) + }, + f => panic!("frame is not Frame::Message: {:?}", f) + } + } + + #[test] + fn test_conn_parse_partial() { + let _: Result<(), ()> = future::lazy(|| { + let good_message = b"GET / HTTP/1.1\r\nHost: foo.bar\r\n\r\n".to_vec(); + let io = AsyncIo::new_buf(good_message, 10); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + assert!(conn.poll().unwrap().is_not_ready()); + conn.io.io_mut().block_in(50); + let async = conn.poll().unwrap(); + assert!(async.is_ready()); + match async { + Async::Ready(Some(Frame::Message { .. 
})) => (), + f => panic!("frame is not Message: {:?}", f), + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_init_read_eof_idle() { + let io = AsyncIo::new_buf(vec![], 1); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.idle(); + + match conn.poll().unwrap() { + Async::Ready(None) => {}, + other => panic!("frame is not None: {:?}", other) + } + } + + #[test] + fn test_conn_init_read_eof_idle_partial_parse() { + let io = AsyncIo::new_buf(b"GET / HTTP/1.1".to_vec(), 100); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.idle(); + + match conn.poll() { + Err(ref err) if err.kind() == std::io::ErrorKind::UnexpectedEof => {}, + other => panic!("unexpected frame: {:?}", other) + } + } + + #[test] + fn test_conn_init_read_eof_busy() { + let _: Result<(), ()> = future::lazy(|| { + // server ignores + let io = AsyncIo::new_eof(); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.busy(); + + match conn.poll().unwrap() { + Async::Ready(None) => {}, + other => panic!("unexpected frame: {:?}", other) + } + + // client + let io = AsyncIo::new_eof(); + let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io); + conn.state.busy(); + + match conn.poll() { + Err(ref err) if err.kind() == std::io::ErrorKind::UnexpectedEof => {}, + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_body_finish_read_eof() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_eof(); + let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io); + conn.state.busy(); + conn.state.writing = Writing::KeepAlive; + conn.state.reading = Reading::Body(Decoder::length(0)); + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + // conn eofs, but tokio-proto will call poll() again, before calling flush() + // the conn eof in this 
case is perfectly fine + + match conn.poll() { + Ok(Async::Ready(None)) => (), + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_message_empty_body_read_eof() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec(), 1024); + let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io); + conn.state.busy(); + conn.state.writing = Writing::KeepAlive; + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Message { body: false, .. }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + // conn eofs, but tokio-proto will call poll() again, before calling flush() + // the conn eof in this case is perfectly fine + + match conn.poll() { + Ok(Async::Ready(None)) => (), + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_read_body_end() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(b"POST / HTTP/1.1\r\nContent-Length: 5\r\n\r\n12345".to_vec(), 1024); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.busy(); + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Message { body: true, .. 
}))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Body { chunk: Some(_) }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + // When the body is done, `poll` MUST return a `Body` frame with chunk set to `None` + match conn.poll() { + Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + match conn.poll() { + Ok(Async::NotReady) => (), + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_closed_read() { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.close(); + + match conn.poll().unwrap() { + Async::Ready(None) => {}, + other => panic!("frame is not None: {:?}", other) + } + } + + #[test] + fn test_conn_body_write_length() { + let _ = pretty_env_logger::try_init(); + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + let max = super::super::io::DEFAULT_MAX_BUFFER_SIZE + 4096; + conn.state.writing = Writing::Body(Encoder::length((max * 2) as u64)); + + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; max].into()) }).unwrap().is_ready()); + assert!(!conn.can_buffer_body()); + + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; 1024 * 8].into()) }).unwrap().is_not_ready()); + + conn.io.io_mut().block_in(1024 * 3); + assert!(conn.poll_complete().unwrap().is_not_ready()); + conn.io.io_mut().block_in(1024 * 3); + assert!(conn.poll_complete().unwrap().is_not_ready()); + conn.io.io_mut().block_in(max * 2); + assert!(conn.poll_complete().unwrap().is_ready()); + + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'c'; 1024 * 8].into()) }).unwrap().is_ready()); + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_body_write_chunked() { + let _: Result<(), ()> = 
future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.writing = Writing::Body(Encoder::chunked()); + + assert!(conn.start_send(Frame::Body { chunk: Some("headers".into()) }).unwrap().is_ready()); + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'x'; 8192].into()) }).unwrap().is_ready()); + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_body_flush() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 1024 * 1024 * 5); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.writing = Writing::Body(Encoder::length(1024 * 1024)); + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 1024].into()) }).unwrap().is_ready()); + assert!(!conn.can_buffer_body()); + conn.io.io_mut().block_in(1024 * 1024 * 5); + assert!(conn.poll_complete().unwrap().is_ready()); + assert!(conn.can_buffer_body()); + assert!(conn.io.io_mut().flushed()); + + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_parking() { + use std::sync::Arc; + use futures::executor::Notify; + use futures::executor::NotifyHandle; + + struct Car { + permit: bool, + } + impl Notify for Car { + fn notify(&self, _id: usize) { + assert!(self.permit, "unparked without permit"); + } + } + + fn car(permit: bool) -> NotifyHandle { + Arc::new(Car { + permit: permit, + }).into() + } + + // test that once writing is done, unparks + let f = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.reading = Reading::KeepAlive; + assert!(conn.poll().unwrap().is_not_ready()); + + conn.state.writing = Writing::KeepAlive; + assert!(conn.poll_complete().unwrap().is_ready()); + Ok::<(), ()>(()) + }); + ::futures::executor::spawn(f).poll_future_notify(&car(true), 0).unwrap(); + + + // test that flushing when not waiting on read doesn't unpark + let f = future::lazy(|| { + let 
io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.writing = Writing::KeepAlive; + assert!(conn.poll_complete().unwrap().is_ready()); + Ok::<(), ()>(()) + }); + ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap(); + + + // test that flushing and writing isn't done doesn't unpark + let f = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.reading = Reading::KeepAlive; + assert!(conn.poll().unwrap().is_not_ready()); + conn.state.writing = Writing::Body(Encoder::length(5_000)); + assert!(conn.poll_complete().unwrap().is_ready()); + Ok::<(), ()>(()) + }); + ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap(); + } + + #[test] + fn test_conn_closed_write() { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.close(); + + match conn.start_send(Frame::Body { chunk: Some(b"foobar".to_vec().into()) }) { + Err(_e) => {}, + other => panic!("did not return Err: {:?}", other) + } + + assert!(conn.state.is_write_closed()); + } + + #[test] + fn test_conn_write_empty_chunk() { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); + conn.state.writing = Writing::KeepAlive; + + assert!(conn.start_send(Frame::Body { chunk: None }).unwrap().is_ready()); + assert!(conn.start_send(Frame::Body { chunk: Some(Vec::new().into()) }).unwrap().is_ready()); + conn.start_send(Frame::Body { chunk: Some(vec![b'a'].into()) }).unwrap_err(); + } + */ +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h1/decode.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h1/decode.rs new file mode 100644 index 0000000000..3206863530 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h1/decode.rs @@ -0,0 +1,853 @@ +use std::error::Error as StdError; +use std::fmt; +use std::io; +use 
std::task::{Context, Poll}; +use std::usize; + +use bytes::Bytes; +use tracing::{debug, trace}; + +use super::io::MemRead; +use super::DecodedLength; + +use self::Kind::{Chunked, Eof, Length}; + +/// Maximum amount of bytes allowed in chunked extensions. +/// +/// This limit is currentlty applied for the entire body, not per chunk. +const CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16; + +/// Decoders to handle different Transfer-Encodings. +/// +/// If a message body does not include a Transfer-Encoding, it *should* +/// include a Content-Length header. +#[derive(Clone, PartialEq)] +pub(crate) struct Decoder { + kind: Kind, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +enum Kind { + /// A Reader used when a Content-Length header is passed with a positive integer. + Length(u64), + /// A Reader used when Transfer-Encoding is `chunked`. + Chunked { + state: ChunkedState, + chunk_len: u64, + extensions_cnt: u64, + }, + /// A Reader used for responses that don't indicate a length or chunked. + /// + /// The bool tracks when EOF is seen on the transport. + /// + /// Note: This should only used for `Response`s. It is illegal for a + /// `Request` to be made with both `Content-Length` and + /// `Transfer-Encoding: chunked` missing, as explained from the spec: + /// + /// > If a Transfer-Encoding header field is present in a response and + /// > the chunked transfer coding is not the final encoding, the + /// > message body length is determined by reading the connection until + /// > it is closed by the server. If a Transfer-Encoding header field + /// > is present in a request and the chunked transfer coding is not + /// > the final encoding, the message body length cannot be determined + /// > reliably; the server MUST respond with the 400 (Bad Request) + /// > status code and then close the connection. 
+ Eof(bool), +} + +#[derive(Debug, PartialEq, Clone, Copy)] +enum ChunkedState { + Start, + Size, + SizeLws, + Extension, + SizeLf, + Body, + BodyCr, + BodyLf, + Trailer, + TrailerLf, + EndCr, + EndLf, + End, +} + +impl Decoder { + // constructors + + pub(crate) fn length(x: u64) -> Decoder { + Decoder { + kind: Kind::Length(x), + } + } + + pub(crate) fn chunked() -> Decoder { + Decoder { + kind: Kind::Chunked { + state: ChunkedState::new(), + chunk_len: 0, + extensions_cnt: 0, + }, + } + } + + pub(crate) fn eof() -> Decoder { + Decoder { + kind: Kind::Eof(false), + } + } + + pub(super) fn new(len: DecodedLength) -> Self { + match len { + DecodedLength::CHUNKED => Decoder::chunked(), + DecodedLength::CLOSE_DELIMITED => Decoder::eof(), + length => Decoder::length(length.danger_len()), + } + } + + // methods + + pub(crate) fn is_eof(&self) -> bool { + matches!( + self.kind, + Length(0) + | Chunked { + state: ChunkedState::End, + .. + } + | Eof(true) + ) + } + + pub(crate) fn decode( + &mut self, + cx: &mut Context<'_>, + body: &mut R, + ) -> Poll> { + trace!("decode; state={:?}", self.kind); + match self.kind { + Length(ref mut remaining) => { + if *remaining == 0 { + Poll::Ready(Ok(Bytes::new())) + } else { + let to_read = *remaining as usize; + let buf = ready!(body.read_mem(cx, to_read))?; + let num = buf.as_ref().len() as u64; + if num > *remaining { + *remaining = 0; + } else if num == 0 { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + IncompleteBody, + ))); + } else { + *remaining -= num; + } + Poll::Ready(Ok(buf)) + } + } + Chunked { + ref mut state, + ref mut chunk_len, + ref mut extensions_cnt, + } => { + loop { + let mut buf = None; + // advances the chunked state + *state = ready!(state.step(cx, body, chunk_len, extensions_cnt, &mut buf))?; + if *state == ChunkedState::End { + trace!("end of chunked"); + return Poll::Ready(Ok(Bytes::new())); + } + if let Some(buf) = buf { + return Poll::Ready(Ok(buf)); + } + } + } + Eof(ref mut 
is_eof) => { + if *is_eof { + Poll::Ready(Ok(Bytes::new())) + } else { + // 8192 chosen because its about 2 packets, there probably + // won't be that much available, so don't have MemReaders + // allocate buffers to big + body.read_mem(cx, 8192).map_ok(|slice| { + *is_eof = slice.is_empty(); + slice + }) + } + } + } + } + + #[cfg(test)] + async fn decode_fut(&mut self, body: &mut R) -> Result { + futures_util::future::poll_fn(move |cx| self.decode(cx, body)).await + } +} + +impl fmt::Debug for Decoder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.kind, f) + } +} + +macro_rules! byte ( + ($rdr:ident, $cx:expr) => ({ + let buf = ready!($rdr.read_mem($cx, 1))?; + if !buf.is_empty() { + buf[0] + } else { + return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof, + "unexpected EOF during chunk size line"))); + } + }) +); + +macro_rules! or_overflow { + ($e:expr) => ( + match $e { + Some(val) => val, + None => return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid chunk size: overflow", + ))), + } + ) +} + +impl ChunkedState { + fn new() -> ChunkedState { + ChunkedState::Start + } + fn step( + &self, + cx: &mut Context<'_>, + body: &mut R, + size: &mut u64, + extensions_cnt: &mut u64, + buf: &mut Option, + ) -> Poll> { + use self::ChunkedState::*; + match *self { + Start => ChunkedState::read_start(cx, body, size), + Size => ChunkedState::read_size(cx, body, size), + SizeLws => ChunkedState::read_size_lws(cx, body), + Extension => ChunkedState::read_extension(cx, body, extensions_cnt), + SizeLf => ChunkedState::read_size_lf(cx, body, *size), + Body => ChunkedState::read_body(cx, body, size, buf), + BodyCr => ChunkedState::read_body_cr(cx, body), + BodyLf => ChunkedState::read_body_lf(cx, body), + Trailer => ChunkedState::read_trailer(cx, body), + TrailerLf => ChunkedState::read_trailer_lf(cx, body), + EndCr => ChunkedState::read_end_cr(cx, body), + EndLf => ChunkedState::read_end_lf(cx, body), 
+ End => Poll::Ready(Ok(ChunkedState::End)), + } + } + + fn read_start( + cx: &mut Context<'_>, + rdr: &mut R, + size: &mut u64, + ) -> Poll> { + trace!("Read chunk start"); + + let radix = 16; + match byte!(rdr, cx) { + b @ b'0'..=b'9' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b - b'0') as u64)); + } + b @ b'a'..=b'f' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); + } + b @ b'A'..=b'F' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); + } + _ => { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size line: missing size digit", + ))); + } + } + + Poll::Ready(Ok(ChunkedState::Size)) + } + + fn read_size( + cx: &mut Context<'_>, + rdr: &mut R, + size: &mut u64, + ) -> Poll> { + trace!("Read chunk hex size"); + + let radix = 16; + match byte!(rdr, cx) { + b @ b'0'..=b'9' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b - b'0') as u64)); + } + b @ b'a'..=b'f' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); + } + b @ b'A'..=b'F' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); + } + b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)), + b';' => return Poll::Ready(Ok(ChunkedState::Extension)), + b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)), + _ => { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size line: Invalid Size", + ))); + } + } + Poll::Ready(Ok(ChunkedState::Size)) + } + fn read_size_lws( + cx: &mut Context<'_>, + rdr: &mut R, + ) -> Poll> { + trace!("read_size_lws"); + match byte!(rdr, cx) { + // LWS can follow the chunk size, but no more digits can come + b'\t' | b' ' => 
Poll::Ready(Ok(ChunkedState::SizeLws)), + b';' => Poll::Ready(Ok(ChunkedState::Extension)), + b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size linear white space", + ))), + } + } + fn read_extension( + cx: &mut Context<'_>, + rdr: &mut R, + extensions_cnt: &mut u64, + ) -> Poll> { + trace!("read_extension"); + // We don't care about extensions really at all. Just ignore them. + // They "end" at the next CRLF. + // + // However, some implementations may not check for the CR, so to save + // them from themselves, we reject extensions containing plain LF as + // well. + match byte!(rdr, cx) { + b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), + b'\n' => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid chunk extension contains newline", + ))), + _ => { + *extensions_cnt += 1; + if *extensions_cnt >= CHUNKED_EXTENSIONS_LIMIT { + Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "chunk extensions over limit", + ))) + } else { + Poll::Ready(Ok(ChunkedState::Extension)) + } + } // no supported extensions + } + } + fn read_size_lf( + cx: &mut Context<'_>, + rdr: &mut R, + size: u64, + ) -> Poll> { + trace!("Chunk size is {:?}", size); + match byte!(rdr, cx) { + b'\n' => { + if size == 0 { + Poll::Ready(Ok(ChunkedState::EndCr)) + } else { + debug!("incoming chunked header: {0:#X} ({0} bytes)", size); + Poll::Ready(Ok(ChunkedState::Body)) + } + } + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size LF", + ))), + } + } + + fn read_body( + cx: &mut Context<'_>, + rdr: &mut R, + rem: &mut u64, + buf: &mut Option, + ) -> Poll> { + trace!("Chunked read, remaining={:?}", rem); + + // cap remaining bytes at the max capacity of usize + let rem_cap = match *rem { + r if r > usize::MAX as u64 => usize::MAX, + r => r as usize, + }; + + let to_read = rem_cap; + let slice = ready!(rdr.read_mem(cx, to_read))?; + let count = 
slice.len(); + + if count == 0 { + *rem = 0; + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + IncompleteBody, + ))); + } + *buf = Some(slice); + *rem -= count as u64; + + if *rem > 0 { + Poll::Ready(Ok(ChunkedState::Body)) + } else { + Poll::Ready(Ok(ChunkedState::BodyCr)) + } + } + fn read_body_cr( + cx: &mut Context<'_>, + rdr: &mut R, + ) -> Poll> { + match byte!(rdr, cx) { + b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk body CR", + ))), + } + } + fn read_body_lf( + cx: &mut Context<'_>, + rdr: &mut R, + ) -> Poll> { + match byte!(rdr, cx) { + b'\n' => Poll::Ready(Ok(ChunkedState::Start)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk body LF", + ))), + } + } + + fn read_trailer( + cx: &mut Context<'_>, + rdr: &mut R, + ) -> Poll> { + trace!("read_trailer"); + match byte!(rdr, cx) { + b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)), + _ => Poll::Ready(Ok(ChunkedState::Trailer)), + } + } + fn read_trailer_lf( + cx: &mut Context<'_>, + rdr: &mut R, + ) -> Poll> { + match byte!(rdr, cx) { + b'\n' => Poll::Ready(Ok(ChunkedState::EndCr)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid trailer end LF", + ))), + } + } + + fn read_end_cr( + cx: &mut Context<'_>, + rdr: &mut R, + ) -> Poll> { + match byte!(rdr, cx) { + b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)), + _ => Poll::Ready(Ok(ChunkedState::Trailer)), + } + } + fn read_end_lf( + cx: &mut Context<'_>, + rdr: &mut R, + ) -> Poll> { + match byte!(rdr, cx) { + b'\n' => Poll::Ready(Ok(ChunkedState::End)), + _ => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk end LF", + ))), + } + } +} + +#[derive(Debug)] +struct IncompleteBody; + +impl fmt::Display for IncompleteBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "end of file before message length reached") + } +} + 
+impl StdError for IncompleteBody {} + +#[cfg(test)] +mod tests { + use super::*; + use std::pin::Pin; + use std::time::Duration; + use tokio::io::{AsyncRead, ReadBuf}; + + impl<'a> MemRead for &'a [u8] { + fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll> { + let n = std::cmp::min(len, self.len()); + if n > 0 { + let (a, b) = self.split_at(n); + let buf = Bytes::copy_from_slice(a); + *self = b; + Poll::Ready(Ok(buf)) + } else { + Poll::Ready(Ok(Bytes::new())) + } + } + } + + impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) { + fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> { + let mut v = vec![0; len]; + let mut buf = ReadBuf::new(&mut v); + ready!(Pin::new(self).poll_read(cx, &mut buf)?); + Poll::Ready(Ok(Bytes::copy_from_slice(&buf.filled()))) + } + } + + impl MemRead for Bytes { + fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll> { + let n = std::cmp::min(len, self.len()); + let ret = self.split_to(n); + Poll::Ready(Ok(ret)) + } + } + + /* + use std::io; + use std::io::Write; + use super::Decoder; + use super::ChunkedState; + use futures::{Async, Poll}; + use bytes::{BytesMut, Bytes}; + use crate::mock::AsyncIo; + */ + + #[tokio::test] + async fn test_read_chunk_size() { + use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof}; + + async fn read(s: &str) -> u64 { + let mut state = ChunkedState::new(); + let rdr = &mut s.as_bytes(); + let mut size = 0; + let mut ext_cnt = 0; + loop { + let result = futures_util::future::poll_fn(|cx| { + state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None) + }) + .await; + let desc = format!("read_size failed for {:?}", s); + state = result.expect(desc.as_str()); + if state == ChunkedState::Body || state == ChunkedState::EndCr { + break; + } + } + size + } + + async fn read_err(s: &str, expected_err: io::ErrorKind) { + let mut state = ChunkedState::new(); + let rdr = &mut s.as_bytes(); + let mut size = 0; + let mut ext_cnt = 0; + loop { + let result = 
futures_util::future::poll_fn(|cx| { + state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None) + }) + .await; + state = match result { + Ok(s) => s, + Err(e) => { + assert!( + expected_err == e.kind(), + "Reading {:?}, expected {:?}, but got {:?}", + s, + expected_err, + e.kind() + ); + return; + } + }; + if state == ChunkedState::Body || state == ChunkedState::End { + panic!("Was Ok. Expected Err for {:?}", s); + } + } + } + + assert_eq!(1, read("1\r\n").await); + assert_eq!(1, read("01\r\n").await); + assert_eq!(0, read("0\r\n").await); + assert_eq!(0, read("00\r\n").await); + assert_eq!(10, read("A\r\n").await); + assert_eq!(10, read("a\r\n").await); + assert_eq!(255, read("Ff\r\n").await); + assert_eq!(255, read("Ff \r\n").await); + // Missing LF or CRLF + read_err("F\rF", InvalidInput).await; + read_err("F", UnexpectedEof).await; + // Missing digit + read_err("\r\n\r\n", InvalidInput).await; + read_err("\r\n", InvalidInput).await; + // Invalid hex digit + read_err("X\r\n", InvalidInput).await; + read_err("1X\r\n", InvalidInput).await; + read_err("-\r\n", InvalidInput).await; + read_err("-1\r\n", InvalidInput).await; + // Acceptable (if not fully valid) extensions do not influence the size + assert_eq!(1, read("1;extension\r\n").await); + assert_eq!(10, read("a;ext name=value\r\n").await); + assert_eq!(1, read("1;extension;extension2\r\n").await); + assert_eq!(1, read("1;;; ;\r\n").await); + assert_eq!(2, read("2; extension...\r\n").await); + assert_eq!(3, read("3 ; extension=123\r\n").await); + assert_eq!(3, read("3 ;\r\n").await); + assert_eq!(3, read("3 ; \r\n").await); + // Invalid extensions cause an error + read_err("1 invalid extension\r\n", InvalidInput).await; + read_err("1 A\r\n", InvalidInput).await; + read_err("1;no CRLF", UnexpectedEof).await; + read_err("1;reject\nnewlines\r\n", InvalidData).await; + // Overflow + read_err("f0000000000000003\r\n", InvalidData).await; + } + + #[tokio::test] + async fn test_read_sized_early_eof() { + let mut bytes = 
&b"foo bar"[..]; + let mut decoder = Decoder::length(10); + assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7); + let e = decoder.decode_fut(&mut bytes).await.unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof); + } + + #[tokio::test] + async fn test_read_chunked_early_eof() { + let mut bytes = &b"\ + 9\r\n\ + foo bar\ + "[..]; + let mut decoder = Decoder::chunked(); + assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7); + let e = decoder.decode_fut(&mut bytes).await.unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof); + } + + #[tokio::test] + async fn test_read_chunked_single_read() { + let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..]; + let buf = Decoder::chunked() + .decode_fut(&mut mock_buf) + .await + .expect("decode"); + assert_eq!(16, buf.len()); + let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); + assert_eq!("1234567890abcdef", &result); + } + + #[tokio::test] + async fn test_read_chunked_with_missing_zero_digit() { + // After reading a valid chunk, the ending is missing a zero. + let mut mock_buf = &b"1\r\nZ\r\n\r\n\r\n"[..]; + let mut decoder = Decoder::chunked(); + let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); + assert_eq!("Z", buf); + + let err = decoder + .decode_fut(&mut mock_buf) + .await + .expect_err("decode 2"); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + } + + #[tokio::test] + async fn test_read_chunked_extensions_over_limit() { + // construct a chunked body where each individual chunked extension + // is totally fine, but combined is over the limit. 
+ let per_chunk = super::CHUNKED_EXTENSIONS_LIMIT * 2 / 3; + let mut scratch = vec![]; + for _ in 0..2 { + scratch.extend(b"1;"); + scratch.extend(b"x".repeat(per_chunk as usize)); + scratch.extend(b"\r\nA\r\n"); + } + scratch.extend(b"0\r\n\r\n"); + let mut mock_buf = Bytes::from(scratch); + + let mut decoder = Decoder::chunked(); + let buf1 = decoder.decode_fut(&mut mock_buf).await.expect("decode1"); + assert_eq!(&buf1[..], b"A"); + + let err = decoder + .decode_fut(&mut mock_buf) + .await + .expect_err("decode2"); + assert_eq!(err.kind(), io::ErrorKind::InvalidData); + assert_eq!(err.to_string(), "chunk extensions over limit"); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn test_read_chunked_trailer_with_missing_lf() { + let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..]; + let mut decoder = Decoder::chunked(); + decoder.decode_fut(&mut mock_buf).await.expect("decode"); + let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::InvalidInput); + } + + #[tokio::test] + async fn test_read_chunked_after_eof() { + let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..]; + let mut decoder = Decoder::chunked(); + + // normal read + let buf = decoder.decode_fut(&mut mock_buf).await.unwrap(); + assert_eq!(16, buf.len()); + let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); + assert_eq!("1234567890abcdef", &result); + + // eof read + let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); + assert_eq!(0, buf.len()); + + // ensure read after eof also returns eof + let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); + assert_eq!(0, buf.len()); + } + + // perform an async read using a custom buffer size and causing a blocking + // read at the specified byte + async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String { + let mut outs = Vec::new(); + + let mut ins = if block_at == 0 { + tokio_test::io::Builder::new() + 
.wait(Duration::from_millis(10)) + .read(content) + .build() + } else { + tokio_test::io::Builder::new() + .read(&content[..block_at]) + .wait(Duration::from_millis(10)) + .read(&content[block_at..]) + .build() + }; + + let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin); + + loop { + let buf = decoder + .decode_fut(&mut ins) + .await + .expect("unexpected decode error"); + if buf.is_empty() { + break; // eof + } + outs.extend(buf.as_ref()); + } + + String::from_utf8(outs).expect("decode String") + } + + // iterate over the different ways that this async read could go. + // tests blocking a read at each byte along the content - The shotgun approach + async fn all_async_cases(content: &str, expected: &str, decoder: Decoder) { + let content_len = content.len(); + for block_at in 0..content_len { + let actual = read_async(decoder.clone(), content.as_bytes(), block_at).await; + assert_eq!(expected, &actual) //, "Failed async. Blocking at {}", block_at); + } + } + + #[tokio::test] + async fn test_read_length_async() { + let content = "foobar"; + all_async_cases(content, content, Decoder::length(content.len() as u64)).await; + } + + #[tokio::test] + async fn test_read_chunked_async() { + let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n"; + let expected = "foobar"; + all_async_cases(content, expected, Decoder::chunked()).await; + } + + #[tokio::test] + async fn test_read_eof_async() { + let content = "foobar"; + all_async_cases(content, content, Decoder::eof()).await; + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_decode_chunked_1kb(b: &mut test::Bencher) { + let rt = new_runtime(); + + const LEN: usize = 1024; + let mut vec = Vec::new(); + vec.extend(format!("{:x}\r\n", LEN).as_bytes()); + vec.extend(&[0; LEN][..]); + vec.extend(b"\r\n"); + let content = Bytes::from(vec); + + b.bytes = LEN as u64; + + b.iter(|| { + let mut decoder = Decoder::chunked(); + rt.block_on(async { + let mut raw = content.clone(); + let chunk = decoder.decode_fut(&mut 
raw).await.unwrap(); + assert_eq!(chunk.len(), LEN); + }); + }); + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_decode_length_1kb(b: &mut test::Bencher) { + let rt = new_runtime(); + + const LEN: usize = 1024; + let content = Bytes::from(&[0; LEN][..]); + b.bytes = LEN as u64; + + b.iter(|| { + let mut decoder = Decoder::length(LEN as u64); + rt.block_on(async { + let mut raw = content.clone(); + let chunk = decoder.decode_fut(&mut raw).await.unwrap(); + assert_eq!(chunk.len(), LEN); + }); + }); + } + + #[cfg(feature = "nightly")] + fn new_runtime() -> tokio::runtime::Runtime { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("rt build") + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h1/dispatch.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h1/dispatch.rs new file mode 100644 index 0000000000..3516d7ad21 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h1/dispatch.rs @@ -0,0 +1,761 @@ +use std::error::Error as StdError; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use bytes::{Buf, Bytes}; +use http::Request; +use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace}; + +use super::{Http1Transaction, Wants}; +use crate::body::{Body, DecodedLength, HttpBody}; +use crate::common; +use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead}; +use crate::upgrade::OnUpgrade; + +pub(crate) struct Dispatcher { + conn: Conn, + dispatch: D, + body_tx: Option, + body_rx: Pin>>, + is_closing: bool, +} + +pub(crate) trait Dispatch { + type PollItem; + type PollBody; + type PollError; + type RecvItem; + fn poll_msg( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>>; + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>; + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; + fn should_poll(&self) -> bool; +} + +cfg_server! 
{ + use crate::service::HttpService; + + pub(crate) struct Server, B> { + in_flight: Pin>>, + pub(crate) service: S, + } +} + +cfg_client! { + pin_project_lite::pin_project! { + pub(crate) struct Client { + callback: Option, http::Response>>, + #[pin] + rx: ClientRx, + rx_closed: bool, + } + } + + type ClientRx = crate::client::dispatch::Receiver, http::Response>; +} + +impl Dispatcher +where + D: Dispatch< + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, + D::PollError: Into>, + I: AsyncRead + AsyncWrite + Unpin, + T: Http1Transaction + Unpin, + Bs: HttpBody + 'static, + Bs::Error: Into>, +{ + pub(crate) fn new(dispatch: D, conn: Conn) -> Self { + Dispatcher { + conn, + dispatch, + body_tx: None, + body_rx: Box::pin(None), + is_closing: false, + } + } + + #[cfg(feature = "server")] + pub(crate) fn disable_keep_alive(&mut self) { + self.conn.disable_keep_alive(); + if self.conn.is_write_closed() { + self.close(); + } + } + + pub(crate) fn into_inner(self) -> (I, Bytes, D) { + let (io, buf) = self.conn.into_inner(); + (io, buf, self.dispatch) + } + + /// Run this dispatcher until HTTP says this connection is done, + /// but don't call `AsyncWrite::shutdown` on the underlying IO. + /// + /// This is useful for old-style HTTP upgrades, but ignores + /// newer-style upgrade API. + pub(crate) fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> + where + Self: Unpin, + { + Pin::new(self).poll_catch(cx, false).map_ok(|ds| { + if let Dispatched::Upgrade(pending) = ds { + pending.manual(); + } + }) + } + + fn poll_catch( + &mut self, + cx: &mut Context<'_>, + should_shutdown: bool, + ) -> Poll> { + Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| { + // Be sure to alert a streaming body of the failure. + if let Some(mut body) = self.body_tx.take() { + body.send_error(crate::Error::new_body("connection error")); + } + // An error means we're shutting down either way. 
+ // We just try to give the error to the user, + // and close the connection with an Ok. If we + // cannot give it to the user, then return the Err. + self.dispatch.recv_msg(Err(e))?; + Ok(Dispatched::Shutdown) + })) + } + + fn poll_inner( + &mut self, + cx: &mut Context<'_>, + should_shutdown: bool, + ) -> Poll> { + T::update_date(); + + ready!(self.poll_loop(cx))?; + + if self.is_done() { + if let Some(pending) = self.conn.pending_upgrade() { + self.conn.take_error()?; + return Poll::Ready(Ok(Dispatched::Upgrade(pending))); + } else if should_shutdown { + ready!(self.conn.poll_shutdown(cx)).map_err(crate::Error::new_shutdown)?; + } + self.conn.take_error()?; + Poll::Ready(Ok(Dispatched::Shutdown)) + } else { + Poll::Pending + } + } + + fn poll_loop(&mut self, cx: &mut Context<'_>) -> Poll> { + // Limit the looping on this connection, in case it is ready far too + // often, so that other futures don't starve. + // + // 16 was chosen arbitrarily, as that is number of pipelined requests + // benchmarks often use. Perhaps it should be a config option instead. + for _ in 0..16 { + let _ = self.poll_read(cx)?; + let _ = self.poll_write(cx)?; + let _ = self.poll_flush(cx)?; + + // This could happen if reading paused before blocking on IO, + // such as getting to the end of a framed message, but then + // writing/flushing set the state back to Init. In that case, + // if the read buffer still had bytes, we'd want to try poll_read + // again, or else we wouldn't ever be woken up again. + // + // Using this instead of task::current() and notify() inside + // the Conn is noticeably faster in pipelined benchmarks. 
+ if !self.conn.wants_read_again() { + //break; + return Poll::Ready(Ok(())); + } + } + + trace!("poll_loop yielding (self = {:p})", self); + + common::task::yield_now(cx).map(|never| match never {}) + } + + fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + if self.is_closing { + return Poll::Ready(Ok(())); + } else if self.conn.can_read_head() { + ready!(self.poll_read_head(cx))?; + } else if let Some(mut body) = self.body_tx.take() { + if self.conn.can_read_body() { + match body.poll_ready(cx) { + Poll::Ready(Ok(())) => (), + Poll::Pending => { + self.body_tx = Some(body); + return Poll::Pending; + } + Poll::Ready(Err(_canceled)) => { + // user doesn't care about the body + // so we should stop reading + trace!("body receiver dropped before eof, draining or closing"); + self.conn.poll_drain_or_close_read(cx); + continue; + } + } + match self.conn.poll_read_body(cx) { + Poll::Ready(Some(Ok(chunk))) => match body.try_send_data(chunk) { + Ok(()) => { + self.body_tx = Some(body); + } + Err(_canceled) => { + if self.conn.can_read_body() { + trace!("body receiver dropped before eof, closing"); + self.conn.close_read(); + } + } + }, + Poll::Ready(None) => { + // just drop, the body will close automatically + } + Poll::Pending => { + self.body_tx = Some(body); + return Poll::Pending; + } + Poll::Ready(Some(Err(e))) => { + body.send_error(crate::Error::new_body(e)); + } + } + } else { + // just drop, the body will close automatically + } + } else { + return self.conn.poll_read_keep_alive(cx); + } + } + } + + fn poll_read_head(&mut self, cx: &mut Context<'_>) -> Poll> { + // can dispatch receive, or does it still care about, an incoming message? 
+ match ready!(self.dispatch.poll_ready(cx)) { + Ok(()) => (), + Err(()) => { + trace!("dispatch no longer receiving messages"); + self.close(); + return Poll::Ready(Ok(())); + } + } + // dispatch is ready for a message, try to read one + match ready!(self.conn.poll_read_head(cx)) { + Some(Ok((mut head, body_len, wants))) => { + let body = match body_len { + DecodedLength::ZERO => Body::empty(), + other => { + let (tx, rx) = Body::new_channel(other, wants.contains(Wants::EXPECT)); + self.body_tx = Some(tx); + rx + } + }; + if wants.contains(Wants::UPGRADE) { + let upgrade = self.conn.on_upgrade(); + debug_assert!(!upgrade.is_none(), "empty upgrade"); + debug_assert!( + head.extensions.get::().is_none(), + "OnUpgrade already set" + ); + head.extensions.insert(upgrade); + } + self.dispatch.recv_msg(Ok((head, body)))?; + Poll::Ready(Ok(())) + } + Some(Err(err)) => { + debug!("read_head error: {}", err); + self.dispatch.recv_msg(Err(err))?; + // if here, the dispatcher gave the user the error + // somewhere else. we still need to shutdown, but + // not as a second error. + self.close(); + Poll::Ready(Ok(())) + } + None => { + // read eof, the write side will have been closed too unless + // allow_read_close was set to true, in which case just do + // nothing... + debug_assert!(self.conn.is_read_closed()); + if self.conn.is_write_closed() { + self.close(); + } + Poll::Ready(Ok(())) + } + } + } + + fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + if self.is_closing { + return Poll::Ready(Ok(())); + } else if self.body_rx.is_none() + && self.conn.can_write_head() + && self.dispatch.should_poll() + { + if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) { + let (head, mut body) = msg.map_err(crate::Error::new_user_service)?; + + // Check if the body knows its full data immediately. + // + // If so, we can skip a bit of bookkeeping that streaming + // bodies need to do. 
+ if let Some(full) = crate::body::take_full_data(&mut body) { + self.conn.write_full_msg(head, full); + return Poll::Ready(Ok(())); + } + + let body_type = if body.is_end_stream() { + self.body_rx.set(None); + None + } else { + let btype = body + .size_hint() + .exact() + .map(BodyLength::Known) + .or_else(|| Some(BodyLength::Unknown)); + self.body_rx.set(Some(body)); + btype + }; + self.conn.write_head(head, body_type); + } else { + self.close(); + return Poll::Ready(Ok(())); + } + } else if !self.conn.can_buffer_body() { + ready!(self.poll_flush(cx))?; + } else { + // A new scope is needed :( + if let (Some(mut body), clear_body) = + OptGuard::new(self.body_rx.as_mut()).guard_mut() + { + debug_assert!(!*clear_body, "opt guard defaults to keeping body"); + if !self.conn.can_write_body() { + trace!( + "no more write body allowed, user body is_end_stream = {}", + body.is_end_stream(), + ); + *clear_body = true; + continue; + } + + let item = ready!(body.as_mut().poll_data(cx)); + if let Some(item) = item { + let chunk = item.map_err(|e| { + *clear_body = true; + crate::Error::new_user_body(e) + })?; + let eos = body.is_end_stream(); + if eos { + *clear_body = true; + if chunk.remaining() == 0 { + trace!("discarding empty chunk"); + self.conn.end_body()?; + } else { + self.conn.write_body_and_end(chunk); + } + } else { + if chunk.remaining() == 0 { + trace!("discarding empty chunk"); + continue; + } + self.conn.write_body(chunk); + } + } else { + *clear_body = true; + self.conn.end_body()?; + } + } else { + // If there's no body_rx, end the body + if self.conn.can_write_body() { + self.conn.end_body()?; + } else { + return Poll::Pending; + } + } + } + } + } + + fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + self.conn.poll_flush(cx).map_err(|err| { + debug!("error writing: {}", err); + crate::Error::new_body_write(err) + }) + } + + fn close(&mut self) { + self.is_closing = true; + self.conn.close_read(); + self.conn.close_write(); + } + + fn 
is_done(&self) -> bool { + if self.is_closing { + return true; + } + + let read_done = self.conn.is_read_closed(); + + if !T::should_read_first() && read_done { + // a client that cannot read may was well be done. + true + } else { + let write_done = self.conn.is_write_closed() + || (!self.dispatch.should_poll() && self.body_rx.is_none()); + read_done && write_done + } + } +} + +impl Future for Dispatcher +where + D: Dispatch< + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, + D::PollError: Into>, + I: AsyncRead + AsyncWrite + Unpin, + T: Http1Transaction + Unpin, + Bs: HttpBody + 'static, + Bs::Error: Into>, +{ + type Output = crate::Result; + + #[inline] + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.poll_catch(cx, true) + } +} + +// ===== impl OptGuard ===== + +/// A drop guard to allow a mutable borrow of an Option while being able to +/// set whether the `Option` should be cleared on drop. +struct OptGuard<'a, T>(Pin<&'a mut Option>, bool); + +impl<'a, T> OptGuard<'a, T> { + fn new(pin: Pin<&'a mut Option>) -> Self { + OptGuard(pin, false) + } + + fn guard_mut(&mut self) -> (Option>, &mut bool) { + (self.0.as_mut().as_pin_mut(), &mut self.1) + } +} + +impl<'a, T> Drop for OptGuard<'a, T> { + fn drop(&mut self) { + if self.1 { + self.0.set(None); + } + } +} + +// ===== impl Server ===== + +cfg_server! 
{ + impl Server + where + S: HttpService, + { + pub(crate) fn new(service: S) -> Server { + Server { + in_flight: Box::pin(None), + service, + } + } + + pub(crate) fn into_service(self) -> S { + self.service + } + } + + // Service is never pinned + impl, B> Unpin for Server {} + + impl Dispatch for Server + where + S: HttpService, + S::Error: Into>, + Bs: HttpBody, + { + type PollItem = MessageHead; + type PollBody = Bs; + type PollError = S::Error; + type RecvItem = RequestHead; + + fn poll_msg( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let mut this = self.as_mut(); + let ret = if let Some(ref mut fut) = this.in_flight.as_mut().as_pin_mut() { + let resp = ready!(fut.as_mut().poll(cx)?); + let (parts, body) = resp.into_parts(); + let head = MessageHead { + version: parts.version, + subject: parts.status, + headers: parts.headers, + extensions: parts.extensions, + }; + Poll::Ready(Some(Ok((head, body)))) + } else { + unreachable!("poll_msg shouldn't be called if no inflight"); + }; + + // Since in_flight finished, remove it + this.in_flight.set(None); + ret + } + + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { + let (msg, body) = msg?; + let mut req = Request::new(body); + *req.method_mut() = msg.subject.0; + *req.uri_mut() = msg.subject.1; + *req.headers_mut() = msg.headers; + *req.version_mut() = msg.version; + *req.extensions_mut() = msg.extensions; + let fut = self.service.call(req); + self.in_flight.set(Some(fut)); + Ok(()) + } + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + if self.in_flight.is_some() { + Poll::Pending + } else { + self.service.poll_ready(cx).map_err(|_e| { + // FIXME: return error value. + trace!("service closed"); + }) + } + } + + fn should_poll(&self) -> bool { + self.in_flight.is_some() + } + } +} + +// ===== impl Client ===== + +cfg_client! 
{ + impl Client { + pub(crate) fn new(rx: ClientRx) -> Client { + Client { + callback: None, + rx, + rx_closed: false, + } + } + } + + impl Dispatch for Client + where + B: HttpBody, + { + type PollItem = RequestHead; + type PollBody = B; + type PollError = std::convert::Infallible; + type RecvItem = crate::proto::ResponseHead; + + fn poll_msg( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let mut this = self.as_mut(); + debug_assert!(!this.rx_closed); + match this.rx.poll_recv(cx) { + Poll::Ready(Some((req, mut cb))) => { + // check that future hasn't been canceled already + match cb.poll_canceled(cx) { + Poll::Ready(()) => { + trace!("request canceled"); + Poll::Ready(None) + } + Poll::Pending => { + let (parts, body) = req.into_parts(); + let head = RequestHead { + version: parts.version, + subject: crate::proto::RequestLine(parts.method, parts.uri), + headers: parts.headers, + extensions: parts.extensions, + }; + this.callback = Some(cb); + Poll::Ready(Some(Ok((head, body)))) + } + } + } + Poll::Ready(None) => { + // user has dropped sender handle + trace!("client tx closed"); + this.rx_closed = true; + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, + } + } + + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { + match msg { + Ok((msg, body)) => { + if let Some(cb) = self.callback.take() { + let res = msg.into_response(body); + cb.send(Ok(res)); + Ok(()) + } else { + // Getting here is likely a bug! An error should have happened + // in Conn::require_empty_read() before ever parsing a + // full message! 
+ Err(crate::Error::new_unexpected_message()) + } + } + Err(err) => { + if let Some(cb) = self.callback.take() { + cb.send(Err((err, None))); + Ok(()) + } else if !self.rx_closed { + self.rx.close(); + if let Some((req, cb)) = self.rx.try_recv() { + trace!("canceling queued request with connection error: {}", err); + // in this case, the message was never even started, so it's safe to tell + // the user that the request was completely canceled + cb.send(Err((crate::Error::new_canceled().with(err), Some(req)))); + Ok(()) + } else { + Err(err) + } + } else { + Err(err) + } + } + } + } + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + match self.callback { + Some(ref mut cb) => match cb.poll_canceled(cx) { + Poll::Ready(()) => { + trace!("callback receiver has dropped"); + Poll::Ready(Err(())) + } + Poll::Pending => Poll::Ready(Ok(())), + }, + None => Poll::Ready(Err(())), + } + } + + fn should_poll(&self) -> bool { + self.callback.is_none() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::proto::h1::ClientTransaction; + use std::time::Duration; + + #[test] + fn client_read_bytes_before_writing_request() { + let _ = pretty_env_logger::try_init(); + + tokio_test::task::spawn(()).enter(|cx, _| { + let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle(); + + // Block at 0 for now, but we will release this response before + // the request is ready to write later... + let (mut tx, rx) = crate::client::dispatch::channel(); + let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let mut dispatcher = Dispatcher::new(Client::new(rx), conn); + + // First poll is needed to allow tx to send... + assert!(Pin::new(&mut dispatcher).poll(cx).is_pending()); + + // Unblock our IO, which has a response before we've sent request! 
+ // + handle.read(b"HTTP/1.1 200 OK\r\n\r\n"); + + let mut res_rx = tx + .try_send(crate::Request::new(crate::Body::empty())) + .unwrap(); + + tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx)); + let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx)) + .expect_err("callback should send error"); + + match (err.0.kind(), err.1) { + (&crate::error::Kind::Canceled, Some(_)) => (), + other => panic!("expected Canceled, got {:?}", other), + } + }); + } + + #[tokio::test] + async fn client_flushing_is_not_ready_for_next_request() { + let _ = pretty_env_logger::try_init(); + + let (io, _handle) = tokio_test::io::Builder::new() + .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n") + .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n") + .wait(std::time::Duration::from_secs(2)) + .build_with_handle(); + + let (mut tx, rx) = crate::client::dispatch::channel(); + let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + conn.set_write_strategy_queue(); + + let dispatcher = Dispatcher::new(Client::new(rx), conn); + let _dispatcher = tokio::spawn(async move { dispatcher.await }); + + let req = crate::Request::builder() + .method("POST") + .body(crate::Body::from("reee")) + .unwrap(); + + let res = tx.try_send(req).unwrap().await.expect("response"); + drop(res); + + assert!(!tx.is_ready()); + } + + #[tokio::test] + async fn body_empty_chunks_ignored() { + let _ = pretty_env_logger::try_init(); + + let io = tokio_test::io::Builder::new() + // no reading or writing, just be blocked for the test... + .wait(Duration::from_secs(5)) + .build(); + + let (mut tx, rx) = crate::client::dispatch::channel(); + let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn)); + + // First poll is needed to allow tx to send... 
+ assert!(dispatcher.poll().is_pending()); + + let body = { + let (mut tx, body) = crate::Body::channel(); + tx.try_send_data("".into()).unwrap(); + body + }; + + let _res_rx = tx.try_send(crate::Request::new(body)).unwrap(); + + // Ensure conn.write_body wasn't called with the empty chunk. + // If it is, it will trigger an assertion. + assert!(dispatcher.poll().is_pending()); + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h1/encode.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h1/encode.rs new file mode 100644 index 0000000000..f0aa261a4f --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h1/encode.rs @@ -0,0 +1,439 @@ +use std::fmt; +use std::io::IoSlice; + +use bytes::buf::{Chain, Take}; +use bytes::Buf; +use tracing::trace; + +use super::io::WriteBuf; + +type StaticBuf = &'static [u8]; + +/// Encoders to handle different Transfer-Encodings. +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct Encoder { + kind: Kind, + is_last: bool, +} + +#[derive(Debug)] +pub(crate) struct EncodedBuf { + kind: BufKind, +} + +#[derive(Debug)] +pub(crate) struct NotEof(u64); + +#[derive(Debug, PartialEq, Clone)] +enum Kind { + /// An Encoder for when Transfer-Encoding includes `chunked`. + Chunked, + /// An Encoder for when Content-Length is set. + /// + /// Enforces that the body is not longer than the Content-Length header. + Length(u64), + /// An Encoder for when neither Content-Length nor Chunked encoding is set. + /// + /// This is mostly only used with HTTP/1.0 with a length. This kind requires + /// the connection to be closed when the body is finished. 
+ #[cfg(feature = "server")] + CloseDelimited, +} + +#[derive(Debug)] +enum BufKind { + Exact(B), + Limited(Take), + Chunked(Chain, StaticBuf>), + ChunkedEnd(StaticBuf), +} + +impl Encoder { + fn new(kind: Kind) -> Encoder { + Encoder { + kind, + is_last: false, + } + } + pub(crate) fn chunked() -> Encoder { + Encoder::new(Kind::Chunked) + } + + pub(crate) fn length(len: u64) -> Encoder { + Encoder::new(Kind::Length(len)) + } + + #[cfg(feature = "server")] + pub(crate) fn close_delimited() -> Encoder { + Encoder::new(Kind::CloseDelimited) + } + + pub(crate) fn is_eof(&self) -> bool { + matches!(self.kind, Kind::Length(0)) + } + + #[cfg(feature = "server")] + pub(crate) fn set_last(mut self, is_last: bool) -> Self { + self.is_last = is_last; + self + } + + pub(crate) fn is_last(&self) -> bool { + self.is_last + } + + pub(crate) fn is_close_delimited(&self) -> bool { + match self.kind { + #[cfg(feature = "server")] + Kind::CloseDelimited => true, + _ => false, + } + } + + pub(crate) fn end(&self) -> Result>, NotEof> { + match self.kind { + Kind::Length(0) => Ok(None), + Kind::Chunked => Ok(Some(EncodedBuf { + kind: BufKind::ChunkedEnd(b"0\r\n\r\n"), + })), + #[cfg(feature = "server")] + Kind::CloseDelimited => Ok(None), + Kind::Length(n) => Err(NotEof(n)), + } + } + + pub(crate) fn encode(&mut self, msg: B) -> EncodedBuf + where + B: Buf, + { + let len = msg.remaining(); + debug_assert!(len > 0, "encode() called with empty buf"); + + let kind = match self.kind { + Kind::Chunked => { + trace!("encoding chunked {}B", len); + let buf = ChunkSize::new(len) + .chain(msg) + .chain(b"\r\n" as &'static [u8]); + BufKind::Chunked(buf) + } + Kind::Length(ref mut remaining) => { + trace!("sized write, len = {}", len); + if len as u64 > *remaining { + let limit = *remaining as usize; + *remaining = 0; + BufKind::Limited(msg.take(limit)) + } else { + *remaining -= len as u64; + BufKind::Exact(msg) + } + } + #[cfg(feature = "server")] + Kind::CloseDelimited => { + trace!("close 
delimited write {}B", len); + BufKind::Exact(msg) + } + }; + EncodedBuf { kind } + } + + pub(super) fn encode_and_end(&self, msg: B, dst: &mut WriteBuf>) -> bool + where + B: Buf, + { + let len = msg.remaining(); + debug_assert!(len > 0, "encode() called with empty buf"); + + match self.kind { + Kind::Chunked => { + trace!("encoding chunked {}B", len); + let buf = ChunkSize::new(len) + .chain(msg) + .chain(b"\r\n0\r\n\r\n" as &'static [u8]); + dst.buffer(buf); + !self.is_last + } + Kind::Length(remaining) => { + use std::cmp::Ordering; + + trace!("sized write, len = {}", len); + match (len as u64).cmp(&remaining) { + Ordering::Equal => { + dst.buffer(msg); + !self.is_last + } + Ordering::Greater => { + dst.buffer(msg.take(remaining as usize)); + !self.is_last + } + Ordering::Less => { + dst.buffer(msg); + false + } + } + } + #[cfg(feature = "server")] + Kind::CloseDelimited => { + trace!("close delimited write {}B", len); + dst.buffer(msg); + false + } + } + } + + /// Encodes the full body, without verifying the remaining length matches. + /// + /// This is used in conjunction with HttpBody::__hyper_full_data(), which + /// means we can trust that the buf has the correct size (the buf itself + /// was checked to make the headers). 
+ pub(super) fn danger_full_buf(self, msg: B, dst: &mut WriteBuf>) + where + B: Buf, + { + debug_assert!(msg.remaining() > 0, "encode() called with empty buf"); + debug_assert!( + match self.kind { + Kind::Length(len) => len == msg.remaining() as u64, + _ => true, + }, + "danger_full_buf length mismatches" + ); + + match self.kind { + Kind::Chunked => { + let len = msg.remaining(); + trace!("encoding chunked {}B", len); + let buf = ChunkSize::new(len) + .chain(msg) + .chain(b"\r\n0\r\n\r\n" as &'static [u8]); + dst.buffer(buf); + } + _ => { + dst.buffer(msg); + } + } + } +} + +impl Buf for EncodedBuf +where + B: Buf, +{ + #[inline] + fn remaining(&self) -> usize { + match self.kind { + BufKind::Exact(ref b) => b.remaining(), + BufKind::Limited(ref b) => b.remaining(), + BufKind::Chunked(ref b) => b.remaining(), + BufKind::ChunkedEnd(ref b) => b.remaining(), + } + } + + #[inline] + fn chunk(&self) -> &[u8] { + match self.kind { + BufKind::Exact(ref b) => b.chunk(), + BufKind::Limited(ref b) => b.chunk(), + BufKind::Chunked(ref b) => b.chunk(), + BufKind::ChunkedEnd(ref b) => b.chunk(), + } + } + + #[inline] + fn advance(&mut self, cnt: usize) { + match self.kind { + BufKind::Exact(ref mut b) => b.advance(cnt), + BufKind::Limited(ref mut b) => b.advance(cnt), + BufKind::Chunked(ref mut b) => b.advance(cnt), + BufKind::ChunkedEnd(ref mut b) => b.advance(cnt), + } + } + + #[inline] + fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { + match self.kind { + BufKind::Exact(ref b) => b.chunks_vectored(dst), + BufKind::Limited(ref b) => b.chunks_vectored(dst), + BufKind::Chunked(ref b) => b.chunks_vectored(dst), + BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst), + } + } +} + +#[cfg(target_pointer_width = "32")] +const USIZE_BYTES: usize = 4; + +#[cfg(target_pointer_width = "64")] +const USIZE_BYTES: usize = 8; + +// each byte will become 2 hex +const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2; + +#[derive(Clone, Copy)] +struct ChunkSize { + bytes: 
[u8; CHUNK_SIZE_MAX_BYTES + 2], + pos: u8, + len: u8, +} + +impl ChunkSize { + fn new(len: usize) -> ChunkSize { + use std::fmt::Write; + let mut size = ChunkSize { + bytes: [0; CHUNK_SIZE_MAX_BYTES + 2], + pos: 0, + len: 0, + }; + write!(&mut size, "{:X}\r\n", len).expect("CHUNK_SIZE_MAX_BYTES should fit any usize"); + size + } +} + +impl Buf for ChunkSize { + #[inline] + fn remaining(&self) -> usize { + (self.len - self.pos).into() + } + + #[inline] + fn chunk(&self) -> &[u8] { + &self.bytes[self.pos.into()..self.len.into()] + } + + #[inline] + fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.remaining()); + self.pos += cnt as u8; // just asserted cnt fits in u8 + } +} + +impl fmt::Debug for ChunkSize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ChunkSize") + .field("bytes", &&self.bytes[..self.len.into()]) + .field("pos", &self.pos) + .finish() + } +} + +impl fmt::Write for ChunkSize { + fn write_str(&mut self, num: &str) -> fmt::Result { + use std::io::Write; + (&mut self.bytes[self.len.into()..]) + .write_all(num.as_bytes()) + .expect("&mut [u8].write() cannot error"); + self.len += num.len() as u8; // safe because bytes is never bigger than 256 + Ok(()) + } +} + +impl From for EncodedBuf { + fn from(buf: B) -> Self { + EncodedBuf { + kind: BufKind::Exact(buf), + } + } +} + +impl From> for EncodedBuf { + fn from(buf: Take) -> Self { + EncodedBuf { + kind: BufKind::Limited(buf), + } + } +} + +impl From, StaticBuf>> for EncodedBuf { + fn from(buf: Chain, StaticBuf>) -> Self { + EncodedBuf { + kind: BufKind::Chunked(buf), + } + } +} + +impl fmt::Display for NotEof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "early end, expected {} more bytes", self.0) + } +} + +impl std::error::Error for NotEof {} + +#[cfg(test)] +mod tests { + use bytes::BufMut; + + use super::super::io::Cursor; + use super::Encoder; + + #[test] + fn chunked() { + let mut encoder = Encoder::chunked(); + let mut dst = 
Vec::new(); + + let msg1 = b"foo bar".as_ref(); + let buf1 = encoder.encode(msg1); + dst.put(buf1); + assert_eq!(dst, b"7\r\nfoo bar\r\n"); + + let msg2 = b"baz quux herp".as_ref(); + let buf2 = encoder.encode(msg2); + dst.put(buf2); + + assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n"); + + let end = encoder.end::>>().unwrap().unwrap(); + dst.put(end); + + assert_eq!( + dst, + b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n".as_ref() + ); + } + + #[test] + fn length() { + let max_len = 8; + let mut encoder = Encoder::length(max_len as u64); + let mut dst = Vec::new(); + + let msg1 = b"foo bar".as_ref(); + let buf1 = encoder.encode(msg1); + dst.put(buf1); + + assert_eq!(dst, b"foo bar"); + assert!(!encoder.is_eof()); + encoder.end::<()>().unwrap_err(); + + let msg2 = b"baz".as_ref(); + let buf2 = encoder.encode(msg2); + dst.put(buf2); + + assert_eq!(dst.len(), max_len); + assert_eq!(dst, b"foo barb"); + assert!(encoder.is_eof()); + assert!(encoder.end::<()>().unwrap().is_none()); + } + + #[test] + fn eof() { + let mut encoder = Encoder::close_delimited(); + let mut dst = Vec::new(); + + let msg1 = b"foo bar".as_ref(); + let buf1 = encoder.encode(msg1); + dst.put(buf1); + + assert_eq!(dst, b"foo bar"); + assert!(!encoder.is_eof()); + encoder.end::<()>().unwrap(); + + let msg2 = b"baz".as_ref(); + let buf2 = encoder.encode(msg2); + dst.put(buf2); + + assert_eq!(dst, b"foo barbaz"); + assert!(!encoder.is_eof()); + encoder.end::<()>().unwrap(); + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h1/io.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h1/io.rs new file mode 100644 index 0000000000..02d8a4a9ec --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h1/io.rs @@ -0,0 +1,1000 @@ +use std::cmp; +use std::fmt; +#[cfg(all(feature = "server", feature = "runtime"))] +use std::future::Future; +use std::io::{self, IoSlice}; +use std::marker::Unpin; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; +#[cfg(all(feature = 
"server", feature = "runtime"))] +use std::time::Duration; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Instant; +use tracing::{debug, trace}; + +use super::{Http1Transaction, ParseContext, ParsedMessage}; +use crate::common::buf::BufList; + +/// The initial buffer size allocated before trying to read from IO. +pub(crate) const INIT_BUFFER_SIZE: usize = 8192; + +/// The minimum value that can be set to max buffer size. +pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; + +/// The default maximum read buffer size. If the buffer gets this big and +/// a message is still not complete, a `TooLarge` error is triggered. +// Note: if this changes, update server::conn::Http::max_buf_size docs. +pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; + +/// The maximum number of distinct `Buf`s to hold in a list before requiring +/// a flush. Only affects when the buffer strategy is to queue buffers. +/// +/// Note that a flush can happen before reaching the maximum. This simply +/// forces a flush if the queue gets this big. 
+const MAX_BUF_LIST_BUFFERS: usize = 16; + +pub(crate) struct Buffered { + flush_pipeline: bool, + io: T, + read_blocked: bool, + read_buf: BytesMut, + read_buf_strategy: ReadStrategy, + write_buf: WriteBuf, +} + +impl fmt::Debug for Buffered +where + B: Buf, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Buffered") + .field("read_buf", &self.read_buf) + .field("write_buf", &self.write_buf) + .finish() + } +} + +impl Buffered +where + T: AsyncRead + AsyncWrite + Unpin, + B: Buf, +{ + pub(crate) fn new(io: T) -> Buffered { + let strategy = if io.is_write_vectored() { + WriteStrategy::Queue + } else { + WriteStrategy::Flatten + }; + let write_buf = WriteBuf::new(strategy); + Buffered { + flush_pipeline: false, + io, + read_blocked: false, + read_buf: BytesMut::with_capacity(0), + read_buf_strategy: ReadStrategy::default(), + write_buf, + } + } + + #[cfg(feature = "server")] + pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { + debug_assert!(!self.write_buf.has_remaining()); + self.flush_pipeline = enabled; + if enabled { + self.set_write_strategy_flatten(); + } + } + + pub(crate) fn set_max_buf_size(&mut self, max: usize) { + assert!( + max >= MINIMUM_MAX_BUFFER_SIZE, + "The max_buf_size cannot be smaller than {}.", + MINIMUM_MAX_BUFFER_SIZE, + ); + self.read_buf_strategy = ReadStrategy::with_max(max); + self.write_buf.max_buf_size = max; + } + + #[cfg(feature = "client")] + pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { + self.read_buf_strategy = ReadStrategy::Exact(sz); + } + + pub(crate) fn set_write_strategy_flatten(&mut self) { + // this should always be called only at construction time, + // so this assert is here to catch myself + debug_assert!(self.write_buf.queue.bufs_cnt() == 0); + self.write_buf.set_strategy(WriteStrategy::Flatten); + } + + pub(crate) fn set_write_strategy_queue(&mut self) { + // this should always be called only at construction time, + // so this assert is here to catch myself + 
debug_assert!(self.write_buf.queue.bufs_cnt() == 0); + self.write_buf.set_strategy(WriteStrategy::Queue); + } + + pub(crate) fn read_buf(&self) -> &[u8] { + self.read_buf.as_ref() + } + + #[cfg(test)] + #[cfg(feature = "nightly")] + pub(super) fn read_buf_mut(&mut self) -> &mut BytesMut { + &mut self.read_buf + } + + /// Return the "allocated" available space, not the potential space + /// that could be allocated in the future. + fn read_buf_remaining_mut(&self) -> usize { + self.read_buf.capacity() - self.read_buf.len() + } + + /// Return whether we can append to the headers buffer. + /// + /// Reasons we can't: + /// - The write buf is in queue mode, and some of the past body is still + /// needing to be flushed. + pub(crate) fn can_headers_buf(&self) -> bool { + !self.write_buf.queue.has_remaining() + } + + pub(crate) fn headers_buf(&mut self) -> &mut Vec { + let buf = self.write_buf.headers_mut(); + &mut buf.bytes + } + + pub(super) fn write_buf(&mut self) -> &mut WriteBuf { + &mut self.write_buf + } + + pub(crate) fn buffer>(&mut self, buf: BB) { + self.write_buf.buffer(buf) + } + + pub(crate) fn can_buffer(&self) -> bool { + self.flush_pipeline || self.write_buf.can_buffer() + } + + pub(crate) fn consume_leading_lines(&mut self) { + if !self.read_buf.is_empty() { + let mut i = 0; + while i < self.read_buf.len() { + match self.read_buf[i] { + b'\r' | b'\n' => i += 1, + _ => break, + } + } + self.read_buf.advance(i); + } + } + + pub(super) fn parse( + &mut self, + cx: &mut Context<'_>, + parse_ctx: ParseContext<'_>, + ) -> Poll>> + where + S: Http1Transaction, + { + loop { + match super::role::parse_headers::( + &mut self.read_buf, + ParseContext { + cached_headers: parse_ctx.cached_headers, + req_method: parse_ctx.req_method, + h1_parser_config: parse_ctx.h1_parser_config.clone(), + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: parse_ctx.h1_header_read_timeout, + #[cfg(all(feature = "server", feature = "runtime"))] + 
h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running, + preserve_header_case: parse_ctx.preserve_header_case, + #[cfg(feature = "ffi")] + preserve_header_order: parse_ctx.preserve_header_order, + h09_responses: parse_ctx.h09_responses, + #[cfg(feature = "ffi")] + on_informational: parse_ctx.on_informational, + #[cfg(feature = "ffi")] + raw_headers: parse_ctx.raw_headers, + }, + )? { + Some(msg) => { + debug!("parsed {} headers", msg.head.headers.len()); + + #[cfg(all(feature = "server", feature = "runtime"))] + { + *parse_ctx.h1_header_read_timeout_running = false; + + if let Some(h1_header_read_timeout_fut) = + parse_ctx.h1_header_read_timeout_fut + { + // Reset the timer in order to avoid woken up when the timeout finishes + h1_header_read_timeout_fut + .as_mut() + .reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60)); + } + } + return Poll::Ready(Ok(msg)); + } + None => { + let max = self.read_buf_strategy.max(); + if self.read_buf.len() >= max { + debug!("max_buf_size ({}) reached, closing", max); + return Poll::Ready(Err(crate::Error::new_too_large())); + } + + #[cfg(all(feature = "server", feature = "runtime"))] + if *parse_ctx.h1_header_read_timeout_running { + if let Some(h1_header_read_timeout_fut) = + parse_ctx.h1_header_read_timeout_fut + { + if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() { + *parse_ctx.h1_header_read_timeout_running = false; + + tracing::warn!("read header from client timeout"); + return Poll::Ready(Err(crate::Error::new_header_timeout())); + } + } + } + } + } + if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? 
== 0 { + trace!("parse eof"); + return Poll::Ready(Err(crate::Error::new_incomplete())); + } + } + } + + pub(crate) fn poll_read_from_io(&mut self, cx: &mut Context<'_>) -> Poll> { + self.read_blocked = false; + let next = self.read_buf_strategy.next(); + if self.read_buf_remaining_mut() < next { + self.read_buf.reserve(next); + } + + let dst = self.read_buf.chunk_mut(); + let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; + let mut buf = ReadBuf::uninit(dst); + match Pin::new(&mut self.io).poll_read(cx, &mut buf) { + Poll::Ready(Ok(_)) => { + let n = buf.filled().len(); + trace!("received {} bytes", n); + unsafe { + // Safety: we just read that many bytes into the + // uninitialized part of the buffer, so this is okay. + // @tokio pls give me back `poll_read_buf` thanks + self.read_buf.advance_mut(n); + } + self.read_buf_strategy.record(n); + Poll::Ready(Ok(n)) + } + Poll::Pending => { + self.read_blocked = true; + Poll::Pending + } + Poll::Ready(Err(e)) => Poll::Ready(Err(e)), + } + } + + pub(crate) fn into_inner(self) -> (T, Bytes) { + (self.io, self.read_buf.freeze()) + } + + pub(crate) fn io_mut(&mut self) -> &mut T { + &mut self.io + } + + pub(crate) fn is_read_blocked(&self) -> bool { + self.read_blocked + } + + pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + if self.flush_pipeline && !self.read_buf.is_empty() { + Poll::Ready(Ok(())) + } else if self.write_buf.remaining() == 0 { + Pin::new(&mut self.io).poll_flush(cx) + } else { + if let WriteStrategy::Flatten = self.write_buf.strategy { + return self.poll_flush_flattened(cx); + } + + const MAX_WRITEV_BUFS: usize = 64; + loop { + let n = { + let mut iovs = [IoSlice::new(&[]); MAX_WRITEV_BUFS]; + let len = self.write_buf.chunks_vectored(&mut iovs); + ready!(Pin::new(&mut self.io).poll_write_vectored(cx, &iovs[..len]))? 
+ }; + // TODO(eliza): we have to do this manually because + // `poll_write_buf` doesn't exist in Tokio 0.3 yet...when + // `poll_write_buf` comes back, the manual advance will need to leave! + self.write_buf.advance(n); + debug!("flushed {} bytes", n); + if self.write_buf.remaining() == 0 { + break; + } else if n == 0 { + trace!( + "write returned zero, but {} bytes remaining", + self.write_buf.remaining() + ); + return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); + } + } + Pin::new(&mut self.io).poll_flush(cx) + } + } + + /// Specialized version of `flush` when strategy is Flatten. + /// + /// Since all buffered bytes are flattened into the single headers buffer, + /// that skips some bookkeeping around using multiple buffers. + fn poll_flush_flattened(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?; + debug!("flushed {} bytes", n); + self.write_buf.headers.advance(n); + if self.write_buf.headers.remaining() == 0 { + self.write_buf.headers.reset(); + break; + } else if n == 0 { + trace!( + "write returned zero, but {} bytes remaining", + self.write_buf.remaining() + ); + return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); + } + } + Pin::new(&mut self.io).poll_flush(cx) + } + + #[cfg(test)] + fn flush<'a>(&'a mut self) -> impl std::future::Future> + 'a { + futures_util::future::poll_fn(move |cx| self.poll_flush(cx)) + } +} + +// The `B` is a `Buf`, we never project a pin to it +impl Unpin for Buffered {} + +// TODO: This trait is old... at least rename to PollBytes or something... 
+pub(crate) trait MemRead { + fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll>; +} + +impl MemRead for Buffered +where + T: AsyncRead + AsyncWrite + Unpin, + B: Buf, +{ + fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> { + if !self.read_buf.is_empty() { + let n = std::cmp::min(len, self.read_buf.len()); + Poll::Ready(Ok(self.read_buf.split_to(n).freeze())) + } else { + let n = ready!(self.poll_read_from_io(cx))?; + Poll::Ready(Ok(self.read_buf.split_to(::std::cmp::min(len, n)).freeze())) + } + } +} + +#[derive(Clone, Copy, Debug)] +enum ReadStrategy { + Adaptive { + decrease_now: bool, + next: usize, + max: usize, + }, + #[cfg(feature = "client")] + Exact(usize), +} + +impl ReadStrategy { + fn with_max(max: usize) -> ReadStrategy { + ReadStrategy::Adaptive { + decrease_now: false, + next: INIT_BUFFER_SIZE, + max, + } + } + + fn next(&self) -> usize { + match *self { + ReadStrategy::Adaptive { next, .. } => next, + #[cfg(feature = "client")] + ReadStrategy::Exact(exact) => exact, + } + } + + fn max(&self) -> usize { + match *self { + ReadStrategy::Adaptive { max, .. } => max, + #[cfg(feature = "client")] + ReadStrategy::Exact(exact) => exact, + } + } + + fn record(&mut self, bytes_read: usize) { + match *self { + ReadStrategy::Adaptive { + ref mut decrease_now, + ref mut next, + max, + .. + } => { + if bytes_read >= *next { + *next = cmp::min(incr_power_of_two(*next), max); + *decrease_now = false; + } else { + let decr_to = prev_power_of_two(*next); + if bytes_read < decr_to { + if *decrease_now { + *next = cmp::max(decr_to, INIT_BUFFER_SIZE); + *decrease_now = false; + } else { + // Decreasing is a two "record" process. + *decrease_now = true; + } + } else { + // A read within the current range should cancel + // a potential decrease, since we just saw proof + // that we still need this size. 
+ *decrease_now = false; + } + } + } + #[cfg(feature = "client")] + ReadStrategy::Exact(_) => (), + } + } +} + +fn incr_power_of_two(n: usize) -> usize { + n.saturating_mul(2) +} + +fn prev_power_of_two(n: usize) -> usize { + // Only way this shift can underflow is if n is less than 4. + // (Which would means `usize::MAX >> 64` and underflowed!) + debug_assert!(n >= 4); + (::std::usize::MAX >> (n.leading_zeros() + 2)) + 1 +} + +impl Default for ReadStrategy { + fn default() -> ReadStrategy { + ReadStrategy::with_max(DEFAULT_MAX_BUFFER_SIZE) + } +} + +#[derive(Clone)] +pub(crate) struct Cursor { + bytes: T, + pos: usize, +} + +impl> Cursor { + #[inline] + pub(crate) fn new(bytes: T) -> Cursor { + Cursor { bytes, pos: 0 } + } +} + +impl Cursor> { + /// If we've advanced the position a bit in this cursor, and wish to + /// extend the underlying vector, we may wish to unshift the "read" bytes + /// off, and move everything else over. + fn maybe_unshift(&mut self, additional: usize) { + if self.pos == 0 { + // nothing to do + return; + } + + if self.bytes.capacity() - self.bytes.len() >= additional { + // there's room! + return; + } + + self.bytes.drain(0..self.pos); + self.pos = 0; + } + + fn reset(&mut self) { + self.pos = 0; + self.bytes.clear(); + } +} + +impl> fmt::Debug for Cursor { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Cursor") + .field("pos", &self.pos) + .field("len", &self.bytes.as_ref().len()) + .finish() + } +} + +impl> Buf for Cursor { + #[inline] + fn remaining(&self) -> usize { + self.bytes.as_ref().len() - self.pos + } + + #[inline] + fn chunk(&self) -> &[u8] { + &self.bytes.as_ref()[self.pos..] 
+ } + + #[inline] + fn advance(&mut self, cnt: usize) { + debug_assert!(self.pos + cnt <= self.bytes.as_ref().len()); + self.pos += cnt; + } +} + +// an internal buffer to collect writes before flushes +pub(super) struct WriteBuf { + /// Re-usable buffer that holds message headers + headers: Cursor>, + max_buf_size: usize, + /// Deque of user buffers if strategy is Queue + queue: BufList, + strategy: WriteStrategy, +} + +impl WriteBuf { + fn new(strategy: WriteStrategy) -> WriteBuf { + WriteBuf { + headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)), + max_buf_size: DEFAULT_MAX_BUFFER_SIZE, + queue: BufList::new(), + strategy, + } + } +} + +impl WriteBuf +where + B: Buf, +{ + fn set_strategy(&mut self, strategy: WriteStrategy) { + self.strategy = strategy; + } + + pub(super) fn buffer>(&mut self, mut buf: BB) { + debug_assert!(buf.has_remaining()); + match self.strategy { + WriteStrategy::Flatten => { + let head = self.headers_mut(); + + head.maybe_unshift(buf.remaining()); + trace!( + self.len = head.remaining(), + buf.len = buf.remaining(), + "buffer.flatten" + ); + //perf: This is a little faster than >::put, + //but accomplishes the same result. 
+ loop { + let adv = { + let slice = buf.chunk(); + if slice.is_empty() { + return; + } + head.bytes.extend_from_slice(slice); + slice.len() + }; + buf.advance(adv); + } + } + WriteStrategy::Queue => { + trace!( + self.len = self.remaining(), + buf.len = buf.remaining(), + "buffer.queue" + ); + self.queue.push(buf.into()); + } + } + } + + fn can_buffer(&self) -> bool { + match self.strategy { + WriteStrategy::Flatten => self.remaining() < self.max_buf_size, + WriteStrategy::Queue => { + self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size + } + } + } + + fn headers_mut(&mut self) -> &mut Cursor> { + debug_assert!(!self.queue.has_remaining()); + &mut self.headers + } +} + +impl fmt::Debug for WriteBuf { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WriteBuf") + .field("remaining", &self.remaining()) + .field("strategy", &self.strategy) + .finish() + } +} + +impl Buf for WriteBuf { + #[inline] + fn remaining(&self) -> usize { + self.headers.remaining() + self.queue.remaining() + } + + #[inline] + fn chunk(&self) -> &[u8] { + let headers = self.headers.chunk(); + if !headers.is_empty() { + headers + } else { + self.queue.chunk() + } + } + + #[inline] + fn advance(&mut self, cnt: usize) { + let hrem = self.headers.remaining(); + + match hrem.cmp(&cnt) { + cmp::Ordering::Equal => self.headers.reset(), + cmp::Ordering::Greater => self.headers.advance(cnt), + cmp::Ordering::Less => { + let qcnt = cnt - hrem; + self.headers.reset(); + self.queue.advance(qcnt); + } + } + } + + #[inline] + fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { + let n = self.headers.chunks_vectored(dst); + self.queue.chunks_vectored(&mut dst[n..]) + n + } +} + +#[derive(Debug)] +enum WriteStrategy { + Flatten, + Queue, +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Duration; + + use tokio_test::io::Builder as Mock; + + // #[cfg(feature = "nightly")] + // use test::Bencher; + + /* + impl MemRead 
for AsyncIo { + fn read_mem(&mut self, len: usize) -> Poll { + let mut v = vec![0; len]; + let n = try_nb!(self.read(v.as_mut_slice())); + Ok(Async::Ready(BytesMut::from(&v[..n]).freeze())) + } + } + */ + + #[tokio::test] + #[ignore] + async fn iobuf_write_empty_slice() { + // TODO(eliza): can i have writev back pls T_T + // // First, let's just check that the Mock would normally return an + // // error on an unexpected write, even if the buffer is empty... + // let mut mock = Mock::new().build(); + // futures_util::future::poll_fn(|cx| { + // Pin::new(&mut mock).poll_write_buf(cx, &mut Cursor::new(&[])) + // }) + // .await + // .expect_err("should be a broken pipe"); + + // // underlying io will return the logic error upon write, + // // so we are testing that the io_buf does not trigger a write + // // when there is nothing to flush + // let mock = Mock::new().build(); + // let mut io_buf = Buffered::<_, Cursor>>::new(mock); + // io_buf.flush().await.expect("should short-circuit flush"); + } + + #[tokio::test] + async fn parse_reads_until_blocked() { + use crate::proto::h1::ClientTransaction; + + let _ = pretty_env_logger::try_init(); + let mock = Mock::new() + // Split over multiple reads will read all of it + .read(b"HTTP/1.1 200 OK\r\n") + .read(b"Server: hyper\r\n") + // missing last line ending + .wait(Duration::from_secs(1)) + .build(); + + let mut buffered = Buffered::<_, Cursor>>::new(mock); + + // We expect a `parse` to be not ready, and so can't await it directly. + // Rather, this `poll_fn` will wrap the `Poll` result. 
+ futures_util::future::poll_fn(|cx| { + let parse_ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + assert!(buffered + .parse::(cx, parse_ctx) + .is_pending()); + Poll::Ready(()) + }) + .await; + + assert_eq!( + buffered.read_buf, + b"HTTP/1.1 200 OK\r\nServer: hyper\r\n"[..] + ); + } + + #[test] + fn read_strategy_adaptive_increments() { + let mut strategy = ReadStrategy::default(); + assert_eq!(strategy.next(), 8192); + + // Grows if record == next + strategy.record(8192); + assert_eq!(strategy.next(), 16384); + + strategy.record(16384); + assert_eq!(strategy.next(), 32768); + + // Enormous records still increment at same rate + strategy.record(::std::usize::MAX); + assert_eq!(strategy.next(), 65536); + + let max = strategy.max(); + while strategy.next() < max { + strategy.record(max); + } + + assert_eq!(strategy.next(), max, "never goes over max"); + strategy.record(max + 1); + assert_eq!(strategy.next(), max, "never goes over max"); + } + + #[test] + fn read_strategy_adaptive_decrements() { + let mut strategy = ReadStrategy::default(); + strategy.record(8192); + assert_eq!(strategy.next(), 16384); + + strategy.record(1); + assert_eq!( + strategy.next(), + 16384, + "first smaller record doesn't decrement yet" + ); + strategy.record(8192); + assert_eq!(strategy.next(), 16384, "record was with range"); + + strategy.record(1); + assert_eq!( + strategy.next(), + 16384, + "in-range record should make this the 'first' again" + ); + + strategy.record(1); + 
assert_eq!(strategy.next(), 8192, "second smaller record decrements"); + + strategy.record(1); + assert_eq!(strategy.next(), 8192, "first doesn't decrement"); + strategy.record(1); + assert_eq!(strategy.next(), 8192, "doesn't decrement under minimum"); + } + + #[test] + fn read_strategy_adaptive_stays_the_same() { + let mut strategy = ReadStrategy::default(); + strategy.record(8192); + assert_eq!(strategy.next(), 16384); + + strategy.record(8193); + assert_eq!( + strategy.next(), + 16384, + "first smaller record doesn't decrement yet" + ); + + strategy.record(8193); + assert_eq!( + strategy.next(), + 16384, + "with current step does not decrement" + ); + } + + #[test] + fn read_strategy_adaptive_max_fuzz() { + fn fuzz(max: usize) { + let mut strategy = ReadStrategy::with_max(max); + while strategy.next() < max { + strategy.record(::std::usize::MAX); + } + let mut next = strategy.next(); + while next > 8192 { + strategy.record(1); + strategy.record(1); + next = strategy.next(); + assert!( + next.is_power_of_two(), + "decrement should be powers of two: {} (max = {})", + next, + max, + ); + } + } + + let mut max = 8192; + while max < std::usize::MAX { + fuzz(max); + max = (max / 2).saturating_mul(3); + } + fuzz(::std::usize::MAX); + } + + #[test] + #[should_panic] + #[cfg(debug_assertions)] // needs to trigger a debug_assert + fn write_buf_requires_non_empty_bufs() { + let mock = Mock::new().build(); + let mut buffered = Buffered::<_, Cursor>>::new(mock); + + buffered.buffer(Cursor::new(Vec::new())); + } + + /* + TODO: needs tokio_test::io to allow configure write_buf calls + #[test] + fn write_buf_queue() { + let _ = pretty_env_logger::try_init(); + + let mock = AsyncIo::new_buf(vec![], 1024); + let mut buffered = Buffered::<_, Cursor>>::new(mock); + + + buffered.headers_buf().extend(b"hello "); + buffered.buffer(Cursor::new(b"world, ".to_vec())); + buffered.buffer(Cursor::new(b"it's ".to_vec())); + buffered.buffer(Cursor::new(b"hyper!".to_vec())); + 
assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3); + buffered.flush().unwrap(); + + assert_eq!(buffered.io, b"hello world, it's hyper!"); + assert_eq!(buffered.io.num_writes(), 1); + assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); + } + */ + + #[tokio::test] + async fn write_buf_flatten() { + let _ = pretty_env_logger::try_init(); + + let mock = Mock::new().write(b"hello world, it's hyper!").build(); + + let mut buffered = Buffered::<_, Cursor>>::new(mock); + buffered.write_buf.set_strategy(WriteStrategy::Flatten); + + buffered.headers_buf().extend(b"hello "); + buffered.buffer(Cursor::new(b"world, ".to_vec())); + buffered.buffer(Cursor::new(b"it's ".to_vec())); + buffered.buffer(Cursor::new(b"hyper!".to_vec())); + assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); + + buffered.flush().await.expect("flush"); + } + + #[test] + fn write_buf_flatten_partially_flushed() { + let _ = pretty_env_logger::try_init(); + + let b = |s: &str| Cursor::new(s.as_bytes().to_vec()); + + let mut write_buf = WriteBuf::>>::new(WriteStrategy::Flatten); + + write_buf.buffer(b("hello ")); + write_buf.buffer(b("world, ")); + + assert_eq!(write_buf.chunk(), b"hello world, "); + + // advance most of the way, but not all + write_buf.advance(11); + + assert_eq!(write_buf.chunk(), b", "); + assert_eq!(write_buf.headers.pos, 11); + assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE); + + // there's still room in the headers buffer, so just push on the end + write_buf.buffer(b("it's hyper!")); + + assert_eq!(write_buf.chunk(), b", it's hyper!"); + assert_eq!(write_buf.headers.pos, 11); + + let rem1 = write_buf.remaining(); + let cap = write_buf.headers.bytes.capacity(); + + // but when this would go over capacity, don't copy the old bytes + write_buf.buffer(Cursor::new(vec![b'X'; cap])); + assert_eq!(write_buf.remaining(), cap + rem1); + assert_eq!(write_buf.headers.pos, 0); + } + + #[tokio::test] + async fn write_buf_queue_disable_auto() { + let _ = 
pretty_env_logger::try_init(); + + let mock = Mock::new() + .write(b"hello ") + .write(b"world, ") + .write(b"it's ") + .write(b"hyper!") + .build(); + + let mut buffered = Buffered::<_, Cursor>>::new(mock); + buffered.write_buf.set_strategy(WriteStrategy::Queue); + + // we have 4 buffers, and vec IO disabled, but explicitly said + // don't try to auto detect (via setting strategy above) + + buffered.headers_buf().extend(b"hello "); + buffered.buffer(Cursor::new(b"world, ".to_vec())); + buffered.buffer(Cursor::new(b"it's ".to_vec())); + buffered.buffer(Cursor::new(b"hyper!".to_vec())); + assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3); + + buffered.flush().await.expect("flush"); + + assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); + } + + // #[cfg(feature = "nightly")] + // #[bench] + // fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) { + // let s = "Hello, World!"; + // b.bytes = s.len() as u64; + + // let mut write_buf = WriteBuf::::new(); + // write_buf.set_strategy(WriteStrategy::Flatten); + // b.iter(|| { + // let chunk = bytes::Bytes::from(s); + // write_buf.buffer(chunk); + // ::test::black_box(&write_buf); + // write_buf.headers.bytes.clear(); + // }) + // } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h1/mod.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h1/mod.rs new file mode 100644 index 0000000000..5a2587a843 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h1/mod.rs @@ -0,0 +1,122 @@ +#[cfg(all(feature = "server", feature = "runtime"))] +use std::{pin::Pin, time::Duration}; + +use bytes::BytesMut; +use http::{HeaderMap, Method}; +use httparse::ParserConfig; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Sleep; + +use crate::body::DecodedLength; +use crate::proto::{BodyLength, MessageHead}; + +pub(crate) use self::conn::Conn; +pub(crate) use self::decode::Decoder; +pub(crate) use self::dispatch::Dispatcher; +pub(crate) use self::encode::{EncodedBuf, Encoder}; +//TODO: move out of h1::io +pub(crate) 
use self::io::MINIMUM_MAX_BUFFER_SIZE; + +mod conn; +mod decode; +pub(crate) mod dispatch; +mod encode; +mod io; +mod role; + +cfg_client! { + pub(crate) type ClientTransaction = role::Client; +} + +cfg_server! { + pub(crate) type ServerTransaction = role::Server; +} + +pub(crate) trait Http1Transaction { + type Incoming; + type Outgoing: Default; + const LOG: &'static str; + fn parse(bytes: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult; + fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result; + + fn on_error(err: &crate::Error) -> Option>; + + fn is_client() -> bool { + !Self::is_server() + } + + fn is_server() -> bool { + !Self::is_client() + } + + fn should_error_on_parse_eof() -> bool { + Self::is_client() + } + + fn should_read_first() -> bool { + Self::is_server() + } + + fn update_date() {} +} + +/// Result newtype for Http1Transaction::parse. +pub(crate) type ParseResult = Result>, crate::error::Parse>; + +#[derive(Debug)] +pub(crate) struct ParsedMessage { + head: MessageHead, + decode: DecodedLength, + expect_continue: bool, + keep_alive: bool, + wants_upgrade: bool, +} + +pub(crate) struct ParseContext<'a> { + cached_headers: &'a mut Option, + req_method: &'a mut Option, + h1_parser_config: ParserConfig, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout: Option, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_fut: &'a mut Option>>, + #[cfg(all(feature = "server", feature = "runtime"))] + h1_header_read_timeout_running: &'a mut bool, + preserve_header_case: bool, + #[cfg(feature = "ffi")] + preserve_header_order: bool, + h09_responses: bool, + #[cfg(feature = "ffi")] + on_informational: &'a mut Option, + #[cfg(feature = "ffi")] + raw_headers: bool, +} + +/// Passed to Http1Transaction::encode +pub(crate) struct Encode<'a, T> { + head: &'a mut MessageHead, + body: Option, + #[cfg(feature = "server")] + keep_alive: bool, + req_method: &'a mut Option, + 
title_case_headers: bool, +} + +/// Extra flags that a request "wants", like expect-continue or upgrades. +#[derive(Clone, Copy, Debug)] +struct Wants(u8); + +impl Wants { + const EMPTY: Wants = Wants(0b00); + const EXPECT: Wants = Wants(0b01); + const UPGRADE: Wants = Wants(0b10); + + #[must_use] + fn add(self, other: Wants) -> Wants { + Wants(self.0 | other.0) + } + + fn contains(&self, other: Wants) -> bool { + (self.0 & other.0) == other.0 + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h1/role.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h1/role.rs new file mode 100644 index 0000000000..7a4544d989 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h1/role.rs @@ -0,0 +1,2850 @@ +use std::fmt::{self, Write}; +use std::mem::MaybeUninit; + +use bytes::Bytes; +use bytes::BytesMut; +#[cfg(feature = "server")] +use http::header::ValueIter; +use http::header::{self, Entry, HeaderName, HeaderValue}; +use http::{HeaderMap, Method, StatusCode, Version}; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Instant; +use tracing::{debug, error, trace, trace_span, warn}; + +use crate::body::DecodedLength; +#[cfg(feature = "server")] +use crate::common::date; +use crate::error::Parse; +use crate::ext::HeaderCaseMap; +#[cfg(feature = "ffi")] +use crate::ext::OriginalHeaderOrder; +use crate::headers; +use crate::proto::h1::{ + Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage, +}; +use crate::proto::{BodyLength, MessageHead, RequestHead, RequestLine}; + +const MAX_HEADERS: usize = 100; +const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific +#[cfg(feature = "server")] +const MAX_URI_LEN: usize = (u16::MAX - 1) as usize; + +macro_rules! header_name { + ($bytes:expr) => {{ + { + match HeaderName::from_bytes($bytes) { + Ok(name) => name, + Err(e) => maybe_panic!(e), + } + } + }}; +} + +macro_rules! 
header_value { + ($bytes:expr) => {{ + { + unsafe { HeaderValue::from_maybe_shared_unchecked($bytes) } + } + }}; +} + +macro_rules! maybe_panic { + ($($arg:tt)*) => ({ + let _err = ($($arg)*); + if cfg!(debug_assertions) { + panic!("{:?}", _err); + } else { + error!("Internal Hyper error, please report {:?}", _err); + return Err(Parse::Internal) + } + }) +} + +pub(super) fn parse_headers( + bytes: &mut BytesMut, + ctx: ParseContext<'_>, +) -> ParseResult +where + T: Http1Transaction, +{ + #[cfg(all(feature = "server", feature = "runtime"))] + if !*ctx.h1_header_read_timeout_running { + if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout { + let span = trace_span!("parse_headers"); + let _s = span.enter(); + + let deadline = Instant::now() + h1_header_read_timeout; + *ctx.h1_header_read_timeout_running = true; + match ctx.h1_header_read_timeout_fut { + Some(h1_header_read_timeout_fut) => { + debug!("resetting h1 header read timeout timer"); + h1_header_read_timeout_fut.as_mut().reset(deadline); + } + None => { + debug!("setting h1 header read timeout timer"); + *ctx.h1_header_read_timeout_fut = + Some(Box::pin(tokio::time::sleep_until(deadline))); + } + } + } + } + + // If the buffer is empty, don't bother entering the span, it's just noise. + if bytes.is_empty() { + return Ok(None); + } + + let span = trace_span!("parse_headers"); + let _s = span.enter(); + + T::parse(bytes, ctx) +} + +pub(super) fn encode_headers( + enc: Encode<'_, T::Outgoing>, + dst: &mut Vec, +) -> crate::Result +where + T: Http1Transaction, +{ + let span = trace_span!("encode_headers"); + let _s = span.enter(); + T::encode(enc, dst) +} + +// There are 2 main roles, Client and Server. 
+ +#[cfg(feature = "client")] +pub(crate) enum Client {} + +#[cfg(feature = "server")] +pub(crate) enum Server {} + +#[cfg(feature = "server")] +impl Http1Transaction for Server { + type Incoming = RequestLine; + type Outgoing = StatusCode; + const LOG: &'static str = "{role=server}"; + + fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { + debug_assert!(!buf.is_empty(), "parse called with empty buf"); + + let mut keep_alive; + let is_http_11; + let subject; + let version; + let len; + let headers_len; + + // Unsafe: both headers_indices and headers are using uninitialized memory, + // but we *never* read any of it until after httparse has assigned + // values into it. By not zeroing out the stack memory, this saves + // a good ~5% on pipeline benchmarks. + let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + MaybeUninit::uninit().assume_init() + }; + { + /* SAFETY: it is safe to go from MaybeUninit array to array of MaybeUninit */ + let mut headers: [MaybeUninit>; MAX_HEADERS] = + unsafe { MaybeUninit::uninit().assume_init() }; + trace!(bytes = buf.len(), "Request.parse"); + let mut req = httparse::Request::new(&mut []); + let bytes = buf.as_ref(); + match req.parse_with_uninit_headers(bytes, &mut headers) { + Ok(httparse::Status::Complete(parsed_len)) => { + trace!("Request.parse Complete({})", parsed_len); + len = parsed_len; + let uri = req.path.unwrap(); + if uri.len() > MAX_URI_LEN { + return Err(Parse::UriTooLong); + } + subject = RequestLine( + Method::from_bytes(req.method.unwrap().as_bytes())?, + uri.parse()?, + ); + version = if req.version.unwrap() == 1 { + keep_alive = true; + is_http_11 = true; + Version::HTTP_11 + } else { + keep_alive = false; + is_http_11 = false; + Version::HTTP_10 + }; + + record_header_indices(bytes, &req.headers, &mut headers_indices)?; + headers_len = req.headers.len(); + } + Ok(httparse::Status::Partial) => return 
Ok(None), + Err(err) => { + return Err(match err { + // if invalid Token, try to determine if for method or path + httparse::Error::Token => { + if req.method.is_none() { + Parse::Method + } else { + debug_assert!(req.path.is_none()); + Parse::Uri + } + } + other => other.into(), + }); + } + } + }; + + let slice = buf.split_to(len).freeze(); + + // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 + // 1. (irrelevant to Request) + // 2. (irrelevant to Request) + // 3. Transfer-Encoding: chunked has a chunked body. + // 4. If multiple differing Content-Length headers or invalid, close connection. + // 5. Content-Length header has a sized body. + // 6. Length 0. + // 7. (irrelevant to Request) + + let mut decoder = DecodedLength::ZERO; + let mut expect_continue = false; + let mut con_len = None; + let mut is_te = false; + let mut is_te_chunked = false; + let mut wants_upgrade = subject.0 == Method::CONNECT; + + let mut header_case_map = if ctx.preserve_header_case { + Some(HeaderCaseMap::default()) + } else { + None + }; + + #[cfg(feature = "ffi")] + let mut header_order = if ctx.preserve_header_order { + Some(OriginalHeaderOrder::default()) + } else { + None + }; + + let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); + + headers.reserve(headers_len); + + for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; + let name = header_name!(&slice[header.name.0..header.name.1]); + let value = header_value!(slice.slice(header.value.0..header.value.1)); + + match name { + header::TRANSFER_ENCODING => { + // https://tools.ietf.org/html/rfc7230#section-3.3.3 + // If Transfer-Encoding header is present, and 'chunked' is + // not the final encoding, and this is a Request, then it is + // malformed. A server should respond with 400 Bad Request. 
+ if !is_http_11 { + debug!("HTTP/1.0 cannot have Transfer-Encoding header"); + return Err(Parse::transfer_encoding_unexpected()); + } + is_te = true; + if headers::is_chunked_(&value) { + is_te_chunked = true; + decoder = DecodedLength::CHUNKED; + } else { + is_te_chunked = false; + } + } + header::CONTENT_LENGTH => { + if is_te { + continue; + } + let len = headers::content_length_parse(&value) + .ok_or_else(Parse::content_length_invalid)?; + if let Some(prev) = con_len { + if prev != len { + debug!( + "multiple Content-Length headers with different values: [{}, {}]", + prev, len, + ); + return Err(Parse::content_length_invalid()); + } + // we don't need to append this secondary length + continue; + } + decoder = DecodedLength::checked_new(len)?; + con_len = Some(len); + } + header::CONNECTION => { + // keep_alive was previously set to default for Version + if keep_alive { + // HTTP/1.1 + keep_alive = !headers::connection_close(&value); + } else { + // HTTP/1.0 + keep_alive = headers::connection_keep_alive(&value); + } + } + header::EXPECT => { + // According to https://datatracker.ietf.org/doc/html/rfc2616#section-14.20 + // Comparison of expectation values is case-insensitive for unquoted tokens + // (including the 100-continue token) + expect_continue = value.as_bytes().eq_ignore_ascii_case(b"100-continue"); + } + header::UPGRADE => { + // Upgrades are only allowed with HTTP/1.1 + wants_upgrade = is_http_11; + } + + _ => (), + } + + if let Some(ref mut header_case_map) = header_case_map { + header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); + } + + #[cfg(feature = "ffi")] + if let Some(ref mut header_order) = header_order { + header_order.append(&name); + } + + headers.append(name, value); + } + + if is_te && !is_te_chunked { + debug!("request with transfer-encoding header, but not chunked, bad request"); + return Err(Parse::transfer_encoding_invalid()); + } + + let mut extensions = http::Extensions::default(); + + if let 
Some(header_case_map) = header_case_map { + extensions.insert(header_case_map); + } + + #[cfg(feature = "ffi")] + if let Some(header_order) = header_order { + extensions.insert(header_order); + } + + *ctx.req_method = Some(subject.0.clone()); + + Ok(Some(ParsedMessage { + head: MessageHead { + version, + subject, + headers, + extensions, + }, + decode: decoder, + expect_continue, + keep_alive, + wants_upgrade, + })) + } + + fn encode(mut msg: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result { + trace!( + "Server::encode status={:?}, body={:?}, req_method={:?}", + msg.head.subject, + msg.body, + msg.req_method + ); + + let mut wrote_len = false; + + // hyper currently doesn't support returning 1xx status codes as a Response + // This is because Service only allows returning a single Response, and + // so if you try to reply with a e.g. 100 Continue, you have no way of + // replying with the latter status code response. + let (ret, is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS { + (Ok(()), true) + } else if msg.req_method == &Some(Method::CONNECT) && msg.head.subject.is_success() { + // Sending content-length or transfer-encoding header on 2xx response + // to CONNECT is forbidden in RFC 7231. + wrote_len = true; + (Ok(()), true) + } else if msg.head.subject.is_informational() { + warn!("response with 1xx status code not supported"); + *msg.head = MessageHead::default(); + msg.head.subject = StatusCode::INTERNAL_SERVER_ERROR; + msg.body = None; + (Err(crate::Error::new_user_unsupported_status_code()), true) + } else { + (Ok(()), !msg.keep_alive) + }; + + // In some error cases, we don't know about the invalid message until already + // pushing some bytes onto the `dst`. In those cases, we don't want to send + // the half-pushed message, so rewind to before. 
+ let orig_len = dst.len(); + + let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; + dst.reserve(init_cap); + + let custom_reason_phrase = msg.head.extensions.get::(); + + if msg.head.version == Version::HTTP_11 + && msg.head.subject == StatusCode::OK + && custom_reason_phrase.is_none() + { + extend(dst, b"HTTP/1.1 200 OK\r\n"); + } else { + match msg.head.version { + Version::HTTP_10 => extend(dst, b"HTTP/1.0 "), + Version::HTTP_11 => extend(dst, b"HTTP/1.1 "), + Version::HTTP_2 => { + debug!("response with HTTP2 version coerced to HTTP/1.1"); + extend(dst, b"HTTP/1.1 "); + } + other => panic!("unexpected response version: {:?}", other), + } + + extend(dst, msg.head.subject.as_str().as_bytes()); + extend(dst, b" "); + + if let Some(reason) = custom_reason_phrase { + extend(dst, reason.as_bytes()); + } else { + // a reason MUST be written, as many parsers will expect it. + extend( + dst, + msg.head + .subject + .canonical_reason() + .unwrap_or("") + .as_bytes(), + ); + } + + extend(dst, b"\r\n"); + } + + let orig_headers; + let extensions = std::mem::take(&mut msg.head.extensions); + let orig_headers = match extensions.get::() { + None if msg.title_case_headers => { + orig_headers = HeaderCaseMap::default(); + Some(&orig_headers) + } + orig_headers => orig_headers, + }; + let encoder = if let Some(orig_headers) = orig_headers { + Self::encode_headers_with_original_case( + msg, + dst, + is_last, + orig_len, + wrote_len, + orig_headers, + )? + } else { + Self::encode_headers_with_lower_case(msg, dst, is_last, orig_len, wrote_len)? 
+ }; + + ret.map(|()| encoder) + } + + fn on_error(err: &crate::Error) -> Option> { + use crate::error::Kind; + let status = match *err.kind() { + Kind::Parse(Parse::Method) + | Kind::Parse(Parse::Header(_)) + | Kind::Parse(Parse::Uri) + | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST, + Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, + Kind::Parse(Parse::UriTooLong) => StatusCode::URI_TOO_LONG, + _ => return None, + }; + + debug!("sending automatic response ({}) for parse error", status); + let mut msg = MessageHead::default(); + msg.subject = status; + Some(msg) + } + + fn is_server() -> bool { + true + } + + fn update_date() { + date::update(); + } +} + +#[cfg(feature = "server")] +impl Server { + fn can_have_body(method: &Option, status: StatusCode) -> bool { + Server::can_chunked(method, status) + } + + fn can_chunked(method: &Option, status: StatusCode) -> bool { + if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() + { + false + } else if status.is_informational() { + false + } else { + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } + + fn can_have_content_length(method: &Option, status: StatusCode) -> bool { + if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() { + false + } else { + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } + + fn can_have_implicit_zero_content_length(method: &Option, status: StatusCode) -> bool { + Server::can_have_content_length(method, status) && method != &Some(Method::HEAD) + } + + fn encode_headers_with_lower_case( + msg: Encode<'_, StatusCode>, + dst: &mut Vec, + is_last: bool, + orig_len: usize, + wrote_len: bool, + ) -> crate::Result { + struct LowercaseWriter; + + impl HeaderNameWriter for LowercaseWriter { + #[inline] + fn write_full_header_line( + &mut self, + dst: &mut Vec, + line: &str, + _: 
(HeaderName, &str), + ) { + extend(dst, line.as_bytes()) + } + + #[inline] + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + name_with_colon: &str, + _: HeaderName, + ) { + extend(dst, name_with_colon.as_bytes()) + } + + #[inline] + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { + extend(dst, name.as_str().as_bytes()) + } + } + + Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, LowercaseWriter) + } + + #[cold] + #[inline(never)] + fn encode_headers_with_original_case( + msg: Encode<'_, StatusCode>, + dst: &mut Vec, + is_last: bool, + orig_len: usize, + wrote_len: bool, + orig_headers: &HeaderCaseMap, + ) -> crate::Result { + struct OrigCaseWriter<'map> { + map: &'map HeaderCaseMap, + current: Option<(HeaderName, ValueIter<'map, Bytes>)>, + title_case_headers: bool, + } + + impl HeaderNameWriter for OrigCaseWriter<'_> { + #[inline] + fn write_full_header_line( + &mut self, + dst: &mut Vec, + _: &str, + (name, rest): (HeaderName, &str), + ) { + self.write_header_name(dst, &name); + extend(dst, rest.as_bytes()); + } + + #[inline] + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + _: &str, + name: HeaderName, + ) { + self.write_header_name(dst, &name); + extend(dst, b": "); + } + + #[inline] + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { + let Self { + map, + ref mut current, + title_case_headers, + } = *self; + if current.as_ref().map_or(true, |(last, _)| last != name) { + *current = None; + } + let (_, values) = + current.get_or_insert_with(|| (name.clone(), map.get_all_internal(name))); + + if let Some(orig_name) = values.next() { + extend(dst, orig_name); + } else if title_case_headers { + title_case(dst, name.as_str().as_bytes()); + } else { + extend(dst, name.as_str().as_bytes()); + } + } + } + + let header_name_writer = OrigCaseWriter { + map: orig_headers, + current: None, + title_case_headers: msg.title_case_headers, + }; + + Self::encode_headers(msg, dst, is_last, 
orig_len, wrote_len, header_name_writer) + } + + #[inline] + fn encode_headers( + msg: Encode<'_, StatusCode>, + dst: &mut Vec, + mut is_last: bool, + orig_len: usize, + mut wrote_len: bool, + mut header_name_writer: W, + ) -> crate::Result + where + W: HeaderNameWriter, + { + // In some error cases, we don't know about the invalid message until already + // pushing some bytes onto the `dst`. In those cases, we don't want to send + // the half-pushed message, so rewind to before. + let rewind = |dst: &mut Vec| { + dst.truncate(orig_len); + }; + + let mut encoder = Encoder::length(0); + let mut wrote_date = false; + let mut cur_name = None; + let mut is_name_written = false; + let mut must_write_chunked = false; + let mut prev_con_len = None; + + macro_rules! handle_is_name_written { + () => {{ + if is_name_written { + // we need to clean up and write the newline + debug_assert_ne!( + &dst[dst.len() - 2..], + b"\r\n", + "previous header wrote newline but set is_name_written" + ); + + if must_write_chunked { + extend(dst, b", chunked\r\n"); + } else { + extend(dst, b"\r\n"); + } + } + }}; + } + + 'headers: for (opt_name, value) in msg.head.headers.drain() { + if let Some(n) = opt_name { + cur_name = Some(n); + handle_is_name_written!(); + is_name_written = false; + } + let name = cur_name.as_ref().expect("current header name"); + match *name { + header::CONTENT_LENGTH => { + if wrote_len && !is_name_written { + warn!("unexpected content-length found, canceling"); + rewind(dst); + return Err(crate::Error::new_user_header()); + } + match msg.body { + Some(BodyLength::Known(known_len)) => { + // The HttpBody claims to know a length, and + // the headers are already set. For performance + // reasons, we are just going to trust that + // the values match. + // + // In debug builds, we'll assert they are the + // same to help developers find bugs. 
+ #[cfg(debug_assertions)] + { + if let Some(len) = headers::content_length_parse(&value) { + assert!( + len == known_len, + "payload claims content-length of {}, custom content-length header claims {}", + known_len, + len, + ); + } + } + + if !is_name_written { + encoder = Encoder::length(known_len); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); + extend(dst, value.as_bytes()); + wrote_len = true; + is_name_written = true; + } + continue 'headers; + } + Some(BodyLength::Unknown) => { + // The HttpBody impl didn't know how long the + // body is, but a length header was included. + // We have to parse the value to return our + // Encoder... + + if let Some(len) = headers::content_length_parse(&value) { + if let Some(prev) = prev_con_len { + if prev != len { + warn!( + "multiple Content-Length values found: [{}, {}]", + prev, len + ); + rewind(dst); + return Err(crate::Error::new_user_header()); + } + debug_assert!(is_name_written); + continue 'headers; + } else { + // we haven't written content-length yet! + encoder = Encoder::length(len); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); + extend(dst, value.as_bytes()); + wrote_len = true; + is_name_written = true; + prev_con_len = Some(len); + continue 'headers; + } + } else { + warn!("illegal Content-Length value: {:?}", value); + rewind(dst); + return Err(crate::Error::new_user_header()); + } + } + None => { + // We have no body to actually send, + // but the headers claim a content-length. + // There's only 2 ways this makes sense: + // + // - The header says the length is `0`. + // - This is a response to a `HEAD` request. 
+ if msg.req_method == &Some(Method::HEAD) { + debug_assert_eq!(encoder, Encoder::length(0)); + } else { + if value.as_bytes() != b"0" { + warn!( + "content-length value found, but empty body provided: {:?}", + value + ); + } + continue 'headers; + } + } + } + wrote_len = true; + } + header::TRANSFER_ENCODING => { + if wrote_len && !is_name_written { + warn!("unexpected transfer-encoding found, canceling"); + rewind(dst); + return Err(crate::Error::new_user_header()); + } + // check that we actually can send a chunked body... + if msg.head.version == Version::HTTP_10 + || !Server::can_chunked(msg.req_method, msg.head.subject) + { + continue; + } + wrote_len = true; + // Must check each value, because `chunked` needs to be the + // last encoding, or else we add it. + must_write_chunked = !headers::is_chunked_(&value); + + if !is_name_written { + encoder = Encoder::chunked(); + is_name_written = true; + header_name_writer.write_header_name_with_colon( + dst, + "transfer-encoding: ", + header::TRANSFER_ENCODING, + ); + extend(dst, value.as_bytes()); + } else { + extend(dst, b", "); + extend(dst, value.as_bytes()); + } + continue 'headers; + } + header::CONNECTION => { + if !is_last && headers::connection_close(&value) { + is_last = true; + } + if !is_name_written { + is_name_written = true; + header_name_writer.write_header_name_with_colon( + dst, + "connection: ", + header::CONNECTION, + ); + extend(dst, value.as_bytes()); + } else { + extend(dst, b", "); + extend(dst, value.as_bytes()); + } + continue 'headers; + } + header::DATE => { + wrote_date = true; + } + _ => (), + } + //TODO: this should perhaps instead combine them into + //single lines, as RFC7230 suggests is preferable. 
+ + // non-special write Name and Value + debug_assert!( + !is_name_written, + "{:?} set is_name_written and didn't continue loop", + name, + ); + header_name_writer.write_header_name(dst, name); + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } + + handle_is_name_written!(); + + if !wrote_len { + encoder = match msg.body { + Some(BodyLength::Unknown) => { + if msg.head.version == Version::HTTP_10 + || !Server::can_chunked(msg.req_method, msg.head.subject) + { + Encoder::close_delimited() + } else { + header_name_writer.write_full_header_line( + dst, + "transfer-encoding: chunked\r\n", + (header::TRANSFER_ENCODING, ": chunked\r\n"), + ); + Encoder::chunked() + } + } + None | Some(BodyLength::Known(0)) => { + if Server::can_have_implicit_zero_content_length( + msg.req_method, + msg.head.subject, + ) { + header_name_writer.write_full_header_line( + dst, + "content-length: 0\r\n", + (header::CONTENT_LENGTH, ": 0\r\n"), + ) + } + Encoder::length(0) + } + Some(BodyLength::Known(len)) => { + if !Server::can_have_content_length(msg.req_method, msg.head.subject) { + Encoder::length(0) + } else { + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); + extend(dst, ::itoa::Buffer::new().format(len).as_bytes()); + extend(dst, b"\r\n"); + Encoder::length(len) + } + } + }; + } + + if !Server::can_have_body(msg.req_method, msg.head.subject) { + trace!( + "server body forced to 0; method={:?}, status={:?}", + msg.req_method, + msg.head.subject + ); + encoder = Encoder::length(0); + } + + // cached date is much faster than formatting every request + if !wrote_date { + dst.reserve(date::DATE_VALUE_LENGTH + 8); + header_name_writer.write_header_name_with_colon(dst, "date: ", header::DATE); + date::extend(dst); + extend(dst, b"\r\n\r\n"); + } else { + extend(dst, b"\r\n"); + } + + Ok(encoder.set_last(is_last)) + } +} + +#[cfg(feature = "server")] +trait HeaderNameWriter { + fn 
write_full_header_line( + &mut self, + dst: &mut Vec, + line: &str, + name_value_pair: (HeaderName, &str), + ); + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + name_with_colon: &str, + name: HeaderName, + ); + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName); +} + +#[cfg(feature = "client")] +impl Http1Transaction for Client { + type Incoming = StatusCode; + type Outgoing = RequestLine; + const LOG: &'static str = "{role=client}"; + + fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { + debug_assert!(!buf.is_empty(), "parse called with empty buf"); + + // Loop to skip information status code headers (100 Continue, etc). + loop { + // Unsafe: see comment in Server Http1Transaction, above. + let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + MaybeUninit::uninit().assume_init() + }; + let (len, status, reason, version, headers_len) = { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + let mut headers: [MaybeUninit>; MAX_HEADERS] = + unsafe { MaybeUninit::uninit().assume_init() }; + trace!(bytes = buf.len(), "Response.parse"); + let mut res = httparse::Response::new(&mut []); + let bytes = buf.as_ref(); + match ctx.h1_parser_config.parse_response_with_uninit_headers( + &mut res, + bytes, + &mut headers, + ) { + Ok(httparse::Status::Complete(len)) => { + trace!("Response.parse Complete({})", len); + let status = StatusCode::from_u16(res.code.unwrap())?; + + let reason = { + let reason = res.reason.unwrap(); + // Only save the reason phrase if it isn't the canonical reason + if Some(reason) != status.canonical_reason() { + Some(Bytes::copy_from_slice(reason.as_bytes())) + } else { + None + } + }; + + let version = if res.version.unwrap() == 1 { + Version::HTTP_11 + } else { + Version::HTTP_10 + }; + record_header_indices(bytes, &res.headers, &mut headers_indices)?; + let headers_len = 
res.headers.len(); + (len, status, reason, version, headers_len) + } + Ok(httparse::Status::Partial) => return Ok(None), + Err(httparse::Error::Version) if ctx.h09_responses => { + trace!("Response.parse accepted HTTP/0.9 response"); + + (0, StatusCode::OK, None, Version::HTTP_09, 0) + } + Err(e) => return Err(e.into()), + } + }; + + let mut slice = buf.split_to(len); + + if ctx + .h1_parser_config + .obsolete_multiline_headers_in_responses_are_allowed() + { + for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; + for b in &mut slice[header.value.0..header.value.1] { + if *b == b'\r' || *b == b'\n' { + *b = b' '; + } + } + } + } + + let slice = slice.freeze(); + + let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); + + let mut keep_alive = version == Version::HTTP_11; + + let mut header_case_map = if ctx.preserve_header_case { + Some(HeaderCaseMap::default()) + } else { + None + }; + + #[cfg(feature = "ffi")] + let mut header_order = if ctx.preserve_header_order { + Some(OriginalHeaderOrder::default()) + } else { + None + }; + + headers.reserve(headers_len); + for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; + let name = header_name!(&slice[header.name.0..header.name.1]); + let value = header_value!(slice.slice(header.value.0..header.value.1)); + + if let header::CONNECTION = name { + // keep_alive was previously set to default for Version + if keep_alive { + // HTTP/1.1 + keep_alive = !headers::connection_close(&value); + } else { + // HTTP/1.0 + keep_alive = headers::connection_keep_alive(&value); + } + } + + if let Some(ref mut header_case_map) = header_case_map { + header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); + } + + #[cfg(feature = "ffi")] + if let Some(ref mut header_order) = header_order { + header_order.append(&name); + } + + 
headers.append(name, value); + } + + let mut extensions = http::Extensions::default(); + + if let Some(header_case_map) = header_case_map { + extensions.insert(header_case_map); + } + + #[cfg(feature = "ffi")] + if let Some(header_order) = header_order { + extensions.insert(header_order); + } + + if let Some(reason) = reason { + // Safety: httparse ensures that only valid reason phrase bytes are present in this + // field. + let reason = unsafe { crate::ext::ReasonPhrase::from_bytes_unchecked(reason) }; + extensions.insert(reason); + } + + #[cfg(feature = "ffi")] + if ctx.raw_headers { + extensions.insert(crate::ffi::RawHeaders(crate::ffi::hyper_buf(slice))); + } + + let head = MessageHead { + version, + subject: status, + headers, + extensions, + }; + if let Some((decode, is_upgrade)) = Client::decoder(&head, ctx.req_method)? { + return Ok(Some(ParsedMessage { + head, + decode, + expect_continue: false, + // a client upgrade means the connection can't be used + // again, as it is definitely upgrading. + keep_alive: keep_alive && !is_upgrade, + wants_upgrade: is_upgrade, + })); + } + + #[cfg(feature = "ffi")] + if head.subject.is_informational() { + if let Some(callback) = ctx.on_informational { + callback.call(head.into_response(crate::Body::empty())); + } + } + + // Parsing a 1xx response could have consumed the buffer, check if + // it is empty now... 
+ if buf.is_empty() { + return Ok(None); + } + } + } + + fn encode(msg: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result { + trace!( + "Client::encode method={:?}, body={:?}", + msg.head.subject.0, + msg.body + ); + + *msg.req_method = Some(msg.head.subject.0.clone()); + + let body = Client::set_length(msg.head, msg.body); + + let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; + dst.reserve(init_cap); + + extend(dst, msg.head.subject.0.as_str().as_bytes()); + extend(dst, b" "); + //TODO: add API to http::Uri to encode without std::fmt + let _ = write!(FastWrite(dst), "{} ", msg.head.subject.1); + + match msg.head.version { + Version::HTTP_10 => extend(dst, b"HTTP/1.0"), + Version::HTTP_11 => extend(dst, b"HTTP/1.1"), + Version::HTTP_2 => { + debug!("request with HTTP2 version coerced to HTTP/1.1"); + extend(dst, b"HTTP/1.1"); + } + other => panic!("unexpected request version: {:?}", other), + } + extend(dst, b"\r\n"); + + if let Some(orig_headers) = msg.head.extensions.get::() { + write_headers_original_case( + &msg.head.headers, + orig_headers, + dst, + msg.title_case_headers, + ); + } else if msg.title_case_headers { + write_headers_title_case(&msg.head.headers, dst); + } else { + write_headers(&msg.head.headers, dst); + } + + extend(dst, b"\r\n"); + msg.head.headers.clear(); //TODO: remove when switching to drain() + + Ok(body) + } + + fn on_error(_err: &crate::Error) -> Option> { + // we can't tell the server about any errors it creates + None + } + + fn is_client() -> bool { + true + } +} + +#[cfg(feature = "client")] +impl Client { + /// Returns Some(length, wants_upgrade) if successful. + /// + /// Returns None if this message head should be skipped (like a 100 status). + fn decoder( + inc: &MessageHead, + method: &mut Option, + ) -> Result, Parse> { + // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 + // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body. + // 2. 
Status 2xx to a CONNECT cannot have a body. + // 3. Transfer-Encoding: chunked has a chunked body. + // 4. If multiple differing Content-Length headers or invalid, close connection. + // 5. Content-Length header has a sized body. + // 6. (irrelevant to Response) + // 7. Read till EOF. + + match inc.subject.as_u16() { + 101 => { + return Ok(Some((DecodedLength::ZERO, true))); + } + 100 | 102..=199 => { + trace!("ignoring informational response: {}", inc.subject.as_u16()); + return Ok(None); + } + 204 | 304 => return Ok(Some((DecodedLength::ZERO, false))), + _ => (), + } + match *method { + Some(Method::HEAD) => { + return Ok(Some((DecodedLength::ZERO, false))); + } + Some(Method::CONNECT) => { + if let 200..=299 = inc.subject.as_u16() { + return Ok(Some((DecodedLength::ZERO, true))); + } + } + Some(_) => {} + None => { + trace!("Client::decoder is missing the Method"); + } + } + + if inc.headers.contains_key(header::TRANSFER_ENCODING) { + // https://tools.ietf.org/html/rfc7230#section-3.3.3 + // If Transfer-Encoding header is present, and 'chunked' is + // not the final encoding, and this is a Request, then it is + // malformed. A server should respond with 400 Bad Request. 
+ if inc.version == Version::HTTP_10 { + debug!("HTTP/1.0 cannot have Transfer-Encoding header"); + Err(Parse::transfer_encoding_unexpected()) + } else if headers::transfer_encoding_is_chunked(&inc.headers) { + Ok(Some((DecodedLength::CHUNKED, false))) + } else { + trace!("not chunked, read till eof"); + Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) + } + } else if let Some(len) = headers::content_length_parse_all(&inc.headers) { + Ok(Some((DecodedLength::checked_new(len)?, false))) + } else if inc.headers.contains_key(header::CONTENT_LENGTH) { + debug!("illegal Content-Length header"); + Err(Parse::content_length_invalid()) + } else { + trace!("neither Transfer-Encoding nor Content-Length"); + Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) + } + } + fn set_length(head: &mut RequestHead, body: Option) -> Encoder { + let body = if let Some(body) = body { + body + } else { + head.headers.remove(header::TRANSFER_ENCODING); + return Encoder::length(0); + }; + + // HTTP/1.0 doesn't know about chunked + let can_chunked = head.version == Version::HTTP_11; + let headers = &mut head.headers; + + // If the user already set specific headers, we should respect them, regardless + // of what the HttpBody knows about itself. They set them for a reason. + + // Because of the borrow checker, we can't check the for an existing + // Content-Length header while holding an `Entry` for the Transfer-Encoding + // header, so unfortunately, we must do the check here, first. + + let existing_con_len = headers::content_length_parse_all(headers); + let mut should_remove_con_len = false; + + if !can_chunked { + // Chunked isn't legal, so if it is set, we need to remove it. 
+ if headers.remove(header::TRANSFER_ENCODING).is_some() { + trace!("removing illegal transfer-encoding header"); + } + + return if let Some(len) = existing_con_len { + Encoder::length(len) + } else if let BodyLength::Known(len) = body { + set_content_length(headers, len) + } else { + // HTTP/1.0 client requests without a content-length + // cannot have any body at all. + Encoder::length(0) + }; + } + + // If the user set a transfer-encoding, respect that. Let's just + // make sure `chunked` is the final encoding. + let encoder = match headers.entry(header::TRANSFER_ENCODING) { + Entry::Occupied(te) => { + should_remove_con_len = true; + if headers::is_chunked(te.iter()) { + Some(Encoder::chunked()) + } else { + warn!("user provided transfer-encoding does not end in 'chunked'"); + + // There's a Transfer-Encoding, but it doesn't end in 'chunked'! + // An example that could trigger this: + // + // Transfer-Encoding: gzip + // + // This can be bad, depending on if this is a request or a + // response. + // + // - A request is illegal if there is a `Transfer-Encoding` + // but it doesn't end in `chunked`. + // - A response that has `Transfer-Encoding` but doesn't + // end in `chunked` isn't illegal, it just forces this + // to be close-delimited. + // + // We can try to repair this, by adding `chunked` ourselves. + + headers::add_chunked(te); + Some(Encoder::chunked()) + } + } + Entry::Vacant(te) => { + if let Some(len) = existing_con_len { + Some(Encoder::length(len)) + } else if let BodyLength::Unknown = body { + // GET, HEAD, and CONNECT almost never have bodies. + // + // So instead of sending a "chunked" body with a 0-chunk, + // assume no body here. If you *must* send a body, + // set the headers explicitly. 
+ match head.subject.0 { + Method::GET | Method::HEAD | Method::CONNECT => Some(Encoder::length(0)), + _ => { + te.insert(HeaderValue::from_static("chunked")); + Some(Encoder::chunked()) + } + } + } else { + None + } + } + }; + + // This is because we need a second mutable borrow to remove + // content-length header. + if let Some(encoder) = encoder { + if should_remove_con_len && existing_con_len.is_some() { + headers.remove(header::CONTENT_LENGTH); + } + return encoder; + } + + // User didn't set transfer-encoding, AND we know body length, + // so we can just set the Content-Length automatically. + + let len = if let BodyLength::Known(len) = body { + len + } else { + unreachable!("BodyLength::Unknown would set chunked"); + }; + + set_content_length(headers, len) + } +} + +fn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder { + // At this point, there should not be a valid Content-Length + // header. However, since we'll be indexing in anyways, we can + // warn the user if there was an existing illegal header. + // + // Or at least, we can in theory. It's actually a little bit slower, + // so perhaps only do that while the user is developing/testing. + + if cfg!(debug_assertions) { + match headers.entry(header::CONTENT_LENGTH) { + Entry::Occupied(mut cl) => { + // Internal sanity check, we should have already determined + // that the header was illegal before calling this function. + debug_assert!(headers::content_length_parse_all_values(cl.iter()).is_none()); + // Uh oh, the user set `Content-Length` headers, but set bad ones. + // This would be an illegal message anyways, so let's try to repair + // with our known good length. 
+ error!("user provided content-length header was invalid"); + + cl.insert(HeaderValue::from(len)); + Encoder::length(len) + } + Entry::Vacant(cl) => { + cl.insert(HeaderValue::from(len)); + Encoder::length(len) + } + } + } else { + headers.insert(header::CONTENT_LENGTH, HeaderValue::from(len)); + Encoder::length(len) + } +} + +#[derive(Clone, Copy)] +struct HeaderIndices { + name: (usize, usize), + value: (usize, usize), +} + +fn record_header_indices( + bytes: &[u8], + headers: &[httparse::Header<'_>], + indices: &mut [MaybeUninit], +) -> Result<(), crate::error::Parse> { + let bytes_ptr = bytes.as_ptr() as usize; + + for (header, indices) in headers.iter().zip(indices.iter_mut()) { + if header.name.len() >= (1 << 16) { + debug!("header name larger than 64kb: {:?}", header.name); + return Err(crate::error::Parse::TooLarge); + } + let name_start = header.name.as_ptr() as usize - bytes_ptr; + let name_end = name_start + header.name.len(); + let value_start = header.value.as_ptr() as usize - bytes_ptr; + let value_end = value_start + header.value.len(); + + // FIXME(maybe_uninit_extra) + // FIXME(addr_of) + // Currently we don't have `ptr::addr_of_mut` in stable rust or + // MaybeUninit::write, so this is some way of assigning into a MaybeUninit + // safely + let new_header_indices = HeaderIndices { + name: (name_start, name_end), + value: (value_start, value_end), + }; + *indices = MaybeUninit::new(new_header_indices); + } + + Ok(()) +} + +// Write header names as title case. The header name is assumed to be ASCII. 
+fn title_case(dst: &mut Vec, name: &[u8]) { + dst.reserve(name.len()); + + // Ensure first character is uppercased + let mut prev = b'-'; + for &(mut c) in name { + if prev == b'-' { + c.make_ascii_uppercase(); + } + dst.push(c); + prev = c; + } +} + +fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec) { + for (name, value) in headers { + title_case(dst, name.as_str().as_bytes()); + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } +} + +fn write_headers(headers: &HeaderMap, dst: &mut Vec) { + for (name, value) in headers { + extend(dst, name.as_str().as_bytes()); + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } +} + +#[cold] +fn write_headers_original_case( + headers: &HeaderMap, + orig_case: &HeaderCaseMap, + dst: &mut Vec, + title_case_headers: bool, +) { + // For each header name/value pair, there may be a value in the casemap + // that corresponds to the HeaderValue. So, we iterator all the keys, + // and for each one, try to pair the originally cased name with the value. 
+ // + // TODO: consider adding http::HeaderMap::entries() iterator + for name in headers.keys() { + let mut names = orig_case.get_all(name); + + for value in headers.get_all(name) { + if let Some(orig_name) = names.next() { + extend(dst, orig_name.as_ref()); + } else if title_case_headers { + title_case(dst, name.as_str().as_bytes()); + } else { + extend(dst, name.as_str().as_bytes()); + } + + // Wanted for curl test cases that send `X-Custom-Header:\r\n` + if value.is_empty() { + extend(dst, b":\r\n"); + } else { + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } + } + } +} + +struct FastWrite<'a>(&'a mut Vec); + +impl<'a> fmt::Write for FastWrite<'a> { + #[inline] + fn write_str(&mut self, s: &str) -> fmt::Result { + extend(self.0, s.as_bytes()); + Ok(()) + } + + #[inline] + fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { + fmt::write(self, args) + } +} + +#[inline] +fn extend(dst: &mut Vec, data: &[u8]) { + dst.extend_from_slice(data); +} + +#[cfg(test)] +mod tests { + use bytes::BytesMut; + + use super::*; + + #[test] + fn test_parse_request() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from("GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); + let mut method = None; + let msg = Server::parse( + &mut raw, + ParseContext { + cached_headers: &mut None, + req_method: &mut method, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .unwrap() + .unwrap(); + assert_eq!(raw.len(), 0); + assert_eq!(msg.head.subject.0, crate::Method::GET); + assert_eq!(msg.head.subject.1, 
"/echo"); + assert_eq!(msg.head.version, crate::Version::HTTP_11); + assert_eq!(msg.head.headers.len(), 1); + assert_eq!(msg.head.headers["Host"], "hyper.rs"); + assert_eq!(method, Some(crate::Method::GET)); + } + + #[test] + fn test_parse_response() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw.len(), 0); + assert_eq!(msg.head.subject, crate::StatusCode::OK); + assert_eq!(msg.head.version, crate::Version::HTTP_11); + assert_eq!(msg.head.headers.len(), 1); + assert_eq!(msg.head.headers["Content-Length"], "0"); + } + + #[test] + fn test_parse_request_errors() { + let mut raw = BytesMut::from("GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + Server::parse(&mut raw, ctx).unwrap_err(); + } + + const 
H09_RESPONSE: &'static str = "Baguettes are super delicious, don't you agree?"; + + #[test] + fn test_parse_response_h09_allowed() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(H09_RESPONSE); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: true, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw, H09_RESPONSE); + assert_eq!(msg.head.subject, crate::StatusCode::OK); + assert_eq!(msg.head.version, crate::Version::HTTP_09); + assert_eq!(msg.head.headers.len(), 0); + } + + #[test] + fn test_parse_response_h09_rejected() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(H09_RESPONSE); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + Client::parse(&mut raw, ctx).unwrap_err(); + assert_eq!(raw, H09_RESPONSE); + } + + const RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON: &'static str = + "HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials : true\r\n\r\n"; + + 
#[test] + fn test_parse_allow_response_with_spaces_before_colons() { + use httparse::ParserConfig; + + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); + let mut h1_parser_config = ParserConfig::default(); + h1_parser_config.allow_spaces_after_header_name_in_responses(true); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config, + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw.len(), 0); + assert_eq!(msg.head.subject, crate::StatusCode::OK); + assert_eq!(msg.head.version, crate::Version::HTTP_11); + assert_eq!(msg.head.headers.len(), 1); + assert_eq!(msg.head.headers["Access-Control-Allow-Credentials"], "true"); + } + + #[test] + fn test_parse_reject_response_with_spaces_before_colons() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, 
+ }; + Client::parse(&mut raw, ctx).unwrap_err(); + } + + #[test] + fn test_parse_preserve_header_case_in_request() { + let mut raw = + BytesMut::from("GET / HTTP/1.1\r\nHost: hyper.rs\r\nX-BREAD: baguette\r\n\r\n"); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: true, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }; + let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap(); + let orig_headers = parsed_message + .head + .extensions + .get::() + .unwrap(); + assert_eq!( + orig_headers + .get_all_internal(&HeaderName::from_static("host")) + .into_iter() + .collect::>(), + vec![&Bytes::from("Host")] + ); + assert_eq!( + orig_headers + .get_all_internal(&HeaderName::from_static("x-bread")) + .into_iter() + .collect::>(), + vec![&Bytes::from("X-BREAD")] + ); + } + + #[test] + fn test_decoder_request() { + fn parse(s: &str) -> ParsedMessage { + let mut bytes = BytesMut::from(s); + Server::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .expect("parse ok") + .expect("parse complete") + } + + fn parse_err(s: 
&str, comment: &str) -> crate::error::Parse { + let mut bytes = BytesMut::from(s); + Server::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .expect_err(comment) + } + + // no length or transfer-encoding means 0-length body + assert_eq!( + parse( + "\ + GET / HTTP/1.1\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::ZERO + ); + + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::ZERO + ); + + // transfer-encoding: chunked + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CHUNKED + ); + + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip, chunked\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CHUNKED + ); + + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CHUNKED + ); + + // content-length + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::new(10) + ); + + // transfer-encoding and content-length = chunked + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CHUNKED + ); + + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + content-length: 10\r\n\ + \r\n\ + " + ) + .decode, + 
DecodedLength::CHUNKED + ); + + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip\r\n\ + content-length: 10\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CHUNKED + ); + + // multiple content-lengths of same value are fine + assert_eq!( + parse( + "\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + content-length: 10\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::new(10) + ); + + // multiple content-lengths with different values is an error + parse_err( + "\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + content-length: 11\r\n\ + \r\n\ + ", + "multiple content-lengths", + ); + + // content-length with prefix is not allowed + parse_err( + "\ + POST / HTTP/1.1\r\n\ + content-length: +10\r\n\ + \r\n\ + ", + "prefixed content-length", + ); + + // transfer-encoding that isn't chunked is an error + parse_err( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip\r\n\ + \r\n\ + ", + "transfer-encoding but not chunked", + ); + + parse_err( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked, gzip\r\n\ + \r\n\ + ", + "transfer-encoding doesn't end in chunked", + ); + + parse_err( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + transfer-encoding: afterlol\r\n\ + \r\n\ + ", + "transfer-encoding multiple lines doesn't end in chunked", + ); + + // http/1.0 + + assert_eq!( + parse( + "\ + POST / HTTP/1.0\r\n\ + content-length: 10\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::new(10) + ); + + // 1.0 doesn't understand chunked, so its an error + parse_err( + "\ + POST / HTTP/1.0\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ", + "1.0 chunked", + ); + } + + #[test] + fn test_decoder_response() { + fn parse(s: &str) -> ParsedMessage { + parse_with_method(s, Method::GET) + } + + fn parse_ignores(s: &str) { + let mut bytes = BytesMut::from(s); + assert!(Client::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut Some(Method::GET), + h1_parser_config: 
Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + } + ) + .expect("parse ok") + .is_none()) + } + + fn parse_with_method(s: &str, m: Method) -> ParsedMessage { + let mut bytes = BytesMut::from(s); + Client::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut Some(m), + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .expect("parse ok") + .expect("parse complete") + } + + fn parse_err(s: &str) -> crate::error::Parse { + let mut bytes = BytesMut::from(s); + Client::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .expect_err("parse should err") + } + + // no content-length or transfer-encoding means 
close-delimited + assert_eq!( + parse( + "\ + HTTP/1.1 200 OK\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CLOSE_DELIMITED + ); + + // 204 and 304 never have a body + assert_eq!( + parse( + "\ + HTTP/1.1 204 No Content\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::ZERO + ); + + assert_eq!( + parse( + "\ + HTTP/1.1 304 Not Modified\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::ZERO + ); + + // content-length + assert_eq!( + parse( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::new(8) + ); + + assert_eq!( + parse( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + content-length: 8\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::new(8) + ); + + parse_err( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + content-length: 9\r\n\ + \r\n\ + ", + ); + + parse_err( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: +8\r\n\ + \r\n\ + ", + ); + + // transfer-encoding: chunked + assert_eq!( + parse( + "\ + HTTP/1.1 200 OK\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CHUNKED + ); + + // transfer-encoding not-chunked is close-delimited + assert_eq!( + parse( + "\ + HTTP/1.1 200 OK\r\n\ + transfer-encoding: yolo\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CLOSE_DELIMITED + ); + + // transfer-encoding and content-length = chunked + assert_eq!( + parse( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: 10\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CHUNKED + ); + + // HEAD can have content-length, but not body + assert_eq!( + parse_with_method( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + \r\n\ + ", + Method::HEAD + ) + .decode, + DecodedLength::ZERO + ); + + // CONNECT with 200 never has body + { + let msg = parse_with_method( + "\ + HTTP/1.1 200 OK\r\n\ + \r\n\ + ", + Method::CONNECT, + ); + assert_eq!(msg.decode, DecodedLength::ZERO); + assert!(!msg.keep_alive, "should be upgrade"); + assert!(msg.wants_upgrade, "should be 
upgrade"); + } + + // CONNECT receiving non 200 can have a body + assert_eq!( + parse_with_method( + "\ + HTTP/1.1 400 Bad Request\r\n\ + \r\n\ + ", + Method::CONNECT + ) + .decode, + DecodedLength::CLOSE_DELIMITED + ); + + // 1xx status codes + parse_ignores( + "\ + HTTP/1.1 100 Continue\r\n\ + \r\n\ + ", + ); + + parse_ignores( + "\ + HTTP/1.1 103 Early Hints\r\n\ + \r\n\ + ", + ); + + // 101 upgrade not supported yet + { + let msg = parse( + "\ + HTTP/1.1 101 Switching Protocols\r\n\ + \r\n\ + ", + ); + assert_eq!(msg.decode, DecodedLength::ZERO); + assert!(!msg.keep_alive, "should be last"); + assert!(msg.wants_upgrade, "should be upgrade"); + } + + // http/1.0 + assert_eq!( + parse( + "\ + HTTP/1.0 200 OK\r\n\ + \r\n\ + " + ) + .decode, + DecodedLength::CLOSE_DELIMITED + ); + + // 1.0 doesn't understand chunked + parse_err( + "\ + HTTP/1.0 200 OK\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ", + ); + + // keep-alive + assert!( + parse( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: 0\r\n\ + \r\n\ + " + ) + .keep_alive, + "HTTP/1.1 keep-alive is default" + ); + + assert!( + !parse( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: 0\r\n\ + connection: foo, close, bar\r\n\ + \r\n\ + " + ) + .keep_alive, + "connection close is always close" + ); + + assert!( + !parse( + "\ + HTTP/1.0 200 OK\r\n\ + content-length: 0\r\n\ + \r\n\ + " + ) + .keep_alive, + "HTTP/1.0 close is default" + ); + + assert!( + parse( + "\ + HTTP/1.0 200 OK\r\n\ + content-length: 0\r\n\ + connection: foo, keep-alive, bar\r\n\ + \r\n\ + " + ) + .keep_alive, + "connection keep-alive is always keep-alive" + ); + } + + #[test] + fn test_client_request_encode_title_case() { + use crate::proto::BodyLength; + use http::header::HeaderValue; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + head.headers.insert("*-*", 
HeaderValue::from_static("o_o")); + + let mut vec = Vec::new(); + Client::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + assert_eq!(vec, b"GET / HTTP/1.1\r\nContent-Length: 10\r\nContent-Type: application/json\r\n*-*: o_o\r\n\r\n".to_vec()); + } + + #[test] + fn test_client_request_encode_orig_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Client::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + + assert_eq!( + &*vec, + b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\n\r\n" + .as_ref(), + ); + } + #[test] + fn test_client_request_encode_orig_and_title_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Client::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + assert_eq!( + 
&*vec, + b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n" + .as_ref(), + ); + } + + #[test] + fn test_server_encode_connect_method() { + let mut head = MessageHead::default(); + + let mut vec = Vec::new(); + let encoder = Server::encode( + Encode { + head: &mut head, + body: None, + keep_alive: true, + req_method: &mut Some(Method::CONNECT), + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + + assert!(encoder.is_last()); + } + + #[test] + fn test_server_response_encode_title_case() { + use crate::proto::BodyLength; + use http::header::HeaderValue; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + head.headers + .insert("weird--header", HeaderValue::from_static("")); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\nWeird--Header: \r\n"; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + + #[test] + fn test_server_response_encode_orig_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: 
false, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\ndate: "; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + + #[test] + fn test_server_response_encode_orig_and_title_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\nDate: "; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + + #[test] + fn parse_header_htabs() { + let mut bytes = BytesMut::from("HTTP/1.1 200 OK\r\nserver: hello\tworld\r\n\r\n"); + let parsed = Client::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .expect("parse ok") + .expect("parse complete"); + + assert_eq!(parsed.head.headers["server"], 
"hello\tworld"); + } + + #[test] + fn test_write_headers_orig_case_empty_value() { + let mut headers = HeaderMap::new(); + let name = http::header::HeaderName::from_static("x-empty"); + headers.insert(&name, "".parse().expect("parse empty")); + let mut orig_cases = HeaderCaseMap::default(); + orig_cases.insert(name, Bytes::from_static(b"X-EmptY")); + + let mut dst = Vec::new(); + super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); + + assert_eq!( + dst, b"X-EmptY:\r\n", + "there should be no space between the colon and CRLF" + ); + } + + #[test] + fn test_write_headers_orig_case_multiple_entries() { + let mut headers = HeaderMap::new(); + let name = http::header::HeaderName::from_static("x-empty"); + headers.insert(&name, "a".parse().unwrap()); + headers.append(&name, "b".parse().unwrap()); + + let mut orig_cases = HeaderCaseMap::default(); + orig_cases.insert(name.clone(), Bytes::from_static(b"X-Empty")); + orig_cases.append(name, Bytes::from_static(b"X-EMPTY")); + + let mut dst = Vec::new(); + super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); + + assert_eq!(dst, b"X-Empty: a\r\nX-EMPTY: b\r\n"); + } + + #[cfg(feature = "nightly")] + use test::Bencher; + + #[cfg(feature = "nightly")] + #[bench] + fn bench_parse_incoming(b: &mut Bencher) { + let mut raw = BytesMut::from( + &b"GET /super_long_uri/and_whatever?what_should_we_talk_about/\ + I_wonder/Hard_to_write_in_an_uri_after_all/you_have_to_make\ + _up_the_punctuation_yourself/how_fun_is_that?test=foo&test1=\ + foo1&test2=foo2&test3=foo3&test4=foo4 HTTP/1.1\r\nHost: \ + hyper.rs\r\nAccept: a lot of things\r\nAccept-Charset: \ + utf8\r\nAccept-Encoding: *\r\nAccess-Control-Allow-\ + Credentials: None\r\nAccess-Control-Allow-Origin: None\r\n\ + Access-Control-Allow-Methods: None\r\nAccess-Control-Allow-\ + Headers: None\r\nContent-Encoding: utf8\r\nContent-Security-\ + Policy: None\r\nContent-Type: text/html\r\nOrigin: hyper\ + \r\nSec-Websocket-Extensions: It looks 
super important!\r\n\ + Sec-Websocket-Origin: hyper\r\nSec-Websocket-Version: 4.3\r\ + \nStrict-Transport-Security: None\r\nUser-Agent: hyper\r\n\ + X-Content-Duration: None\r\nX-Content-Security-Policy: None\ + \r\nX-DNSPrefetch-Control: None\r\nX-Frame-Options: \ + Something important obviously\r\nX-Requested-With: Nothing\ + \r\n\r\n"[..], + ); + let len = raw.len(); + let mut headers = Some(HeaderMap::new()); + + b.bytes = len as u64; + b.iter(|| { + let mut msg = Server::parse( + &mut raw, + ParseContext { + cached_headers: &mut headers, + req_method: &mut None, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .unwrap() + .unwrap(); + ::test::black_box(&msg); + msg.head.headers.clear(); + headers = Some(msg.head.headers); + restart(&mut raw, len); + }); + + fn restart(b: &mut BytesMut, len: usize) { + b.reserve(1); + unsafe { + b.set_len(len); + } + } + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_parse_short(b: &mut Bencher) { + let s = &b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"[..]; + let mut raw = BytesMut::from(s); + let len = raw.len(); + let mut headers = Some(HeaderMap::new()); + + b.bytes = len as u64; + b.iter(|| { + let mut msg = Server::parse( + &mut raw, + ParseContext { + cached_headers: &mut headers, + req_method: &mut None, + h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] + h1_header_read_timeout: None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] + h1_header_read_timeout_running: &mut false, + preserve_header_case: false, + 
#[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] + raw_headers: false, + }, + ) + .unwrap() + .unwrap(); + ::test::black_box(&msg); + msg.head.headers.clear(); + headers = Some(msg.head.headers); + restart(&mut raw, len); + }); + + fn restart(b: &mut BytesMut, len: usize) { + b.reserve(1); + unsafe { + b.set_len(len); + } + } + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_server_encode_headers_preset(b: &mut Bencher) { + use crate::proto::BodyLength; + use http::header::HeaderValue; + + let len = 108; + b.bytes = len as u64; + + let mut head = MessageHead::default(); + let mut headers = HeaderMap::new(); + headers.insert("content-length", HeaderValue::from_static("10")); + headers.insert("content-type", HeaderValue::from_static("application/json")); + + b.iter(|| { + let mut vec = Vec::new(); + head.headers = headers.clone(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut Some(Method::GET), + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + assert_eq!(vec.len(), len); + ::test::black_box(vec); + }) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_server_encode_no_headers(b: &mut Bencher) { + use crate::proto::BodyLength; + + let len = 76; + b.bytes = len as u64; + + let mut head = MessageHead::default(); + let mut vec = Vec::with_capacity(128); + + b.iter(|| { + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut Some(Method::GET), + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + assert_eq!(vec.len(), len); + ::test::black_box(&vec); + + vec.clear(); + }) + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h2/client.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h2/client.rs new file mode 100644 index 0000000000..8c2a4d2e0f --- /dev/null +++ 
b/.cargo-vendor/hyper-0.14.30/src/proto/h2/client.rs @@ -0,0 +1,455 @@ +use std::convert::Infallible; +use std::error::Error as StdError; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +#[cfg(feature = "runtime")] +use std::time::Duration; + +use bytes::Bytes; +use futures_channel::{mpsc, oneshot}; +use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; +use futures_util::stream::StreamExt as _; +use h2::client::{Builder, SendRequest}; +use h2::SendStream; +use http::{Method, StatusCode}; +use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace, warn}; + +use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; +use crate::body::HttpBody; +use crate::client::dispatch::Callback; +use crate::common::exec::Exec; +use crate::ext::Protocol; +use crate::headers; +use crate::proto::h2::UpgradedSendStream; +use crate::proto::Dispatched; +use crate::upgrade::Upgraded; +use crate::{Body, Request, Response}; +use h2::client::ResponseFuture; + +type ClientRx = crate::client::dispatch::Receiver, Response>; + +///// An mpsc channel is used to help notify the `Connection` task when *all* +///// other handles to it have been dropped, so that it can shutdown. +type ConnDropRef = mpsc::Sender; + +///// A oneshot channel watches the `Connection` task, and when it completes, +///// the "dispatch" task will be notified and can shutdown sooner. +type ConnEof = oneshot::Receiver; + +// Our defaults are chosen for the "majority" case, which usually are not +// resource constrained, and so the spec default of 64kb can be too limiting +// for performance. 
+const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb +const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb +const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb +const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb + +#[derive(Clone, Debug)] +pub(crate) struct Config { + pub(crate) adaptive_window: bool, + pub(crate) initial_conn_window_size: u32, + pub(crate) initial_stream_window_size: u32, + pub(crate) max_frame_size: u32, + #[cfg(feature = "runtime")] + pub(crate) keep_alive_interval: Option, + #[cfg(feature = "runtime")] + pub(crate) keep_alive_timeout: Duration, + #[cfg(feature = "runtime")] + pub(crate) keep_alive_while_idle: bool, + pub(crate) max_concurrent_reset_streams: Option, + pub(crate) max_send_buffer_size: usize, +} + +impl Default for Config { + fn default() -> Config { + Config { + adaptive_window: false, + initial_conn_window_size: DEFAULT_CONN_WINDOW, + initial_stream_window_size: DEFAULT_STREAM_WINDOW, + max_frame_size: DEFAULT_MAX_FRAME_SIZE, + #[cfg(feature = "runtime")] + keep_alive_interval: None, + #[cfg(feature = "runtime")] + keep_alive_timeout: Duration::from_secs(20), + #[cfg(feature = "runtime")] + keep_alive_while_idle: false, + max_concurrent_reset_streams: None, + max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, + } + } +} + +fn new_builder(config: &Config) -> Builder { + let mut builder = Builder::default(); + builder + .initial_window_size(config.initial_stream_window_size) + .initial_connection_window_size(config.initial_conn_window_size) + .max_frame_size(config.max_frame_size) + .max_send_buffer_size(config.max_send_buffer_size) + .enable_push(false); + if let Some(max) = config.max_concurrent_reset_streams { + builder.max_concurrent_reset_streams(max); + } + builder +} + +fn new_ping_config(config: &Config) -> ping::Config { + ping::Config { + bdp_initial_window: if config.adaptive_window { + Some(config.initial_stream_window_size) + } else { + None + }, + #[cfg(feature = "runtime")] + 
keep_alive_interval: config.keep_alive_interval, + #[cfg(feature = "runtime")] + keep_alive_timeout: config.keep_alive_timeout, + #[cfg(feature = "runtime")] + keep_alive_while_idle: config.keep_alive_while_idle, + } +} + +pub(crate) async fn handshake( + io: T, + req_rx: ClientRx, + config: &Config, + exec: Exec, +) -> crate::Result> +where + T: AsyncRead + AsyncWrite + Send + Unpin + 'static, + B: HttpBody, + B::Data: Send + 'static, +{ + let (h2_tx, mut conn) = new_builder(config) + .handshake::<_, SendBuf>(io) + .await + .map_err(crate::Error::new_h2)?; + + // An mpsc channel is used entirely to detect when the + // 'Client' has been dropped. This is to get around a bug + // in h2 where dropping all SendRequests won't notify a + // parked Connection. + let (conn_drop_ref, rx) = mpsc::channel(1); + let (cancel_tx, conn_eof) = oneshot::channel(); + + let conn_drop_rx = rx.into_future().map(|(item, _rx)| { + if let Some(never) = item { + match never {} + } + }); + + let ping_config = new_ping_config(&config); + + let (conn, ping) = if ping_config.is_enabled() { + let pp = conn.ping_pong().expect("conn.ping_pong"); + let (recorder, mut ponger) = ping::channel(pp, ping_config); + + let conn = future::poll_fn(move |cx| { + match ponger.poll(cx) { + Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { + conn.set_target_window_size(wnd); + conn.set_initial_window_size(wnd)?; + } + #[cfg(feature = "runtime")] + Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { + debug!("connection keep-alive timed out"); + return Poll::Ready(Ok(())); + } + Poll::Pending => {} + } + + Pin::new(&mut conn).poll(cx) + }); + (Either::Left(conn), recorder) + } else { + (Either::Right(conn), ping::disabled()) + }; + let conn = conn.map_err(|e| debug!("connection error: {}", e)); + + exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); + + Ok(ClientTask { + ping, + conn_drop_ref, + conn_eof, + executor: exec, + h2_tx, + req_rx, + fut_ctx: None, + }) +} + +async fn conn_task(conn: C, drop_rx: D, 
cancel_tx: oneshot::Sender) +where + C: Future + Unpin, + D: Future + Unpin, +{ + match future::select(conn, drop_rx).await { + Either::Left(_) => { + // ok or err, the `conn` has finished + } + Either::Right(((), conn)) => { + // mpsc has been dropped, hopefully polling + // the connection some more should start shutdown + // and then close + trace!("send_request dropped, starting conn shutdown"); + drop(cancel_tx); + let _ = conn.await; + } + } +} + +struct FutCtx +where + B: HttpBody, +{ + is_connect: bool, + eos: bool, + fut: ResponseFuture, + body_tx: SendStream>, + body: B, + cb: Callback, Response>, +} + +impl Unpin for FutCtx {} + +pub(crate) struct ClientTask +where + B: HttpBody, +{ + ping: ping::Recorder, + conn_drop_ref: ConnDropRef, + conn_eof: ConnEof, + executor: Exec, + h2_tx: SendRequest>, + req_rx: ClientRx, + fut_ctx: Option>, +} + +impl ClientTask +where + B: HttpBody + 'static, +{ + pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { + self.h2_tx.is_extended_connect_protocol_enabled() + } +} + +impl ClientTask +where + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + fn poll_pipe(&mut self, f: FutCtx, cx: &mut Context<'_>) { + let ping = self.ping.clone(); + let send_stream = if !f.is_connect { + if !f.eos { + let mut pipe = Box::pin(PipeToSendStream::new(f.body, f.body_tx)).map(|res| { + if let Err(e) = res { + debug!("client request body error: {}", e); + } + }); + + // eagerly see if the body pipe is ready and + // can thus skip allocating in the executor + match Pin::new(&mut pipe).poll(cx) { + Poll::Ready(_) => (), + Poll::Pending => { + let conn_drop_ref = self.conn_drop_ref.clone(); + // keep the ping recorder's knowledge of an + // "open stream" alive while this body is + // still sending... 
+ let ping = ping.clone(); + let pipe = pipe.map(move |x| { + drop(conn_drop_ref); + drop(ping); + x + }); + // Clear send task + self.executor.execute(pipe); + } + } + } + + None + } else { + Some(f.body_tx) + }; + + let fut = f.fut.map(move |result| match result { + Ok(res) => { + // record that we got the response headers + ping.record_non_data(); + + let content_length = headers::content_length_parse_all(res.headers()); + if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect response with non-zero body not supported"); + + send_stream.send_reset(h2::Reason::INTERNAL_ERROR); + return Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + )); + } + let (parts, recv_stream) = res.into_parts(); + let mut res = Response::from_parts(parts, Body::empty()); + + let (pending, on_upgrade) = crate::upgrade::pending(); + let io = H2Upgraded { + ping, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + recv_stream, + buf: Bytes::new(), + }; + let upgraded = Upgraded::new(io, Bytes::new()); + + pending.fulfill(upgraded); + res.extensions_mut().insert(on_upgrade); + + Ok(res) + } else { + let res = res.map(|stream| { + let ping = ping.for_stream(&stream); + crate::Body::h2(stream, content_length.into(), ping) + }); + Ok(res) + } + } + Err(err) => { + ping.ensure_not_timed_out().map_err(|e| (e, None))?; + + debug!("client response error: {}", err); + Err((crate::Error::new_h2(err), None)) + } + }); + self.executor.execute(f.cb.send_when(fut)); + } +} + +impl Future for ClientTask +where + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Output = crate::Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + loop { + match ready!(self.h2_tx.poll_ready(cx)) { + Ok(()) => (), + Err(err) => { + self.ping.ensure_not_timed_out()?; + return if err.reason() == Some(::h2::Reason::NO_ERROR) { + 
trace!("connection gracefully shutdown"); + Poll::Ready(Ok(Dispatched::Shutdown)) + } else { + Poll::Ready(Err(crate::Error::new_h2(err))) + }; + } + }; + + match self.fut_ctx.take() { + // If we were waiting on pending open + // continue where we left off. + Some(f) => { + self.poll_pipe(f, cx); + continue; + } + None => (), + } + + match self.req_rx.poll_recv(cx) { + Poll::Ready(Some((req, cb))) => { + // check that future hasn't been canceled already + if cb.is_canceled() { + trace!("request callback is canceled"); + continue; + } + let (head, body) = req.into_parts(); + let mut req = ::http::Request::from_parts(head, ()); + super::strip_connection_headers(req.headers_mut(), true); + if let Some(len) = body.size_hint().exact() { + if len != 0 || headers::method_has_defined_payload_semantics(req.method()) { + headers::set_content_length_if_missing(req.headers_mut(), len); + } + } + + let is_connect = req.method() == Method::CONNECT; + let eos = body.is_end_stream(); + + if is_connect { + if headers::content_length_parse_all(req.headers()) + .map_or(false, |len| len != 0) + { + warn!("h2 connect request with non-zero body not supported"); + cb.send(Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + ))); + continue; + } + } + + if let Some(protocol) = req.extensions_mut().remove::() { + req.extensions_mut().insert(protocol.into_inner()); + } + + let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) { + Ok(ok) => ok, + Err(err) => { + debug!("client send request error: {}", err); + cb.send(Err((crate::Error::new_h2(err), None))); + continue; + } + }; + + let f = FutCtx { + is_connect, + eos, + fut, + body_tx, + body, + cb, + }; + + // Check poll_ready() again. + // If the call to send_request() resulted in the new stream being pending open + // we have to wait for the open to complete before accepting new requests. 
+ match self.h2_tx.poll_ready(cx) { + Poll::Pending => { + // Save Context + self.fut_ctx = Some(f); + return Poll::Pending; + } + Poll::Ready(Ok(())) => (), + Poll::Ready(Err(err)) => { + f.cb.send(Err((crate::Error::new_h2(err), None))); + continue; + } + } + self.poll_pipe(f, cx); + continue; + } + + Poll::Ready(None) => { + trace!("client::dispatch::Sender dropped"); + return Poll::Ready(Ok(Dispatched::Shutdown)); + } + + Poll::Pending => match ready!(Pin::new(&mut self.conn_eof).poll(cx)) { + Ok(never) => match never {}, + Err(_conn_is_eof) => { + trace!("connection task is closed, closing dispatch task"); + return Poll::Ready(Ok(Dispatched::Shutdown)); + } + }, + } + } + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h2/mod.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h2/mod.rs new file mode 100644 index 0000000000..d50850d0a0 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h2/mod.rs @@ -0,0 +1,470 @@ +use bytes::{Buf, Bytes}; +use h2::{Reason, RecvStream, SendStream}; +use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE}; +use http::HeaderMap; +use pin_project_lite::pin_project; +use std::error::Error as StdError; +use std::future::Future; +use std::io::{self, Cursor, IoSlice}; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tracing::{debug, trace, warn}; + +use crate::body::HttpBody; +use crate::proto::h2::ping::Recorder; + +pub(crate) mod ping; + +cfg_client! { + pub(crate) mod client; + pub(crate) use self::client::ClientTask; +} + +cfg_server! { + pub(crate) mod server; + pub(crate) use self::server::Server; +} + +/// Default initial stream window size defined in HTTP2 spec. 
+pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535; + +fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { + // List of connection headers from: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection + // + // TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're + // tested separately. + let connection_headers = [ + HeaderName::from_lowercase(b"keep-alive").unwrap(), + HeaderName::from_lowercase(b"proxy-connection").unwrap(), + TRAILER, + TRANSFER_ENCODING, + UPGRADE, + ]; + + for header in connection_headers.iter() { + if headers.remove(header).is_some() { + warn!("Connection header illegal in HTTP/2: {}", header.as_str()); + } + } + + if is_request { + if headers + .get(TE) + .map(|te_header| te_header != "trailers") + .unwrap_or(false) + { + warn!("TE headers not set to \"trailers\" are illegal in HTTP/2 requests"); + headers.remove(TE); + } + } else if headers.remove(TE).is_some() { + warn!("TE headers illegal in HTTP/2 responses"); + } + + if let Some(header) = headers.remove(CONNECTION) { + warn!( + "Connection header illegal in HTTP/2: {}", + CONNECTION.as_str() + ); + let header_contents = header.to_str().unwrap(); + + // A `Connection` header may have a comma-separated list of names of other headers that + // are meant for only this specific connection. + // + // Iterate these names and remove them as headers. Connection-specific headers are + // forbidden in HTTP2, as that information has been moved into frame types of the h2 + // protocol. + for name in header_contents.split(',') { + let name = name.trim(); + headers.remove(name); + } + } +} + +// body adapters used by both Client and Server + +pin_project! 
{ + struct PipeToSendStream + where + S: HttpBody, + { + body_tx: SendStream>, + data_done: bool, + #[pin] + stream: S, + } +} + +impl PipeToSendStream +where + S: HttpBody, +{ + fn new(stream: S, tx: SendStream>) -> PipeToSendStream { + PipeToSendStream { + body_tx: tx, + data_done: false, + stream, + } + } +} + +impl Future for PipeToSendStream +where + S: HttpBody, + S::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut me = self.project(); + loop { + if !*me.data_done { + // we don't have the next chunk of data yet, so just reserve 1 byte to make + // sure there's some capacity available. h2 will handle the capacity management + // for the actual body chunk. + me.body_tx.reserve_capacity(1); + + if me.body_tx.capacity() == 0 { + loop { + match ready!(me.body_tx.poll_capacity(cx)) { + Some(Ok(0)) => {} + Some(Ok(_)) => break, + Some(Err(e)) => { + return Poll::Ready(Err(crate::Error::new_body_write(e))) + } + None => { + // None means the stream is no longer in a + // streaming state, we either finished it + // somehow, or the remote reset us. + return Poll::Ready(Err(crate::Error::new_body_write( + "send stream capacity unexpectedly closed", + ))); + } + } + } + } else if let Poll::Ready(reason) = me + .body_tx + .poll_reset(cx) + .map_err(crate::Error::new_body_write)? 
+ { + debug!("stream received RST_STREAM: {:?}", reason); + return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from( + reason, + )))); + } + + match ready!(me.stream.as_mut().poll_data(cx)) { + Some(Ok(chunk)) => { + let is_eos = me.stream.is_end_stream(); + trace!( + "send body chunk: {} bytes, eos={}", + chunk.remaining(), + is_eos, + ); + + let buf = SendBuf::Buf(chunk); + me.body_tx + .send_data(buf, is_eos) + .map_err(crate::Error::new_body_write)?; + + if is_eos { + return Poll::Ready(Ok(())); + } + } + Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))), + None => { + me.body_tx.reserve_capacity(0); + let is_eos = me.stream.is_end_stream(); + if is_eos { + return Poll::Ready(me.body_tx.send_eos_frame()); + } else { + *me.data_done = true; + // loop again to poll_trailers + } + } + } + } else { + if let Poll::Ready(reason) = me + .body_tx + .poll_reset(cx) + .map_err(crate::Error::new_body_write)? + { + debug!("stream received RST_STREAM: {:?}", reason); + return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from( + reason, + )))); + } + + match ready!(me.stream.poll_trailers(cx)) { + Ok(Some(trailers)) => { + me.body_tx + .send_trailers(trailers) + .map_err(crate::Error::new_body_write)?; + return Poll::Ready(Ok(())); + } + Ok(None) => { + // There were no trailers, so send an empty DATA frame... 
+ return Poll::Ready(me.body_tx.send_eos_frame()); + } + Err(e) => return Poll::Ready(Err(me.body_tx.on_user_err(e))), + } + } + } + } +} + +trait SendStreamExt { + fn on_user_err(&mut self, err: E) -> crate::Error + where + E: Into>; + fn send_eos_frame(&mut self) -> crate::Result<()>; +} + +impl SendStreamExt for SendStream> { + fn on_user_err(&mut self, err: E) -> crate::Error + where + E: Into>, + { + let err = crate::Error::new_user_body(err); + debug!("send body user stream error: {}", err); + self.send_reset(err.h2_reason()); + err + } + + fn send_eos_frame(&mut self) -> crate::Result<()> { + trace!("send body eos"); + self.send_data(SendBuf::None, true) + .map_err(crate::Error::new_body_write) + } +} + +#[repr(usize)] +enum SendBuf { + Buf(B), + Cursor(Cursor>), + None, +} + +impl Buf for SendBuf { + #[inline] + fn remaining(&self) -> usize { + match *self { + Self::Buf(ref b) => b.remaining(), + Self::Cursor(ref c) => Buf::remaining(c), + Self::None => 0, + } + } + + #[inline] + fn chunk(&self) -> &[u8] { + match *self { + Self::Buf(ref b) => b.chunk(), + Self::Cursor(ref c) => c.chunk(), + Self::None => &[], + } + } + + #[inline] + fn advance(&mut self, cnt: usize) { + match *self { + Self::Buf(ref mut b) => b.advance(cnt), + Self::Cursor(ref mut c) => c.advance(cnt), + Self::None => {} + } + } + + fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { + match *self { + Self::Buf(ref b) => b.chunks_vectored(dst), + Self::Cursor(ref c) => c.chunks_vectored(dst), + Self::None => 0, + } + } +} + +struct H2Upgraded +where + B: Buf, +{ + ping: Recorder, + send_stream: UpgradedSendStream, + recv_stream: RecvStream, + buf: Bytes, +} + +impl AsyncRead for H2Upgraded +where + B: Buf, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + read_buf: &mut ReadBuf<'_>, + ) -> Poll> { + if self.buf.is_empty() { + self.buf = loop { + match ready!(self.recv_stream.poll_data(cx)) { + None => return Poll::Ready(Ok(())), + Some(Ok(buf)) if 
buf.is_empty() && !self.recv_stream.is_end_stream() => { + continue + } + Some(Ok(buf)) => { + self.ping.record_data(buf.len()); + break buf; + } + Some(Err(e)) => { + return Poll::Ready(match e.reason() { + Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()), + Some(Reason::STREAM_CLOSED) => { + Err(io::Error::new(io::ErrorKind::BrokenPipe, e)) + } + _ => Err(h2_to_io_error(e)), + }) + } + } + }; + } + let cnt = std::cmp::min(self.buf.len(), read_buf.remaining()); + read_buf.put_slice(&self.buf[..cnt]); + self.buf.advance(cnt); + let _ = self.recv_stream.flow_control().release_capacity(cnt); + Poll::Ready(Ok(())) + } +} + +impl AsyncWrite for H2Upgraded +where + B: Buf, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + if buf.is_empty() { + return Poll::Ready(Ok(0)); + } + self.send_stream.reserve_capacity(buf.len()); + + // We ignore all errors returned by `poll_capacity` and `write`, as we + // will get the correct from `poll_reset` anyway. 
+ let cnt = match ready!(self.send_stream.poll_capacity(cx)) { + None => Some(0), + Some(Ok(cnt)) => self + .send_stream + .write(&buf[..cnt], false) + .ok() + .map(|()| cnt), + Some(Err(_)) => None, + }; + + if let Some(cnt) = cnt { + return Poll::Ready(Ok(cnt)); + } + + Poll::Ready(Err(h2_to_io_error( + match ready!(self.send_stream.poll_reset(cx)) { + Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + Ok(reason) => reason.into(), + Err(e) => e, + }, + ))) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + if self.send_stream.write(&[], true).is_ok() { + return Poll::Ready(Ok(())); + } + + Poll::Ready(Err(h2_to_io_error( + match ready!(self.send_stream.poll_reset(cx)) { + Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())), + Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + Ok(reason) => reason.into(), + Err(e) => e, + }, + ))) + } +} + +fn h2_to_io_error(e: h2::Error) -> io::Error { + if e.is_io() { + e.into_io().unwrap() + } else { + io::Error::new(io::ErrorKind::Other, e) + } +} + +struct UpgradedSendStream(SendStream>>); + +impl UpgradedSendStream +where + B: Buf, +{ + unsafe fn new(inner: SendStream>) -> Self { + assert_eq!(mem::size_of::(), mem::size_of::>()); + Self(mem::transmute(inner)) + } + + fn reserve_capacity(&mut self, cnt: usize) { + unsafe { self.as_inner_unchecked().reserve_capacity(cnt) } + } + + fn poll_capacity(&mut self, cx: &mut Context<'_>) -> Poll>> { + unsafe { self.as_inner_unchecked().poll_capacity(cx) } + } + + fn poll_reset(&mut self, cx: &mut Context<'_>) -> Poll> { + unsafe { self.as_inner_unchecked().poll_reset(cx) } + } + + fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> { + let send_buf = 
SendBuf::Cursor(Cursor::new(buf.into())); + unsafe { + self.as_inner_unchecked() + .send_data(send_buf, end_of_stream) + .map_err(h2_to_io_error) + } + } + + unsafe fn as_inner_unchecked(&mut self) -> &mut SendStream> { + &mut *(&mut self.0 as *mut _ as *mut _) + } +} + +#[repr(transparent)] +struct Neutered { + _inner: B, + impossible: Impossible, +} + +enum Impossible {} + +unsafe impl Send for Neutered {} + +impl Buf for Neutered { + fn remaining(&self) -> usize { + match self.impossible {} + } + + fn chunk(&self) -> &[u8] { + match self.impossible {} + } + + fn advance(&mut self, _cnt: usize) { + match self.impossible {} + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h2/ping.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h2/ping.rs new file mode 100644 index 0000000000..d830c93eda --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h2/ping.rs @@ -0,0 +1,555 @@ +/// HTTP2 Ping usage +/// +/// hyper uses HTTP2 pings for two purposes: +/// +/// 1. Adaptive flow control using BDP +/// 2. Connection keep-alive +/// +/// Both cases are optional. +/// +/// # BDP Algorithm +/// +/// 1. When receiving a DATA frame, if a BDP ping isn't outstanding: +/// 1a. Record current time. +/// 1b. Send a BDP ping. +/// 2. Increment the number of received bytes. +/// 3. When the BDP ping ack is received: +/// 3a. Record duration from sent time. +/// 3b. Merge RTT with a running average. +/// 3c. Calculate bdp as bytes/rtt. +/// 3d. If bdp is over 2/3 max, set new max to bdp and update windows. 
+ +#[cfg(feature = "runtime")] +use std::fmt; +#[cfg(feature = "runtime")] +use std::future::Future; +#[cfg(feature = "runtime")] +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::{self, Poll}; +use std::time::Duration; +#[cfg(not(feature = "runtime"))] +use std::time::Instant; + +use h2::{Ping, PingPong}; +#[cfg(feature = "runtime")] +use tokio::time::{Instant, Sleep}; +use tracing::{debug, trace}; + +type WindowSize = u32; + +pub(super) fn disabled() -> Recorder { + Recorder { shared: None } +} + +pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) { + debug_assert!( + config.is_enabled(), + "ping channel requires bdp or keep-alive config", + ); + + let bdp = config.bdp_initial_window.map(|wnd| Bdp { + bdp: wnd, + max_bandwidth: 0.0, + rtt: 0.0, + ping_delay: Duration::from_millis(100), + stable_count: 0, + }); + + let (bytes, next_bdp_at) = if bdp.is_some() { + (Some(0), Some(Instant::now())) + } else { + (None, None) + }; + + #[cfg(feature = "runtime")] + let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive { + interval, + timeout: config.keep_alive_timeout, + while_idle: config.keep_alive_while_idle, + timer: Box::pin(tokio::time::sleep(interval)), + state: KeepAliveState::Init, + }); + + #[cfg(feature = "runtime")] + let last_read_at = keep_alive.as_ref().map(|_| Instant::now()); + + let shared = Arc::new(Mutex::new(Shared { + bytes, + #[cfg(feature = "runtime")] + last_read_at, + #[cfg(feature = "runtime")] + is_keep_alive_timed_out: false, + ping_pong, + ping_sent_at: None, + next_bdp_at, + })); + + ( + Recorder { + shared: Some(shared.clone()), + }, + Ponger { + bdp, + #[cfg(feature = "runtime")] + keep_alive, + shared, + }, + ) +} + +#[derive(Clone)] +pub(super) struct Config { + pub(super) bdp_initial_window: Option, + /// If no frames are received in this amount of time, a PING frame is sent. 
+ #[cfg(feature = "runtime")] + pub(super) keep_alive_interval: Option, + /// After sending a keepalive PING, the connection will be closed if + /// a pong is not received in this amount of time. + #[cfg(feature = "runtime")] + pub(super) keep_alive_timeout: Duration, + /// If true, sends pings even when there are no active streams. + #[cfg(feature = "runtime")] + pub(super) keep_alive_while_idle: bool, +} + +#[derive(Clone)] +pub(crate) struct Recorder { + shared: Option>>, +} + +pub(super) struct Ponger { + bdp: Option, + #[cfg(feature = "runtime")] + keep_alive: Option, + shared: Arc>, +} + +struct Shared { + ping_pong: PingPong, + ping_sent_at: Option, + + // bdp + /// If `Some`, bdp is enabled, and this tracks how many bytes have been + /// read during the current sample. + bytes: Option, + /// We delay a variable amount of time between BDP pings. This allows us + /// to send less pings as the bandwidth stabilizes. + next_bdp_at: Option, + + // keep-alive + /// If `Some`, keep-alive is enabled, and the Instant is how long ago + /// the connection read the last frame. + #[cfg(feature = "runtime")] + last_read_at: Option, + + #[cfg(feature = "runtime")] + is_keep_alive_timed_out: bool, +} + +struct Bdp { + /// Current BDP in bytes + bdp: u32, + /// Largest bandwidth we've seen so far. + max_bandwidth: f64, + /// Round trip time in seconds + rtt: f64, + /// Delay the next ping by this amount. + /// + /// This will change depending on how stable the current bandwidth is. + ping_delay: Duration, + /// The count of ping round trips where BDP has stayed the same. + stable_count: u32, +} + +#[cfg(feature = "runtime")] +struct KeepAlive { + /// If no frames are received in this amount of time, a PING frame is sent. + interval: Duration, + /// After sending a keepalive PING, the connection will be closed if + /// a pong is not received in this amount of time. + timeout: Duration, + /// If true, sends pings even when there are no active streams. 
+ while_idle: bool, + + state: KeepAliveState, + timer: Pin>, +} + +#[cfg(feature = "runtime")] +enum KeepAliveState { + Init, + Scheduled, + PingSent, +} + +pub(super) enum Ponged { + SizeUpdate(WindowSize), + #[cfg(feature = "runtime")] + KeepAliveTimedOut, +} + +#[cfg(feature = "runtime")] +#[derive(Debug)] +pub(super) struct KeepAliveTimedOut; + +// ===== impl Config ===== + +impl Config { + pub(super) fn is_enabled(&self) -> bool { + #[cfg(feature = "runtime")] + { + self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some() + } + + #[cfg(not(feature = "runtime"))] + { + self.bdp_initial_window.is_some() + } + } +} + +// ===== impl Recorder ===== + +impl Recorder { + pub(crate) fn record_data(&self, len: usize) { + let shared = if let Some(ref shared) = self.shared { + shared + } else { + return; + }; + + let mut locked = shared.lock().unwrap(); + + #[cfg(feature = "runtime")] + locked.update_last_read_at(); + + // are we ready to send another bdp ping? + // if not, we don't need to record bytes either + + if let Some(ref next_bdp_at) = locked.next_bdp_at { + if Instant::now() < *next_bdp_at { + return; + } else { + locked.next_bdp_at = None; + } + } + + if let Some(ref mut bytes) = locked.bytes { + *bytes += len; + } else { + // no need to send bdp ping if bdp is disabled + return; + } + + if !locked.is_ping_sent() { + locked.send_ping(); + } + } + + pub(crate) fn record_non_data(&self) { + #[cfg(feature = "runtime")] + { + let shared = if let Some(ref shared) = self.shared { + shared + } else { + return; + }; + + let mut locked = shared.lock().unwrap(); + + locked.update_last_read_at(); + } + } + + /// If the incoming stream is already closed, convert self into + /// a disabled reporter. 
+ #[cfg(feature = "client")] + pub(super) fn for_stream(self, stream: &h2::RecvStream) -> Self { + if stream.is_end_stream() { + disabled() + } else { + self + } + } + + pub(super) fn ensure_not_timed_out(&self) -> crate::Result<()> { + #[cfg(feature = "runtime")] + { + if let Some(ref shared) = self.shared { + let locked = shared.lock().unwrap(); + if locked.is_keep_alive_timed_out { + return Err(KeepAliveTimedOut.crate_error()); + } + } + } + + // else + Ok(()) + } +} + +// ===== impl Ponger ===== + +impl Ponger { + pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll { + let now = Instant::now(); + let mut locked = self.shared.lock().unwrap(); + #[cfg(feature = "runtime")] + let is_idle = self.is_idle(); + + #[cfg(feature = "runtime")] + { + if let Some(ref mut ka) = self.keep_alive { + ka.schedule(is_idle, &locked); + ka.maybe_ping(cx, &mut locked); + } + } + + if !locked.is_ping_sent() { + // XXX: this doesn't register a waker...? + return Poll::Pending; + } + + match locked.ping_pong.poll_pong(cx) { + Poll::Ready(Ok(_pong)) => { + let start = locked + .ping_sent_at + .expect("pong received implies ping_sent_at"); + locked.ping_sent_at = None; + let rtt = now - start; + trace!("recv pong"); + + #[cfg(feature = "runtime")] + { + if let Some(ref mut ka) = self.keep_alive { + locked.update_last_read_at(); + ka.schedule(is_idle, &locked); + } + } + + if let Some(ref mut bdp) = self.bdp { + let bytes = locked.bytes.expect("bdp enabled implies bytes"); + locked.bytes = Some(0); // reset + trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt); + + let update = bdp.calculate(bytes, rtt); + locked.next_bdp_at = Some(now + bdp.ping_delay); + if let Some(update) = update { + return Poll::Ready(Ponged::SizeUpdate(update)); + } + } + } + Poll::Ready(Err(e)) => { + debug!("pong error: {}", e); + } + Poll::Pending => { + #[cfg(feature = "runtime")] + { + if let Some(ref mut ka) = self.keep_alive { + if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) { 
+ self.keep_alive = None; + locked.is_keep_alive_timed_out = true; + return Poll::Ready(Ponged::KeepAliveTimedOut); + } + } + } + } + } + + // XXX: this doesn't register a waker...? + Poll::Pending + } + + #[cfg(feature = "runtime")] + fn is_idle(&self) -> bool { + Arc::strong_count(&self.shared) <= 2 + } +} + +// ===== impl Shared ===== + +impl Shared { + fn send_ping(&mut self) { + match self.ping_pong.send_ping(Ping::opaque()) { + Ok(()) => { + self.ping_sent_at = Some(Instant::now()); + trace!("sent ping"); + } + Err(err) => { + debug!("error sending ping: {}", err); + } + } + } + + fn is_ping_sent(&self) -> bool { + self.ping_sent_at.is_some() + } + + #[cfg(feature = "runtime")] + fn update_last_read_at(&mut self) { + if self.last_read_at.is_some() { + self.last_read_at = Some(Instant::now()); + } + } + + #[cfg(feature = "runtime")] + fn last_read_at(&self) -> Instant { + self.last_read_at.expect("keep_alive expects last_read_at") + } +} + +// ===== impl Bdp ===== + +/// Any higher than this likely will be hitting the TCP flow control. +const BDP_LIMIT: usize = 1024 * 1024 * 16; + +impl Bdp { + fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option { + // No need to do any math if we're at the limit. + if self.bdp as usize == BDP_LIMIT { + self.stabilize_delay(); + return None; + } + + // average the rtt + let rtt = seconds(rtt); + if self.rtt == 0.0 { + // First sample means rtt is first rtt. + self.rtt = rtt; + } else { + // Weigh this rtt as 1/8 for a moving average. + self.rtt += (rtt - self.rtt) * 0.125; + } + + // calculate the current bandwidth + let bw = (bytes as f64) / (self.rtt * 1.5); + trace!("current bandwidth = {:.1}B/s", bw); + + if bw < self.max_bandwidth { + // not a faster bandwidth, so don't update + self.stabilize_delay(); + return None; + } else { + self.max_bandwidth = bw; + } + + // if the current `bytes` sample is at least 2/3 the previous + // bdp, increase to double the current sample. 
+ if bytes >= self.bdp as usize * 2 / 3 { + self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize; + trace!("BDP increased to {}", self.bdp); + + self.stable_count = 0; + self.ping_delay /= 2; + Some(self.bdp) + } else { + self.stabilize_delay(); + None + } + } + + fn stabilize_delay(&mut self) { + if self.ping_delay < Duration::from_secs(10) { + self.stable_count += 1; + + if self.stable_count >= 2 { + self.ping_delay *= 4; + self.stable_count = 0; + } + } + } +} + +fn seconds(dur: Duration) -> f64 { + const NANOS_PER_SEC: f64 = 1_000_000_000.0; + let secs = dur.as_secs() as f64; + secs + (dur.subsec_nanos() as f64) / NANOS_PER_SEC +} + +// ===== impl KeepAlive ===== + +#[cfg(feature = "runtime")] +impl KeepAlive { + fn schedule(&mut self, is_idle: bool, shared: &Shared) { + match self.state { + KeepAliveState::Init => { + if !self.while_idle && is_idle { + return; + } + + self.state = KeepAliveState::Scheduled; + let interval = shared.last_read_at() + self.interval; + self.timer.as_mut().reset(interval); + } + KeepAliveState::PingSent => { + if shared.is_ping_sent() { + return; + } + + self.state = KeepAliveState::Scheduled; + let interval = shared.last_read_at() + self.interval; + self.timer.as_mut().reset(interval); + } + KeepAliveState::Scheduled => (), + } + } + + fn maybe_ping(&mut self, cx: &mut task::Context<'_>, shared: &mut Shared) { + match self.state { + KeepAliveState::Scheduled => { + if Pin::new(&mut self.timer).poll(cx).is_pending() { + return; + } + // check if we've received a frame while we were scheduled + if shared.last_read_at() + self.interval > self.timer.deadline() { + self.state = KeepAliveState::Init; + cx.waker().wake_by_ref(); // schedule us again + return; + } + trace!("keep-alive interval ({:?}) reached", self.interval); + shared.send_ping(); + self.state = KeepAliveState::PingSent; + let timeout = Instant::now() + self.timeout; + self.timer.as_mut().reset(timeout); + } + KeepAliveState::Init | KeepAliveState::PingSent => (), + } + } + 
+ fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> { + match self.state { + KeepAliveState::PingSent => { + if Pin::new(&mut self.timer).poll(cx).is_pending() { + return Ok(()); + } + trace!("keep-alive timeout ({:?}) reached", self.timeout); + Err(KeepAliveTimedOut) + } + KeepAliveState::Init | KeepAliveState::Scheduled => Ok(()), + } + } +} + +// ===== impl KeepAliveTimedOut ===== + +#[cfg(feature = "runtime")] +impl KeepAliveTimedOut { + pub(super) fn crate_error(self) -> crate::Error { + crate::Error::new(crate::error::Kind::Http2).with(self) + } +} + +#[cfg(feature = "runtime")] +impl fmt::Display for KeepAliveTimedOut { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("keep-alive timed out") + } +} + +#[cfg(feature = "runtime")] +impl std::error::Error for KeepAliveTimedOut { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&crate::error::TimedOut) + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/h2/server.rs b/.cargo-vendor/hyper-0.14.30/src/proto/h2/server.rs new file mode 100644 index 0000000000..b7bff590ff --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/h2/server.rs @@ -0,0 +1,558 @@ +use std::error::Error as StdError; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +#[cfg(feature = "runtime")] +use std::time::Duration; + +use bytes::Bytes; +use h2::server::{Connection, Handshake, SendResponse}; +use h2::{Reason, RecvStream}; +use http::{Method, Request}; +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace, warn}; + +use super::{ping, PipeToSendStream, SendBuf}; +use crate::body::HttpBody; +use crate::common::date; +use crate::common::exec::ConnStreamExec; +use crate::ext::Protocol; +use crate::headers; +use crate::proto::h2::ping::Recorder; +use crate::proto::h2::{H2Upgraded, UpgradedSendStream}; +use crate::proto::Dispatched; +use 
crate::service::HttpService; + +use crate::upgrade::{OnUpgrade, Pending, Upgraded}; +use crate::{Body, Response}; + +// Our defaults are chosen for the "majority" case, which usually are not +// resource constrained, and so the spec default of 64kb can be too limiting +// for performance. +// +// At the same time, a server more often has multiple clients connected, and +// so is more likely to use more resources than a client would. +const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb +const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb +const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb +const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb +const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20; // 16 MB "sane default" taken from golang http2 +const DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS: usize = 1024; + +#[derive(Clone, Debug)] +pub(crate) struct Config { + pub(crate) adaptive_window: bool, + pub(crate) initial_conn_window_size: u32, + pub(crate) initial_stream_window_size: u32, + pub(crate) max_frame_size: u32, + pub(crate) enable_connect_protocol: bool, + pub(crate) max_concurrent_streams: Option, + pub(crate) max_pending_accept_reset_streams: Option, + pub(crate) max_local_error_reset_streams: Option, + #[cfg(feature = "runtime")] + pub(crate) keep_alive_interval: Option, + #[cfg(feature = "runtime")] + pub(crate) keep_alive_timeout: Duration, + pub(crate) max_send_buffer_size: usize, + pub(crate) max_header_list_size: u32, +} + +impl Default for Config { + fn default() -> Config { + Config { + adaptive_window: false, + initial_conn_window_size: DEFAULT_CONN_WINDOW, + initial_stream_window_size: DEFAULT_STREAM_WINDOW, + max_frame_size: DEFAULT_MAX_FRAME_SIZE, + enable_connect_protocol: false, + max_concurrent_streams: None, + max_pending_accept_reset_streams: None, + max_local_error_reset_streams: Some(DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS), + #[cfg(feature = "runtime")] + keep_alive_interval: None, + #[cfg(feature = "runtime")] + 
keep_alive_timeout: Duration::from_secs(20), + max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, + max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, + } + } +} + +pin_project! { + pub(crate) struct Server + where + S: HttpService, + B: HttpBody, + { + exec: E, + service: S, + state: State, + } +} + +enum State +where + B: HttpBody, +{ + Handshaking { + ping_config: ping::Config, + hs: Handshake>, + }, + Serving(Serving), + Closed, +} + +struct Serving +where + B: HttpBody, +{ + ping: Option<(ping::Recorder, ping::Ponger)>, + conn: Connection>, + closing: Option, +} + +impl Server +where + T: AsyncRead + AsyncWrite + Unpin, + S: HttpService, + S::Error: Into>, + B: HttpBody + 'static, + E: ConnStreamExec, +{ + pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server { + let mut builder = h2::server::Builder::default(); + builder + .initial_window_size(config.initial_stream_window_size) + .initial_connection_window_size(config.initial_conn_window_size) + .max_frame_size(config.max_frame_size) + .max_header_list_size(config.max_header_list_size) + .max_local_error_reset_streams(config.max_local_error_reset_streams) + .max_send_buffer_size(config.max_send_buffer_size); + if let Some(max) = config.max_concurrent_streams { + builder.max_concurrent_streams(max); + } + if let Some(max) = config.max_pending_accept_reset_streams { + builder.max_pending_accept_reset_streams(max); + } + if config.enable_connect_protocol { + builder.enable_connect_protocol(); + } + let handshake = builder.handshake(io); + + let bdp = if config.adaptive_window { + Some(config.initial_stream_window_size) + } else { + None + }; + + let ping_config = ping::Config { + bdp_initial_window: bdp, + #[cfg(feature = "runtime")] + keep_alive_interval: config.keep_alive_interval, + #[cfg(feature = "runtime")] + keep_alive_timeout: config.keep_alive_timeout, + // If keep-alive is enabled for servers, always enabled while + // idle, so it can more aggressively close dead connections. 
+ #[cfg(feature = "runtime")] + keep_alive_while_idle: true, + }; + + Server { + exec, + state: State::Handshaking { + ping_config, + hs: handshake, + }, + service, + } + } + + pub(crate) fn graceful_shutdown(&mut self) { + trace!("graceful_shutdown"); + match self.state { + State::Handshaking { .. } => { + // fall-through, to replace state with Closed + } + State::Serving(ref mut srv) => { + if srv.closing.is_none() { + srv.conn.graceful_shutdown(); + } + return; + } + State::Closed => { + return; + } + } + self.state = State::Closed; + } +} + +impl Future for Server +where + T: AsyncRead + AsyncWrite + Unpin, + S: HttpService, + S::Error: Into>, + B: HttpBody + 'static, + E: ConnStreamExec, +{ + type Output = crate::Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let me = &mut *self; + loop { + let next = match me.state { + State::Handshaking { + ref mut hs, + ref ping_config, + } => { + let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?; + let ping = if ping_config.is_enabled() { + let pp = conn.ping_pong().expect("conn.ping_pong"); + Some(ping::channel(pp, ping_config.clone())) + } else { + None + }; + State::Serving(Serving { + ping, + conn, + closing: None, + }) + } + State::Serving(ref mut srv) => { + ready!(srv.poll_server(cx, &mut me.service, &mut me.exec))?; + return Poll::Ready(Ok(Dispatched::Shutdown)); + } + State::Closed => { + // graceful_shutdown was called before handshaking finished, + // nothing to do here... + return Poll::Ready(Ok(Dispatched::Shutdown)); + } + }; + me.state = next; + } + } +} + +impl Serving +where + T: AsyncRead + AsyncWrite + Unpin, + B: HttpBody + 'static, +{ + fn poll_server( + &mut self, + cx: &mut Context<'_>, + service: &mut S, + exec: &mut E, + ) -> Poll> + where + S: HttpService, + S::Error: Into>, + E: ConnStreamExec, + { + if self.closing.is_none() { + loop { + self.poll_ping(cx); + + // Check that the service is ready to accept a new request. 
+ // + // - If not, just drive the connection some. + // - If ready, try to accept a new request from the connection. + match service.poll_ready(cx) { + Poll::Ready(Ok(())) => (), + Poll::Pending => { + // use `poll_closed` instead of `poll_accept`, + // in order to avoid accepting a request. + ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?; + trace!("incoming connection complete"); + return Poll::Ready(Ok(())); + } + Poll::Ready(Err(err)) => { + let err = crate::Error::new_user_service(err); + debug!("service closed: {}", err); + + let reason = err.h2_reason(); + if reason == Reason::NO_ERROR { + // NO_ERROR is only used for graceful shutdowns... + trace!("interpreting NO_ERROR user error as graceful_shutdown"); + self.conn.graceful_shutdown(); + } else { + trace!("abruptly shutting down with {:?}", reason); + self.conn.abrupt_shutdown(reason); + } + self.closing = Some(err); + break; + } + } + + // When the service is ready, accepts an incoming request. + match ready!(self.conn.poll_accept(cx)) { + Some(Ok((req, mut respond))) => { + trace!("incoming request"); + let content_length = headers::content_length_parse_all(req.headers()); + let ping = self + .ping + .as_ref() + .map(|ping| ping.0.clone()) + .unwrap_or_else(ping::disabled); + + // Record the headers received + ping.record_non_data(); + + let is_connect = req.method() == Method::CONNECT; + let (mut parts, stream) = req.into_parts(); + let (mut req, connect_parts) = if !is_connect { + ( + Request::from_parts( + parts, + crate::Body::h2(stream, content_length.into(), ping), + ), + None, + ) + } else { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect request with non-zero body not supported"); + respond.send_reset(h2::Reason::INTERNAL_ERROR); + return Poll::Ready(Ok(())); + } + let (pending, upgrade) = crate::upgrade::pending(); + debug_assert!(parts.extensions.get::().is_none()); + parts.extensions.insert(upgrade); + ( + Request::from_parts(parts, 
crate::Body::empty()), + Some(ConnectParts { + pending, + ping, + recv_stream: stream, + }), + ) + }; + + if let Some(protocol) = req.extensions_mut().remove::() { + req.extensions_mut().insert(Protocol::from_inner(protocol)); + } + + let fut = H2Stream::new(service.call(req), connect_parts, respond); + exec.execute_h2stream(fut); + } + Some(Err(e)) => { + return Poll::Ready(Err(crate::Error::new_h2(e))); + } + None => { + // no more incoming streams... + if let Some((ref ping, _)) = self.ping { + ping.ensure_not_timed_out()?; + } + + trace!("incoming connection complete"); + return Poll::Ready(Ok(())); + } + } + } + } + + debug_assert!( + self.closing.is_some(), + "poll_server broke loop without closing" + ); + + ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?; + + Poll::Ready(Err(self.closing.take().expect("polled after error"))) + } + + fn poll_ping(&mut self, cx: &mut Context<'_>) { + if let Some((_, ref mut estimator)) = self.ping { + match estimator.poll(cx) { + Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { + self.conn.set_target_window_size(wnd); + let _ = self.conn.set_initial_window_size(wnd); + } + #[cfg(feature = "runtime")] + Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { + debug!("keep-alive timed out, closing connection"); + self.conn.abrupt_shutdown(h2::Reason::NO_ERROR); + } + Poll::Pending => {} + } + } + } +} + +pin_project! { + #[allow(missing_debug_implementations)] + pub struct H2Stream + where + B: HttpBody, + { + reply: SendResponse>, + #[pin] + state: H2StreamState, + } +} + +pin_project! 
{ + #[project = H2StreamStateProj] + enum H2StreamState + where + B: HttpBody, + { + Service { + #[pin] + fut: F, + connect_parts: Option, + }, + Body { + #[pin] + pipe: PipeToSendStream, + }, + } +} + +struct ConnectParts { + pending: Pending, + ping: Recorder, + recv_stream: RecvStream, +} + +impl H2Stream +where + B: HttpBody, +{ + fn new( + fut: F, + connect_parts: Option, + respond: SendResponse>, + ) -> H2Stream { + H2Stream { + reply: respond, + state: H2StreamState::Service { fut, connect_parts }, + } + } +} + +macro_rules! reply { + ($me:expr, $res:expr, $eos:expr) => {{ + match $me.reply.send_response($res, $eos) { + Ok(tx) => tx, + Err(e) => { + debug!("send response error: {}", e); + $me.reply.send_reset(Reason::INTERNAL_ERROR); + return Poll::Ready(Err(crate::Error::new_h2(e))); + } + } + }}; +} + +impl H2Stream +where + F: Future, E>>, + B: HttpBody, + B::Data: 'static, + B::Error: Into>, + E: Into>, +{ + fn poll2(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut me = self.project(); + loop { + let next = match me.state.as_mut().project() { + H2StreamStateProj::Service { + fut: h, + connect_parts, + } => { + let res = match h.poll(cx) { + Poll::Ready(Ok(r)) => r, + Poll::Pending => { + // Response is not yet ready, so we want to check if the client has sent a + // RST_STREAM frame which would cancel the current request. + if let Poll::Ready(reason) = + me.reply.poll_reset(cx).map_err(crate::Error::new_h2)? 
+ { + debug!("stream received RST_STREAM: {:?}", reason); + return Poll::Ready(Err(crate::Error::new_h2(reason.into()))); + } + return Poll::Pending; + } + Poll::Ready(Err(e)) => { + let err = crate::Error::new_user_service(e); + warn!("http2 service errored: {}", err); + me.reply.send_reset(err.h2_reason()); + return Poll::Ready(Err(err)); + } + }; + + let (head, body) = res.into_parts(); + let mut res = ::http::Response::from_parts(head, ()); + super::strip_connection_headers(res.headers_mut(), false); + + // set Date header if it isn't already set... + res.headers_mut() + .entry(::http::header::DATE) + .or_insert_with(date::update_and_header_value); + + if let Some(connect_parts) = connect_parts.take() { + if res.status().is_success() { + if headers::content_length_parse_all(res.headers()) + .map_or(false, |len| len != 0) + { + warn!("h2 successful response to CONNECT request with body not supported"); + me.reply.send_reset(h2::Reason::INTERNAL_ERROR); + return Poll::Ready(Err(crate::Error::new_user_header())); + } + let send_stream = reply!(me, res, false); + connect_parts.pending.fulfill(Upgraded::new( + H2Upgraded { + ping: connect_parts.ping, + recv_stream: connect_parts.recv_stream, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + buf: Bytes::new(), + }, + Bytes::new(), + )); + return Poll::Ready(Ok(())); + } + } + + if !body.is_end_stream() { + // automatically set Content-Length from body... 
+ if let Some(len) = body.size_hint().exact() { + headers::set_content_length_if_missing(res.headers_mut(), len); + } + + let body_tx = reply!(me, res, false); + H2StreamState::Body { + pipe: PipeToSendStream::new(body, body_tx), + } + } else { + reply!(me, res, true); + return Poll::Ready(Ok(())); + } + } + H2StreamStateProj::Body { pipe } => { + return pipe.poll(cx); + } + }; + me.state.set(next); + } + } +} + +impl Future for H2Stream +where + F: Future, E>>, + B: HttpBody, + B::Data: 'static, + B::Error: Into>, + E: Into>, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.poll2(cx).map(|res| { + if let Err(e) = res { + debug!("stream error: {}", e); + } + }) + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/proto/mod.rs b/.cargo-vendor/hyper-0.14.30/src/proto/mod.rs new file mode 100644 index 0000000000..3628576dc1 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/proto/mod.rs @@ -0,0 +1,71 @@ +//! Pieces pertaining to the HTTP message protocol. + +cfg_feature! { + #![feature = "http1"] + + pub(crate) mod h1; + + pub(crate) use self::h1::Conn; + + #[cfg(feature = "client")] + pub(crate) use self::h1::dispatch; + #[cfg(feature = "server")] + pub(crate) use self::h1::ServerTransaction; +} + +#[cfg(feature = "http2")] +pub(crate) mod h2; + +/// An Incoming Message head. Includes request/status line, and headers. +#[derive(Debug, Default)] +pub(crate) struct MessageHead { + /// HTTP version of the message. + pub(crate) version: http::Version, + /// Subject (request line or status line) of Incoming message. + pub(crate) subject: S, + /// Headers of the Incoming message. + pub(crate) headers: http::HeaderMap, + /// Extensions. + extensions: http::Extensions, +} + +/// An incoming request message. 
+#[cfg(feature = "http1")] +pub(crate) type RequestHead = MessageHead; + +#[derive(Debug, Default, PartialEq)] +#[cfg(feature = "http1")] +pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri); + +/// An incoming response message. +#[cfg(all(feature = "http1", feature = "client"))] +pub(crate) type ResponseHead = MessageHead; + +#[derive(Debug)] +#[cfg(feature = "http1")] +pub(crate) enum BodyLength { + /// Content-Length + Known(u64), + /// Transfer-Encoding: chunked (if h1) + Unknown, +} + +/// Status of when a Dispatcher future completes. +pub(crate) enum Dispatched { + /// Dispatcher completely shutdown connection. + Shutdown, + /// Dispatcher has pending upgrade, and so did not shutdown. + #[cfg(feature = "http1")] + Upgrade(crate::upgrade::Pending), +} + +impl MessageHead { + fn into_response(self, body: B) -> http::Response { + let mut res = http::Response::new(body); + *res.status_mut() = self.subject; + *res.headers_mut() = self.headers; + *res.version_mut() = self.version; + *res.extensions_mut() = self.extensions; + res + } +} diff --git a/.cargo-vendor/hyper/src/rt.rs b/.cargo-vendor/hyper-0.14.30/src/rt.rs similarity index 100% rename from .cargo-vendor/hyper/src/rt.rs rename to .cargo-vendor/hyper-0.14.30/src/rt.rs diff --git a/.cargo-vendor/hyper/src/server/accept.rs b/.cargo-vendor/hyper-0.14.30/src/server/accept.rs similarity index 100% rename from .cargo-vendor/hyper/src/server/accept.rs rename to .cargo-vendor/hyper-0.14.30/src/server/accept.rs diff --git a/.cargo-vendor/hyper/src/server/conn.rs b/.cargo-vendor/hyper-0.14.30/src/server/conn.rs similarity index 100% rename from .cargo-vendor/hyper/src/server/conn.rs rename to .cargo-vendor/hyper-0.14.30/src/server/conn.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/server/conn/http1.rs b/.cargo-vendor/hyper-0.14.30/src/server/conn/http1.rs new file mode 100644 index 0000000000..ab833b938b --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/server/conn/http1.rs @@ -0,0 
+1,449 @@ +//! HTTP/1 Server Connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +use bytes::Bytes; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::proto; +use crate::service::HttpService; + +type Http1Dispatcher = proto::h1::Dispatcher< + proto::h1::dispatch::Server, + B, + T, + proto::ServerTransaction, +>; + +pin_project_lite::pin_project! { + /// A future binding an http1 connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + conn: Http1Dispatcher, + } +} + +/// A configuration builder for HTTP/1 server connections. +#[derive(Clone, Debug)] +pub struct Builder { + h1_half_close: bool, + h1_keep_alive: bool, + h1_title_case_headers: bool, + h1_preserve_header_case: bool, + h1_header_read_timeout: Option, + h1_writev: Option, + max_buf_size: Option, + pipeline_flush: bool, +} + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// If the client sent additional bytes after its last request, and + /// this connection "ended" with an upgrade, the read buffer will contain + /// those bytes. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + /// The `Service` used to serve this connection. 
+ pub service: S, + _inner: (), +} + +// ===== impl Connection ===== + +impl fmt::Debug for Connection +where + S: HttpService, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still + /// pending. If called after `Connection::poll` has resolved, this does + /// nothing. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + self.conn.disable_keep_alive(); + } + + /// Return the inner IO object, and additional information. + /// + /// If the IO object has been "rewound" the io will not contain those bytes rewound. + /// This should only be called after `poll_without_shutdown` signals + /// that the connection is "done". Otherwise, it may not have finished + /// flushing all necessary HTTP bytes. + /// + /// # Panics + /// This method will panic if this connection is using an h2 protocol. + pub fn into_parts(self) -> Parts { + let (io, read_buf, dispatch) = self.conn.into_inner(); + Parts { + io, + read_buf, + service: dispatch.into_service(), + _inner: (), + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actually shutdown the IO object. Instead you + /// would take it back using `into_parts`. 
+ pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> + where + S: Unpin, + S::Future: Unpin, + B: Unpin, + { + self.conn.poll_without_shutdown(cx) + } + + /// Prevent shutdown of the underlying IO object at the end of service the request, + /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. + /// + /// # Error + /// + /// This errors if the underlying connection protocol is not HTTP/1. + pub fn without_shutdown(self) -> impl Future>> + where + S: Unpin, + S::Future: Unpin, + B: Unpin, + { + let mut zelf = Some(self); + futures_util::future::poll_fn(move |cx| { + ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?; + Poll::Ready(Ok(zelf.take().unwrap().into_parts())) + }) + } + + /// Enable this connection to support higher-level HTTP upgrades. + /// + /// See [the `upgrade` module](crate::upgrade) for more. + pub fn with_upgrades(self) -> upgrades::UpgradeableConnection + where + I: Send, + { + upgrades::UpgradeableConnection { inner: Some(self) } + } +} + +impl Future for Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.conn).poll(cx)) { + Ok(done) => { + match done { + proto::Dispatched::Shutdown => {} + proto::Dispatched::Upgrade(pending) => { + // With no `Send` bound on `I`, we can't try to do + // upgrades here. In case a user was trying to use + // `Body::on_upgrade` with this API, send a special + // error letting them know about that. + pending.manual(); + } + }; + return Poll::Ready(Ok(())); + } + Err(e) => Poll::Ready(Err(e)), + } + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Create a new connection builder. 
+ pub fn new() -> Self { + Self { + h1_half_close: false, + h1_keep_alive: true, + h1_title_case_headers: false, + h1_preserve_header_case: false, + h1_header_read_timeout: None, + h1_writev: None, + max_buf_size: None, + pipeline_flush: false, + } + } + /// Set whether HTTP/1 connections should support half-closures. + /// + /// Clients can chose to shutdown their write-side while waiting + /// for the server to respond. Setting this to `true` will + /// prevent closing the connection immediately if `read` + /// detects an EOF in the middle of a request. + /// + /// Default is `false`. + pub fn half_close(&mut self, val: bool) -> &mut Self { + self.h1_half_close = val; + self + } + + /// Enables or disables HTTP/1 keep-alive. + /// + /// Default is true. + pub fn keep_alive(&mut self, val: bool) -> &mut Self { + self.h1_keep_alive = val; + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Default is false. + pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self { + self.h1_title_case_headers = enabled; + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Request`. It will also look for and use + /// such an extension in any provided `Response`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Default is false. + pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self { + self.h1_preserve_header_case = enabled; + self + } + + /// Set a timeout for reading client request headers. If a client does not + /// transmit the entire header within this time, the connection is closed. + /// + /// Default is None. 
+ pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { + self.h1_header_read_timeout = Some(read_timeout); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn writev(&mut self, val: bool) -> &mut Self { + self.h1_writev = Some(val); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + assert!( + max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, + "the max_buf_size cannot be smaller than the minimum that h1 specifies." + ); + self.max_buf_size = Some(max); + self + } + + /// Aggregates flushes to better support pipelined responses. + /// + /// Experimental, may have bugs. + /// + /// Default is false. + pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { + self.pipeline_flush = enabled; + self + } + + // /// Set the timer used in background tasks. + // pub fn timer(&mut self, timer: M) -> &mut Self + // where + // M: Timer + Send + Sync + 'static, + // { + // self.timer = Time::Timer(Arc::new(timer)); + // self + // } + + /// Bind a connection together with a [`Service`](crate::service::Service). + /// + /// This returns a Future that must be polled in order for HTTP to be + /// driven on the connection. 
+ /// + /// # Example + /// + /// ``` + /// # use hyper::{Body as Incoming, Request, Response}; + /// # use hyper::service::Service; + /// # use hyper::server::conn::http1::Builder; + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # async fn run(some_io: I, some_service: S) + /// # where + /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + /// # S: Service, Response=hyper::Response> + Send + 'static, + /// # S::Error: Into>, + /// # S::Future: Send, + /// # { + /// let http = Builder::new(); + /// let conn = http.serve_connection(some_io, some_service); + /// + /// if let Err(e) = conn.await { + /// eprintln!("server connection error: {}", e); + /// } + /// # } + /// # fn main() {} + /// ``` + pub fn serve_connection(&self, io: I, service: S) -> Connection + where + S: HttpService, + S::Error: Into>, + S::ResBody: 'static, + ::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + { + let mut conn = proto::Conn::new(io); + if !self.h1_keep_alive { + conn.disable_keep_alive(); + } + if self.h1_half_close { + conn.set_allow_half_close(); + } + if self.h1_title_case_headers { + conn.set_title_case_headers(); + } + if self.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + if let Some(header_read_timeout) = self.h1_header_read_timeout { + conn.set_http1_header_read_timeout(header_read_timeout); + } + if let Some(writev) = self.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } + } + conn.set_flush_pipeline(self.pipeline_flush); + if let Some(max) = self.max_buf_size { + conn.set_max_buf_size(max); + } + let sd = proto::h1::dispatch::Server::new(service); + let proto = proto::h1::Dispatcher::new(sd, conn); + Connection { conn: proto } + } +} + +mod upgrades { + use crate::upgrade::Upgraded; + + use super::*; + + // A future binding a connection with a Service with Upgrade support. + // + // This type is unnameable outside the crate. 
+ #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct UpgradeableConnection + where + S: HttpService, + { + pub(super) inner: Option>, + } + + impl UpgradeableConnection + where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, + { + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown() + } + } + + impl Future for UpgradeableConnection + where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Error: Into>, + { + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) { + Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())), + Ok(proto::Dispatched::Upgrade(pending)) => { + let (io, buf, _) = self.inner.take().unwrap().conn.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + Poll::Ready(Ok(())) + } + Err(e) => Poll::Ready(Err(e)), + } + } + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/server/conn/http2.rs b/.cargo-vendor/hyper-0.14.30/src/server/conn/http2.rs new file mode 100644 index 0000000000..4f7df823ae --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/server/conn/http2.rs @@ -0,0 +1,260 @@ +//! 
HTTP/2 Server Connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::common::exec::ConnStreamExec; +use crate::proto; +use crate::service::HttpService; + +pin_project! { + /// A future binding an HTTP/2 connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + conn: proto::h2::Server, + } +} + +/// A configuration builder for HTTP/2 server connections. +#[derive(Clone, Debug)] +pub struct Builder { + exec: E, + h2_builder: proto::h2::server::Config, +} + +// ===== impl Connection ===== + +impl fmt::Debug for Connection +where + S: HttpService, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, + E: ConnStreamExec, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still + /// pending. If called after `Connection::poll` has resolved, this does + /// nothing. 
+ pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + self.conn.graceful_shutdown(); + } +} + +impl Future for Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, + E: ConnStreamExec, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.conn).poll(cx)) { + Ok(_done) => { + //TODO: the proto::h2::Server no longer needs to return + //the Dispatched enum + Poll::Ready(Ok(())) + } + Err(e) => Poll::Ready(Err(e)), + } + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Create a new connection builder. + /// + /// This starts with the default options, and an executor. + pub fn new(exec: E) -> Self { + Self { + exec: exec, + h2_builder: Default::default(), + } + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_stream_window_size = sz; + } + self + } + + /// Sets the max connection-level flow control for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_conn_window_size = sz; + } + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `initial_stream_window_size` and + /// `initial_connection_window_size`. 
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { + use proto::h2::SPEC_WINDOW_SIZE; + + self.h2_builder.adaptive_window = enabled; + if enabled { + self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; + self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; + } + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.max_frame_size = sz; + } + self + } + + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 + /// connections. + /// + /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { + self.h2_builder.max_concurrent_streams = max.into(); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { + self.h2_builder.keep_alive_interval = interval.into(); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout = timeout; + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently ~400KB, but may change. 
+ /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + pub fn enable_connect_protocol(&mut self) -> &mut Self { + self.h2_builder.enable_connect_protocol = true; + self + } + + /// Sets the max size of received header frames. + /// + /// Default is currently ~16MB, but may change. + pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { + self.h2_builder.max_header_list_size = max; + self + } + + // /// Set the timer used in background tasks. + // pub fn timer(&mut self, timer: M) -> &mut Self + // where + // M: Timer + Send + Sync + 'static, + // { + // self.timer = Time::Timer(Arc::new(timer)); + // self + // } + + /// Bind a connection together with a [`Service`](crate::service::Service). + /// + /// This returns a Future that must be polled in order for HTTP to be + /// driven on the connection. + pub fn serve_connection(&self, io: I, service: S) -> Connection + where + S: HttpService, + S::Error: Into>, + Bd: Body + 'static, + Bd::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + E: ConnStreamExec, + { + let proto = proto::h2::Server::new(io, service, &self.h2_builder, self.exec.clone()); + Connection { conn: proto } + } +} diff --git a/.cargo-vendor/hyper-0.14.30/src/server/mod.rs b/.cargo-vendor/hyper-0.14.30/src/server/mod.rs new file mode 100644 index 0000000000..65eb7063e5 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/server/mod.rs @@ -0,0 +1,173 @@ +//! HTTP Server +//! +//! A `Server` is created to listen on a port, parse HTTP requests, and hand +//! them off to a `Service`. +//! +//! There are two levels of APIs provide for constructing HTTP servers: +//! +//! - The higher-level [`Server`](Server) type. 
+//! - The lower-level [`conn`](conn) module. +//! +//! # Server +//! +//! The [`Server`](Server) is main way to start listening for HTTP requests. +//! It wraps a listener with a [`MakeService`](crate::service), and then should +//! be executed to start serving requests. +//! +//! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default. +//! +//! ## Examples +//! +//! ```no_run +//! use std::convert::Infallible; +//! use std::net::SocketAddr; +//! use hyper::{Body, Request, Response, Server}; +//! use hyper::service::{make_service_fn, service_fn}; +//! +//! async fn handle(_req: Request) -> Result, Infallible> { +//! Ok(Response::new(Body::from("Hello World"))) +//! } +//! +//! # #[cfg(feature = "runtime")] +//! #[tokio::main] +//! async fn main() { +//! // Construct our SocketAddr to listen on... +//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); +//! +//! // And a MakeService to handle each connection... +//! let make_service = make_service_fn(|_conn| async { +//! Ok::<_, Infallible>(service_fn(handle)) +//! }); +//! +//! // Then bind and serve... +//! let server = Server::bind(&addr).serve(make_service); +//! +//! // And run forever... +//! if let Err(e) = server.await { +//! eprintln!("server error: {}", e); +//! } +//! } +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} +//! ``` +//! +//! If you don't need the connection and your service implements `Clone` you can use +//! [`tower::make::Shared`] instead of `make_service_fn` which is a bit simpler: +//! +//! ```no_run +//! # use std::convert::Infallible; +//! # use std::net::SocketAddr; +//! # use hyper::{Body, Request, Response, Server}; +//! # use hyper::service::{make_service_fn, service_fn}; +//! # use tower::make::Shared; +//! # async fn handle(_req: Request) -> Result, Infallible> { +//! # Ok(Response::new(Body::from("Hello World"))) +//! # } +//! # #[cfg(feature = "runtime")] +//! #[tokio::main] +//! async fn main() { +//! // Construct our SocketAddr to listen on... 
+//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); +//! +//! // Shared is a MakeService that produces services by cloning an inner service... +//! let make_service = Shared::new(service_fn(handle)); +//! +//! // Then bind and serve... +//! let server = Server::bind(&addr).serve(make_service); +//! +//! // And run forever... +//! if let Err(e) = server.await { +//! eprintln!("server error: {}", e); +//! } +//! } +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} +//! ``` +//! +//! Passing data to your request handler can be done like so: +//! +//! ```no_run +//! use std::convert::Infallible; +//! use std::net::SocketAddr; +//! use hyper::{Body, Request, Response, Server}; +//! use hyper::service::{make_service_fn, service_fn}; +//! # #[cfg(feature = "runtime")] +//! use hyper::server::conn::AddrStream; +//! +//! #[derive(Clone)] +//! struct AppContext { +//! // Whatever data your application needs can go here +//! } +//! +//! async fn handle( +//! context: AppContext, +//! addr: SocketAddr, +//! req: Request +//! ) -> Result, Infallible> { +//! Ok(Response::new(Body::from("Hello World"))) +//! } +//! +//! # #[cfg(feature = "runtime")] +//! #[tokio::main] +//! async fn main() { +//! let context = AppContext { +//! // ... +//! }; +//! +//! // A `MakeService` that produces a `Service` to handle each connection. +//! let make_service = make_service_fn(move |conn: &AddrStream| { +//! // We have to clone the context to share it with each invocation of +//! // `make_service`. If your data doesn't implement `Clone` consider using +//! // an `std::sync::Arc`. +//! let context = context.clone(); +//! +//! // You can grab the address of the incoming connection like so. +//! let addr = conn.remote_addr(); +//! +//! // Create a `Service` for responding to the request. +//! let service = service_fn(move |req| { +//! handle(context.clone(), addr, req) +//! }); +//! +//! // Return the service to hyper. +//! async move { Ok::<_, Infallible>(service) } +//! }); +//! 
+//! // Run the server like above... +//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); +//! +//! let server = Server::bind(&addr).serve(make_service); +//! +//! if let Err(e) = server.await { +//! eprintln!("server error: {}", e); +//! } +//! } +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} +//! ``` +//! +//! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html + +pub mod accept; +pub mod conn; +#[cfg(feature = "tcp")] +mod tcp; + +pub use self::server::Server; + +cfg_feature! { + #![any(feature = "http1", feature = "http2")] + + #[cfg_attr(feature = "deprecated", allow(deprecated))] + pub(crate) mod server; + pub use self::server::Builder; + + mod shutdown; +} + +cfg_feature! { + #![not(any(feature = "http1", feature = "http2"))] + + mod server_stub; + use server_stub as server; +} diff --git a/.cargo-vendor/hyper/src/server/server.rs b/.cargo-vendor/hyper-0.14.30/src/server/server.rs similarity index 100% rename from .cargo-vendor/hyper/src/server/server.rs rename to .cargo-vendor/hyper-0.14.30/src/server/server.rs diff --git a/.cargo-vendor/hyper/src/server/server_stub.rs b/.cargo-vendor/hyper-0.14.30/src/server/server_stub.rs similarity index 100% rename from .cargo-vendor/hyper/src/server/server_stub.rs rename to .cargo-vendor/hyper-0.14.30/src/server/server_stub.rs diff --git a/.cargo-vendor/hyper/src/server/shutdown.rs b/.cargo-vendor/hyper-0.14.30/src/server/shutdown.rs similarity index 100% rename from .cargo-vendor/hyper/src/server/shutdown.rs rename to .cargo-vendor/hyper-0.14.30/src/server/shutdown.rs diff --git a/.cargo-vendor/hyper/src/server/tcp.rs b/.cargo-vendor/hyper-0.14.30/src/server/tcp.rs similarity index 100% rename from .cargo-vendor/hyper/src/server/tcp.rs rename to .cargo-vendor/hyper-0.14.30/src/server/tcp.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/service/http.rs b/.cargo-vendor/hyper-0.14.30/src/service/http.rs new file mode 100644 index 0000000000..d0586d8bd2 --- /dev/null +++ 
b/.cargo-vendor/hyper-0.14.30/src/service/http.rs @@ -0,0 +1,59 @@ +use std::error::Error as StdError; +use std::future::Future; +use std::task::{Context, Poll}; + +use crate::body::HttpBody; +use crate::{Request, Response}; + +/// An asynchronous function from `Request` to `Response`. +pub trait HttpService: sealed::Sealed { + /// The `HttpBody` body of the `http::Response`. + type ResBody: HttpBody; + + /// The error type that can occur within this `Service`. + /// + /// Note: Returning an `Error` to a hyper server will cause the connection + /// to be abruptly aborted. In most cases, it is better to return a `Response` + /// with a 4xx or 5xx status code. + type Error: Into>; + + /// The `Future` returned by this `Service`. + type Future: Future, Self::Error>>; + + #[doc(hidden)] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; + + #[doc(hidden)] + fn call(&mut self, req: Request) -> Self::Future; +} + +impl HttpService for T +where + T: tower_service::Service, Response = Response>, + B2: HttpBody, + T::Error: Into>, +{ + type ResBody = B2; + + type Error = T::Error; + type Future = T::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + tower_service::Service::poll_ready(self, cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + tower_service::Service::call(self, req) + } +} + +impl sealed::Sealed for T +where + T: tower_service::Service, Response = Response>, + B2: HttpBody, +{ +} + +mod sealed { + pub trait Sealed {} +} diff --git a/.cargo-vendor/hyper/src/service/make.rs b/.cargo-vendor/hyper-0.14.30/src/service/make.rs similarity index 100% rename from .cargo-vendor/hyper/src/service/make.rs rename to .cargo-vendor/hyper-0.14.30/src/service/make.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/service/mod.rs b/.cargo-vendor/hyper-0.14.30/src/service/mod.rs new file mode 100644 index 0000000000..22f850ca47 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/service/mod.rs @@ -0,0 +1,55 @@ +//! 
Asynchronous Services +//! +//! A [`Service`](Service) is a trait representing an asynchronous +//! function of a request to a response. It's similar to +//! `async fn(Request) -> Result`. +//! +//! The argument and return value isn't strictly required to be for HTTP. +//! Therefore, hyper uses several "trait aliases" to reduce clutter around +//! bounds. These are: +//! +//! - `HttpService`: This is blanketly implemented for all types that +//! implement `Service, Response = http::Response>`. +//! - `MakeService`: When a `Service` returns a new `Service` as its "response", +//! we consider it a `MakeService`. Again, blanketly implemented in those cases. +//! - `MakeConnection`: A `Service` that returns a "connection", a type that +//! implements `AsyncRead` and `AsyncWrite`. +//! +//! # HttpService +//! +//! In hyper, especially in the server setting, a `Service` is usually bound +//! to a single connection. It defines how to respond to **all** requests that +//! connection will receive. +//! +//! The helper [`service_fn`](service_fn) should be sufficient for most cases, but +//! if you need to implement `Service` for a type manually, you can follow the example +//! in `service_struct_impl.rs`. +//! +//! # MakeService +//! +//! Since a `Service` is bound to a single connection, a [`Server`](crate::Server) +//! needs a way to make them as it accepts connections. This is what a +//! `MakeService` does. +//! +//! Resources that need to be shared by all `Service`s can be put into a +//! `MakeService`, and then passed to individual `Service`s when `call` +//! is called. 
+ +pub use tower_service::Service; + +mod http; +mod make; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] +mod oneshot; +mod util; + +pub(super) use self::http::HttpService; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] +pub(super) use self::make::MakeConnection; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))] +pub(super) use self::make::MakeServiceRef; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] +pub(super) use self::oneshot::{oneshot, Oneshot}; + +pub use self::make::make_service_fn; +pub use self::util::service_fn; diff --git a/.cargo-vendor/hyper/src/service/oneshot.rs b/.cargo-vendor/hyper-0.14.30/src/service/oneshot.rs similarity index 100% rename from .cargo-vendor/hyper/src/service/oneshot.rs rename to .cargo-vendor/hyper-0.14.30/src/service/oneshot.rs diff --git a/.cargo-vendor/hyper-0.14.30/src/service/util.rs b/.cargo-vendor/hyper-0.14.30/src/service/util.rs new file mode 100644 index 0000000000..59760a6858 --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/service/util.rs @@ -0,0 +1,85 @@ +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::PhantomData; +use std::task::{Context, Poll}; + +use crate::body::HttpBody; +use crate::{Request, Response}; + +/// Create a `Service` from a function. +/// +/// # Example +/// +/// ``` +/// use hyper::{Body, Request, Response, Version}; +/// use hyper::service::service_fn; +/// +/// let service = service_fn(|req: Request| async move { +/// if req.version() == Version::HTTP_11 { +/// Ok(Response::new(Body::from("Hello World"))) +/// } else { +/// // Note: it's usually better to return a Response +/// // with an appropriate StatusCode instead of an Err. 
+/// Err("not HTTP/1.1, abort connection") +/// } +/// }); +/// ``` +pub fn service_fn(f: F) -> ServiceFn +where + F: FnMut(Request) -> S, + S: Future, +{ + ServiceFn { + f, + _req: PhantomData, + } +} + +/// Service returned by [`service_fn`] +pub struct ServiceFn { + f: F, + _req: PhantomData, +} + +impl tower_service::Service> + for ServiceFn +where + F: FnMut(Request) -> Ret, + ReqBody: HttpBody, + Ret: Future, E>>, + E: Into>, + ResBody: HttpBody, +{ + type Response = crate::Response; + type Error = E; + type Future = Ret; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + (self.f)(req) + } +} + +impl fmt::Debug for ServiceFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("impl Service").finish() + } +} + +impl Clone for ServiceFn +where + F: Clone, +{ + fn clone(&self) -> Self { + ServiceFn { + f: self.f.clone(), + _req: PhantomData, + } + } +} + +impl Copy for ServiceFn where F: Copy {} diff --git a/.cargo-vendor/hyper-0.14.30/src/upgrade.rs b/.cargo-vendor/hyper-0.14.30/src/upgrade.rs new file mode 100644 index 0000000000..a46a8d224d --- /dev/null +++ b/.cargo-vendor/hyper-0.14.30/src/upgrade.rs @@ -0,0 +1,381 @@ +//! HTTP Upgrades +//! +//! This module deals with managing [HTTP Upgrades][mdn] in hyper. Since +//! several concepts in HTTP allow for first talking HTTP, and then converting +//! to a different protocol, this module conflates them into a single API. +//! Those include: +//! +//! - HTTP/1.1 Upgrades +//! - HTTP `CONNECT` +//! +//! You are responsible for any other pre-requisites to establish an upgrade, +//! such as sending the appropriate headers, methods, and status codes. You can +//! then use [`on`][] to grab a `Future` which will resolve to the upgraded +//! connection object, or an error if the upgrade fails. +//! +//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism +//! +//! 
# Client +//! +//! Sending an HTTP upgrade from the [`client`](super::client) involves setting +//! either the appropriate method, if wanting to `CONNECT`, or headers such as +//! `Upgrade` and `Connection`, on the `http::Request`. Once receiving the +//! `http::Response` back, you must check for the specific information that the +//! upgrade is agreed upon by the server (such as a `101` status code), and then +//! get the `Future` from the `Response`. +//! +//! # Server +//! +//! Receiving upgrade requests in a server requires you to check the relevant +//! headers in a `Request`, and if an upgrade should be done, you then send the +//! corresponding headers in a response. To then wait for hyper to finish the +//! upgrade, you call `on()` with the `Request`, and then can spawn a task +//! awaiting it. +//! +//! # Example +//! +//! See [this example][example] showing how upgrades work with both +//! Clients and Servers. +//! +//! [example]: https://github.com/hyperium/hyper/blob/master/examples/upgrades.rs + +use std::any::TypeId; +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::io; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use bytes::Bytes; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::sync::oneshot; +#[cfg(any(feature = "http1", feature = "http2"))] +use tracing::trace; + +use crate::common::io::Rewind; + +/// An upgraded HTTP connection. +/// +/// This type holds a trait object internally of the original IO that +/// was used to speak HTTP before the upgrade. It can be used directly +/// as a `Read` or `Write` for convenience. +/// +/// Alternatively, if the exact type is known, this can be deconstructed +/// into its parts. +pub struct Upgraded { + io: Rewind>, +} + +/// A future for a possible HTTP upgrade. +/// +/// If no upgrade was available, or it doesn't succeed, yields an `Error`. 
+pub struct OnUpgrade { + rx: Option>>, +} + +/// The deconstructed parts of an [`Upgraded`](Upgraded) type. +/// +/// Includes the original IO type, and a read buffer of bytes that the +/// HTTP state machine may have already read before completing an upgrade. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used before the upgrade. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// For instance, if the `Connection` is used for an HTTP upgrade request, + /// it is possible the server sent back the first bytes of the new protocol + /// along with the response upgrade. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + _inner: (), +} + +/// Gets a pending HTTP upgrade from this message. +/// +/// This can be called on the following types: +/// +/// - `http::Request` +/// - `http::Response` +/// - `&mut http::Request` +/// - `&mut http::Response` +pub fn on(msg: T) -> OnUpgrade { + msg.on_upgrade() +} + +#[cfg(any(feature = "http1", feature = "http2"))] +pub(super) struct Pending { + tx: oneshot::Sender>, +} + +#[cfg(any(feature = "http1", feature = "http2"))] +pub(super) fn pending() -> (Pending, OnUpgrade) { + let (tx, rx) = oneshot::channel(); + (Pending { tx }, OnUpgrade { rx: Some(rx) }) +} + +// ===== impl Upgraded ===== + +impl Upgraded { + #[cfg(any(feature = "http1", feature = "http2", test))] + pub(super) fn new(io: T, read_buf: Bytes) -> Self + where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + { + Upgraded { + io: Rewind::new_buffered(Box::new(io), read_buf), + } + } + + /// Tries to downcast the internal trait object to the type passed. + /// + /// On success, returns the downcasted parts. On error, returns the + /// `Upgraded` back. 
+ pub fn downcast(self) -> Result, Self> { + let (io, buf) = self.io.into_inner(); + match io.__hyper_downcast() { + Ok(t) => Ok(Parts { + io: *t, + read_buf: buf, + _inner: (), + }), + Err(io) => Err(Upgraded { + io: Rewind::new_buffered(io, buf), + }), + } + } +} + +impl AsyncRead for Upgraded { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + Pin::new(&mut self.io).poll_read(cx, buf) + } +} + +impl AsyncWrite for Upgraded { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.io).poll_write(cx, buf) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.io).poll_write_vectored(cx, bufs) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.io).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.io).poll_shutdown(cx) + } + + fn is_write_vectored(&self) -> bool { + self.io.is_write_vectored() + } +} + +impl fmt::Debug for Upgraded { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Upgraded").finish() + } +} + +// ===== impl OnUpgrade ===== + +impl OnUpgrade { + pub(super) fn none() -> Self { + OnUpgrade { rx: None } + } + + #[cfg(feature = "http1")] + pub(super) fn is_none(&self) -> bool { + self.rx.is_none() + } +} + +impl Future for OnUpgrade { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.rx { + Some(ref mut rx) => Pin::new(rx).poll(cx).map(|res| match res { + Ok(Ok(upgraded)) => Ok(upgraded), + Ok(Err(err)) => Err(err), + Err(_oneshot_canceled) => Err(crate::Error::new_canceled().with(UpgradeExpected)), + }), + None => Poll::Ready(Err(crate::Error::new_user_no_upgrade())), + } + } +} + +impl fmt::Debug for OnUpgrade { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OnUpgrade").finish() + } +} + +// ===== impl Pending ===== + +#[cfg(any(feature = "http1", feature = "http2"))] +impl Pending { + pub(super) fn fulfill(self, upgraded: Upgraded) { + trace!("pending upgrade fulfill"); + let _ = self.tx.send(Ok(upgraded)); + } + + #[cfg(feature = "http1")] + /// Don't fulfill the pending Upgrade, but instead signal that + /// upgrades are handled manually. + pub(super) fn manual(self) { + trace!("pending upgrade handled manually"); + let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade())); + } +} + +// ===== impl UpgradeExpected ===== + +/// Error cause returned when an upgrade was expected but canceled +/// for whatever reason. +/// +/// This likely means the actual `Conn` future wasn't polled and upgraded. +#[derive(Debug)] +struct UpgradeExpected; + +impl fmt::Display for UpgradeExpected { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("upgrade expected but not completed") + } +} + +impl StdError for UpgradeExpected {} + +// ===== impl Io ===== + +pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { + fn __hyper_type_id(&self) -> TypeId { + TypeId::of::() + } +} + +impl Io for T {} + +impl dyn Io + Send { + fn __hyper_is(&self) -> bool { + let t = TypeId::of::(); + self.__hyper_type_id() == t + } + + fn __hyper_downcast(self: Box) -> Result, Box> { + if self.__hyper_is::() { + // Taken from `std::error::Error::downcast()`. 
+ unsafe { + let raw: *mut dyn Io = Box::into_raw(self); + Ok(Box::from_raw(raw as *mut T)) + } + } else { + Err(self) + } + } +} + +mod sealed { + use super::OnUpgrade; + + pub trait CanUpgrade { + fn on_upgrade(self) -> OnUpgrade; + } + + impl CanUpgrade for http::Request { + fn on_upgrade(mut self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } + } + + impl CanUpgrade for &'_ mut http::Request { + fn on_upgrade(self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } + } + + impl CanUpgrade for http::Response { + fn on_upgrade(mut self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } + } + + impl CanUpgrade for &'_ mut http::Response { + fn on_upgrade(self) -> OnUpgrade { + self.extensions_mut() + .remove::() + .unwrap_or_else(OnUpgrade::none) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn upgraded_downcast() { + let upgraded = Upgraded::new(Mock, Bytes::new()); + + let upgraded = upgraded.downcast::>>().unwrap_err(); + + upgraded.downcast::().unwrap(); + } + + // TODO: replace with tokio_test::io when it can test write_buf + struct Mock; + + impl AsyncRead for Mock { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &mut ReadBuf<'_>, + ) -> Poll> { + unreachable!("Mock::poll_read") + } + } + + impl AsyncWrite for Mock { + fn poll_write( + self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + // panic!("poll_write shouldn't be called"); + Poll::Ready(Ok(buf.len())) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + unreachable!("Mock::poll_flush") + } + + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + unreachable!("Mock::poll_shutdown") + } + } +} diff --git a/.cargo-vendor/hyper-util/.cargo-checksum.json b/.cargo-vendor/hyper-util/.cargo-checksum.json new file mode 100644 index 0000000000..a78fe09c76 --- 
/dev/null +++ b/.cargo-vendor/hyper-util/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"7a2a5a390b2b4ae981fd2b5745f8f3ecd6868de720648902184259738f788710","Cargo.lock":"aa194de54f8c5232e01cdf6e524ff8d7146224a5667017213de8b1d7d301b30c","Cargo.toml":"3cb151b145cd868b7553420928f00887ec8d1c5843d70946d4a1dd3f53856c97","LICENSE":"9bd20f39249c97415aa23d7a838118b18850a5a385671388d2450432fed01597","README.md":"b47cc5fcb41e5d802bd175f0014f5636eefb897419720ab125b446e0b7f6f666","examples/client.rs":"8153fa0d74b3653f9cee113e2d42448febe20562f9bccc7a32b2fc151fc47dd0","examples/server.rs":"2e04d3f86a76810bb170d8c1633b44b4c5989822b17dbed0f3b63c221cc23f1a","examples/server_graceful.rs":"e4d65a46c7d2f44266b5c540596da1e753eeb74c580c9265296352d6e8bde83c","src/client/client.rs":"fc61cc85756232e33addf964ed95ad4f221fefa00be1f1a341a93a2a90af041b","src/client/legacy/client.rs":"f8b58ee166c802c7dfdcfd2058caf60dfc8e5d2789e5250c39aaeb814f76db30","src/client/legacy/connect/capture.rs":"f9b67f56dc49c731a950f8e7e27b79561024885af6b7b5e943e9d804cd212c90","src/client/legacy/connect/dns.rs":"e6f2311ade27e8dc4d0939e20c93dc02c5a6bd8a44df98d817889613c8e0b8ff","src/client/legacy/connect/http.rs":"58e33295ca41b672781ee3f30718bc306c1ec1a2e801107519ed9b490680bf88","src/client/legacy/connect/mod.rs":"d7431df5609e1ddd048f7aca956d1b45bebbad9abaf319d17b733de4bdd2b9f8","src/client/legacy/mod.rs":"a893df119a025ce5e8226868f76130b50b15dd0a4c8ae007556b14f29cd3520a","src/client/legacy/pool.rs":"3335868501be6d5d4cd2491901819e5a269fd126f6cf8876591404207d650fe8","src/client/mod.rs":"64df79d2796f81ee82a6251d41a048ca0fbd6be5f2579e52038c7608ae54ae30","src/client/service.rs":"506c31d99d94f2ca26e31c2e5546e1d3cae040ed966bc51581fc53f3f7e2c1e6","src/common/exec.rs":"74f899c6a3972fcbecde525c1601ba16945db863e4451660cc6da6008c5d3738","src/common/lazy.rs":"fe7cfc2b88a15f7ac4ee2459f525857d8324d250c51ce6a3047c3fc4ef559bab","src/common/mod.rs":"08f23b07fc4ce6c581cec7b3f93b3935d541063f929ef5bc8fff897d10aa5d70","src/common/
rewind.rs":"7c15f53c005aad4e45c6e03cff60c94bbe7c373aadcd47241dce8e38fbba5d5c","src/common/sync.rs":"047d79590bbbf59aae35664b1b38f4d0e6478aa10116ab33a9f231e59b3fa32d","src/common/timer.rs":"7dd2aa8256e5588248fdfa9965152c4ed944dbdebff5cf377fd09b579229cfb5","src/error.rs":"c10d50e4fd57184f9c0529c0f42852845d24455c3563ef445becbec67df67d79","src/lib.rs":"feec0a110b170ba757b00f05a70c7533c0a4bd9de90784128a1c229f3a3df5f9","src/rt/mod.rs":"0a3dc8f7acbf5ca05935f9c230f99e785ed449e1f81a3c62263816eeb506e33e","src/rt/tokio.rs":"8be6de6f97ede1d31c9b879ccc4e1d8c08cd20a79fa739ee5de5c3b3af461a64","src/server/conn/auto.rs":"11a24efe8b160dbde71bdb497b72f01ac053f80947ce1911bb3724e8fd05bbee","src/server/conn/mod.rs":"f192ca3fb030351ad22dba11f58edf02503598e3f9c18652a445f3973d16b723","src/server/graceful.rs":"2ddca90631850878464295dba707ab7c98e28943481392cf27606327e7a7550f","src/server/mod.rs":"c30d25551b75ec6c2915ba9da0a5638488c2760a27c589c02415dec1441fb746","src/service.rs":"9eae915ff8d05b0f4250096f9d1d7dad3c3672162a722dfe93870e0e85c6e54b","tests/legacy_client.rs":"a1719e2823bddfc2a6151db3b256fdd37555b8522fde3a7ca4e38561a5bba968","tests/test_utils/mod.rs":"dd39bb194b214b51b225aa94b746d1168f79c04cde4935ab8ceb157e2f8addfe"},"package":"cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9"} \ No newline at end of file diff --git a/.cargo-vendor/hyper-util/CHANGELOG.md b/.cargo-vendor/hyper-util/CHANGELOG.md new file mode 100644 index 0000000000..cbd6b8e120 --- /dev/null +++ b/.cargo-vendor/hyper-util/CHANGELOG.md @@ -0,0 +1,68 @@ +# 0.1.7 (2024-08-06) + +- Add `Connected::poison()` to `legacy` client, a port from hyper v0.14.x. +- Add `Error::connect_info()` to `legacy` client, a port from hyper v0.14.x. + +# 0.1.6 (2024-07-01) + +- Add support for AIX operating system to `legacy` client. +- Fix `legacy` client to better use dying pooled connections. + +# 0.1.5 (2024-05-28) + +- Add `server::graceful::GracefulShutdown` helper to coordinate over many connections. 
+- Add `server::conn::auto::Connection::into_owned()` to unlink lifetime from `Builder`. +- Allow `service` module to be available with only `service` feature enabled. + +# 0.1.4 (2024-05-24) + +- Add `initial_max_send_streams()` to `legacy` client builder +- Add `max_pending_accept_reset_streams()` to `legacy` client builder +- Add `max_headers(usize)` to `auto` server builder +- Add `http1_onl()` and `http2_only()` to `auto` server builder +- Add connection capturing API to `legacy` client +- Add `impl Connection for TokioIo` +- Fix graceful shutdown hanging on reading the HTTP version + +# 0.1.3 (2024-01-31) + +### Added + +- Add `Error::is_connect()` which returns true if error came from client `Connect`. +- Add timer support to `legacy` pool. +- Add support to enable http1/http2 parts of `auto::Builder` individually. + +### Fixed + +- Fix `auto` connection so it can handle requests shorter than the h2 preface. +- Fix `legacy::Client` to no longer error when keep-alive is diabled. + +# 0.1.2 (2023-12-20) + +### Added + +- Add `graceful_shutdown()` method to `auto` connections. +- Add `rt::TokioTimer` type that implements `hyper::rt::Timer`. +- Add `service::TowerToHyperService` adapter, allowing using `tower::Service`s as a `hyper::service::Service`. +- Implement `Clone` for `auto::Builder`. +- Exports `legacy::{Builder, ResponseFuture}`. + +### Fixed + +- Enable HTTP/1 upgrades on the `legacy::Client`. +- Prevent divide by zero if DNS returns 0 addresses. + +# 0.1.1 (2023-11-17) + +### Added + +- Make `server-auto` enable the `server` feature. + +### Fixed + +- Reduce `Send` bounds requirements for `auto` connections. +- Docs: enable all features when generating. + +# 0.1.0 (2023-11-16) + +Initial release. 
diff --git a/.cargo-vendor/hyper-util/Cargo.lock b/.cargo-vendor/hyper-util/Cargo.lock new file mode 100644 index 0000000000..3f7a940851 --- /dev/null +++ b/.cargo-vendor/hyper-util/Cargo.lock @@ -0,0 +1,926 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" 
+version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bitflags" +version = "2.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "env_logger" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "futures-channel" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" + +[[package]] +name = "futures-sink" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" + +[[package]] +name = "futures-task" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" + +[[package]] +name = "futures-util" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "h2" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" + +[[package]] +name = "hermit-abi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + 
"httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-util" +version = "0.1.7" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "pin-project-lite", + "pnet_datalink", + "pretty_env_logger", + "socket2", + "tokio", + "tokio-test", + "tower", + "tower-service", + "tracing", +] + +[[package]] +name = "indexmap" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "ipnetwork" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e" +dependencies = [ + "serde", +] + +[[package]] +name = "is-terminal" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" +dependencies = [ + "hermit-abi", + "rustix", + "windows-sys 0.52.0", +] + +[[package]] +name = "itoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" + +[[package]] +name = "libc" +version = "0.2.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] 
+name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + +[[package]] +name = "object" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "pin-project" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pnet_base" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7" +dependencies = [ + "no-std-net", +] + +[[package]] +name = "pnet_datalink" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7" +dependencies = [ + "ipnetwork", + "libc", + "pnet_base", + "pnet_sys", + "winapi", +] + +[[package]] +name = "pnet_sys" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "pretty_env_logger" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c" +dependencies = [ + "env_logger", + "log", +] + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" 
+dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustix" +version = "0.38.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "serde" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "syn" +version = "2.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "tokio" +version = "1.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name 
= "tokio-test" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] + +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = 
"0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + 
+[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" diff --git a/.cargo-vendor/hyper-util/Cargo.toml b/.cargo-vendor/hyper-util/Cargo.toml new file mode 100644 index 0000000000..3a93fcfa5d --- /dev/null +++ b/.cargo-vendor/hyper-util/Cargo.toml @@ -0,0 +1,208 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.63" +name = "hyper-util" +version = "0.1.7" +authors = ["Sean McArthur "] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "hyper utilities" +homepage = "https://hyper.rs" +documentation = "https://docs.rs/hyper-util" +readme = "README.md" +keywords = [ + "http", + "hyper", + "hyperium", +] +categories = [ + "network-programming", + "web-programming::http-client", + "web-programming::http-server", +] +license = "MIT" +repository = "https://github.com/hyperium/hyper-util" + +[package.metadata.docs.rs] +features = ["full"] +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[lib] +name = "hyper_util" +path = "src/lib.rs" + +[[example]] +name = "client" +path = "examples/client.rs" +required-features = [ + "client-legacy", + "http1", + "tokio", +] + +[[example]] +name = "server" +path = "examples/server.rs" +required-features = [ + "server", + "http1", + "tokio", +] + +[[example]] +name = "server_graceful" +path = "examples/server_graceful.rs" +required-features = [ + "tokio", + "server-graceful", + "server-auto", +] + +[[test]] +name = 
"legacy_client" +path = "tests/legacy_client.rs" + +[dependencies.bytes] +version = "1" + +[dependencies.futures-channel] +version = "0.3" +optional = true + +[dependencies.futures-util] +version = "0.3.16" +default-features = false + +[dependencies.http] +version = "1.0" + +[dependencies.http-body] +version = "1.0.0" + +[dependencies.hyper] +version = "1.4.0" + +[dependencies.pin-project-lite] +version = "0.2.4" + +[dependencies.socket2] +version = "0.5" +features = ["all"] +optional = true + +[dependencies.tokio] +version = "1" +optional = true +default-features = false + +[dependencies.tower] +version = "0.4.1" +features = [ + "make", + "util", +] +optional = true +default-features = false + +[dependencies.tower-service] +version = "0.3" +optional = true + +[dependencies.tracing] +version = "0.1" +features = ["std"] +optional = true +default-features = false + +[dev-dependencies.bytes] +version = "1" + +[dev-dependencies.http-body-util] +version = "0.1.0" + +[dev-dependencies.hyper] +version = "1.4.0" +features = ["full"] + +[dev-dependencies.pretty_env_logger] +version = "0.5" + +[dev-dependencies.tokio] +version = "1" +features = [ + "macros", + "test-util", + "signal", +] + +[dev-dependencies.tokio-test] +version = "0.4" + +[features] +__internal_happy_eyeballs_tests = [] +client = [ + "hyper/client", + "dep:tracing", + "dep:futures-channel", + "dep:tower", + "dep:tower-service", +] +client-legacy = [ + "client", + "dep:socket2", + "tokio/sync", +] +default = [] +full = [ + "client", + "client-legacy", + "server", + "server-auto", + "server-graceful", + "service", + "http1", + "http2", + "tokio", +] +http1 = ["hyper/http1"] +http2 = ["hyper/http2"] +server = ["hyper/server"] +server-auto = [ + "server", + "http1", + "http2", +] +server-graceful = [ + "server", + "tokio/sync", +] +service = [ + "dep:tower", + "dep:tower-service", +] +tokio = [ + "dep:tokio", + "tokio/net", + "tokio/rt", + "tokio/time", +] + +[target.'cfg(any(target_os = "linux", target_os = 
"macos"))'.dev-dependencies.pnet_datalink] +version = "0.35.0" diff --git a/.cargo-vendor/hyper-util/LICENSE b/.cargo-vendor/hyper-util/LICENSE new file mode 100644 index 0000000000..159ac657e2 --- /dev/null +++ b/.cargo-vendor/hyper-util/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2023 Sean McArthur + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.cargo-vendor/hyper-util/README.md b/.cargo-vendor/hyper-util/README.md new file mode 100644 index 0000000000..79cb213252 --- /dev/null +++ b/.cargo-vendor/hyper-util/README.md @@ -0,0 +1,11 @@ +# hyper-util + +[![crates.io](https://img.shields.io/crates/v/hyper-util.svg)](https://crates.io/crates/hyper-util) +[![Released API docs](https://docs.rs/hyper-util/badge.svg)](https://docs.rs/hyper-util) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) + +A collection of utilities to do common things with [hyper](https://hyper.rs). + +## License + +This project is licensed under the [MIT license](./LICENSE). 
diff --git a/.cargo-vendor/hyper-util/examples/client.rs b/.cargo-vendor/hyper-util/examples/client.rs new file mode 100644 index 0000000000..04defac08f --- /dev/null +++ b/.cargo-vendor/hyper-util/examples/client.rs @@ -0,0 +1,37 @@ +use std::env; + +use http_body_util::Empty; +use hyper::Request; +use hyper_util::client::legacy::{connect::HttpConnector, Client}; + +#[tokio::main(flavor = "current_thread")] +async fn main() -> Result<(), Box> { + let url = match env::args().nth(1) { + Some(url) => url, + None => { + eprintln!("Usage: client "); + return Ok(()); + } + }; + + // HTTPS requires picking a TLS implementation, so give a better + // warning if the user tries to request an 'https' URL. + let url = url.parse::()?; + if url.scheme_str() != Some("http") { + eprintln!("This example only works with 'http' URLs."); + return Ok(()); + } + + let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build(HttpConnector::new()); + + let req = Request::builder() + .uri(url) + .body(Empty::::new())?; + + let resp = client.request(req).await?; + + eprintln!("{:?} {:?}", resp.version(), resp.status()); + eprintln!("{:#?}", resp.headers()); + + Ok(()) +} diff --git a/.cargo-vendor/hyper-util/examples/server.rs b/.cargo-vendor/hyper-util/examples/server.rs new file mode 100644 index 0000000000..48806d800d --- /dev/null +++ b/.cargo-vendor/hyper-util/examples/server.rs @@ -0,0 +1,75 @@ +//! This example runs a server that responds to any request with "Hello, world!" 
+ +use std::{convert::Infallible, error::Error}; + +use bytes::Bytes; +use http::{header::CONTENT_TYPE, Request, Response}; +use http_body_util::{combinators::BoxBody, BodyExt, Full}; +use hyper::{body::Incoming, service::service_fn}; +use hyper_util::{ + rt::{TokioExecutor, TokioIo}, + server::conn::auto::Builder, +}; +use tokio::{net::TcpListener, task::JoinSet}; + +/// Function from an incoming request to an outgoing response +/// +/// This function gets turned into a [`hyper::service::Service`] later via +/// [`service_fn`]. Instead of doing this, you could also write a type that +/// implements [`hyper::service::Service`] directly and pass that in place of +/// writing a function like this and calling [`service_fn`]. +/// +/// This function could use [`Full`] as the body type directly since that's +/// the only type that can be returned in this case, but this uses [`BoxBody`] +/// anyway for demonstration purposes, since this is what's usually used when +/// writing a more complex webserver library. 
+async fn handle_request( + _request: Request, +) -> Result>, Infallible> { + let response = Response::builder() + .header(CONTENT_TYPE, "text/plain") + .body(Full::new(Bytes::from("Hello, world!\n")).boxed()) + .expect("values provided to the builder should be valid"); + + Ok(response) +} + +#[tokio::main(flavor = "current_thread")] +async fn main() -> Result<(), Box> { + let listen_addr = "127.0.0.1:8000"; + let tcp_listener = TcpListener::bind(listen_addr).await?; + println!("listening on http://{listen_addr}"); + + let mut join_set = JoinSet::new(); + loop { + let (stream, addr) = match tcp_listener.accept().await { + Ok(x) => x, + Err(e) => { + eprintln!("failed to accept connection: {e}"); + continue; + } + }; + + let serve_connection = async move { + println!("handling a request from {addr}"); + + let result = Builder::new(TokioExecutor::new()) + .serve_connection(TokioIo::new(stream), service_fn(handle_request)) + .await; + + if let Err(e) = result { + eprintln!("error serving {addr}: {e}"); + } + + println!("handled a request from {addr}"); + }; + + join_set.spawn(serve_connection); + } + + // If you add a method for breaking the above loop (i.e. 
graceful shutdown), + // then you may also want to wait for all existing connections to finish + // being served before terminating the program, which can be done like this: + // + // while let Some(_) = join_set.join_next().await {} +} diff --git a/.cargo-vendor/hyper-util/examples/server_graceful.rs b/.cargo-vendor/hyper-util/examples/server_graceful.rs new file mode 100644 index 0000000000..bfb43a4be7 --- /dev/null +++ b/.cargo-vendor/hyper-util/examples/server_graceful.rs @@ -0,0 +1,64 @@ +use bytes::Bytes; +use std::convert::Infallible; +use std::pin::pin; +use std::time::Duration; +use tokio::net::TcpListener; + +#[tokio::main(flavor = "current_thread")] +async fn main() -> Result<(), Box> { + let listener = TcpListener::bind("127.0.0.1:8080").await?; + + let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); + let graceful = hyper_util::server::graceful::GracefulShutdown::new(); + let mut ctrl_c = pin!(tokio::signal::ctrl_c()); + + loop { + tokio::select! 
{ + conn = listener.accept() => { + let (stream, peer_addr) = match conn { + Ok(conn) => conn, + Err(e) => { + eprintln!("accept error: {}", e); + tokio::time::sleep(Duration::from_secs(1)).await; + continue; + } + }; + eprintln!("incomming connection accepted: {}", peer_addr); + + let stream = hyper_util::rt::TokioIo::new(Box::pin(stream)); + + let conn = server.serve_connection_with_upgrades(stream, hyper::service::service_fn(|_| async move { + tokio::time::sleep(Duration::from_secs(5)).await; // emulate slow request + let body = http_body_util::Full::::from("Hello World!".to_owned()); + Ok::<_, Infallible>(http::Response::new(body)) + })); + + let conn = graceful.watch(conn.into_owned()); + + tokio::spawn(async move { + if let Err(err) = conn.await { + eprintln!("connection error: {}", err); + } + eprintln!("connection dropped: {}", peer_addr); + }); + }, + + _ = ctrl_c.as_mut() => { + drop(listener); + eprintln!("Ctrl-C received, starting shutdown"); + break; + } + } + } + + tokio::select! { + _ = graceful.shutdown() => { + eprintln!("Gracefully shutdown!"); + }, + _ = tokio::time::sleep(Duration::from_secs(10)) => { + eprintln!("Waited 10 seconds for graceful shutdown, aborting..."); + } + } + + Ok(()) +} diff --git a/.cargo-vendor/hyper-util/src/client/client.rs b/.cargo-vendor/hyper-util/src/client/client.rs new file mode 100644 index 0000000000..a9fb244aed --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/client.rs @@ -0,0 +1,132 @@ +use hyper::{Request, Response}; +use tower::{Service, MakeService}; + +use super::connect::Connect; +use super::pool; + +pub struct Client { + // Hi there. So, let's take a 0.14.x hyper::Client, and build up its layers + // here. We don't need to fully expose the layers to start with, but that + // is the end goal. + // + // Client = MakeSvcAsService< + // SetHost< + // Http1RequestTarget< + // DelayedRelease< + // ConnectingPool + // > + // > + // > + // > + make_svc: M, +} + +// We might change this... 
:shrug: +type PoolKey = hyper::Uri; + +struct ConnectingPool { + connector: C, + pool: P, +} + +struct PoolableSvc(S); + +/// A marker to identify what version a pooled connection is. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[allow(dead_code)] +pub enum Ver { + Auto, + Http2, +} + +// ===== impl Client ===== + +impl Client +where + M: MakeService< + hyper::Uri, + Request<()>, + Response = Response<()>, + Error = E, + MakeError = E, + >, + //M: Service, + //M::Response: Service, Response = Response>, +{ + pub async fn request(&mut self, req: Request<()>) -> Result, E> { + let mut svc = self.make_svc.make_service(req.uri().clone()).await?; + svc.call(req).await + } +} + +impl Client +where + M: MakeService< + hyper::Uri, + Request<()>, + Response = Response<()>, + Error = E, + MakeError = E, + >, + //M: Service, + //M::Response: Service, Response = Response>, +{ + +} + +// ===== impl ConnectingPool ===== + +impl ConnectingPool +where + C: Connect, + C::_Svc: Unpin + Send + 'static, +{ + async fn connection_for(&self, target: PoolKey) -> Result, PoolKey>, ()> { + todo!() + } +} + +impl pool::Poolable for PoolableSvc +where + S: Unpin + Send + 'static, +{ + fn is_open(&self) -> bool { + /* + match self.tx { + PoolTx::Http1(ref tx) => tx.is_ready(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_ready(), + } + */ + true + } + + fn reserve(self) -> pool::Reservation { + /* + match self.tx { + PoolTx::Http1(tx) => Reservation::Unique(PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http1(tx), + }), + #[cfg(feature = "http2")] + PoolTx::Http2(tx) => { + let b = PoolClient { + conn_info: self.conn_info.clone(), + tx: PoolTx::Http2(tx.clone()), + }; + let a = PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http2(tx), + }; + Reservation::Shared(a, b) + } + } + */ + pool::Reservation::Unique(self) + } + + fn can_share(&self) -> bool { + false + //self.is_http2() + } +} diff --git a/.cargo-vendor/hyper-util/src/client/legacy/client.rs 
b/.cargo-vendor/hyper-util/src/client/legacy/client.rs new file mode 100644 index 0000000000..8562584b77 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/legacy/client.rs @@ -0,0 +1,1644 @@ +//! The legacy HTTP Client from 0.14.x +//! +//! This `Client` will eventually be deconstructed into more composable parts. +//! For now, to enable people to use hyper 1.0 quicker, this `Client` exists +//! in much the same way it did in hyper 0.14. + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::pin::Pin; +use std::task::{self, Poll}; +use std::time::Duration; + +use futures_util::future::{self, Either, FutureExt, TryFutureExt}; +use http::uri::Scheme; +use hyper::client::conn::TrySendError as ConnTrySendError; +use hyper::header::{HeaderValue, HOST}; +use hyper::rt::Timer; +use hyper::{body::Body, Method, Request, Response, Uri, Version}; +use tracing::{debug, trace, warn}; + +use super::connect::capture::CaptureConnectionExtension; +#[cfg(feature = "tokio")] +use super::connect::HttpConnector; +use super::connect::{Alpn, Connect, Connected, Connection}; +use super::pool::{self, Ver}; + +use crate::common::{lazy as hyper_lazy, timer, Exec, Lazy, SyncWrapper}; + +type BoxSendFuture = Pin + Send>>; + +/// A Client to make outgoing HTTP requests. +/// +/// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The +/// underlying connection pool will be reused. 
+#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +pub struct Client { + config: Config, + connector: C, + exec: Exec, + #[cfg(feature = "http1")] + h1_builder: hyper::client::conn::http1::Builder, + #[cfg(feature = "http2")] + h2_builder: hyper::client::conn::http2::Builder, + pool: pool::Pool, PoolKey>, +} + +#[derive(Clone, Copy, Debug)] +struct Config { + retry_canceled_requests: bool, + set_host: bool, + ver: Ver, +} + +/// Client errors +pub struct Error { + kind: ErrorKind, + source: Option>, + #[cfg(any(feature = "http1", feature = "http2"))] + connect_info: Option, +} + +#[derive(Debug)] +enum ErrorKind { + Canceled, + ChannelClosed, + Connect, + UserUnsupportedRequestMethod, + UserUnsupportedVersion, + UserAbsoluteUriRequired, + SendRequest, +} + +macro_rules! e { + ($kind:ident) => { + Error { + kind: ErrorKind::$kind, + source: None, + connect_info: None, + } + }; + ($kind:ident, $src:expr) => { + Error { + kind: ErrorKind::$kind, + source: Some($src.into()), + connect_info: None, + } + }; +} + +// We might change this... :shrug: +type PoolKey = (http::uri::Scheme, http::uri::Authority); + +enum TrySendError { + Retryable { error: Error, req: Request }, + Nope(Error), +} + +/// A `Future` that will resolve to an HTTP Response. +/// +/// This is returned by `Client::request` (and `Client::get`). +#[must_use = "futures do nothing unless polled"] +pub struct ResponseFuture { + inner: SyncWrapper< + Pin, Error>> + Send>>, + >, +} + +// ===== impl Client ===== + +impl Client<(), ()> { + /// Create a builder to configure a new `Client`. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tokio")] + /// # fn run () { + /// use std::time::Duration; + /// use hyper_util::client::legacy::Client; + /// use hyper_util::rt::TokioExecutor; + /// + /// let client = Client::builder(TokioExecutor::new()) + /// .pool_idle_timeout(Duration::from_secs(30)) + /// .http2_only(true) + /// .build_http(); + /// # let infer: Client<_, http_body_util::Full> = client; + /// # drop(infer); + /// # } + /// # fn main() {} + /// ``` + pub fn builder(executor: E) -> Builder + where + E: hyper::rt::Executor + Send + Sync + Clone + 'static, + { + Builder::new(executor) + } +} + +impl Client +where + C: Connect + Clone + Send + Sync + 'static, + B: Body + Send + 'static + Unpin, + B::Data: Send, + B::Error: Into>, +{ + /// Send a `GET` request to the supplied `Uri`. + /// + /// # Note + /// + /// This requires that the `Body` type have a `Default` implementation. + /// It *should* return an "empty" version of itself, such that + /// `Body::is_end_stream` is `true`. + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tokio")] + /// # fn run () { + /// use hyper::Uri; + /// use hyper_util::client::legacy::Client; + /// use hyper_util::rt::TokioExecutor; + /// use bytes::Bytes; + /// use http_body_util::Full; + /// + /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); + /// + /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); + /// # } + /// # fn main() {} + /// ``` + pub fn get(&self, uri: Uri) -> ResponseFuture + where + B: Default, + { + let body = B::default(); + if !body.is_end_stream() { + warn!("default Body used for get() does not return true for is_end_stream"); + } + + let mut req = Request::new(body); + *req.uri_mut() = uri; + self.request(req) + } + + /// Send a constructed `Request` using this `Client`. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tokio")] + /// # fn run () { + /// use hyper::{Method, Request}; + /// use hyper_util::client::legacy::Client; + /// use http_body_util::Full; + /// use hyper_util::rt::TokioExecutor; + /// use bytes::Bytes; + /// + /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); + /// + /// let req: Request> = Request::builder() + /// .method(Method::POST) + /// .uri("http://httpbin.org/post") + /// .body(Full::from("Hallo!")) + /// .expect("request builder"); + /// + /// let future = client.request(req); + /// # } + /// # fn main() {} + /// ``` + pub fn request(&self, mut req: Request) -> ResponseFuture { + let is_http_connect = req.method() == Method::CONNECT; + match req.version() { + Version::HTTP_11 => (), + Version::HTTP_10 => { + if is_http_connect { + warn!("CONNECT is not allowed for HTTP/1.0"); + return ResponseFuture::new(future::err(e!(UserUnsupportedRequestMethod))); + } + } + Version::HTTP_2 => (), + // completely unsupported HTTP version (like HTTP/0.9)! 
+ other => return ResponseFuture::error_version(other), + }; + + let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { + Ok(s) => s, + Err(err) => { + return ResponseFuture::new(future::err(err)); + } + }; + + ResponseFuture::new(self.clone().send_request(req, pool_key)) + } + + async fn send_request( + self, + mut req: Request, + pool_key: PoolKey, + ) -> Result, Error> { + let uri = req.uri().clone(); + + loop { + req = match self.try_send_request(req, pool_key.clone()).await { + Ok(resp) => return Ok(resp), + Err(TrySendError::Nope(err)) => return Err(err), + Err(TrySendError::Retryable { mut req, error }) => { + if !self.config.retry_canceled_requests { + // if client disabled, don't retry + // a fresh connection means we definitely can't retry + return Err(error); + } + + trace!( + "unstarted request canceled, trying again (reason={:?})", + error + ); + *req.uri_mut() = uri.clone(); + req + } + } + } + } + + async fn try_send_request( + &self, + mut req: Request, + pool_key: PoolKey, + ) -> Result, TrySendError> { + let mut pooled = self + .connection_for(pool_key) + .await + // `connection_for` already retries checkout errors, so if + // it returns an error, there's not much else to retry + .map_err(TrySendError::Nope)?; + + req.extensions_mut() + .get_mut::() + .map(|conn| conn.set(&pooled.conn_info)); + + if pooled.is_http1() { + if req.version() == Version::HTTP_2 { + warn!("Connection is HTTP/1, but request requires HTTP/2"); + return Err(TrySendError::Nope( + e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()), + )); + } + + if self.config.set_host { + let uri = req.uri().clone(); + req.headers_mut().entry(HOST).or_insert_with(|| { + let hostname = uri.host().expect("authority implies host"); + if let Some(port) = get_non_default_port(&uri) { + let s = format!("{}:{}", hostname, port); + HeaderValue::from_str(&s) + } else { + HeaderValue::from_str(hostname) + } + .expect("uri host is valid header value") + }); + } + + 
// CONNECT always sends authority-form, so check it first... + if req.method() == Method::CONNECT { + authority_form(req.uri_mut()); + } else if pooled.conn_info.is_proxied { + absolute_form(req.uri_mut()); + } else { + origin_form(req.uri_mut()); + } + } else if req.method() == Method::CONNECT { + authority_form(req.uri_mut()); + } + + let mut res = match pooled.try_send_request(req).await { + Ok(res) => res, + Err(mut err) => { + return if let Some(req) = err.take_message() { + Err(TrySendError::Retryable { + error: e!(Canceled, err.into_error()) + .with_connect_info(pooled.conn_info.clone()), + req, + }) + } else { + Err(TrySendError::Nope( + e!(SendRequest, err.into_error()) + .with_connect_info(pooled.conn_info.clone()), + )) + } + } + }; + //.send_request_retryable(req) + //.map_err(ClientError::map_with_reused(pooled.is_reused())); + + // If the Connector included 'extra' info, add to Response... + if let Some(extra) = &pooled.conn_info.extra { + extra.set(res.extensions_mut()); + } + + // As of futures@0.1.21, there is a race condition in the mpsc + // channel, such that sending when the receiver is closing can + // result in the message being stuck inside the queue. It won't + // ever notify until the Sender side is dropped. + // + // To counteract this, we must check if our senders 'want' channel + // has been closed after having tried to send. If so, error out... + if pooled.is_closed() { + return Ok(res); + } + + // If pooled is HTTP/2, we can toss this reference immediately. + // + // when pooled is dropped, it will try to insert back into the + // pool. To delay that, spawn a future that completes once the + // sender is ready again. + // + // This *should* only be once the related `Connection` has polled + // for a new request to start. + // + // It won't be ready if there is a body to stream. 
+ if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { + drop(pooled); + } else if !res.body().is_end_stream() { + //let (delayed_tx, delayed_rx) = oneshot::channel::<()>(); + //res.body_mut().delayed_eof(delayed_rx); + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { + // At this point, `pooled` is dropped, and had a chance + // to insert into the pool (if conn was idle) + //drop(delayed_tx); + }); + + self.exec.execute(on_idle); + } else { + // There's no body to delay, but the connection isn't + // ready yet. Only re-insert when it's ready + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); + + self.exec.execute(on_idle); + } + + Ok(res) + } + + async fn connection_for( + &self, + pool_key: PoolKey, + ) -> Result, PoolKey>, Error> { + loop { + match self.one_connection_for(pool_key.clone()).await { + Ok(pooled) => return Ok(pooled), + Err(ClientConnectError::Normal(err)) => return Err(err), + Err(ClientConnectError::CheckoutIsClosed(reason)) => { + if !self.config.retry_canceled_requests { + return Err(e!(Connect, reason)); + } + + trace!( + "unstarted request canceled, trying again (reason={:?})", + reason, + ); + continue; + } + }; + } + } + + async fn one_connection_for( + &self, + pool_key: PoolKey, + ) -> Result, PoolKey>, ClientConnectError> { + // Return a single connection if pooling is not enabled + if !self.pool.is_enabled() { + return self + .connect_to(pool_key) + .await + .map_err(ClientConnectError::Normal); + } + + // This actually races 2 different futures to try to get a ready + // connection the fastest, and to reduce connection churn. + // + // - If the pool has an idle connection waiting, that's used + // immediately. + // - Otherwise, the Connector is asked to start connecting to + // the destination Uri. + // - Meanwhile, the pool Checkout is watching to see if any other + // request finishes and tries to insert an idle connection. 
+ // - If a new connection is started, but the Checkout wins after + // (an idle connection became available first), the started + // connection future is spawned into the runtime to complete, + // and then be inserted into the pool as an idle connection. + let checkout = self.pool.checkout(pool_key.clone()); + let connect = self.connect_to(pool_key); + let is_ver_h2 = self.config.ver == Ver::Http2; + + // The order of the `select` is depended on below... + + match future::select(checkout, connect).await { + // Checkout won, connect future may have been started or not. + // + // If it has, let it finish and insert back into the pool, + // so as to not waste the socket... + Either::Left((Ok(checked_out), connecting)) => { + // This depends on the `select` above having the correct + // order, such that if the checkout future were ready + // immediately, the connect future will never have been + // started. + // + // If it *wasn't* ready yet, then the connect future will + // have been started... + if connecting.started() { + let bg = connecting + .map_err(|err| { + trace!("background connect error: {}", err); + }) + .map(|_pooled| { + // dropping here should just place it in + // the Pool for us... + }); + // An execute error here isn't important, we're just trying + // to prevent a waste of a socket... + self.exec.execute(bg); + } + Ok(checked_out) + } + // Connect won, checkout can just be dropped. + Either::Right((Ok(connected), _checkout)) => Ok(connected), + // Either checkout or connect could get canceled: + // + // 1. Connect is canceled if this is HTTP/2 and there is + // an outstanding HTTP/2 connecting task. + // 2. Checkout is canceled if the pool cannot deliver an + // idle connection reliably. + // + // In both cases, we should just wait for the other future. 
+ Either::Left((Err(err), connecting)) => { + if err.is_canceled() { + connecting.await.map_err(ClientConnectError::Normal) + } else { + Err(ClientConnectError::Normal(e!(Connect, err))) + } + } + Either::Right((Err(err), checkout)) => { + if err.is_canceled() { + checkout.await.map_err(move |err| { + if is_ver_h2 && err.is_canceled() { + ClientConnectError::CheckoutIsClosed(err) + } else { + ClientConnectError::Normal(e!(Connect, err)) + } + }) + } else { + Err(ClientConnectError::Normal(err)) + } + } + } + } + + #[cfg(any(feature = "http1", feature = "http2"))] + fn connect_to( + &self, + pool_key: PoolKey, + ) -> impl Lazy, PoolKey>, Error>> + Send + Unpin + { + let executor = self.exec.clone(); + let pool = self.pool.clone(); + #[cfg(feature = "http1")] + let h1_builder = self.h1_builder.clone(); + #[cfg(feature = "http2")] + let h2_builder = self.h2_builder.clone(); + let ver = self.config.ver; + let is_ver_h2 = ver == Ver::Http2; + let connector = self.connector.clone(); + let dst = domain_as_uri(pool_key.clone()); + hyper_lazy(move || { + // Try to take a "connecting lock". + // + // If the pool_key is for HTTP/2, and there is already a + // connection being established, then this can't take a + // second lock. The "connect_to" future is Canceled. + let connecting = match pool.connecting(&pool_key, ver) { + Some(lock) => lock, + None => { + let canceled = e!(Canceled); + // TODO + //crate::Error::new_canceled().with("HTTP/2 connection in progress"); + return Either::Right(future::err(canceled)); + } + }; + Either::Left( + connector + .connect(super::connect::sealed::Internal, dst) + .map_err(|src| e!(Connect, src)) + .and_then(move |io| { + let connected = io.connected(); + // If ALPN is h2 and we aren't http2_only already, + // then we need to convert our pool checkout into + // a single HTTP2 one. 
+ let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { + match connecting.alpn_h2(&pool) { + Some(lock) => { + trace!("ALPN negotiated h2, updating pool"); + lock + } + None => { + // Another connection has already upgraded, + // the pool checkout should finish up for us. + let canceled = e!(Canceled, "ALPN upgraded to HTTP/2"); + return Either::Right(future::err(canceled)); + } + } + } else { + connecting + }; + + #[cfg_attr(not(feature = "http2"), allow(unused))] + let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; + + Either::Left(Box::pin(async move { + let tx = if is_h2 { + #[cfg(feature = "http2")] { + let (mut tx, conn) = + h2_builder.handshake(io).await.map_err(Error::tx)?; + + trace!( + "http2 handshake complete, spawning background dispatcher task" + ); + executor.execute( + conn.map_err(|e| debug!("client connection error: {}", e)) + .map(|_| ()), + ); + + // Wait for 'conn' to ready up before we + // declare this tx as usable + tx.ready().await.map_err(Error::tx)?; + PoolTx::Http2(tx) + } + #[cfg(not(feature = "http2"))] + panic!("http2 feature is not enabled"); + } else { + #[cfg(feature = "http1")] { + let (mut tx, conn) = + h1_builder.handshake(io).await.map_err(Error::tx)?; + + trace!( + "http1 handshake complete, spawning background dispatcher task" + ); + executor.execute( + conn.with_upgrades() + .map_err(|e| debug!("client connection error: {}", e)) + .map(|_| ()), + ); + + // Wait for 'conn' to ready up before we + // declare this tx as usable + tx.ready().await.map_err(Error::tx)?; + PoolTx::Http1(tx) + } + #[cfg(not(feature = "http1"))] { + panic!("http1 feature is not enabled"); + } + }; + + Ok(pool.pooled( + connecting, + PoolClient { + conn_info: connected, + tx, + }, + )) + })) + }), + ) + }) + } +} + +impl tower_service::Service> for Client +where + C: Connect + Clone + Send + Sync + 'static, + B: Body + Send + 'static + Unpin, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = Error; + type 
Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl tower_service::Service> for &'_ Client +where + C: Connect + Clone + Send + Sync + 'static, + B: Body + Send + 'static + Unpin, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl Clone for Client { + fn clone(&self) -> Client { + Client { + config: self.config, + exec: self.exec.clone(), + #[cfg(feature = "http1")] + h1_builder: self.h1_builder.clone(), + #[cfg(feature = "http2")] + h2_builder: self.h2_builder.clone(), + connector: self.connector.clone(), + pool: self.pool.clone(), + } + } +} + +impl fmt::Debug for Client { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Client").finish() + } +} + +// ===== impl ResponseFuture ===== + +impl ResponseFuture { + fn new(value: F) -> Self + where + F: Future, Error>> + Send + 'static, + { + Self { + inner: SyncWrapper::new(Box::pin(value)), + } + } + + fn error_version(ver: Version) -> Self { + warn!("Request has unsupported version \"{:?}\"", ver); + ResponseFuture::new(Box::pin(future::err(e!(UserUnsupportedVersion)))) + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Future") + } +} + +impl Future for ResponseFuture { + type Output = Result, Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + self.inner.get_mut().as_mut().poll(cx) + } +} + +// ===== impl PoolClient ===== + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +struct PoolClient { + conn_info: Connected, + 
tx: PoolTx, +} + +enum PoolTx { + #[cfg(feature = "http1")] + Http1(hyper::client::conn::http1::SendRequest), + #[cfg(feature = "http2")] + Http2(hyper::client::conn::http2::SendRequest), +} + +impl PoolClient { + fn poll_ready( + &mut self, + #[allow(unused_variables)] cx: &mut task::Context<'_>, + ) -> Poll> { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed), + #[cfg(feature = "http2")] + PoolTx::Http2(_) => Poll::Ready(Ok(())), + } + } + + fn is_http1(&self) -> bool { + !self.is_http2() + } + + fn is_http2(&self) -> bool { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(_) => false, + #[cfg(feature = "http2")] + PoolTx::Http2(_) => true, + } + } + + fn is_poisoned(&self) -> bool { + self.conn_info.poisoned.poisoned() + } + + fn is_ready(&self) -> bool { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref tx) => tx.is_ready(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_ready(), + } + } + + fn is_closed(&self) -> bool { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref tx) => tx.is_closed(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_closed(), + } + } +} + +impl PoolClient { + fn try_send_request( + &mut self, + req: Request, + ) -> impl Future, ConnTrySendError>>> + where + B: Send, + { + #[cfg(all(feature = "http1", feature = "http2"))] + return match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)), + #[cfg(feature = "http2")] + PoolTx::Http2(ref mut tx) => Either::Right(tx.try_send_request(req)), + }; + + #[cfg(feature = "http1")] + #[cfg(not(feature = "http2"))] + return match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref mut tx) => tx.try_send_request(req), + }; + + #[cfg(not(feature = "http1"))] + #[cfg(feature = "http2")] + return match self.tx { + #[cfg(feature = "http2")] + PoolTx::Http2(ref mut tx) => tx.try_send_request(req), + }; + } + 
+ /* + //TODO: can we re-introduce this somehow? Or must people use tower::retry? + fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (Error, Option>)>> + where + B: Send, + { + match self.tx { + #[cfg(not(feature = "http2"))] + PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req), + #[cfg(feature = "http1")] + PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)), + #[cfg(feature = "http2")] + PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)), + } + } + */ +} + +impl pool::Poolable for PoolClient +where + B: Send + 'static, +{ + fn is_open(&self) -> bool { + !self.is_poisoned() && self.is_ready() + } + + fn reserve(self) -> pool::Reservation { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http1(tx), + }), + #[cfg(feature = "http2")] + PoolTx::Http2(tx) => { + let b = PoolClient { + conn_info: self.conn_info.clone(), + tx: PoolTx::Http2(tx.clone()), + }; + let a = PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http2(tx), + }; + pool::Reservation::Shared(a, b) + } + } + } + + fn can_share(&self) -> bool { + self.is_http2() + } +} + +enum ClientConnectError { + Normal(Error), + CheckoutIsClosed(pool::Error), +} + +fn origin_form(uri: &mut Uri) { + let path = match uri.path_and_query() { + Some(path) if path.as_str() != "/" => { + let mut parts = ::http::uri::Parts::default(); + parts.path_and_query = Some(path.clone()); + Uri::from_parts(parts).expect("path is valid uri") + } + _none_or_just_slash => { + debug_assert!(Uri::default() == "/"); + Uri::default() + } + }; + *uri = path +} + +fn absolute_form(uri: &mut Uri) { + debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); + debug_assert!( + uri.authority().is_some(), + "absolute_form needs an authority" + ); + // If the URI is to HTTPS, and the connector claimed to be a proxy, + // then it *should* have 
tunneled, and so we don't want to send + // absolute-form in that case. + if uri.scheme() == Some(&Scheme::HTTPS) { + origin_form(uri); + } +} + +fn authority_form(uri: &mut Uri) { + if let Some(path) = uri.path_and_query() { + // `https://hyper.rs` would parse with `/` path, don't + // annoy people about that... + if path != "/" { + warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); + } + } + *uri = match uri.authority() { + Some(auth) => { + let mut parts = ::http::uri::Parts::default(); + parts.authority = Some(auth.clone()); + Uri::from_parts(parts).expect("authority is valid") + } + None => { + unreachable!("authority_form with relative uri"); + } + }; +} + +fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> Result { + let uri_clone = uri.clone(); + match (uri_clone.scheme(), uri_clone.authority()) { + (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), + (None, Some(auth)) if is_http_connect => { + let scheme = match auth.port_u16() { + Some(443) => { + set_scheme(uri, Scheme::HTTPS); + Scheme::HTTPS + } + _ => { + set_scheme(uri, Scheme::HTTP); + Scheme::HTTP + } + }; + Ok((scheme, auth.clone())) + } + _ => { + debug!("Client requires absolute-form URIs, received: {:?}", uri); + Err(e!(UserAbsoluteUriRequired)) + } + } +} + +fn domain_as_uri((scheme, auth): PoolKey) -> Uri { + http::uri::Builder::new() + .scheme(scheme) + .authority(auth) + .path_and_query("/") + .build() + .expect("domain is valid Uri") +} + +fn set_scheme(uri: &mut Uri, scheme: Scheme) { + debug_assert!( + uri.scheme().is_none(), + "set_scheme expects no existing scheme" + ); + let old = std::mem::take(uri); + let mut parts: ::http::uri::Parts = old.into(); + parts.scheme = Some(scheme); + parts.path_and_query = Some("/".parse().expect("slash is a valid path")); + *uri = Uri::from_parts(parts).expect("scheme is valid"); +} + +fn get_non_default_port(uri: &Uri) -> Option> { + match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { + (Some(443), true) 
=> None, + (Some(80), false) => None, + _ => uri.port(), + } +} + +fn is_schema_secure(uri: &Uri) -> bool { + uri.scheme_str() + .map(|scheme_str| matches!(scheme_str, "wss" | "https")) + .unwrap_or_default() +} + +/// A builder to configure a new [`Client`](Client). +/// +/// # Example +/// +/// ``` +/// # #[cfg(feature = "tokio")] +/// # fn run () { +/// use std::time::Duration; +/// use hyper_util::client::legacy::Client; +/// use hyper_util::rt::TokioExecutor; +/// +/// let client = Client::builder(TokioExecutor::new()) +/// .pool_idle_timeout(Duration::from_secs(30)) +/// .http2_only(true) +/// .build_http(); +/// # let infer: Client<_, http_body_util::Full> = client; +/// # drop(infer); +/// # } +/// # fn main() {} +/// ``` +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[derive(Clone)] +pub struct Builder { + client_config: Config, + exec: Exec, + #[cfg(feature = "http1")] + h1_builder: hyper::client::conn::http1::Builder, + #[cfg(feature = "http2")] + h2_builder: hyper::client::conn::http2::Builder, + pool_config: pool::Config, + pool_timer: Option, +} + +impl Builder { + /// Construct a new Builder. + pub fn new(executor: E) -> Self + where + E: hyper::rt::Executor + Send + Sync + Clone + 'static, + { + let exec = Exec::new(executor); + Self { + client_config: Config { + retry_canceled_requests: true, + set_host: true, + ver: Ver::Auto, + }, + exec: exec.clone(), + #[cfg(feature = "http1")] + h1_builder: hyper::client::conn::http1::Builder::new(), + #[cfg(feature = "http2")] + h2_builder: hyper::client::conn::http2::Builder::new(exec), + pool_config: pool::Config { + idle_timeout: Some(Duration::from_secs(90)), + max_idle_per_host: usize::MAX, + }, + pool_timer: None, + } + } + /// Set an optional timeout for idle sockets being kept-alive. + /// A `Timer` is required for this to take effect. See `Builder::pool_timer` + /// + /// Pass `None` to disable timeout. + /// + /// Default is 90 seconds. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tokio")] + /// # fn run () { + /// use std::time::Duration; + /// use hyper_util::client::legacy::Client; + /// use hyper_util::rt::{TokioExecutor, TokioTimer}; + /// + /// let client = Client::builder(TokioExecutor::new()) + /// .pool_idle_timeout(Duration::from_secs(30)) + /// .pool_timer(TokioTimer::new()) + /// .build_http(); + /// + /// # let infer: Client<_, http_body_util::Full> = client; + /// # } + /// # fn main() {} + /// ``` + pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self + where + D: Into>, + { + self.pool_config.idle_timeout = val.into(); + self + } + + #[doc(hidden)] + #[deprecated(note = "renamed to `pool_max_idle_per_host`")] + pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + /// Sets the maximum idle connection per host allowed in the pool. + /// + /// Default is `usize::MAX` (no limit). + pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + // HTTP/1 options + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `http1_max_buf_size` option. + /// + /// Default is an adaptive read buffer. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { + self.h1_builder.read_buf_exact_size(Some(sz)); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `http1_read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. 
+ #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { + self.h1_builder.max_buf_size(max); + self + } + + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when + /// parsing. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { + self.h1_builder + .allow_spaces_after_header_name_in_responses(val); + self + } + + /// Set whether HTTP/1 connections will accept obsolete line folding for + /// header values. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] 
has + /// to say about it: + /// + /// > A server that receives an obs-fold in a request message that is not + /// > within a message/http container MUST either reject the message by + /// > sending a 400 (Bad Request), preferably with a representation + /// > explaining that obsolete line folding is unacceptable, or replace + /// > each received obs-fold with one or more SP octets prior to + /// > interpreting the field value or forwarding the message downstream. + /// + /// > A proxy or gateway that receives an obs-fold in a response message + /// > that is not within a message/http container MUST either discard the + /// > message and replace it with a 502 (Bad Gateway) response, preferably + /// > with a representation explaining that unacceptable line folding was + /// > received, or replace each received obs-fold with one or more SP + /// > octets prior to interpreting the field value or forwarding the + /// > message downstream. + /// + /// > A user agent that receives an obs-fold in a response message that is + /// > not within a message/http container MUST replace each received + /// > obs-fold with one or more SP octets prior to interpreting the field + /// > value. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self { + self.h1_builder + .allow_obsolete_multiline_headers_in_responses(val); + self + } + + /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. + /// + /// This mimics the behaviour of major browsers. You probably don't want this. 
+ /// You should only want this if you are implementing a proxy whose main + /// purpose is to sit in front of browsers whose users access arbitrary content + /// which may be malformed, and they expect everything that works without + /// the proxy to keep working with the proxy. + /// + /// This option will prevent Hyper's client from returning an error encountered + /// when parsing a header, except if the error was caused by the character NUL + /// (ASCII code 0), as Chrome specifically always reject those. + /// + /// The ignorable errors are: + /// * empty header names; + /// * characters that are not allowed in header names, except for `\0` and `\r`; + /// * when `allow_spaces_after_header_name_in_responses` is not enabled, + /// spaces and tabs between the header name and the colon; + /// * missing colon between header name and colon; + /// * characters that are not allowed in header values except for `\0` and `\r`. + /// + /// If an ignorable error is encountered, the parser tries to find the next + /// line in the input to resume parsing the rest of the headers. An error + /// will be emitted nonetheless if it finds `\0` or a lone `\r` while + /// looking for the next line. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_ignore_invalid_headers_in_responses(&mut self, val: bool) -> &mut Builder { + self.h1_builder.ignore_invalid_headers_in_responses(val); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. 
In this mode hyper will try to guess which + /// mode to use + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { + self.h1_builder.writev(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { + self.h1_builder.title_case_headers(val); + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { + self.h1_builder.preserve_header_case(val); + self + } + + /// Set the maximum number of headers. + /// + /// When a response is received, the parser will reserve a buffer to store headers for optimal + /// performance. + /// + /// If client receives more headers than the buffer size, the error "message header too large" + /// is returned. + /// + /// The headers is allocated on the stack by default, which has higher performance. 
After + /// setting this value, headers will be allocated in heap memory, that is, heap memory + /// allocation will occur for each response, and there will be a performance drop of about 5%. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is 100. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_headers(&mut self, val: usize) -> &mut Self { + self.h1_builder.max_headers(val); + self + } + + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http09_responses(&mut self, val: bool) -> &mut Self { + self.h1_builder.http09_responses(val); + self + } + + /// Set whether the connection **must** use HTTP/2. + /// + /// The destination must either allow HTTP2 Prior Knowledge, or the + /// `Connect` should be configured to do use ALPN to upgrade to `h2` + /// as part of the connection process. This will not make the `Client` + /// utilize ALPN by itself. + /// + /// Note that setting this to true prevents HTTP/1 from being allowed. + /// + /// Default is false. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_only(&mut self, val: bool) -> &mut Self { + self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; + self + } + + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.4.0, it is 20. + /// + /// See for more information. 
+ #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_pending_accept_reset_streams( + &mut self, + max: impl Into>, + ) -> &mut Self { + self.h2_builder.max_pending_accept_reset_streams(max.into()); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + self.h2_builder.initial_stream_window_size(sz.into()); + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_connection_window_size( + &mut self, + sz: impl Into>, + ) -> &mut Self { + self.h2_builder.initial_connection_window_size(sz.into()); + self + } + + /// Sets the initial maximum of locally initiated (send) streams. + /// + /// This value will be overwritten by the value included in the initial + /// SETTINGS frame received from the peer as part of a [connection preface]. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_max_send_streams( + &mut self, + initial: impl Into>, + ) -> &mut Self { + self.h2_builder.initial_max_send_streams(initial); + self + } + + /// Sets whether to use an adaptive flow control. 
+ /// + /// Enabling this will override the limits set in + /// `http2_initial_stream_window_size` and + /// `http2_initial_connection_window_size`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { + self.h2_builder.adaptive_window(enabled); + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + self.h2_builder.max_frame_size(sz); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + /// Requires the `tokio` cargo feature to be enabled. + #[cfg(feature = "tokio")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_interval( + &mut self, + interval: impl Into>, + ) -> &mut Self { + self.h2_builder.keep_alive_interval(interval); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + /// Requires the `tokio` cargo feature to be enabled. + #[cfg(feature = "tokio")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout(timeout); + self + } + + /// Sets whether HTTP2 keep-alive should apply while the connection is idle. 
+ /// + /// If disabled, keep-alive pings are only sent while there are open + /// request/responses streams. If enabled, pings are also sent when no + /// streams are active. Does nothing if `http2_keep_alive_interval` is + /// disabled. + /// + /// Default is `false`. + /// + /// # Cargo Feature + /// + /// Requires the `tokio` cargo feature to be enabled. + #[cfg(feature = "tokio")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { + self.h2_builder.keep_alive_while_idle(enabled); + self + } + + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. + /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_concurrent_reset_streams(max); + self + } + + /// Provide a timer to be used for h2 + /// + /// See the documentation of [`h2::client::Builder::timer`] for more + /// details. + /// + /// [`h2::client::Builder::timer`]: https://docs.rs/h2/client/struct.Builder.html#method.timer + pub fn timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Send + Sync + 'static, + { + #[cfg(feature = "http2")] + self.h2_builder.timer(timer); + self + } + + /// Provide a timer to be used for timeouts and intervals in connection pools. + pub fn pool_timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Clone + Send + Sync + 'static, + { + self.pool_timer = Some(timer::Timer::new(timer.clone())); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. 
+ /// + /// Default is currently 1MB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_send_buf_size(max); + self + } + + /// Set whether to retry requests that get disrupted before ever starting + /// to write. + /// + /// This means a request that is queued, and gets given an idle, reused + /// connection, and then encounters an error immediately as the idle + /// connection was found to be unusable. + /// + /// When this is set to `false`, the related `ResponseFuture` would instead + /// resolve to an `Error::Cancel`. + /// + /// Default is `true`. + #[inline] + pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { + self.client_config.retry_canceled_requests = val; + self + } + + /// Set whether to automatically add the `Host` header to requests. + /// + /// If true, and a request does not include a `Host` header, one will be + /// added automatically, derived from the authority of the `Uri`. + /// + /// Default is `true`. + #[inline] + pub fn set_host(&mut self, val: bool) -> &mut Self { + self.client_config.set_host = val; + self + } + + /// Build a client with this configuration and the default `HttpConnector`. + #[cfg(feature = "tokio")] + pub fn build_http(&self) -> Client + where + B: Body + Send, + B::Data: Send, + { + let mut connector = HttpConnector::new(); + if self.pool_config.is_enabled() { + connector.set_keepalive(self.pool_config.idle_timeout); + } + self.build(connector) + } + + /// Combine the configuration of this builder with a connector to create a `Client`. 
+ pub fn build(&self, connector: C) -> Client + where + C: Connect + Clone, + B: Body + Send, + B::Data: Send, + { + let exec = self.exec.clone(); + let timer = self.pool_timer.clone(); + Client { + config: self.client_config, + exec: exec.clone(), + #[cfg(feature = "http1")] + h1_builder: self.h1_builder.clone(), + #[cfg(feature = "http2")] + h2_builder: self.h2_builder.clone(), + connector, + pool: pool::Pool::new(self.pool_config, exec, timer), + } + } +} + +impl fmt::Debug for Builder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Builder") + .field("client_config", &self.client_config) + .field("pool_config", &self.pool_config) + .finish() + } +} + +// ==== impl Error ==== + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut f = f.debug_tuple("hyper_util::client::legacy::Error"); + f.field(&self.kind); + if let Some(ref cause) = self.source { + f.field(cause); + } + f.finish() + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "client error ({:?})", self.kind) + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + self.source.as_ref().map(|e| &**e as _) + } +} + +impl Error { + /// Returns true if this was an error from `Connect`. + pub fn is_connect(&self) -> bool { + matches!(self.kind, ErrorKind::Connect) + } + + /// Returns the info of the client connection on which this error occurred. 
+ #[cfg(any(feature = "http1", feature = "http2"))] + pub fn connect_info(&self) -> Option<&Connected> { + self.connect_info.as_ref() + } + + #[cfg(any(feature = "http1", feature = "http2"))] + fn with_connect_info(self, connect_info: Connected) -> Self { + Self { + connect_info: Some(connect_info), + ..self + } + } + fn is_canceled(&self) -> bool { + matches!(self.kind, ErrorKind::Canceled) + } + + fn tx(src: hyper::Error) -> Self { + e!(SendRequest, src) + } + + fn closed(src: hyper::Error) -> Self { + e!(ChannelClosed, src) + } +} diff --git a/.cargo-vendor/hyper-util/src/client/legacy/connect/capture.rs b/.cargo-vendor/hyper-util/src/client/legacy/connect/capture.rs new file mode 100644 index 0000000000..4fbe38496a --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/legacy/connect/capture.rs @@ -0,0 +1,191 @@ +use std::{ops::Deref, sync::Arc}; + +use http::Request; +use tokio::sync::watch; + +use super::Connected; + +/// [`CaptureConnection`] allows callers to capture [`Connected`] information +/// +/// To capture a connection for a request, use [`capture_connection`]. +#[derive(Debug, Clone)] +pub struct CaptureConnection { + rx: watch::Receiver>, +} + +/// Capture the connection for a given request +/// +/// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait. +/// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon +/// as the connection is established. +/// +/// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none. +/// +/// # Examples +/// +/// **Synchronous access**: +/// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been +/// established. This is ideal for situations where you are certain the connection has already +/// been established (e.g. after the response future has already completed). 
+/// ```rust +/// use hyper_util::client::legacy::connect::capture_connection; +/// let mut request = http::Request::builder() +/// .uri("http://foo.com") +/// .body(()) +/// .unwrap(); +/// +/// let captured_connection = capture_connection(&mut request); +/// // some time later after the request has been sent... +/// let connection_info = captured_connection.connection_metadata(); +/// println!("we are connected! {:?}", connection_info.as_ref()); +/// ``` +/// +/// **Asynchronous access**: +/// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future resolves as soon as the +/// connection is available. +/// +/// ```rust +/// # #[cfg(feature = "tokio")] +/// # async fn example() { +/// use hyper_util::client::legacy::connect::capture_connection; +/// use hyper_util::client::legacy::Client; +/// use hyper_util::rt::TokioExecutor; +/// use bytes::Bytes; +/// use http_body_util::Empty; +/// let mut request = http::Request::builder() +/// .uri("http://foo.com") +/// .body(Empty::::new()) +/// .unwrap(); +/// +/// let mut captured = capture_connection(&mut request); +/// tokio::task::spawn(async move { +/// let connection_info = captured.wait_for_connection_metadata().await; +/// println!("we are connected! 
{:?}", connection_info.as_ref()); +/// }); +/// +/// let client = Client::builder(TokioExecutor::new()).build_http(); +/// client.request(request).await.expect("request failed"); +/// # } +/// ``` +pub fn capture_connection(request: &mut Request) -> CaptureConnection { + let (tx, rx) = CaptureConnection::new(); + request.extensions_mut().insert(tx); + rx +} + +/// TxSide for [`CaptureConnection`] +/// +/// This is inserted into `Extensions` to allow Hyper to back channel connection info +#[derive(Clone)] +pub(crate) struct CaptureConnectionExtension { + tx: Arc>>, +} + +impl CaptureConnectionExtension { + pub(crate) fn set(&self, connected: &Connected) { + self.tx.send_replace(Some(connected.clone())); + } +} + +impl CaptureConnection { + /// Internal API to create the tx and rx half of [`CaptureConnection`] + pub(crate) fn new() -> (CaptureConnectionExtension, Self) { + let (tx, rx) = watch::channel(None); + ( + CaptureConnectionExtension { tx: Arc::new(tx) }, + CaptureConnection { rx }, + ) + } + + /// Retrieve the connection metadata, if available + pub fn connection_metadata(&self) -> impl Deref> + '_ { + self.rx.borrow() + } + + /// Wait for the connection to be established + /// + /// If a connection was established, this will always return `Some(...)`. If the request never + /// successfully connected (e.g. DNS resolution failure), this method will never return. 
+ pub async fn wait_for_connection_metadata( + &mut self, + ) -> impl Deref> + '_ { + if self.rx.borrow().is_some() { + return self.rx.borrow(); + } + let _ = self.rx.changed().await; + self.rx.borrow() + } +} + +#[cfg(all(test, not(miri)))] +mod test { + use super::*; + + #[test] + fn test_sync_capture_connection() { + let (tx, rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + tx.set(&Connected::new().proxy(true)); + assert_eq!( + rx.connection_metadata() + .as_ref() + .expect("connected should be set") + .is_proxied(), + true + ); + + // ensure it can be called multiple times + assert_eq!( + rx.connection_metadata() + .as_ref() + .expect("connected should be set") + .is_proxied(), + true + ); + } + + #[tokio::test] + async fn async_capture_connection() { + let (tx, mut rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + let test_task = tokio::spawn(async move { + assert_eq!( + rx.wait_for_connection_metadata() + .await + .as_ref() + .expect("connection should be set") + .is_proxied(), + true + ); + // can be awaited multiple times + assert!( + rx.wait_for_connection_metadata().await.is_some(), + "should be awaitable multiple times" + ); + + assert_eq!(rx.connection_metadata().is_some(), true); + }); + // can't be finished, we haven't set the connection yet + assert_eq!(test_task.is_finished(), false); + tx.set(&Connected::new().proxy(true)); + + assert!(test_task.await.is_ok()); + } + + #[tokio::test] + async fn capture_connection_sender_side_dropped() { + let (tx, mut rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + drop(tx); + assert!(rx.wait_for_connection_metadata().await.is_none()); + } +} diff --git a/.cargo-vendor/hyper-util/src/client/legacy/connect/dns.rs b/.cargo-vendor/hyper-util/src/client/legacy/connect/dns.rs new file mode 100644 index 
0000000000..e26b96cecc --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/legacy/connect/dns.rs @@ -0,0 +1,363 @@ +//! DNS Resolution used by the `HttpConnector`. +//! +//! This module contains: +//! +//! - A [`GaiResolver`](GaiResolver) that is the default resolver for the +//! `HttpConnector`. +//! - The `Name` type used as an argument to custom resolvers. +//! +//! # Resolvers are `Service`s +//! +//! A resolver is just a +//! `Service>`. +//! +//! A simple resolver that ignores the name and always returns a specific +//! address: +//! +//! ```rust,ignore +//! use std::{convert::Infallible, iter, net::SocketAddr}; +//! +//! let resolver = tower::service_fn(|_name| async { +//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) +//! }); +//! ``` +use std::error::Error; +use std::future::Future; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; +use std::pin::Pin; +use std::str::FromStr; +use std::task::{self, Poll}; +use std::{fmt, io, vec}; + +use tokio::task::JoinHandle; +use tower_service::Service; +use tracing::debug; + +pub(super) use self::sealed::Resolve; + +/// A domain name to resolve into IP addresses. +#[derive(Clone, Hash, Eq, PartialEq)] +pub struct Name { + host: Box, +} + +/// A resolver using blocking `getaddrinfo` calls in a threadpool. +#[derive(Clone)] +pub struct GaiResolver { + _priv: (), +} + +/// An iterator of IP addresses returned from `getaddrinfo`. +pub struct GaiAddrs { + inner: SocketAddrs, +} + +/// A future to resolve a name returned by `GaiResolver`. +pub struct GaiFuture { + inner: JoinHandle>, +} + +impl Name { + pub(super) fn new(host: Box) -> Name { + Name { host } + } + + /// View the hostname as a string slice. 
+ pub fn as_str(&self) -> &str { + &self.host + } +} + +impl fmt::Debug for Name { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.host, f) + } +} + +impl fmt::Display for Name { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.host, f) + } +} + +impl FromStr for Name { + type Err = InvalidNameError; + + fn from_str(host: &str) -> Result { + // Possibly add validation later + Ok(Name::new(host.into())) + } +} + +/// Error indicating a given string was not a valid domain name. +#[derive(Debug)] +pub struct InvalidNameError(()); + +impl fmt::Display for InvalidNameError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("Not a valid domain name") + } +} + +impl Error for InvalidNameError {} + +impl GaiResolver { + /// Construct a new `GaiResolver`. + pub fn new() -> Self { + GaiResolver { _priv: () } + } +} + +impl Service for GaiResolver { + type Response = GaiAddrs; + type Error = io::Error; + type Future = GaiFuture; + + fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, name: Name) -> Self::Future { + let blocking = tokio::task::spawn_blocking(move || { + debug!("resolving host={:?}", name.host); + (&*name.host, 0) + .to_socket_addrs() + .map(|i| SocketAddrs { iter: i }) + }); + + GaiFuture { inner: blocking } + } +} + +impl fmt::Debug for GaiResolver { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("GaiResolver") + } +} + +impl Future for GaiFuture { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + Pin::new(&mut self.inner).poll(cx).map(|res| match res { + Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), + Ok(Err(err)) => Err(err), + Err(join_err) => { + if join_err.is_cancelled() { + Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) + } else { + panic!("gai background task failed: {:?}", join_err) + } + } + }) + } +} + 
+impl fmt::Debug for GaiFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("GaiFuture") + } +} + +impl Drop for GaiFuture { + fn drop(&mut self) { + self.inner.abort(); + } +} + +impl Iterator for GaiAddrs { + type Item = SocketAddr; + + fn next(&mut self) -> Option { + self.inner.next() + } +} + +impl fmt::Debug for GaiAddrs { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("GaiAddrs") + } +} + +pub(super) struct SocketAddrs { + iter: vec::IntoIter, +} + +impl SocketAddrs { + pub(super) fn new(addrs: Vec) -> Self { + SocketAddrs { + iter: addrs.into_iter(), + } + } + + pub(super) fn try_parse(host: &str, port: u16) -> Option { + if let Ok(addr) = host.parse::() { + let addr = SocketAddrV4::new(addr, port); + return Some(SocketAddrs { + iter: vec![SocketAddr::V4(addr)].into_iter(), + }); + } + if let Ok(addr) = host.parse::() { + let addr = SocketAddrV6::new(addr, port, 0, 0); + return Some(SocketAddrs { + iter: vec![SocketAddr::V6(addr)].into_iter(), + }); + } + None + } + + #[inline] + fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { + SocketAddrs::new(self.iter.filter(predicate).collect()) + } + + pub(super) fn split_by_preference( + self, + local_addr_ipv4: Option, + local_addr_ipv6: Option, + ) -> (SocketAddrs, SocketAddrs) { + match (local_addr_ipv4, local_addr_ipv6) { + (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), + (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), + _ => { + let preferring_v6 = self + .iter + .as_slice() + .first() + .map(SocketAddr::is_ipv6) + .unwrap_or(false); + + let (preferred, fallback) = self + .iter + .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); + + (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) + } + } + } + + pub(super) fn is_empty(&self) -> bool { + self.iter.as_slice().is_empty() + } + + pub(super) fn len(&self) -> usize { + self.iter.as_slice().len() + } +} + 
+impl Iterator for SocketAddrs { + type Item = SocketAddr; + #[inline] + fn next(&mut self) -> Option { + self.iter.next() + } +} + +mod sealed { + use std::future::Future; + use std::task::{self, Poll}; + + use super::{Name, SocketAddr}; + use tower_service::Service; + + // "Trait alias" for `Service` + pub trait Resolve { + type Addrs: Iterator; + type Error: Into>; + type Future: Future>; + + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; + fn resolve(&mut self, name: Name) -> Self::Future; + } + + impl Resolve for S + where + S: Service, + S::Response: Iterator, + S::Error: Into>, + { + type Addrs = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + Service::poll_ready(self, cx) + } + + fn resolve(&mut self, name: Name) -> Self::Future { + Service::call(self, name) + } + } +} + +pub(super) async fn resolve(resolver: &mut R, name: Name) -> Result +where + R: Resolve, +{ + futures_util::future::poll_fn(|cx| resolver.poll_ready(cx)).await?; + resolver.resolve(name).await +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::{Ipv4Addr, Ipv6Addr}; + + #[test] + fn test_ip_addrs_split_by_preference() { + let ip_v4 = Ipv4Addr::new(127, 0, 0, 1); + let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + let v4_addr = (ip_v4, 80).into(); + let v6_addr = (ip_v6, 80).into(); + + let (mut preferred, mut fallback) = SocketAddrs { + iter: vec![v4_addr, v6_addr].into_iter(), + } + .split_by_preference(None, None); + assert!(preferred.next().unwrap().is_ipv4()); + assert!(fallback.next().unwrap().is_ipv6()); + + let (mut preferred, mut fallback) = SocketAddrs { + iter: vec![v6_addr, v4_addr].into_iter(), + } + .split_by_preference(None, None); + assert!(preferred.next().unwrap().is_ipv6()); + assert!(fallback.next().unwrap().is_ipv4()); + + let (mut preferred, mut fallback) = SocketAddrs { + iter: vec![v4_addr, v6_addr].into_iter(), + } + .split_by_preference(Some(ip_v4), 
Some(ip_v6)); + assert!(preferred.next().unwrap().is_ipv4()); + assert!(fallback.next().unwrap().is_ipv6()); + + let (mut preferred, mut fallback) = SocketAddrs { + iter: vec![v6_addr, v4_addr].into_iter(), + } + .split_by_preference(Some(ip_v4), Some(ip_v6)); + assert!(preferred.next().unwrap().is_ipv6()); + assert!(fallback.next().unwrap().is_ipv4()); + + let (mut preferred, fallback) = SocketAddrs { + iter: vec![v4_addr, v6_addr].into_iter(), + } + .split_by_preference(Some(ip_v4), None); + assert!(preferred.next().unwrap().is_ipv4()); + assert!(fallback.is_empty()); + + let (mut preferred, fallback) = SocketAddrs { + iter: vec![v4_addr, v6_addr].into_iter(), + } + .split_by_preference(None, Some(ip_v6)); + assert!(preferred.next().unwrap().is_ipv6()); + assert!(fallback.is_empty()); + } + + #[test] + fn test_name_from_str() { + const DOMAIN: &str = "test.example.com"; + let name = Name::from_str(DOMAIN).expect("Should be a valid domain"); + assert_eq!(name.as_str(), DOMAIN); + assert_eq!(name.to_string(), DOMAIN); + } +} diff --git a/.cargo-vendor/hyper-util/src/client/legacy/connect/http.rs b/.cargo-vendor/hyper-util/src/client/legacy/connect/http.rs new file mode 100644 index 0000000000..6af9e9877a --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/legacy/connect/http.rs @@ -0,0 +1,1230 @@ +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::io; +use std::marker::PhantomData; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{self, Poll}; +use std::time::Duration; + +use futures_util::future::Either; +use http::uri::{Scheme, Uri}; +use pin_project_lite::pin_project; +use socket2::TcpKeepalive; +use tokio::net::{TcpSocket, TcpStream}; +use tokio::time::Sleep; +use tracing::{debug, trace, warn}; + +use super::dns::{self, resolve, GaiResolver, Resolve}; +use super::{Connected, Connection}; +use crate::rt::TokioIo; + +/// A connector for the `http` scheme. 
+/// +/// Performs DNS resolution in a thread pool, and then connects over TCP. +/// +/// # Note +/// +/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes +/// transport information such as the remote socket address used. +#[derive(Clone)] +pub struct HttpConnector { + config: Arc, + resolver: R, +} + +/// Extra information about the transport when an HttpConnector is used. +/// +/// # Example +/// +/// ``` +/// # fn doc(res: http::Response<()>) { +/// use hyper_util::client::legacy::connect::HttpInfo; +/// +/// // res = http::Response +/// res +/// .extensions() +/// .get::() +/// .map(|info| { +/// println!("remote addr = {}", info.remote_addr()); +/// }); +/// # } +/// ``` +/// +/// # Note +/// +/// If a different connector is used besides [`HttpConnector`](HttpConnector), +/// this value will not exist in the extensions. Consult that specific +/// connector to see what "extra" information it might provide to responses. +#[derive(Clone, Debug)] +pub struct HttpInfo { + remote_addr: SocketAddr, + local_addr: SocketAddr, +} + +#[derive(Clone)] +struct Config { + connect_timeout: Option, + enforce_http: bool, + happy_eyeballs_timeout: Option, + tcp_keepalive_config: TcpKeepaliveConfig, + local_address_ipv4: Option, + local_address_ipv6: Option, + nodelay: bool, + reuse_address: bool, + send_buffer_size: Option, + recv_buffer_size: Option, + interface: Option, +} + +#[derive(Default, Debug, Clone, Copy)] +struct TcpKeepaliveConfig { + time: Option, + interval: Option, + retries: Option, +} + +impl TcpKeepaliveConfig { + /// Converts into a `socket2::TcpKeealive` if there is any keep alive configuration. 
+ fn into_tcpkeepalive(self) -> Option { + let mut dirty = false; + let mut ka = TcpKeepalive::new(); + if let Some(time) = self.time { + ka = ka.with_time(time); + dirty = true + } + if let Some(interval) = self.interval { + ka = Self::ka_with_interval(ka, interval, &mut dirty) + }; + if let Some(retries) = self.retries { + ka = Self::ka_with_retries(ka, retries, &mut dirty) + }; + if dirty { + Some(ka) + } else { + None + } + } + + #[cfg(not(any( + target_os = "aix", + target_os = "openbsd", + target_os = "redox", + target_os = "solaris" + )))] + fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_interval(interval) + } + + #[cfg(any( + target_os = "aix", + target_os = "openbsd", + target_os = "redox", + target_os = "solaris" + ))] + fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive { + ka // no-op as keepalive interval is not supported on this platform + } + + #[cfg(not(any( + target_os = "aix", + target_os = "openbsd", + target_os = "redox", + target_os = "solaris", + target_os = "windows" + )))] + fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_retries(retries) + } + + #[cfg(any( + target_os = "aix", + target_os = "openbsd", + target_os = "redox", + target_os = "solaris", + target_os = "windows" + ))] + fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive { + ka // no-op as keepalive retries is not supported on this platform + } +} + +// ===== impl HttpConnector ===== + +impl HttpConnector { + /// Construct a new HttpConnector. + pub fn new() -> HttpConnector { + HttpConnector::new_with_resolver(GaiResolver::new()) + } +} + +impl HttpConnector { + /// Construct a new HttpConnector. + /// + /// Takes a [`Resolver`](crate::client::connect::dns#resolvers-are-services) to handle DNS lookups. 
+ pub fn new_with_resolver(resolver: R) -> HttpConnector { + HttpConnector { + config: Arc::new(Config { + connect_timeout: None, + enforce_http: true, + happy_eyeballs_timeout: Some(Duration::from_millis(300)), + tcp_keepalive_config: TcpKeepaliveConfig::default(), + local_address_ipv4: None, + local_address_ipv6: None, + nodelay: false, + reuse_address: false, + send_buffer_size: None, + recv_buffer_size: None, + interface: None, + }), + resolver, + } + } + + /// Option to enforce all `Uri`s have the `http` scheme. + /// + /// Enabled by default. + #[inline] + pub fn enforce_http(&mut self, is_enforced: bool) { + self.config_mut().enforce_http = is_enforced; + } + + /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration + /// to remain idle before sending TCP keepalive probes. + /// + /// If `None`, keepalive is disabled. + /// + /// Default is `None`. + #[inline] + pub fn set_keepalive(&mut self, time: Option) { + self.config_mut().tcp_keepalive_config.time = time; + } + + /// Set the duration between two successive TCP keepalive retransmissions, + /// if acknowledgement to the previous keepalive transmission is not received. + #[inline] + pub fn set_keepalive_interval(&mut self, interval: Option) { + self.config_mut().tcp_keepalive_config.interval = interval; + } + + /// Set the number of retransmissions to be carried out before declaring that remote end is not available. + #[inline] + pub fn set_keepalive_retries(&mut self, retries: Option) { + self.config_mut().tcp_keepalive_config.retries = retries; + } + + /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. + /// + /// Default is `false`. + #[inline] + pub fn set_nodelay(&mut self, nodelay: bool) { + self.config_mut().nodelay = nodelay; + } + + /// Sets the value of the SO_SNDBUF option on the socket. 
+ #[inline] + pub fn set_send_buffer_size(&mut self, size: Option) { + self.config_mut().send_buffer_size = size; + } + + /// Sets the value of the SO_RCVBUF option on the socket. + #[inline] + pub fn set_recv_buffer_size(&mut self, size: Option) { + self.config_mut().recv_buffer_size = size; + } + + /// Set that all sockets are bound to the configured address before connection. + /// + /// If `None`, the sockets will not be bound. + /// + /// Default is `None`. + #[inline] + pub fn set_local_address(&mut self, addr: Option) { + let (v4, v6) = match addr { + Some(IpAddr::V4(a)) => (Some(a), None), + Some(IpAddr::V6(a)) => (None, Some(a)), + _ => (None, None), + }; + + let cfg = self.config_mut(); + + cfg.local_address_ipv4 = v4; + cfg.local_address_ipv6 = v6; + } + + /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's + /// preferences) before connection. + #[inline] + pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) { + let cfg = self.config_mut(); + + cfg.local_address_ipv4 = Some(addr_ipv4); + cfg.local_address_ipv6 = Some(addr_ipv6); + } + + /// Set the connect timeout. + /// + /// If a domain resolves to multiple IP addresses, the timeout will be + /// evenly divided across them. + /// + /// Default is `None`. + #[inline] + pub fn set_connect_timeout(&mut self, dur: Option) { + self.config_mut().connect_timeout = dur; + } + + /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. + /// + /// If hostname resolves to both IPv4 and IPv6 addresses and connection + /// cannot be established using preferred address family before timeout + /// elapses, then connector will in parallel attempt connection using other + /// address family. + /// + /// If `None`, parallel connection attempts are disabled. + /// + /// Default is 300 milliseconds. 
+ /// + /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 + #[inline] + pub fn set_happy_eyeballs_timeout(&mut self, dur: Option) { + self.config_mut().happy_eyeballs_timeout = dur; + } + + /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. + /// + /// Default is `false`. + #[inline] + pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self { + self.config_mut().reuse_address = reuse_address; + self + } + + /// Sets the value for the `SO_BINDTODEVICE` option on this socket. + /// + /// If a socket is bound to an interface, only packets received from that particular + /// interface are processed by the socket. Note that this only works for some socket + /// types, particularly AF_INET sockets. + /// + /// On Linux it can be used to specify a [VRF], but the binary needs + /// to either have `CAP_NET_RAW` or to be run as root. + /// + /// This function is only available on Android、Fuchsia and Linux. + /// + /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + #[inline] + pub fn set_interface>(&mut self, interface: S) -> &mut Self { + self.config_mut().interface = Some(interface.into()); + self + } + + // private + + fn config_mut(&mut self) -> &mut Config { + // If the are HttpConnector clones, this will clone the inner + // config. So mutating the config won't ever affect previous + // clones. + Arc::make_mut(&mut self.config) + } +} + +static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http"; +static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing"; +static INVALID_MISSING_HOST: &str = "invalid URL, host is missing"; + +// R: Debug required for now to allow adding it to debug output later... 
+impl fmt::Debug for HttpConnector { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("HttpConnector").finish() + } +} + +impl tower_service::Service for HttpConnector +where + R: Resolve + Clone + Send + Sync + 'static, + R::Future: Send, +{ + type Response = TokioIo; + type Error = ConnectError; + type Future = HttpConnecting; + + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + futures_util::ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?; + Poll::Ready(Ok(())) + } + + fn call(&mut self, dst: Uri) -> Self::Future { + let mut self_ = self.clone(); + HttpConnecting { + fut: Box::pin(async move { self_.call_async(dst).await }), + _marker: PhantomData, + } + } +} + +fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> { + trace!( + "Http::connect; scheme={:?}, host={:?}, port={:?}", + dst.scheme(), + dst.host(), + dst.port(), + ); + + if config.enforce_http { + if dst.scheme() != Some(&Scheme::HTTP) { + return Err(ConnectError { + msg: INVALID_NOT_HTTP.into(), + cause: None, + }); + } + } else if dst.scheme().is_none() { + return Err(ConnectError { + msg: INVALID_MISSING_SCHEME.into(), + cause: None, + }); + } + + let host = match dst.host() { + Some(s) => s, + None => { + return Err(ConnectError { + msg: INVALID_MISSING_HOST.into(), + cause: None, + }) + } + }; + let port = match dst.port() { + Some(port) => port.as_u16(), + None => { + if dst.scheme() == Some(&Scheme::HTTPS) { + 443 + } else { + 80 + } + } + }; + + Ok((host, port)) +} + +impl HttpConnector +where + R: Resolve, +{ + async fn call_async(&mut self, dst: Uri) -> Result, ConnectError> { + let config = &self.config; + + let (host, port) = get_host_port(config, &dst)?; + let host = host.trim_start_matches('[').trim_end_matches(']'); + + // If the host is already an IP addr (v4 or v6), + // skip resolving the dns and start connecting right away. 
+ let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { + addrs + } else { + let addrs = resolve(&mut self.resolver, dns::Name::new(host.into())) + .await + .map_err(ConnectError::dns)?; + let addrs = addrs + .map(|mut addr| { + addr.set_port(port); + addr + }) + .collect(); + dns::SocketAddrs::new(addrs) + }; + + let c = ConnectingTcp::new(addrs, config); + + let sock = c.connect().await?; + + if let Err(e) = sock.set_nodelay(config.nodelay) { + warn!("tcp set_nodelay error: {}", e); + } + + Ok(TokioIo::new(sock)) + } +} + +impl Connection for TcpStream { + fn connected(&self) -> Connected { + let connected = Connected::new(); + if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { + connected.extra(HttpInfo { + remote_addr, + local_addr, + }) + } else { + connected + } + } +} + +// Implement `Connection` for generic `TokioIo` so that external crates can +// implement their own `HttpConnector` with `TokioIo`. +impl Connection for TokioIo +where + T: Connection, +{ + fn connected(&self) -> Connected { + self.inner().connected() + } +} + +impl HttpInfo { + /// Get the remote address of the transport used. + pub fn remote_addr(&self) -> SocketAddr { + self.remote_addr + } + + /// Get the local address of the transport used. + pub fn local_addr(&self) -> SocketAddr { + self.local_addr + } +} + +pin_project! { + // Not publicly exported (so missing_docs doesn't trigger). + // + // We return this `Future` instead of the `Pin>` directly + // so that users don't rely on it fitting in a `Pin>` slot + // (and thus we can change the type in the future). 
+ #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct HttpConnecting { + #[pin] + fut: BoxConnecting, + _marker: PhantomData, + } +} + +type ConnectResult = Result, ConnectError>; +type BoxConnecting = Pin + Send>>; + +impl Future for HttpConnecting { + type Output = ConnectResult; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + self.project().fut.poll(cx) + } +} + +// Not publicly exported (so missing_docs doesn't trigger). +pub struct ConnectError { + msg: Box, + cause: Option>, +} + +impl ConnectError { + fn new(msg: S, cause: E) -> ConnectError + where + S: Into>, + E: Into>, + { + ConnectError { + msg: msg.into(), + cause: Some(cause.into()), + } + } + + fn dns(cause: E) -> ConnectError + where + E: Into>, + { + ConnectError::new("dns error", cause) + } + + fn m(msg: S) -> impl FnOnce(E) -> ConnectError + where + S: Into>, + E: Into>, + { + move |cause| ConnectError::new(msg, cause) + } +} + +impl fmt::Debug for ConnectError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ref cause) = self.cause { + f.debug_tuple("ConnectError") + .field(&self.msg) + .field(cause) + .finish() + } else { + self.msg.fmt(f) + } + } +} + +impl fmt::Display for ConnectError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.msg)?; + + if let Some(ref cause) = self.cause { + write!(f, ": {}", cause)?; + } + + Ok(()) + } +} + +impl StdError for ConnectError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + self.cause.as_ref().map(|e| &**e as _) + } +} + +struct ConnectingTcp<'a> { + preferred: ConnectingTcpRemote, + fallback: Option, + config: &'a Config, +} + +impl<'a> ConnectingTcp<'a> { + fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self { + if let Some(fallback_timeout) = config.happy_eyeballs_timeout { + let (preferred_addrs, fallback_addrs) = remote_addrs + .split_by_preference(config.local_address_ipv4, 
config.local_address_ipv6); + if fallback_addrs.is_empty() { + return ConnectingTcp { + preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), + fallback: None, + config, + }; + } + + ConnectingTcp { + preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), + fallback: Some(ConnectingTcpFallback { + delay: tokio::time::sleep(fallback_timeout), + remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout), + }), + config, + } + } else { + ConnectingTcp { + preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout), + fallback: None, + config, + } + } + } +} + +struct ConnectingTcpFallback { + delay: Sleep, + remote: ConnectingTcpRemote, +} + +struct ConnectingTcpRemote { + addrs: dns::SocketAddrs, + connect_timeout: Option, +} + +impl ConnectingTcpRemote { + fn new(addrs: dns::SocketAddrs, connect_timeout: Option) -> Self { + let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32)); + + Self { + addrs, + connect_timeout, + } + } +} + +impl ConnectingTcpRemote { + async fn connect(&mut self, config: &Config) -> Result { + let mut err = None; + for addr in &mut self.addrs { + debug!("connecting to {}", addr); + match connect(&addr, config, self.connect_timeout)?.await { + Ok(tcp) => { + debug!("connected to {}", addr); + return Ok(tcp); + } + Err(e) => { + trace!("connect error for {}: {:?}", addr, e); + err = Some(e); + } + } + } + + match err { + Some(e) => Err(e), + None => Err(ConnectError::new( + "tcp connect error", + std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"), + )), + } + } +} + +fn bind_local_address( + socket: &socket2::Socket, + dst_addr: &SocketAddr, + local_addr_ipv4: &Option, + local_addr_ipv6: &Option, +) -> io::Result<()> { + match (*dst_addr, local_addr_ipv4, local_addr_ipv6) { + (SocketAddr::V4(_), Some(addr), _) => { + socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; + } + (SocketAddr::V6(_), _, Some(addr)) 
=> { + socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; + } + _ => { + if cfg!(windows) { + // Windows requires a socket be bound before calling connect + let any: SocketAddr = match *dst_addr { + SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(), + SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), + }; + socket.bind(&any.into())?; + } + } + } + + Ok(()) +} + +fn connect( + addr: &SocketAddr, + config: &Config, + connect_timeout: Option, +) -> Result>, ConnectError> { + // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the + // keepalive timeout, it would be nice to use that instead of socket2, + // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance... + use socket2::{Domain, Protocol, Socket, Type}; + + let domain = Domain::for_address(*addr); + let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) + .map_err(ConnectError::m("tcp open error"))?; + + // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is + // responsible for ensuring O_NONBLOCK is set. + socket + .set_nonblocking(true) + .map_err(ConnectError::m("tcp set_nonblocking error"))?; + + if let Some(tcp_keepalive) = &config.tcp_keepalive_config.into_tcpkeepalive() { + if let Err(e) = socket.set_tcp_keepalive(tcp_keepalive) { + warn!("tcp set_keepalive error: {}", e); + } + } + + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + // That this only works for some socket types, particularly AF_INET sockets. + if let Some(interface) = &config.interface { + socket + .bind_device(Some(interface.as_bytes())) + .map_err(ConnectError::m("tcp bind interface error"))?; + } + + bind_local_address( + &socket, + addr, + &config.local_address_ipv4, + &config.local_address_ipv6, + ) + .map_err(ConnectError::m("tcp bind local error"))?; + + #[cfg(unix)] + let socket = unsafe { + // Safety: `from_raw_fd` is only safe to call if ownership of the raw + // file descriptor is transferred. 
Since we call `into_raw_fd` on the + // socket2 socket, it gives up ownership of the fd and will not close + // it, so this is safe. + use std::os::unix::io::{FromRawFd, IntoRawFd}; + TcpSocket::from_raw_fd(socket.into_raw_fd()) + }; + #[cfg(windows)] + let socket = unsafe { + // Safety: `from_raw_socket` is only safe to call if ownership of the raw + // Windows SOCKET is transferred. Since we call `into_raw_socket` on the + // socket2 socket, it gives up ownership of the SOCKET and will not close + // it, so this is safe. + use std::os::windows::io::{FromRawSocket, IntoRawSocket}; + TcpSocket::from_raw_socket(socket.into_raw_socket()) + }; + + if config.reuse_address { + if let Err(e) = socket.set_reuseaddr(true) { + warn!("tcp set_reuse_address error: {}", e); + } + } + + if let Some(size) = config.send_buffer_size { + if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(u32::MAX)) { + warn!("tcp set_buffer_size error: {}", e); + } + } + + if let Some(size) = config.recv_buffer_size { + if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(u32::MAX)) { + warn!("tcp set_recv_buffer_size error: {}", e); + } + } + + let connect = socket.connect(*addr); + Ok(async move { + match connect_timeout { + Some(dur) => match tokio::time::timeout(dur, connect).await { + Ok(Ok(s)) => Ok(s), + Ok(Err(e)) => Err(e), + Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)), + }, + None => connect.await, + } + .map_err(ConnectError::m("tcp connect error")) + }) +} + +impl ConnectingTcp<'_> { + async fn connect(mut self) -> Result { + match self.fallback { + None => self.preferred.connect(self.config).await, + Some(mut fallback) => { + let preferred_fut = self.preferred.connect(self.config); + futures_util::pin_mut!(preferred_fut); + + let fallback_fut = fallback.remote.connect(self.config); + futures_util::pin_mut!(fallback_fut); + + let fallback_delay = fallback.delay; + futures_util::pin_mut!(fallback_delay); + + let (result, future) = + match 
futures_util::future::select(preferred_fut, fallback_delay).await { + Either::Left((result, _fallback_delay)) => { + (result, Either::Right(fallback_fut)) + } + Either::Right(((), preferred_fut)) => { + // Delay is done, start polling both the preferred and the fallback + futures_util::future::select(preferred_fut, fallback_fut) + .await + .factor_first() + } + }; + + if result.is_err() { + // Fallback to the remaining future (could be preferred or fallback) + // if we get an error + future.await + } else { + result + } + } + } + } +} + +#[cfg(test)] +mod tests { + use std::io; + + use ::http::Uri; + + use crate::client::legacy::connect::http::TcpKeepaliveConfig; + + use super::super::sealed::{Connect, ConnectSvc}; + use super::{Config, ConnectError, HttpConnector}; + + async fn connect( + connector: C, + dst: Uri, + ) -> Result<::Connection, ::Error> + where + C: Connect, + { + connector.connect(super::super::sealed::Internal, dst).await + } + + #[tokio::test] + #[cfg_attr(miri, ignore)] + async fn test_errors_enforce_http() { + let dst = "https://example.domain/foo/bar?baz".parse().unwrap(); + let connector = HttpConnector::new(); + + let err = connect(connector, dst).await.unwrap_err(); + assert_eq!(&*err.msg, super::INVALID_NOT_HTTP); + } + + #[cfg(any(target_os = "linux", target_os = "macos"))] + fn get_local_ips() -> (Option, Option) { + use std::net::{IpAddr, TcpListener}; + + let mut ip_v4 = None; + let mut ip_v6 = None; + + let ips = pnet_datalink::interfaces() + .into_iter() + .flat_map(|i| i.ips.into_iter().map(|n| n.ip())); + + for ip in ips { + match ip { + IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip), + IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip), + _ => (), + } + + if ip_v4.is_some() && ip_v6.is_some() { + break; + } + } + + (ip_v4, ip_v6) + } + + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + fn default_interface() -> Option { + pnet_datalink::interfaces() + 
.iter() + .find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty()) + .map(|e| e.name.clone()) + } + + #[tokio::test] + #[cfg_attr(miri, ignore)] + async fn test_errors_missing_scheme() { + let dst = "example.domain".parse().unwrap(); + let mut connector = HttpConnector::new(); + connector.enforce_http(false); + + let err = connect(connector, dst).await.unwrap_err(); + assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME); + } + + // NOTE: pnet crate that we use in this test doesn't compile on Windows + #[cfg(any(target_os = "linux", target_os = "macos"))] + #[cfg_attr(miri, ignore)] + #[tokio::test] + async fn local_address() { + use std::net::{IpAddr, TcpListener}; + + let (bind_ip_v4, bind_ip_v6) = get_local_ips(); + let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); + let port = server4.local_addr().unwrap().port(); + let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap(); + + let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move { + let mut connector = HttpConnector::new(); + + match (bind_ip_v4, bind_ip_v6) { + (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6), + (Some(v4), None) => connector.set_local_address(Some(v4.into())), + (None, Some(v6)) => connector.set_local_address(Some(v6.into())), + _ => unreachable!(), + } + + connect(connector, dst.parse().unwrap()).await.unwrap(); + + let (_, client_addr) = server.accept().unwrap(); + + assert_eq!(client_addr.ip(), expected_ip); + }; + + if let Some(ip) = bind_ip_v4 { + assert_client_ip(format!("http://127.0.0.1:{}", port), server4, ip.into()).await; + } + + if let Some(ip) = bind_ip_v6 { + assert_client_ip(format!("http://[::1]:{}", port), server6, ip.into()).await; + } + } + + // NOTE: pnet crate that we use in this test doesn't compile on Windows + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + #[tokio::test] + #[ignore = "setting `SO_BINDTODEVICE` requires the `CAP_NET_RAW` capability (works when 
running as root)"] + async fn interface() { + use socket2::{Domain, Protocol, Socket, Type}; + use std::net::TcpListener; + + let interface: Option = default_interface(); + + let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); + let port = server4.local_addr().unwrap().port(); + + let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap(); + + let assert_interface_name = + |dst: String, + server: TcpListener, + bind_iface: Option, + expected_interface: Option| async move { + let mut connector = HttpConnector::new(); + if let Some(iface) = bind_iface { + connector.set_interface(iface); + } + + connect(connector, dst.parse().unwrap()).await.unwrap(); + let domain = Domain::for_address(server.local_addr().unwrap()); + let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).unwrap(); + + assert_eq!( + socket.device().unwrap().as_deref(), + expected_interface.as_deref().map(|val| val.as_bytes()) + ); + }; + + assert_interface_name( + format!("http://127.0.0.1:{}", port), + server4, + interface.clone(), + interface.clone(), + ) + .await; + assert_interface_name( + format!("http://[::1]:{}", port), + server6, + interface.clone(), + interface.clone(), + ) + .await; + } + + #[test] + #[ignore] // TODO + #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)] + fn client_happy_eyeballs() { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener}; + use std::time::{Duration, Instant}; + + use super::dns; + use super::ConnectingTcp; + + let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server4.local_addr().unwrap(); + let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap(); + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + + let local_timeout = Duration::default(); + let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1; + let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1; + let fallback_timeout = 
std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout) + + Duration::from_millis(250); + + let scenarios = &[ + // Fast primary, without fallback. + (&[local_ipv4_addr()][..], 4, local_timeout, false), + (&[local_ipv6_addr()][..], 6, local_timeout, false), + // Fast primary, with (unused) fallback. + ( + &[local_ipv4_addr(), local_ipv6_addr()][..], + 4, + local_timeout, + false, + ), + ( + &[local_ipv6_addr(), local_ipv4_addr()][..], + 6, + local_timeout, + false, + ), + // Unreachable + fast primary, without fallback. + ( + &[unreachable_ipv4_addr(), local_ipv4_addr()][..], + 4, + unreachable_v4_timeout, + false, + ), + ( + &[unreachable_ipv6_addr(), local_ipv6_addr()][..], + 6, + unreachable_v6_timeout, + false, + ), + // Unreachable + fast primary, with (unused) fallback. + ( + &[ + unreachable_ipv4_addr(), + local_ipv4_addr(), + local_ipv6_addr(), + ][..], + 4, + unreachable_v4_timeout, + false, + ), + ( + &[ + unreachable_ipv6_addr(), + local_ipv6_addr(), + local_ipv4_addr(), + ][..], + 6, + unreachable_v6_timeout, + true, + ), + // Slow primary, with (used) fallback. + ( + &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..], + 6, + fallback_timeout, + false, + ), + ( + &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..], + 4, + fallback_timeout, + true, + ), + // Slow primary, with (used) unreachable + fast fallback. + ( + &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..], + 6, + fallback_timeout + unreachable_v6_timeout, + false, + ), + ( + &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..], + 4, + fallback_timeout + unreachable_v4_timeout, + true, + ), + ]; + + // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network. + // Otherwise, connection to "slow" IPv6 address will error-out immediately. 
+ let ipv6_accessible = measure_connect(slow_ipv6_addr()).0; + + for &(hosts, family, timeout, needs_ipv6_access) in scenarios { + if needs_ipv6_access && !ipv6_accessible { + continue; + } + + let (start, stream) = rt + .block_on(async move { + let addrs = hosts + .iter() + .map(|host| (host.clone(), addr.port()).into()) + .collect(); + let cfg = Config { + local_address_ipv4: None, + local_address_ipv6: None, + connect_timeout: None, + tcp_keepalive_config: TcpKeepaliveConfig::default(), + happy_eyeballs_timeout: Some(fallback_timeout), + nodelay: false, + reuse_address: false, + enforce_http: false, + send_buffer_size: None, + recv_buffer_size: None, + interface: None, + }; + let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg); + let start = Instant::now(); + Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?)) + }) + .unwrap(); + let res = if stream.peer_addr().unwrap().is_ipv4() { + 4 + } else { + 6 + }; + let duration = start.elapsed(); + + // Allow actual duration to be +/- 150ms off. + let min_duration = if timeout >= Duration::from_millis(150) { + timeout - Duration::from_millis(150) + } else { + Duration::default() + }; + let max_duration = timeout + Duration::from_millis(150); + + assert_eq!(res, family); + assert!(duration >= min_duration); + assert!(duration <= max_duration); + } + + fn local_ipv4_addr() -> IpAddr { + Ipv4Addr::new(127, 0, 0, 1).into() + } + + fn local_ipv6_addr() -> IpAddr { + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into() + } + + fn unreachable_ipv4_addr() -> IpAddr { + Ipv4Addr::new(127, 0, 0, 2).into() + } + + fn unreachable_ipv6_addr() -> IpAddr { + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into() + } + + fn slow_ipv4_addr() -> IpAddr { + // RFC 6890 reserved IPv4 address. + Ipv4Addr::new(198, 18, 0, 25).into() + } + + fn slow_ipv6_addr() -> IpAddr { + // RFC 6890 reserved IPv6 address. 
+ Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into() + } + + fn measure_connect(addr: IpAddr) -> (bool, Duration) { + let start = Instant::now(); + let result = + std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1)); + + let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut; + let duration = start.elapsed(); + (reachable, duration) + } + } + + use std::time::Duration; + + #[test] + fn no_tcp_keepalive_config() { + assert!(TcpKeepaliveConfig::default().into_tcpkeepalive().is_none()); + } + + #[test] + fn tcp_keepalive_time_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.time = Some(Duration::from_secs(60)); + if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { + assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)")); + } else { + panic!("test failed"); + } + } + + #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris")))] + #[test] + fn tcp_keepalive_interval_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.interval = Some(Duration::from_secs(1)); + if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { + assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)")); + } else { + panic!("test failed"); + } + } + + #[cfg(not(any( + target_os = "openbsd", + target_os = "redox", + target_os = "solaris", + target_os = "windows" + )))] + #[test] + fn tcp_keepalive_retries_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.retries = Some(3); + if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { + assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)")); + } else { + panic!("test failed"); + } + } +} diff --git a/.cargo-vendor/hyper-util/src/client/legacy/connect/mod.rs b/.cargo-vendor/hyper-util/src/client/legacy/connect/mod.rs new file mode 100644 index 0000000000..e3369b5372 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/legacy/connect/mod.rs @@ -0,0 +1,442 @@ +//! Connectors used by the `Client`. 
+//! +//! This module contains: +//! +//! - A default [`HttpConnector`][] that does DNS resolution and establishes +//! connections over TCP. +//! - Types to build custom connectors. +//! +//! # Connectors +//! +//! A "connector" is a [`Service`][] that takes a [`Uri`][] destination, and +//! its `Response` is some type implementing [`Read`][], [`Write`][], +//! and [`Connection`][]. +//! +//! ## Custom Connectors +//! +//! A simple connector that ignores the `Uri` destination and always returns +//! a TCP connection to the same address could be written like this: +//! +//! ```rust,ignore +//! let connector = tower::service_fn(|_dst| async { +//! tokio::net::TcpStream::connect("127.0.0.1:1337") +//! }) +//! ``` +//! +//! Or, fully written out: +//! +//! ``` +//! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}}; +//! use http::Uri; +//! use tokio::net::TcpStream; +//! use tower::Service; +//! +//! #[derive(Clone)] +//! struct LocalConnector; +//! +//! impl Service for LocalConnector { +//! type Response = TcpStream; +//! type Error = std::io::Error; +//! // We can't "name" an `async` generated future. +//! type Future = Pin> + Send +//! >>; +//! +//! fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { +//! // This connector is always ready, but others might not be. +//! Poll::Ready(Ok(())) +//! } +//! +//! fn call(&mut self, _: Uri) -> Self::Future { +//! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337)))) +//! } +//! } +//! ``` +//! +//! It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a +//! better starting place to extend from. +//! +//! [`HttpConnector`]: HttpConnector +//! [`Service`]: tower::Service +//! [`Uri`]: ::http::Uri +//! [`Read`]: hyper::rt::Read +//! [`Write`]: hyper::rt::Write +//! 
[`Connection`]: Connection +use std::{ + fmt::{self, Formatter}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use ::http::Extensions; + +#[cfg(feature = "tokio")] +pub use self::http::{HttpConnector, HttpInfo}; + +#[cfg(feature = "tokio")] +pub mod dns; +#[cfg(feature = "tokio")] +mod http; + +pub(crate) mod capture; +pub use capture::{capture_connection, CaptureConnection}; + +pub use self::sealed::Connect; + +/// Describes a type returned by a connector. +pub trait Connection { + /// Return metadata describing the connection. + fn connected(&self) -> Connected; +} + +/// Extra information about the connected transport. +/// +/// This can be used to inform recipients about things like if ALPN +/// was used, or if connected to an HTTP proxy. +#[derive(Debug)] +pub struct Connected { + pub(super) alpn: Alpn, + pub(super) is_proxied: bool, + pub(super) extra: Option, + pub(super) poisoned: PoisonPill, +} + +#[derive(Clone)] +pub(crate) struct PoisonPill { + poisoned: Arc, +} + +impl fmt::Debug for PoisonPill { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + // print the address of the pill—this makes debugging issues much easier + write!( + f, + "PoisonPill@{:p} {{ poisoned: {} }}", + self.poisoned, + self.poisoned.load(Ordering::Relaxed) + ) + } +} + +impl PoisonPill { + pub(crate) fn healthy() -> Self { + Self { + poisoned: Arc::new(AtomicBool::new(false)), + } + } + pub(crate) fn poison(&self) { + self.poisoned.store(true, Ordering::Relaxed) + } + + pub(crate) fn poisoned(&self) -> bool { + self.poisoned.load(Ordering::Relaxed) + } +} + +pub(super) struct Extra(Box); + +#[derive(Clone, Copy, Debug, PartialEq)] +pub(super) enum Alpn { + H2, + None, +} + +impl Connected { + /// Create new `Connected` type with empty metadata. + pub fn new() -> Connected { + Connected { + alpn: Alpn::None, + is_proxied: false, + extra: None, + poisoned: PoisonPill::healthy(), + } + } + + /// Set whether the connected transport is to an HTTP proxy. 
+ /// + /// This setting will affect if HTTP/1 requests written on the transport + /// will have the request-target in absolute-form or origin-form: + /// + /// - When `proxy(false)`: + /// + /// ```http + /// GET /guide HTTP/1.1 + /// ``` + /// + /// - When `proxy(true)`: + /// + /// ```http + /// GET http://hyper.rs/guide HTTP/1.1 + /// ``` + /// + /// Default is `false`. + pub fn proxy(mut self, is_proxied: bool) -> Connected { + self.is_proxied = is_proxied; + self + } + + /// Determines if the connected transport is to an HTTP proxy. + pub fn is_proxied(&self) -> bool { + self.is_proxied + } + + /// Set extra connection information to be set in the extensions of every `Response`. + pub fn extra(mut self, extra: T) -> Connected { + if let Some(prev) = self.extra { + self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra)))); + } else { + self.extra = Some(Extra(Box::new(ExtraEnvelope(extra)))); + } + self + } + + /// Copies the extra connection information into an `Extensions` map. + pub fn get_extras(&self, extensions: &mut Extensions) { + if let Some(extra) = &self.extra { + extra.set(extensions); + } + } + + /// Set that the connected transport negotiated HTTP/2 as its next protocol. + pub fn negotiated_h2(mut self) -> Connected { + self.alpn = Alpn::H2; + self + } + + /// Determines if the connected transport negotiated HTTP/2 as its next protocol. + pub fn is_negotiated_h2(&self) -> bool { + self.alpn == Alpn::H2 + } + + /// Poison this connection + /// + /// A poisoned connection will not be reused for subsequent requests by the pool + pub fn poison(&self) { + self.poisoned.poison(); + tracing::debug!( + poison_pill = ?self.poisoned, "connection was poisoned. this connection will not be reused for subsequent requests" + ); + } + + // Don't public expose that `Connected` is `Clone`, unsure if we want to + // keep that contract... 
+ pub(super) fn clone(&self) -> Connected { + Connected { + alpn: self.alpn, + is_proxied: self.is_proxied, + extra: self.extra.clone(), + poisoned: self.poisoned.clone(), + } + } +} + +// ===== impl Extra ===== + +impl Extra { + pub(super) fn set(&self, res: &mut Extensions) { + self.0.set(res); + } +} + +impl Clone for Extra { + fn clone(&self) -> Extra { + Extra(self.0.clone_box()) + } +} + +impl fmt::Debug for Extra { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Extra").finish() + } +} + +trait ExtraInner: Send + Sync { + fn clone_box(&self) -> Box; + fn set(&self, res: &mut Extensions); +} + +// This indirection allows the `Connected` to have a type-erased "extra" value, +// while that type still knows its inner extra type. This allows the correct +// TypeId to be used when inserting into `res.extensions_mut()`. +#[derive(Clone)] +struct ExtraEnvelope(T); + +impl ExtraInner for ExtraEnvelope +where + T: Clone + Send + Sync + 'static, +{ + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn set(&self, res: &mut Extensions) { + res.insert(self.0.clone()); + } +} + +struct ExtraChain(Box, T); + +impl Clone for ExtraChain { + fn clone(&self) -> Self { + ExtraChain(self.0.clone_box(), self.1.clone()) + } +} + +impl ExtraInner for ExtraChain +where + T: Clone + Send + Sync + 'static, +{ + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn set(&self, res: &mut Extensions) { + self.0.set(res); + res.insert(self.1.clone()); + } +} + +pub(super) mod sealed { + use std::error::Error as StdError; + use std::future::Future; + + use ::http::Uri; + use hyper::rt::{Read, Write}; + + use super::Connection; + + /// Connect to a destination, returning an IO transport. + /// + /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the + /// ready connection. 
+ /// + /// # Trait Alias + /// + /// This is really just an *alias* for the `tower::Service` trait, with + /// additional bounds set for convenience *inside* hyper. You don't actually + /// implement this trait, but `tower::Service` instead. + // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot + // fit the `Connect` bounds because of the blanket impl for `Service`. + pub trait Connect: Sealed + Sized { + #[doc(hidden)] + type _Svc: ConnectSvc; + #[doc(hidden)] + fn connect(self, internal_only: Internal, dst: Uri) -> ::Future; + } + + pub trait ConnectSvc { + type Connection: Read + Write + Connection + Unpin + Send + 'static; + type Error: Into>; + type Future: Future> + Unpin + Send + 'static; + + fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future; + } + + impl Connect for S + where + S: tower_service::Service + Send + 'static, + S::Error: Into>, + S::Future: Unpin + Send, + T: Read + Write + Connection + Unpin + Send + 'static, + { + type _Svc = S; + + fn connect(self, _: Internal, dst: Uri) -> tower::util::Oneshot { + tower::util::Oneshot::new(self, dst) + } + } + + impl ConnectSvc for S + where + S: tower_service::Service + Send + 'static, + S::Error: Into>, + S::Future: Unpin + Send, + T: Read + Write + Connection + Unpin + Send + 'static, + { + type Connection = T; + type Error = S::Error; + type Future = tower::util::Oneshot; + + fn connect(self, _: Internal, dst: Uri) -> Self::Future { + tower::util::Oneshot::new(self, dst) + } + } + + impl Sealed for S + where + S: tower_service::Service + Send, + S::Error: Into>, + S::Future: Unpin + Send, + T: Read + Write + Connection + Unpin + Send + 'static, + { + } + + pub trait Sealed {} + #[allow(missing_debug_implementations)] + pub struct Internal; +} + +#[cfg(test)] +mod tests { + use super::Connected; + + #[derive(Clone, Debug, PartialEq)] + struct Ex1(usize); + + #[derive(Clone, Debug, PartialEq)] + struct Ex2(&'static str); + + #[derive(Clone, Debug, PartialEq)] + 
struct Ex3(&'static str); + + #[test] + fn test_connected_extra() { + let c1 = Connected::new().extra(Ex1(41)); + + let mut ex = ::http::Extensions::new(); + + assert_eq!(ex.get::(), None); + + c1.extra.as_ref().expect("c1 extra").set(&mut ex); + + assert_eq!(ex.get::(), Some(&Ex1(41))); + } + + #[test] + fn test_connected_extra_chain() { + // If a user composes connectors and at each stage, there's "extra" + // info to attach, it shouldn't override the previous extras. + + let c1 = Connected::new() + .extra(Ex1(45)) + .extra(Ex2("zoom")) + .extra(Ex3("pew pew")); + + let mut ex1 = ::http::Extensions::new(); + + assert_eq!(ex1.get::(), None); + assert_eq!(ex1.get::(), None); + assert_eq!(ex1.get::(), None); + + c1.extra.as_ref().expect("c1 extra").set(&mut ex1); + + assert_eq!(ex1.get::(), Some(&Ex1(45))); + assert_eq!(ex1.get::(), Some(&Ex2("zoom"))); + assert_eq!(ex1.get::(), Some(&Ex3("pew pew"))); + + // Just like extensions, inserting the same type overrides previous type. + let c2 = Connected::new() + .extra(Ex1(33)) + .extra(Ex2("hiccup")) + .extra(Ex1(99)); + + let mut ex2 = ::http::Extensions::new(); + + c2.extra.as_ref().expect("c2 extra").set(&mut ex2); + + assert_eq!(ex2.get::(), Some(&Ex1(99))); + assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); + } +} diff --git a/.cargo-vendor/hyper-util/src/client/legacy/mod.rs b/.cargo-vendor/hyper-util/src/client/legacy/mod.rs new file mode 100644 index 0000000000..1649ae7ea0 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/legacy/mod.rs @@ -0,0 +1,10 @@ +#[cfg(any(feature = "http1", feature = "http2"))] +mod client; +#[cfg(any(feature = "http1", feature = "http2"))] +pub use client::{Builder, Client, Error, ResponseFuture}; + +pub mod connect; +#[doc(hidden)] +// Publicly available, but just for legacy purposes. A better pool will be +// designed. 
+pub mod pool; diff --git a/.cargo-vendor/hyper-util/src/client/legacy/pool.rs b/.cargo-vendor/hyper-util/src/client/legacy/pool.rs new file mode 100644 index 0000000000..c57b7ff904 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/legacy/pool.rs @@ -0,0 +1,1093 @@ +#![allow(dead_code)] + +use std::collections::{HashMap, HashSet, VecDeque}; +use std::convert::Infallible; +use std::error::Error as StdError; +use std::fmt::{self, Debug}; +use std::future::Future; +use std::hash::Hash; +use std::ops::{Deref, DerefMut}; +use std::pin::Pin; +use std::sync::{Arc, Mutex, Weak}; +use std::task::{self, Poll}; + +use std::time::{Duration, Instant}; + +use futures_channel::oneshot; +use futures_util::ready; +use tracing::{debug, trace}; + +use hyper::rt::Sleep; +use hyper::rt::Timer as _; + +use crate::common::{exec, exec::Exec, timer::Timer}; + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +pub struct Pool { + // If the pool is disabled, this is None. + inner: Option>>>, +} + +// Before using a pooled connection, make sure the sender is not dead. +// +// This is a trait to allow the `client::pool::tests` to work for `i32`. +// +// See https://github.com/hyperium/hyper/issues/1429 +pub trait Poolable: Unpin + Send + Sized + 'static { + fn is_open(&self) -> bool; + /// Reserve this connection. + /// + /// Allows for HTTP/2 to return a shared reservation. + fn reserve(self) -> Reservation; + fn can_share(&self) -> bool; +} + +pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} + +impl Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} + +/// A marker to identify what version a pooled connection is. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[allow(dead_code)] +pub enum Ver { + Auto, + Http2, +} + +/// When checking out a pooled connection, it might be that the connection +/// only supports a single reservation, or it might be usable for many. 
+/// +/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be +/// used for multiple requests. +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +pub enum Reservation { + /// This connection could be used multiple times, the first one will be + /// reinserted into the `idle` pool, and the second will be given to + /// the `Checkout`. + #[cfg(feature = "http2")] + Shared(T, T), + /// This connection requires unique access. It will be returned after + /// use is complete. + Unique(T), +} + +/// Simple type alias in case the key type needs to be adjusted. +// pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc; + +struct PoolInner { + // A flag that a connection is being established, and the connection + // should be shared. This prevents making multiple HTTP/2 connections + // to the same host. + connecting: HashSet, + // These are internal Conns sitting in the event loop in the KeepAlive + // state, waiting to receive a new Request to send on the socket. + idle: HashMap>>, + max_idle_per_host: usize, + // These are outstanding Checkouts that are waiting for a socket to be + // able to send a Request one. This is used when "racing" for a new + // connection. + // + // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait + // for the Pool to receive an idle Conn. When a Conn becomes idle, + // this list is checked for any parked Checkouts, and tries to notify + // them that the Conn could be used instead of waiting for a brand new + // connection. + waiters: HashMap>>, + // A oneshot channel is used to allow the interval to be notified when + // the Pool completely drops. That way, the interval can cancel immediately. + idle_interval_ref: Option>, + exec: Exec, + timer: Option, + timeout: Option, +} + +// This is because `Weak::new()` *allocates* space for `T`, even if it +// doesn't need it! 
+struct WeakOpt(Option>); + +#[derive(Clone, Copy, Debug)] +pub struct Config { + pub idle_timeout: Option, + pub max_idle_per_host: usize, +} + +impl Config { + pub fn is_enabled(&self) -> bool { + self.max_idle_per_host > 0 + } +} + +impl Pool { + pub fn new(config: Config, executor: E, timer: Option) -> Pool + where + E: hyper::rt::Executor + Send + Sync + Clone + 'static, + M: hyper::rt::Timer + Send + Sync + Clone + 'static, + { + let exec = Exec::new(executor); + let timer = timer.map(|t| Timer::new(t)); + let inner = if config.is_enabled() { + Some(Arc::new(Mutex::new(PoolInner { + connecting: HashSet::new(), + idle: HashMap::new(), + idle_interval_ref: None, + max_idle_per_host: config.max_idle_per_host, + waiters: HashMap::new(), + exec, + timer, + timeout: config.idle_timeout, + }))) + } else { + None + }; + + Pool { inner } + } + + pub(crate) fn is_enabled(&self) -> bool { + self.inner.is_some() + } + + #[cfg(test)] + pub(super) fn no_timer(&self) { + // Prevent an actual interval from being created for this pool... + { + let mut inner = self.inner.as_ref().unwrap().lock().unwrap(); + assert!(inner.idle_interval_ref.is_none(), "timer already spawned"); + let (tx, _) = oneshot::channel(); + inner.idle_interval_ref = Some(tx); + } + } +} + +impl Pool { + /// Returns a `Checkout` which is a future that resolves if an idle + /// connection becomes available. + pub fn checkout(&self, key: K) -> Checkout { + Checkout { + key, + pool: self.clone(), + waiter: None, + } + } + + /// Ensure that there is only ever 1 connecting task for HTTP/2 + /// connections. This does nothing for HTTP/1. 
+ pub fn connecting(&self, key: &K, ver: Ver) -> Option> { + if ver == Ver::Http2 { + if let Some(ref enabled) = self.inner { + let mut inner = enabled.lock().unwrap(); + return if inner.connecting.insert(key.clone()) { + let connecting = Connecting { + key: key.clone(), + pool: WeakOpt::downgrade(enabled), + }; + Some(connecting) + } else { + trace!("HTTP/2 connecting already in progress for {:?}", key); + None + }; + } + } + + // else + Some(Connecting { + key: key.clone(), + // in HTTP/1's case, there is never a lock, so we don't + // need to do anything in Drop. + pool: WeakOpt::none(), + }) + } + + #[cfg(test)] + fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner> { + self.inner.as_ref().expect("enabled").lock().expect("lock") + } + + /* Used in client/tests.rs... + #[cfg(test)] + pub(super) fn h1_key(&self, s: &str) -> Key { + Arc::new(s.to_string()) + } + + #[cfg(test)] + pub(super) fn idle_count(&self, key: &Key) -> usize { + self + .locked() + .idle + .get(key) + .map(|list| list.len()) + .unwrap_or(0) + } + */ + + pub fn pooled( + &self, + #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting, + value: T, + ) -> Pooled { + let (value, pool_ref) = if let Some(ref enabled) = self.inner { + match value.reserve() { + #[cfg(feature = "http2")] + Reservation::Shared(to_insert, to_return) => { + let mut inner = enabled.lock().unwrap(); + inner.put(connecting.key.clone(), to_insert, enabled); + // Do this here instead of Drop for Connecting because we + // already have a lock, no need to lock the mutex twice. + inner.connected(&connecting.key); + // prevent the Drop of Connecting from repeating inner.connected() + connecting.pool = WeakOpt::none(); + + // Shared reservations don't need a reference to the pool, + // since the pool always keeps a copy. 
+ (to_return, WeakOpt::none()) + } + Reservation::Unique(value) => { + // Unique reservations must take a reference to the pool + // since they hope to reinsert once the reservation is + // completed + (value, WeakOpt::downgrade(enabled)) + } + } + } else { + // If pool is not enabled, skip all the things... + + // The Connecting should have had no pool ref + debug_assert!(connecting.pool.upgrade().is_none()); + + (value, WeakOpt::none()) + }; + Pooled { + key: connecting.key.clone(), + is_reused: false, + pool: pool_ref, + value: Some(value), + } + } + + fn reuse(&self, key: &K, value: T) -> Pooled { + debug!("reuse idle connection for {:?}", key); + // TODO: unhack this + // In Pool::pooled(), which is used for inserting brand new connections, + // there's some code that adjusts the pool reference taken depending + // on if the Reservation can be shared or is unique. By the time + // reuse() is called, the reservation has already been made, and + // we just have the final value, without knowledge of if this is + // unique or shared. So, the hack is to just assume Ver::Http2 means + // shared... :( + let mut pool_ref = WeakOpt::none(); + if !value.can_share() { + if let Some(ref enabled) = self.inner { + pool_ref = WeakOpt::downgrade(enabled); + } + } + + Pooled { + is_reused: true, + key: key.clone(), + pool: pool_ref, + value: Some(value), + } + } +} + +/// Pop off this list, looking for a usable connection that hasn't expired. +struct IdlePopper<'a, T, K> { + key: &'a K, + list: &'a mut Vec>, +} + +impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> { + fn pop(self, expiration: &Expiration) -> Option> { + while let Some(entry) = self.list.pop() { + // If the connection has been closed, or is older than our idle + // timeout, simply drop it and keep looking... 
+ if !entry.value.is_open() { + trace!("removing closed connection for {:?}", self.key); + continue; + } + // TODO: Actually, since the `idle` list is pushed to the end always, + // that would imply that if *this* entry is expired, then anything + // "earlier" in the list would *have* to be expired also... Right? + // + // In that case, we could just break out of the loop and drop the + // whole list... + if expiration.expires(entry.idle_at) { + trace!("removing expired connection for {:?}", self.key); + continue; + } + + let value = match entry.value.reserve() { + #[cfg(feature = "http2")] + Reservation::Shared(to_reinsert, to_checkout) => { + self.list.push(Idle { + idle_at: Instant::now(), + value: to_reinsert, + }); + to_checkout + } + Reservation::Unique(unique) => unique, + }; + + return Some(Idle { + idle_at: entry.idle_at, + value, + }); + } + + None + } +} + +impl PoolInner { + fn put(&mut self, key: K, value: T, __pool_ref: &Arc>>) { + if value.can_share() && self.idle.contains_key(&key) { + trace!("put; existing idle HTTP/2 connection for {:?}", key); + return; + } + trace!("put; add idle connection for {:?}", key); + let mut remove_waiters = false; + let mut value = Some(value); + if let Some(waiters) = self.waiters.get_mut(&key) { + while let Some(tx) = waiters.pop_front() { + if !tx.is_canceled() { + let reserved = value.take().expect("value already sent"); + let reserved = match reserved.reserve() { + #[cfg(feature = "http2")] + Reservation::Shared(to_keep, to_send) => { + value = Some(to_keep); + to_send + } + Reservation::Unique(uniq) => uniq, + }; + match tx.send(reserved) { + Ok(()) => { + if value.is_none() { + break; + } else { + continue; + } + } + Err(e) => { + value = Some(e); + } + } + } + + trace!("put; removing canceled waiter for {:?}", key); + } + remove_waiters = waiters.is_empty(); + } + if remove_waiters { + self.waiters.remove(&key); + } + + match value { + Some(value) => { + // borrow-check scope... 
+ { + let idle_list = self.idle.entry(key.clone()).or_default(); + if self.max_idle_per_host <= idle_list.len() { + trace!("max idle per host for {:?}, dropping connection", key); + return; + } + + debug!("pooling idle connection for {:?}", key); + idle_list.push(Idle { + value, + idle_at: Instant::now(), + }); + } + + self.spawn_idle_interval(__pool_ref); + } + None => trace!("put; found waiter for {:?}", key), + } + } + + /// A `Connecting` task is complete. Not necessarily successfully, + /// but the lock is going away, so clean up. + fn connected(&mut self, key: &K) { + let existed = self.connecting.remove(key); + debug_assert!(existed, "Connecting dropped, key not in pool.connecting"); + // cancel any waiters. if there are any, it's because + // this Connecting task didn't complete successfully. + // those waiters would never receive a connection. + self.waiters.remove(key); + } + + fn spawn_idle_interval(&mut self, pool_ref: &Arc>>) { + if self.idle_interval_ref.is_some() { + return; + } + let dur = if let Some(dur) = self.timeout { + dur + } else { + return; + }; + let timer = if let Some(timer) = self.timer.clone() { + timer + } else { + return; + }; + let (tx, rx) = oneshot::channel(); + self.idle_interval_ref = Some(tx); + + let interval = IdleTask { + timer: timer.clone(), + duration: dur, + deadline: Instant::now(), + fut: timer.sleep_until(Instant::now()), // ready at first tick + pool: WeakOpt::downgrade(pool_ref), + pool_drop_notifier: rx, + }; + + self.exec.execute(interval); + } +} + +impl PoolInner { + /// Any `FutureResponse`s that were created will have made a `Checkout`, + /// and possibly inserted into the pool that it is waiting for an idle + /// connection. If a user ever dropped that future, we need to clean out + /// those parked senders. 
+ fn clean_waiters(&mut self, key: &K) { + let mut remove_waiters = false; + if let Some(waiters) = self.waiters.get_mut(key) { + waiters.retain(|tx| !tx.is_canceled()); + remove_waiters = waiters.is_empty(); + } + if remove_waiters { + self.waiters.remove(key); + } + } +} + +impl PoolInner { + /// This should *only* be called by the IdleTask + fn clear_expired(&mut self) { + let dur = self.timeout.expect("interval assumes timeout"); + + let now = Instant::now(); + //self.last_idle_check_at = now; + + self.idle.retain(|key, values| { + values.retain(|entry| { + if !entry.value.is_open() { + trace!("idle interval evicting closed for {:?}", key); + return false; + } + + // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470. + if now.saturating_duration_since(entry.idle_at) > dur { + trace!("idle interval evicting expired for {:?}", key); + return false; + } + + // Otherwise, keep this value... + true + }); + + // returning false evicts this key/val + !values.is_empty() + }); + } +} + +impl Clone for Pool { + fn clone(&self) -> Pool { + Pool { + inner: self.inner.clone(), + } + } +} + +/// A wrapped poolable value that tries to reinsert to the Pool on Drop. +// Note: The bounds `T: Poolable` is needed for the Drop impl. 
+pub struct Pooled { + value: Option, + is_reused: bool, + key: K, + pool: WeakOpt>>, +} + +impl Pooled { + pub fn is_reused(&self) -> bool { + self.is_reused + } + + pub fn is_pool_enabled(&self) -> bool { + self.pool.0.is_some() + } + + fn as_ref(&self) -> &T { + self.value.as_ref().expect("not dropped") + } + + fn as_mut(&mut self) -> &mut T { + self.value.as_mut().expect("not dropped") + } +} + +impl Deref for Pooled { + type Target = T; + fn deref(&self) -> &T { + self.as_ref() + } +} + +impl DerefMut for Pooled { + fn deref_mut(&mut self) -> &mut T { + self.as_mut() + } +} + +impl Drop for Pooled { + fn drop(&mut self) { + if let Some(value) = self.value.take() { + if !value.is_open() { + // If we *already* know the connection is done here, + // it shouldn't be re-inserted back into the pool. + return; + } + + if let Some(pool) = self.pool.upgrade() { + if let Ok(mut inner) = pool.lock() { + inner.put(self.key.clone(), value, &pool); + } + } else if !value.can_share() { + trace!("pool dropped, dropping pooled ({:?})", self.key); + } + // Ver::Http2 is already in the Pool (or dead), so we wouldn't + // have an actual reference to the Pool. 
+ } + } +} + +impl fmt::Debug for Pooled { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Pooled").field("key", &self.key).finish() + } +} + +struct Idle { + idle_at: Instant, + value: T, +} + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +pub struct Checkout { + key: K, + pool: Pool, + waiter: Option>, +} + +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + PoolDisabled, + CheckoutNoLongerWanted, + CheckedOutClosedValue, +} + +impl Error { + pub(super) fn is_canceled(&self) -> bool { + matches!(self, Error::CheckedOutClosedValue) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + Error::PoolDisabled => "pool is disabled", + Error::CheckedOutClosedValue => "checked out connection was closed", + Error::CheckoutNoLongerWanted => "request was canceled", + }) + } +} + +impl StdError for Error {} + +impl Checkout { + fn poll_waiter( + &mut self, + cx: &mut task::Context<'_>, + ) -> Poll, Error>>> { + if let Some(mut rx) = self.waiter.take() { + match Pin::new(&mut rx).poll(cx) { + Poll::Ready(Ok(value)) => { + if value.is_open() { + Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) + } else { + Poll::Ready(Some(Err(Error::CheckedOutClosedValue))) + } + } + Poll::Pending => { + self.waiter = Some(rx); + Poll::Pending + } + Poll::Ready(Err(_canceled)) => { + Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted))) + } + } + } else { + Poll::Ready(None) + } + } + + fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> { + let entry = { + let mut inner = self.pool.inner.as_ref()?.lock().unwrap(); + let expiration = Expiration::new(inner.timeout); + let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| { + trace!("take? 
{:?}: expiration = {:?}", self.key, expiration.0); + // A block to end the mutable borrow on list, + // so the map below can check is_empty() + { + let popper = IdlePopper { + key: &self.key, + list, + }; + popper.pop(&expiration) + } + .map(|e| (e, list.is_empty())) + }); + + let (entry, empty) = if let Some((e, empty)) = maybe_entry { + (Some(e), empty) + } else { + // No entry found means nuke the list for sure. + (None, true) + }; + if empty { + //TODO: This could be done with the HashMap::entry API instead. + inner.idle.remove(&self.key); + } + + if entry.is_none() && self.waiter.is_none() { + let (tx, mut rx) = oneshot::channel(); + trace!("checkout waiting for idle connection: {:?}", self.key); + inner + .waiters + .entry(self.key.clone()) + .or_insert_with(VecDeque::new) + .push_back(tx); + + // register the waker with this oneshot + assert!(Pin::new(&mut rx).poll(cx).is_pending()); + self.waiter = Some(rx); + } + + entry + }; + + entry.map(|e| self.pool.reuse(&self.key, e.value)) + } +} + +impl Future for Checkout { + type Output = Result, Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + if let Some(pooled) = ready!(self.poll_waiter(cx)?) 
{ + return Poll::Ready(Ok(pooled)); + } + + if let Some(pooled) = self.checkout(cx) { + Poll::Ready(Ok(pooled)) + } else if !self.pool.is_enabled() { + Poll::Ready(Err(Error::PoolDisabled)) + } else { + // There's a new waiter, already registered in self.checkout() + debug_assert!(self.waiter.is_some()); + Poll::Pending + } + } +} + +impl Drop for Checkout { + fn drop(&mut self) { + if self.waiter.take().is_some() { + trace!("checkout dropped for {:?}", self.key); + if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) { + inner.clean_waiters(&self.key); + } + } + } +} + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +pub struct Connecting { + key: K, + pool: WeakOpt>>, +} + +impl Connecting { + pub fn alpn_h2(self, pool: &Pool) -> Option { + debug_assert!( + self.pool.0.is_none(), + "Connecting::alpn_h2 but already Http2" + ); + + pool.connecting(&self.key, Ver::Http2) + } +} + +impl Drop for Connecting { + fn drop(&mut self) { + if let Some(pool) = self.pool.upgrade() { + // No need to panic on drop, that could abort! + if let Ok(mut inner) = pool.lock() { + inner.connected(&self.key); + } + } + } +} + +struct Expiration(Option); + +impl Expiration { + fn new(dur: Option) -> Expiration { + Expiration(dur) + } + + fn expires(&self, instant: Instant) -> bool { + match self.0 { + // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470. + Some(timeout) => Instant::now().saturating_duration_since(instant) > timeout, + None => false, + } + } +} + +pin_project_lite::pin_project! { + struct IdleTask { + timer: Timer, + duration: Duration, + deadline: Instant, + fut: Pin>, + pool: WeakOpt>>, + // This allows the IdleTask to be notified as soon as the entire + // Pool is fully dropped, and shutdown. This channel is never sent on, + // but Err(Canceled) will be received when the Pool is dropped. 
+ #[pin] + pool_drop_notifier: oneshot::Receiver, + } +} + +impl Future for IdleTask { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + let mut this = self.project(); + loop { + match this.pool_drop_notifier.as_mut().poll(cx) { + Poll::Ready(Ok(n)) => match n {}, + Poll::Pending => (), + Poll::Ready(Err(_canceled)) => { + trace!("pool closed, canceling idle interval"); + return Poll::Ready(()); + } + } + + ready!(Pin::new(&mut this.fut).poll(cx)); + // Set this task to run after the next deadline + // If the poll missed the deadline by a lot, set the deadline + // from the current time instead + *this.deadline += *this.duration; + if *this.deadline < Instant::now() - Duration::from_millis(5) { + *this.deadline = Instant::now() + *this.duration; + } + *this.fut = this.timer.sleep_until(*this.deadline); + + if let Some(inner) = this.pool.upgrade() { + if let Ok(mut inner) = inner.lock() { + trace!("idle interval checking for expired"); + inner.clear_expired(); + continue; + } + } + return Poll::Ready(()); + } + } +} + +impl WeakOpt { + fn none() -> Self { + WeakOpt(None) + } + + fn downgrade(arc: &Arc) -> Self { + WeakOpt(Some(Arc::downgrade(arc))) + } + + fn upgrade(&self) -> Option> { + self.0.as_ref().and_then(Weak::upgrade) + } +} + +#[cfg(all(test, not(miri)))] +mod tests { + use std::fmt::Debug; + use std::future::Future; + use std::hash::Hash; + use std::pin::Pin; + use std::task::{self, Poll}; + use std::time::Duration; + + use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; + use crate::rt::{TokioExecutor, TokioTimer}; + + use crate::common::timer; + + #[derive(Clone, Debug, PartialEq, Eq, Hash)] + struct KeyImpl(http::uri::Scheme, http::uri::Authority); + + type KeyTuple = (http::uri::Scheme, http::uri::Authority); + + /// Test unique reservations. 
+ #[derive(Debug, PartialEq, Eq)] + struct Uniq(T); + + impl Poolable for Uniq { + fn is_open(&self) -> bool { + true + } + + fn reserve(self) -> Reservation { + Reservation::Unique(self) + } + + fn can_share(&self) -> bool { + false + } + } + + fn c(key: K) -> Connecting { + Connecting { + key, + pool: WeakOpt::none(), + } + } + + fn host_key(s: &str) -> KeyImpl { + KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key")) + } + + fn pool_no_timer() -> Pool { + pool_max_idle_no_timer(::std::usize::MAX) + } + + fn pool_max_idle_no_timer(max_idle: usize) -> Pool { + let pool = Pool::new( + super::Config { + idle_timeout: Some(Duration::from_millis(100)), + max_idle_per_host: max_idle, + }, + TokioExecutor::new(), + Option::::None, + ); + pool.no_timer(); + pool + } + + #[tokio::test] + async fn test_pool_checkout_smoke() { + let pool = pool_no_timer(); + let key = host_key("foo"); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + drop(pooled); + + match pool.checkout(key).await { + Ok(pooled) => assert_eq!(*pooled, Uniq(41)), + Err(_) => panic!("not ready"), + }; + } + + /// Helper to check if the future is ready after polling once. 
+ struct PollOnce<'a, F>(&'a mut F); + + impl Future for PollOnce<'_, F> + where + F: Future> + Unpin, + { + type Output = Option<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match Pin::new(&mut self.0).poll(cx) { + Poll::Ready(Ok(_)) => Poll::Ready(Some(())), + Poll::Ready(Err(_)) => Poll::Ready(Some(())), + Poll::Pending => Poll::Ready(None), + } + } + } + + #[tokio::test] + async fn test_pool_checkout_returns_none_if_expired() { + let pool = pool_no_timer(); + let key = host_key("foo"); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + drop(pooled); + tokio::time::sleep(pool.locked().timeout.unwrap()).await; + let mut checkout = pool.checkout(key); + let poll_once = PollOnce(&mut checkout); + let is_not_ready = poll_once.await.is_none(); + assert!(is_not_ready); + } + + #[tokio::test] + async fn test_pool_checkout_removes_expired() { + let pool = pool_no_timer(); + let key = host_key("foo"); + + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + + assert_eq!( + pool.locked().idle.get(&key).map(|entries| entries.len()), + Some(3) + ); + tokio::time::sleep(pool.locked().timeout.unwrap()).await; + + let mut checkout = pool.checkout(key.clone()); + let poll_once = PollOnce(&mut checkout); + // checkout.await should clean out the expired + poll_once.await; + assert!(pool.locked().idle.get(&key).is_none()); + } + + #[test] + fn test_pool_max_idle_per_host() { + let pool = pool_max_idle_no_timer(2); + let key = host_key("foo"); + + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + + // pooled and dropped 3, max_idle should only allow 2 + assert_eq!( + pool.locked().idle.get(&key).map(|entries| entries.len()), + Some(2) + ); + } + + #[tokio::test] + async fn test_pool_timer_removes_expired() { + let pool = Pool::new( + super::Config { + idle_timeout: Some(Duration::from_millis(10)), + 
max_idle_per_host: std::usize::MAX, + }, + TokioExecutor::new(), + Some(TokioTimer::new()), + ); + + let key = host_key("foo"); + + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + + assert_eq!( + pool.locked().idle.get(&key).map(|entries| entries.len()), + Some(3) + ); + + // Let the timer tick passed the expiration... + tokio::time::sleep(Duration::from_millis(30)).await; + // Yield so the Interval can reap... + tokio::task::yield_now().await; + + assert!(pool.locked().idle.get(&key).is_none()); + } + + #[tokio::test] + async fn test_pool_checkout_task_unparked() { + use futures_util::future::join; + use futures_util::FutureExt; + + let pool = pool_no_timer(); + let key = host_key("foo"); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + let checkout = join(pool.checkout(key), async { + // the checkout future will park first, + // and then this lazy future will be polled, which will insert + // the pooled back into the pool + // + // this test makes sure that doing so will unpark the checkout + drop(pooled); + }) + .map(|(entry, _)| entry); + + assert_eq!(*checkout.await.unwrap(), Uniq(41)); + } + + #[tokio::test] + async fn test_pool_checkout_drop_cleans_up_waiters() { + let pool = pool_no_timer::, KeyImpl>(); + let key = host_key("foo"); + + let mut checkout1 = pool.checkout(key.clone()); + let mut checkout2 = pool.checkout(key.clone()); + + let poll_once1 = PollOnce(&mut checkout1); + let poll_once2 = PollOnce(&mut checkout2); + + // first poll needed to get into Pool's parked + poll_once1.await; + assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); + poll_once2.await; + assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2); + + // on drop, clean up Pool + drop(checkout1); + assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); + + drop(checkout2); + assert!(pool.locked().waiters.get(&key).is_none()); + } + + #[derive(Debug)] + struct CanClose { + 
#[allow(unused)] + val: i32, + closed: bool, + } + + impl Poolable for CanClose { + fn is_open(&self) -> bool { + !self.closed + } + + fn reserve(self) -> Reservation { + Reservation::Unique(self) + } + + fn can_share(&self) -> bool { + false + } + } + + #[test] + fn pooled_drop_if_closed_doesnt_reinsert() { + let pool = pool_no_timer(); + let key = host_key("foo"); + pool.pooled( + c(key.clone()), + CanClose { + val: 57, + closed: true, + }, + ); + + assert!(!pool.locked().idle.contains_key(&key)); + } +} diff --git a/.cargo-vendor/hyper-util/src/client/mod.rs b/.cargo-vendor/hyper-util/src/client/mod.rs new file mode 100644 index 0000000000..e9215425d9 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/mod.rs @@ -0,0 +1,5 @@ +//! HTTP client utilities + +/// Legacy implementations of `connect` module and `Client` +#[cfg(feature = "client-legacy")] +pub mod legacy; diff --git a/.cargo-vendor/hyper-util/src/client/service.rs b/.cargo-vendor/hyper-util/src/client/service.rs new file mode 100644 index 0000000000..580fb10508 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/client/service.rs @@ -0,0 +1,8 @@ +struct ConnectingPool { + connector: C, + pool: P, +} + +struct PoolableSvc(S); + + diff --git a/.cargo-vendor/hyper-util/src/common/exec.rs b/.cargo-vendor/hyper-util/src/common/exec.rs new file mode 100644 index 0000000000..40860ee1ea --- /dev/null +++ b/.cargo-vendor/hyper-util/src/common/exec.rs @@ -0,0 +1,53 @@ +#![allow(dead_code)] + +use hyper::rt::Executor; +use std::fmt; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; + +pub(crate) type BoxSendFuture = Pin + Send>>; + +// Either the user provides an executor for background tasks, or we use +// `tokio::spawn`. 
+#[derive(Clone)] +pub(crate) enum Exec { + Executor(Arc + Send + Sync>), +} + +// ===== impl Exec ===== + +impl Exec { + pub(crate) fn new(inner: E) -> Self + where + E: Executor + Send + Sync + 'static, + { + Exec::Executor(Arc::new(inner)) + } + + pub(crate) fn execute(&self, fut: F) + where + F: Future + Send + 'static, + { + match *self { + Exec::Executor(ref e) => { + e.execute(Box::pin(fut)); + } + } + } +} + +impl fmt::Debug for Exec { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Exec").finish() + } +} + +impl hyper::rt::Executor for Exec +where + F: Future + Send + 'static, +{ + fn execute(&self, fut: F) { + Exec::execute(self, fut); + } +} diff --git a/.cargo-vendor/hyper-util/src/common/lazy.rs b/.cargo-vendor/hyper-util/src/common/lazy.rs new file mode 100644 index 0000000000..7ec09bbeb3 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/common/lazy.rs @@ -0,0 +1,78 @@ +use pin_project_lite::pin_project; + +use std::future::Future; +use std::pin::Pin; +use std::task::{self, Poll}; + +pub(crate) trait Started: Future { + fn started(&self) -> bool; +} + +pub(crate) fn lazy(func: F) -> Lazy +where + F: FnOnce() -> R, + R: Future + Unpin, +{ + Lazy { + inner: Inner::Init { func }, + } +} + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +pin_project! { + #[allow(missing_debug_implementations)] + pub(crate) struct Lazy { + #[pin] + inner: Inner, + } +} + +pin_project! { + #[project = InnerProj] + #[project_replace = InnerProjReplace] + enum Inner { + Init { func: F }, + Fut { #[pin] fut: R }, + Empty, + } +} + +impl Started for Lazy +where + F: FnOnce() -> R, + R: Future, +{ + fn started(&self) -> bool { + match self.inner { + Inner::Init { .. } => false, + Inner::Fut { .. 
} | Inner::Empty => true, + } + } +} + +impl Future for Lazy +where + F: FnOnce() -> R, + R: Future, +{ + type Output = R::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + let mut this = self.project(); + + if let InnerProj::Fut { fut } = this.inner.as_mut().project() { + return fut.poll(cx); + } + + match this.inner.as_mut().project_replace(Inner::Empty) { + InnerProjReplace::Init { func } => { + this.inner.set(Inner::Fut { fut: func() }); + if let InnerProj::Fut { fut } = this.inner.project() { + return fut.poll(cx); + } + unreachable!() + } + _ => unreachable!("lazy state wrong"), + } + } +} diff --git a/.cargo-vendor/hyper-util/src/common/mod.rs b/.cargo-vendor/hyper-util/src/common/mod.rs new file mode 100644 index 0000000000..63b8288573 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/common/mod.rs @@ -0,0 +1,17 @@ +#![allow(missing_docs)] + +pub(crate) mod exec; +#[cfg(feature = "client")] +mod lazy; +pub(crate) mod rewind; +#[cfg(feature = "client")] +mod sync; +pub(crate) mod timer; + +#[cfg(feature = "client")] +pub(crate) use exec::Exec; + +#[cfg(feature = "client")] +pub(crate) use lazy::{lazy, Started as Lazy}; +#[cfg(feature = "client")] +pub(crate) use sync::SyncWrapper; diff --git a/.cargo-vendor/hyper-util/src/common/rewind.rs b/.cargo-vendor/hyper-util/src/common/rewind.rs new file mode 100644 index 0000000000..c75464e45d --- /dev/null +++ b/.cargo-vendor/hyper-util/src/common/rewind.rs @@ -0,0 +1,187 @@ +use std::{cmp, io}; + +use bytes::{Buf, Bytes}; +use hyper::rt::{Read, ReadBufCursor, Write}; + +use std::{ + pin::Pin, + task::{self, Poll}, +}; + +/// Combine a buffer with an IO, rewinding reads to use the buffer. 
+#[derive(Debug)] +pub(crate) struct Rewind { + pre: Option, + inner: T, +} + +impl Rewind { + #[cfg(test)] + pub(crate) fn new(io: T) -> Self { + Rewind { + pre: None, + inner: io, + } + } + + #[allow(dead_code)] + pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self { + Rewind { + pre: Some(buf), + inner: io, + } + } + + #[cfg(test)] + pub(crate) fn rewind(&mut self, bs: Bytes) { + debug_assert!(self.pre.is_none()); + self.pre = Some(bs); + } + + // pub(crate) fn into_inner(self) -> (T, Bytes) { + // (self.inner, self.pre.unwrap_or_else(Bytes::new)) + // } + + // pub(crate) fn get_mut(&mut self) -> &mut T { + // &mut self.inner + // } +} + +impl Read for Rewind +where + T: Read + Unpin, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + mut buf: ReadBufCursor<'_>, + ) -> Poll> { + if let Some(mut prefix) = self.pre.take() { + // If there are no remaining bytes, let the bytes get dropped. + if !prefix.is_empty() { + let copy_len = cmp::min(prefix.len(), remaining(&mut buf)); + // TODO: There should be a way to do following two lines cleaner... + put_slice(&mut buf, &prefix[..copy_len]); + prefix.advance(copy_len); + // Put back what's left + if !prefix.is_empty() { + self.pre = Some(prefix); + } + + return Poll::Ready(Ok(())); + } + } + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +fn remaining(cursor: &mut ReadBufCursor<'_>) -> usize { + // SAFETY: + // We do not uninitialize any set bytes. + unsafe { cursor.as_mut().len() } +} + +// Copied from `ReadBufCursor::put_slice`. +// If that becomes public, we could ditch this. 
+fn put_slice(cursor: &mut ReadBufCursor<'_>, slice: &[u8]) { + assert!( + remaining(cursor) >= slice.len(), + "buf.len() must fit in remaining()" + ); + + let amt = slice.len(); + + // SAFETY: + // the length is asserted above + unsafe { + cursor.as_mut()[..amt] + .as_mut_ptr() + .cast::() + .copy_from_nonoverlapping(slice.as_ptr(), amt); + cursor.advance(amt); + } +} + +impl Write for Rewind +where + T: Write + Unpin, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write(cx, buf) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + bufs: &[io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_shutdown(cx) + } + + fn is_write_vectored(&self) -> bool { + self.inner.is_write_vectored() + } +} + +/* +#[cfg(test)] +mod tests { + use super::Rewind; + use bytes::Bytes; + use tokio::io::AsyncReadExt; + + #[cfg(not(miri))] + #[tokio::test] + async fn partial_rewind() { + let underlying = [104, 101, 108, 108, 111]; + + let mock = tokio_test::io::Builder::new().read(&underlying).build(); + + let mut stream = Rewind::new(mock); + + // Read off some bytes, ensure we filled o1 + let mut buf = [0; 2]; + stream.read_exact(&mut buf).await.expect("read1"); + + // Rewind the stream so that it is as if we never read in the first place. 
+ stream.rewind(Bytes::copy_from_slice(&buf[..])); + + let mut buf = [0; 5]; + stream.read_exact(&mut buf).await.expect("read1"); + + // At this point we should have read everything that was in the MockStream + assert_eq!(&buf, &underlying); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn full_rewind() { + let underlying = [104, 101, 108, 108, 111]; + + let mock = tokio_test::io::Builder::new().read(&underlying).build(); + + let mut stream = Rewind::new(mock); + + let mut buf = [0; 5]; + stream.read_exact(&mut buf).await.expect("read1"); + + // Rewind the stream so that it is as if we never read in the first place. + stream.rewind(Bytes::copy_from_slice(&buf[..])); + + let mut buf = [0; 5]; + stream.read_exact(&mut buf).await.expect("read1"); + } +} +*/ diff --git a/.cargo-vendor/hyper-util/src/common/sync.rs b/.cargo-vendor/hyper-util/src/common/sync.rs new file mode 100644 index 0000000000..2755fd0519 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/common/sync.rs @@ -0,0 +1,67 @@ +pub(crate) struct SyncWrapper(T); + +impl SyncWrapper { + /// Creates a new SyncWrapper containing the given value. + /// + /// # Examples + /// + /// ```ignore + /// use hyper::common::sync_wrapper::SyncWrapper; + /// + /// let wrapped = SyncWrapper::new(42); + /// ``` + pub(crate) fn new(value: T) -> Self { + Self(value) + } + + /// Acquires a reference to the protected value. + /// + /// This is safe because it requires an exclusive reference to the wrapper. Therefore this method + /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which + /// returns an error if another thread panicked while holding the lock. It is not recommended + /// to send an exclusive reference to a potentially damaged value to another thread for further + /// processing. 
+ /// + /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut + /// + /// # Examples + /// + /// ```ignore + /// use hyper::common::sync_wrapper::SyncWrapper; + /// + /// let mut wrapped = SyncWrapper::new(42); + /// let value = wrapped.get_mut(); + /// *value = 0; + /// assert_eq!(*wrapped.get_mut(), 0); + /// ``` + pub(crate) fn get_mut(&mut self) -> &mut T { + &mut self.0 + } + + /// Consumes this wrapper, returning the underlying data. + /// + /// This is safe because it requires ownership of the wrapper, aherefore this method will neither + /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which + /// returns an error if another thread panicked while holding the lock. It is not recommended + /// to send an exclusive reference to a potentially damaged value to another thread for further + /// processing. + /// + /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner + /// + /// # Examples + /// + /// ```ignore + /// use hyper::common::sync_wrapper::SyncWrapper; + /// + /// let mut wrapped = SyncWrapper::new(42); + /// assert_eq!(wrapped.into_inner(), 42); + /// ``` + #[allow(dead_code)] + pub(crate) fn into_inner(self) -> T { + self.0 + } +} + +// this is safe because the only operations permitted on this data structure require exclusive +// access or ownership +unsafe impl Sync for SyncWrapper {} diff --git a/.cargo-vendor/hyper-util/src/common/timer.rs b/.cargo-vendor/hyper-util/src/common/timer.rs new file mode 100644 index 0000000000..390be3b09c --- /dev/null +++ b/.cargo-vendor/hyper-util/src/common/timer.rs @@ -0,0 +1,38 @@ +#![allow(dead_code)] + +use std::fmt; +use std::pin::Pin; +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; + +use hyper::rt::Sleep; + +#[derive(Clone)] +pub(crate) struct Timer(Arc); + +// =====impl Timer===== +impl Timer { + pub(crate) fn new(inner: T) -> Self + where + T: hyper::rt::Timer + Send + 
Sync + 'static, + { + Self(Arc::new(inner)) + } +} + +impl fmt::Debug for Timer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Timer").finish() + } +} + +impl hyper::rt::Timer for Timer { + fn sleep(&self, duration: Duration) -> Pin> { + self.0.sleep(duration) + } + + fn sleep_until(&self, deadline: Instant) -> Pin> { + self.0.sleep_until(deadline) + } +} diff --git a/.cargo-vendor/hyper-util/src/error.rs b/.cargo-vendor/hyper-util/src/error.rs new file mode 100644 index 0000000000..d189449524 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/error.rs @@ -0,0 +1,14 @@ +/* +use std::error::Error; + +pub(crate) fn find<'a, E: Error + 'static>(top: &'a (dyn Error + 'static)) -> Option<&'a E> { + let mut err = Some(top); + while let Some(src) = err { + if src.is::() { + return src.downcast_ref(); + } + err = src.source(); + } + None +} +*/ diff --git a/.cargo-vendor/hyper-util/src/lib.rs b/.cargo-vendor/hyper-util/src/lib.rs new file mode 100644 index 0000000000..9df734cfda --- /dev/null +++ b/.cargo-vendor/hyper-util/src/lib.rs @@ -0,0 +1,18 @@ +#![deny(missing_docs)] +#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))] + +//! Utilities for working with hyper. +//! +//! This crate is less-stable than [`hyper`](https://docs.rs/hyper). However, +//! does respect Rust's semantic version regarding breaking changes. + +#[cfg(feature = "client")] +pub mod client; +mod common; +pub mod rt; +#[cfg(feature = "server")] +pub mod server; +#[cfg(feature = "service")] +pub mod service; + +mod error; diff --git a/.cargo-vendor/hyper-util/src/rt/mod.rs b/.cargo-vendor/hyper-util/src/rt/mod.rs new file mode 100644 index 0000000000..3ed86285ca --- /dev/null +++ b/.cargo-vendor/hyper-util/src/rt/mod.rs @@ -0,0 +1,7 @@ +//! 
Runtime utilities + +#[cfg(feature = "tokio")] +pub mod tokio; + +#[cfg(feature = "tokio")] +pub use self::tokio::{TokioExecutor, TokioIo, TokioTimer}; diff --git a/.cargo-vendor/hyper-util/src/rt/tokio.rs b/.cargo-vendor/hyper-util/src/rt/tokio.rs new file mode 100644 index 0000000000..e5f2eab747 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/rt/tokio.rs @@ -0,0 +1,276 @@ +#![allow(dead_code)] +//! Tokio IO integration for hyper +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + time::{Duration, Instant}, +}; + +use hyper::rt::{Executor, Sleep, Timer}; +use pin_project_lite::pin_project; + +/// Future executor that utilises `tokio` threads. +#[non_exhaustive] +#[derive(Default, Debug, Clone)] +pub struct TokioExecutor {} + +pin_project! { + /// A wrapper that implements Tokio's IO traits for an inner type that + /// implements hyper's IO traits, or vice versa (implements hyper's IO + /// traits for a type that implements Tokio's IO traits). + #[derive(Debug)] + pub struct TokioIo { + #[pin] + inner: T, + } +} + +/// A Timer that uses the tokio runtime. +#[non_exhaustive] +#[derive(Default, Clone, Debug)] +pub struct TokioTimer; + +// Use TokioSleep to get tokio::time::Sleep to implement Unpin. +// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html +pin_project! { + #[derive(Debug)] + struct TokioSleep { + #[pin] + inner: tokio::time::Sleep, + } +} + +// ===== impl TokioExecutor ===== + +impl Executor for TokioExecutor +where + Fut: Future + Send + 'static, + Fut::Output: Send + 'static, +{ + fn execute(&self, fut: Fut) { + tokio::spawn(fut); + } +} + +impl TokioExecutor { + /// Create new executor that relies on [`tokio::spawn`] to execute futures. + pub fn new() -> Self { + Self {} + } +} + +// ==== impl TokioIo ===== + +impl TokioIo { + /// Wrap a type implementing Tokio's or hyper's IO traits. + pub fn new(inner: T) -> Self { + Self { inner } + } + + /// Borrow the inner type. 
+ pub fn inner(&self) -> &T { + &self.inner + } + + /// Mut borrow the inner type. + pub fn inner_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Consume this wrapper and get the inner type. + pub fn into_inner(self) -> T { + self.inner + } +} + +impl hyper::rt::Read for TokioIo +where + T: tokio::io::AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut buf: hyper::rt::ReadBufCursor<'_>, + ) -> Poll> { + let n = unsafe { + let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut()); + match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) { + Poll::Ready(Ok(())) => tbuf.filled().len(), + other => return other, + } + }; + + unsafe { + buf.advance(n); + } + Poll::Ready(Ok(())) + } +} + +impl hyper::rt::Write for TokioIo +where + T: tokio::io::AsyncWrite, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + tokio::io::AsyncWrite::poll_flush(self.project().inner, cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) + } + + fn is_write_vectored(&self) -> bool { + tokio::io::AsyncWrite::is_write_vectored(&self.inner) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs) + } +} + +impl tokio::io::AsyncRead for TokioIo +where + T: hyper::rt::Read, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + tbuf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + //let init = tbuf.initialized().len(); + let filled = tbuf.filled().len(); + let sub_filled = unsafe { + let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut()); + + match 
hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) { + Poll::Ready(Ok(())) => buf.filled().len(), + other => return other, + } + }; + + let n_filled = filled + sub_filled; + // At least sub_filled bytes had to have been initialized. + let n_init = sub_filled; + unsafe { + tbuf.assume_init(n_init); + tbuf.set_filled(n_filled); + } + + Poll::Ready(Ok(())) + } +} + +impl tokio::io::AsyncWrite for TokioIo +where + T: hyper::rt::Write, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + hyper::rt::Write::poll_write(self.project().inner, cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + hyper::rt::Write::poll_flush(self.project().inner, cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + hyper::rt::Write::poll_shutdown(self.project().inner, cx) + } + + fn is_write_vectored(&self) -> bool { + hyper::rt::Write::is_write_vectored(&self.inner) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs) + } +} + +// ==== impl TokioTimer ===== + +impl Timer for TokioTimer { + fn sleep(&self, duration: Duration) -> Pin> { + Box::pin(TokioSleep { + inner: tokio::time::sleep(duration), + }) + } + + fn sleep_until(&self, deadline: Instant) -> Pin> { + Box::pin(TokioSleep { + inner: tokio::time::sleep_until(deadline.into()), + }) + } + + fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { + if let Some(sleep) = sleep.as_mut().downcast_mut_pin::() { + sleep.reset(new_deadline) + } + } +} + +impl TokioTimer { + /// Create a new TokioTimer + pub fn new() -> Self { + Self {} + } +} + +impl Future for TokioSleep { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.project().inner.poll(cx) + } +} + +impl Sleep for TokioSleep {} + +impl TokioSleep { + fn 
reset(self: Pin<&mut Self>, deadline: Instant) { + self.project().inner.as_mut().reset(deadline.into()); + } +} + +#[cfg(test)] +mod tests { + use crate::rt::TokioExecutor; + use hyper::rt::Executor; + use tokio::sync::oneshot; + + #[cfg(not(miri))] + #[tokio::test] + async fn simple_execute() -> Result<(), Box> { + let (tx, rx) = oneshot::channel(); + let executor = TokioExecutor::new(); + executor.execute(async move { + tx.send(()).unwrap(); + }); + rx.await.map_err(Into::into) + } +} diff --git a/.cargo-vendor/hyper-util/src/server/conn/auto.rs b/.cargo-vendor/hyper-util/src/server/conn/auto.rs new file mode 100644 index 0000000000..1351a802ab --- /dev/null +++ b/.cargo-vendor/hyper-util/src/server/conn/auto.rs @@ -0,0 +1,1207 @@ +//! Http1 or Http2 connection. + +use futures_util::ready; +use hyper::service::HttpService; +use std::future::Future; +use std::marker::PhantomPinned; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{error::Error as StdError, io, time::Duration}; + +use bytes::Bytes; +use http::{Request, Response}; +use http_body::Body; +use hyper::{ + body::Incoming, + rt::{Read, ReadBuf, Timer, Write}, + service::Service, +}; + +#[cfg(feature = "http1")] +use hyper::server::conn::http1; + +#[cfg(feature = "http2")] +use hyper::{rt::bounds::Http2ServerConnExec, server::conn::http2}; + +#[cfg(any(not(feature = "http2"), not(feature = "http1")))] +use std::marker::PhantomData; + +use pin_project_lite::pin_project; + +use crate::common::rewind::Rewind; + +type Error = Box; + +type Result = std::result::Result; + +const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + +/// Exactly equivalent to [`Http2ServerConnExec`]. +#[cfg(feature = "http2")] +pub trait HttpServerConnExec: Http2ServerConnExec {} + +#[cfg(feature = "http2")] +impl> HttpServerConnExec for T {} + +/// Exactly equivalent to [`Http2ServerConnExec`]. 
+#[cfg(not(feature = "http2"))] +pub trait HttpServerConnExec {} + +#[cfg(not(feature = "http2"))] +impl HttpServerConnExec for T {} + +/// Http1 or Http2 connection builder. +#[derive(Clone, Debug)] +pub struct Builder { + #[cfg(feature = "http1")] + http1: http1::Builder, + #[cfg(feature = "http2")] + http2: http2::Builder, + #[cfg(any(feature = "http1", feature = "http2"))] + version: Option, + #[cfg(not(feature = "http2"))] + _executor: E, +} + +impl Builder { + /// Create a new auto connection builder. + /// + /// `executor` parameter should be a type that implements + /// [`Executor`](hyper::rt::Executor) trait. + /// + /// # Example + /// + /// ``` + /// use hyper_util::{ + /// rt::TokioExecutor, + /// server::conn::auto, + /// }; + /// + /// auto::Builder::new(TokioExecutor::new()); + /// ``` + pub fn new(executor: E) -> Self { + Self { + #[cfg(feature = "http1")] + http1: http1::Builder::new(), + #[cfg(feature = "http2")] + http2: http2::Builder::new(executor), + #[cfg(any(feature = "http1", feature = "http2"))] + version: None, + #[cfg(not(feature = "http2"))] + _executor: executor, + } + } + + /// Http1 configuration. + #[cfg(feature = "http1")] + pub fn http1(&mut self) -> Http1Builder<'_, E> { + Http1Builder { inner: self } + } + + /// Http2 configuration. + #[cfg(feature = "http2")] + pub fn http2(&mut self) -> Http2Builder<'_, E> { + Http2Builder { inner: self } + } + + /// Only accepts HTTP/2 + /// + /// Does not do anything if used with [`serve_connection_with_upgrades`] + #[cfg(feature = "http2")] + pub fn http2_only(mut self) -> Self { + assert!(self.version.is_none()); + self.version = Some(Version::H2); + self + } + + /// Only accepts HTTP/1 + /// + /// Does not do anything if used with [`serve_connection_with_upgrades`] + #[cfg(feature = "http1")] + pub fn http1_only(mut self) -> Self { + assert!(self.version.is_none()); + self.version = Some(Version::H1); + self + } + + /// Bind a connection together with a [`Service`]. 
+ pub fn serve_connection(&self, io: I, service: S) -> Connection<'_, I, S, E> + where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + 'static, + E: HttpServerConnExec, + { + let state = match self.version { + #[cfg(feature = "http1")] + Some(Version::H1) => { + let io = Rewind::new_buffered(io, Bytes::new()); + let conn = self.http1.serve_connection(io, service); + ConnState::H1 { conn } + } + #[cfg(feature = "http2")] + Some(Version::H2) => { + let io = Rewind::new_buffered(io, Bytes::new()); + let conn = self.http2.serve_connection(io, service); + ConnState::H2 { conn } + } + #[cfg(any(feature = "http1", feature = "http2"))] + _ => ConnState::ReadVersion { + read_version: read_version(io), + builder: Cow::Borrowed(self), + service: Some(service), + }, + }; + + Connection { state } + } + + /// Bind a connection together with a [`Service`], with the ability to + /// handle HTTP upgrades. This requires that the IO object implements + /// `Send`. 
+ pub fn serve_connection_with_upgrades( + &self, + io: I, + service: S, + ) -> UpgradeableConnection<'_, I, S, E> + where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + Send + 'static, + E: HttpServerConnExec, + { + UpgradeableConnection { + state: UpgradeableConnState::ReadVersion { + read_version: read_version(io), + builder: Cow::Borrowed(self), + service: Some(service), + }, + } + } +} + +#[derive(Copy, Clone, Debug)] +enum Version { + H1, + H2, +} + +impl Version { + #[must_use] + #[cfg(any(not(feature = "http2"), not(feature = "http1")))] + pub fn unsupported(self) -> Error { + match self { + Version::H1 => Error::from("HTTP/1 is not supported"), + Version::H2 => Error::from("HTTP/2 is not supported"), + } + } +} + +fn read_version(io: I) -> ReadVersion +where + I: Read + Unpin, +{ + ReadVersion { + io: Some(io), + buf: [MaybeUninit::uninit(); 24], + filled: 0, + version: Version::H2, + cancelled: false, + _pin: PhantomPinned, + } +} + +pin_project! { + struct ReadVersion { + io: Option, + buf: [MaybeUninit; 24], + // the amount of `buf` thats been filled + filled: usize, + version: Version, + cancelled: bool, + // Make this future `!Unpin` for compatibility with async trait methods. + #[pin] + _pin: PhantomPinned, + } +} + +impl ReadVersion { + pub fn cancel(self: Pin<&mut Self>) { + *self.project().cancelled = true; + } +} + +impl Future for ReadVersion +where + I: Read + Unpin, +{ + type Output = io::Result<(Version, Rewind)>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + if *this.cancelled { + return Poll::Ready(Err(io::Error::new(io::ErrorKind::Interrupted, "Cancelled"))); + } + + let mut buf = ReadBuf::uninit(&mut *this.buf); + // SAFETY: `this.filled` tracks how many bytes have been read (and thus initialized) and + // we're only advancing by that many. 
+ unsafe { + buf.unfilled().advance(*this.filled); + }; + + // We start as H2 and switch to H1 as soon as we don't have the preface. + while buf.filled().len() < H2_PREFACE.len() { + let len = buf.filled().len(); + ready!(Pin::new(this.io.as_mut().unwrap()).poll_read(cx, buf.unfilled()))?; + *this.filled = buf.filled().len(); + + // We starts as H2 and switch to H1 when we don't get the preface. + if buf.filled().len() == len + || buf.filled()[len..] != H2_PREFACE[len..buf.filled().len()] + { + *this.version = Version::H1; + break; + } + } + + let io = this.io.take().unwrap(); + let buf = buf.filled().to_vec(); + Poll::Ready(Ok(( + *this.version, + Rewind::new_buffered(io, Bytes::from(buf)), + ))) + } +} + +pin_project! { + /// Connection future. + pub struct Connection<'a, I, S, E> + where + S: HttpService, + { + #[pin] + state: ConnState<'a, I, S, E>, + } +} + +// A custom COW, since the libstd is has ToOwned bounds that are too eager. +enum Cow<'a, T> { + Borrowed(&'a T), + Owned(T), +} + +impl<'a, T> std::ops::Deref for Cow<'a, T> { + type Target = T; + fn deref(&self) -> &T { + match self { + Cow::Borrowed(t) => &*t, + Cow::Owned(ref t) => t, + } + } +} + +#[cfg(feature = "http1")] +type Http1Connection = hyper::server::conn::http1::Connection, S>; + +#[cfg(not(feature = "http1"))] +type Http1Connection = (PhantomData, PhantomData); + +#[cfg(feature = "http2")] +type Http2Connection = hyper::server::conn::http2::Connection, S, E>; + +#[cfg(not(feature = "http2"))] +type Http2Connection = (PhantomData, PhantomData, PhantomData); + +pin_project! 
{ + #[project = ConnStateProj] + enum ConnState<'a, I, S, E> + where + S: HttpService, + { + ReadVersion { + #[pin] + read_version: ReadVersion, + builder: Cow<'a, Builder>, + service: Option, + }, + H1 { + #[pin] + conn: Http1Connection, + }, + H2 { + #[pin] + conn: Http2Connection, + }, + } +} + +impl Connection<'_, I, S, E> +where + S: HttpService, + S::Error: Into>, + I: Read + Write + Unpin, + B: Body + 'static, + B::Error: Into>, + E: HttpServerConnExec, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still pending. If called after + /// `Connection::poll` has resolved, this does nothing. + pub fn graceful_shutdown(self: Pin<&mut Self>) { + match self.project().state.project() { + ConnStateProj::ReadVersion { read_version, .. } => read_version.cancel(), + #[cfg(feature = "http1")] + ConnStateProj::H1 { conn } => conn.graceful_shutdown(), + #[cfg(feature = "http2")] + ConnStateProj::H2 { conn } => conn.graceful_shutdown(), + #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => unreachable!(), + } + } + + /// Make this Connection static, instead of borrowing from Builder. 
+ pub fn into_owned(self) -> Connection<'static, I, S, E> + where + Builder: Clone, + { + Connection { + state: match self.state { + ConnState::ReadVersion { + read_version, + builder, + service, + } => ConnState::ReadVersion { + read_version, + service, + builder: Cow::Owned(builder.clone()), + }, + #[cfg(feature = "http1")] + ConnState::H1 { conn } => ConnState::H1 { conn }, + #[cfg(feature = "http2")] + ConnState::H2 { conn } => ConnState::H2 { conn }, + #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => unreachable!(), + }, + } + } +} + +impl Future for Connection<'_, I, S, E> +where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + 'static, + E: HttpServerConnExec, +{ + type Output = Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + loop { + let mut this = self.as_mut().project(); + + match this.state.as_mut().project() { + ConnStateProj::ReadVersion { + read_version, + builder, + service, + } => { + let (version, io) = ready!(read_version.poll(cx))?; + let service = service.take().unwrap(); + match version { + #[cfg(feature = "http1")] + Version::H1 => { + let conn = builder.http1.serve_connection(io, service); + this.state.set(ConnState::H1 { conn }); + } + #[cfg(feature = "http2")] + Version::H2 => { + let conn = builder.http2.serve_connection(io, service); + this.state.set(ConnState::H2 { conn }); + } + #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => return Poll::Ready(Err(version.unsupported())), + } + } + #[cfg(feature = "http1")] + ConnStateProj::H1 { conn } => { + return conn.poll(cx).map_err(Into::into); + } + #[cfg(feature = "http2")] + ConnStateProj::H2 { conn } => { + return conn.poll(cx).map_err(Into::into); + } + #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => unreachable!(), + } + } + } +} + +pin_project! { + /// Connection future. 
+ pub struct UpgradeableConnection<'a, I, S, E> + where + S: HttpService, + { + #[pin] + state: UpgradeableConnState<'a, I, S, E>, + } +} + +#[cfg(feature = "http1")] +type Http1UpgradeableConnection = hyper::server::conn::http1::UpgradeableConnection; + +#[cfg(not(feature = "http1"))] +type Http1UpgradeableConnection = (PhantomData, PhantomData); + +pin_project! { + #[project = UpgradeableConnStateProj] + enum UpgradeableConnState<'a, I, S, E> + where + S: HttpService, + { + ReadVersion { + #[pin] + read_version: ReadVersion, + builder: Cow<'a, Builder>, + service: Option, + }, + H1 { + #[pin] + conn: Http1UpgradeableConnection, S>, + }, + H2 { + #[pin] + conn: Http2Connection, + }, + } +} + +impl UpgradeableConnection<'_, I, S, E> +where + S: HttpService, + S::Error: Into>, + I: Read + Write + Unpin, + B: Body + 'static, + B::Error: Into>, + E: HttpServerConnExec, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `UpgradeableConnection` should continue to be polled until shutdown can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still nothing. pending. If + /// called after `UpgradeableConnection::poll` has resolved, this does nothing. + pub fn graceful_shutdown(self: Pin<&mut Self>) { + match self.project().state.project() { + UpgradeableConnStateProj::ReadVersion { read_version, .. } => read_version.cancel(), + #[cfg(feature = "http1")] + UpgradeableConnStateProj::H1 { conn } => conn.graceful_shutdown(), + #[cfg(feature = "http2")] + UpgradeableConnStateProj::H2 { conn } => conn.graceful_shutdown(), + #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => unreachable!(), + } + } + + /// Make this Connection static, instead of borrowing from Builder. 
+ pub fn into_owned(self) -> UpgradeableConnection<'static, I, S, E> + where + Builder: Clone, + { + UpgradeableConnection { + state: match self.state { + UpgradeableConnState::ReadVersion { + read_version, + builder, + service, + } => UpgradeableConnState::ReadVersion { + read_version, + service, + builder: Cow::Owned(builder.clone()), + }, + #[cfg(feature = "http1")] + UpgradeableConnState::H1 { conn } => UpgradeableConnState::H1 { conn }, + #[cfg(feature = "http2")] + UpgradeableConnState::H2 { conn } => UpgradeableConnState::H2 { conn }, + #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => unreachable!(), + }, + } + } +} + +impl Future for UpgradeableConnection<'_, I, S, E> +where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + Send + 'static, + E: HttpServerConnExec, +{ + type Output = Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + loop { + let mut this = self.as_mut().project(); + + match this.state.as_mut().project() { + UpgradeableConnStateProj::ReadVersion { + read_version, + builder, + service, + } => { + let (version, io) = ready!(read_version.poll(cx))?; + let service = service.take().unwrap(); + match version { + #[cfg(feature = "http1")] + Version::H1 => { + let conn = builder.http1.serve_connection(io, service).with_upgrades(); + this.state.set(UpgradeableConnState::H1 { conn }); + } + #[cfg(feature = "http2")] + Version::H2 => { + let conn = builder.http2.serve_connection(io, service); + this.state.set(UpgradeableConnState::H2 { conn }); + } + #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => return Poll::Ready(Err(version.unsupported())), + } + } + #[cfg(feature = "http1")] + UpgradeableConnStateProj::H1 { conn } => { + return conn.poll(cx).map_err(Into::into); + } + #[cfg(feature = "http2")] + UpgradeableConnStateProj::H2 { conn } => { + return conn.poll(cx).map_err(Into::into); + } 
+ #[cfg(any(not(feature = "http1"), not(feature = "http2")))] + _ => unreachable!(), + } + } + } +} + +/// Http1 part of builder. +#[cfg(feature = "http1")] +pub struct Http1Builder<'a, E> { + inner: &'a mut Builder, +} + +#[cfg(feature = "http1")] +impl Http1Builder<'_, E> { + /// Http2 configuration. + #[cfg(feature = "http2")] + pub fn http2(&mut self) -> Http2Builder<'_, E> { + Http2Builder { inner: self.inner } + } + + /// Set whether HTTP/1 connections should support half-closures. + /// + /// Clients can chose to shutdown their write-side while waiting + /// for the server to respond. Setting this to `true` will + /// prevent closing the connection immediately if `read` + /// detects an EOF in the middle of a request. + /// + /// Default is `false`. + pub fn half_close(&mut self, val: bool) -> &mut Self { + self.inner.http1.half_close(val); + self + } + + /// Enables or disables HTTP/1 keep-alive. + /// + /// Default is true. + pub fn keep_alive(&mut self, val: bool) -> &mut Self { + self.inner.http1.keep_alive(val); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self { + self.inner.http1.title_case_headers(enabled); + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Request`. It will also look for and use + /// such an extension in any provided `Response`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. 
+ pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self { + self.inner.http1.preserve_header_case(enabled); + self + } + + /// Set the maximum number of headers. + /// + /// When a request is received, the parser will reserve a buffer to store headers for optimal + /// performance. + /// + /// If server receives more headers than the buffer size, it responds to the client with + /// "431 Request Header Fields Too Large". + /// + /// The headers is allocated on the stack by default, which has higher performance. After + /// setting this value, headers will be allocated in heap memory, that is, heap memory + /// allocation will occur for each request, and there will be a performance drop of about 5%. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is 100. + pub fn max_headers(&mut self, val: usize) -> &mut Self { + self.inner.http1.max_headers(val); + self + } + + /// Set a timeout for reading client request headers. If a client does not + /// transmit the entire header within this time, the connection is closed. + /// + /// Requires a [`Timer`] set by [`Http1Builder::timer`] to take effect. Panics if `header_read_timeout` is configured + /// without a [`Timer`]. + /// + /// Pass `None` to disable. + /// + /// Default is currently 30 seconds, but do not depend on that. + pub fn header_read_timeout(&mut self, read_timeout: impl Into>) -> &mut Self { + self.inner.http1.header_read_timeout(read_timeout); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. 
In this mode hyper will try to guess which + /// mode to use + pub fn writev(&mut self, val: bool) -> &mut Self { + self.inner.http1.writev(val); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + self.inner.http1.max_buf_size(max); + self + } + + /// Aggregates flushes to better support pipelined responses. + /// + /// Experimental, may have bugs. + /// + /// Default is false. + pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { + self.inner.http1.pipeline_flush(enabled); + self + } + + /// Set the timer used in background tasks. + pub fn timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Send + Sync + 'static, + { + self.inner.http1.timer(timer); + self + } + + /// Bind a connection together with a [`Service`]. + #[cfg(feature = "http2")] + pub async fn serve_connection(&self, io: I, service: S) -> Result<()> + where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + 'static, + E: HttpServerConnExec, + { + self.inner.serve_connection(io, service).await + } + + /// Bind a connection together with a [`Service`]. + #[cfg(not(feature = "http2"))] + pub async fn serve_connection(&self, io: I, service: S) -> Result<()> + where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + 'static, + { + self.inner.serve_connection(io, service).await + } + + /// Bind a connection together with a [`Service`], with the ability to + /// handle HTTP upgrades. This requires that the IO object implements + /// `Send`. 
+ #[cfg(feature = "http2")] + pub fn serve_connection_with_upgrades( + &self, + io: I, + service: S, + ) -> UpgradeableConnection<'_, I, S, E> + where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + Send + 'static, + E: HttpServerConnExec, + { + self.inner.serve_connection_with_upgrades(io, service) + } +} + +/// Http2 part of builder. +#[cfg(feature = "http2")] +pub struct Http2Builder<'a, E> { + inner: &'a mut Builder, +} + +#[cfg(feature = "http2")] +impl Http2Builder<'_, E> { + #[cfg(feature = "http1")] + /// Http1 configuration. + pub fn http1(&mut self) -> Http1Builder<'_, E> { + Http1Builder { inner: self.inner } + } + + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.4.0, it is 20. + /// + /// See for more information. + pub fn max_pending_accept_reset_streams(&mut self, max: impl Into>) -> &mut Self { + self.inner.http2.max_pending_accept_reset_streams(max); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + self.inner.http2.initial_stream_window_size(sz); + self + } + + /// Sets the max connection-level flow control for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { + self.inner.http2.initial_connection_window_size(sz); + self + } + + /// Sets whether to use an adaptive flow control. 
+ /// + /// Enabling this will override the limits set in + /// `http2_initial_stream_window_size` and + /// `http2_initial_connection_window_size`. + pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { + self.inner.http2.adaptive_window(enabled); + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + self.inner.http2.max_frame_size(sz); + self + } + + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 + /// connections. + /// + /// Default is 200. Passing `None` will remove any limit. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { + self.inner.http2.max_concurrent_streams(max); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { + self.inner.http2.keep_alive_interval(interval); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.inner.http2.keep_alive_timeout(timeout); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently ~400KB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. 
+ pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { + self.inner.http2.max_send_buf_size(max); + self + } + + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + pub fn enable_connect_protocol(&mut self) -> &mut Self { + self.inner.http2.enable_connect_protocol(); + self + } + + /// Sets the max size of received header frames. + /// + /// Default is currently ~16MB, but may change. + pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { + self.inner.http2.max_header_list_size(max); + self + } + + /// Set the timer used in background tasks. + pub fn timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Send + Sync + 'static, + { + self.inner.http2.timer(timer); + self + } + + /// Bind a connection together with a [`Service`]. + pub async fn serve_connection(&self, io: I, service: S) -> Result<()> + where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + 'static, + E: HttpServerConnExec, + { + self.inner.serve_connection(io, service).await + } + + /// Bind a connection together with a [`Service`], with the ability to + /// handle HTTP upgrades. This requires that the IO object implements + /// `Send`. 
+ pub fn serve_connection_with_upgrades( + &self, + io: I, + service: S, + ) -> UpgradeableConnection<'_, I, S, E> + where + S: Service, Response = Response>, + S::Future: 'static, + S::Error: Into>, + B: Body + 'static, + B::Error: Into>, + I: Read + Write + Unpin + Send + 'static, + E: HttpServerConnExec, + { + self.inner.serve_connection_with_upgrades(io, service) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + rt::{TokioExecutor, TokioIo}, + server::conn::auto, + }; + use http::{Request, Response}; + use http_body::Body; + use http_body_util::{BodyExt, Empty, Full}; + use hyper::{body, body::Bytes, client, service::service_fn}; + use std::{convert::Infallible, error::Error as StdError, net::SocketAddr, time::Duration}; + use tokio::{ + net::{TcpListener, TcpStream}, + pin, + }; + + const BODY: &[u8] = b"Hello, world!"; + + #[test] + fn configuration() { + // One liner. + auto::Builder::new(TokioExecutor::new()) + .http1() + .keep_alive(true) + .http2() + .keep_alive_interval(None); + // .serve_connection(io, service); + + // Using variable. 
+ let mut builder = auto::Builder::new(TokioExecutor::new()); + + builder.http1().keep_alive(true); + builder.http2().keep_alive_interval(None); + // builder.serve_connection(io, service); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn http1() { + let addr = start_server(false, false).await; + let mut sender = connect_h1(addr).await; + + let response = sender + .send_request(Request::new(Empty::::new())) + .await + .unwrap(); + + let body = response.into_body().collect().await.unwrap().to_bytes(); + + assert_eq!(body, BODY); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn http2() { + let addr = start_server(false, false).await; + let mut sender = connect_h2(addr).await; + + let response = sender + .send_request(Request::new(Empty::::new())) + .await + .unwrap(); + + let body = response.into_body().collect().await.unwrap().to_bytes(); + + assert_eq!(body, BODY); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn http2_only() { + let addr = start_server(false, true).await; + let mut sender = connect_h2(addr).await; + + let response = sender + .send_request(Request::new(Empty::::new())) + .await + .unwrap(); + + let body = response.into_body().collect().await.unwrap().to_bytes(); + + assert_eq!(body, BODY); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn http2_only_fail_if_client_is_http1() { + let addr = start_server(false, true).await; + let mut sender = connect_h1(addr).await; + + let _ = sender + .send_request(Request::new(Empty::::new())) + .await + .expect_err("should fail"); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn http1_only() { + let addr = start_server(true, false).await; + let mut sender = connect_h1(addr).await; + + let response = sender + .send_request(Request::new(Empty::::new())) + .await + .unwrap(); + + let body = response.into_body().collect().await.unwrap().to_bytes(); + + assert_eq!(body, BODY); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn http1_only_fail_if_client_is_http2() { + let addr = 
start_server(true, false).await; + let mut sender = connect_h2(addr).await; + + let _ = sender + .send_request(Request::new(Empty::::new())) + .await + .expect_err("should fail"); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn graceful_shutdown() { + let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .unwrap(); + + let listener_addr = listener.local_addr().unwrap(); + + // Spawn the task in background so that we can connect there + let listen_task = tokio::spawn(async move { listener.accept().await.unwrap() }); + // Only connect a stream, do not send headers or anything + let _stream = TcpStream::connect(listener_addr).await.unwrap(); + + let (stream, _) = listen_task.await.unwrap(); + let stream = TokioIo::new(stream); + let builder = auto::Builder::new(TokioExecutor::new()); + let connection = builder.serve_connection(stream, service_fn(hello)); + + pin!(connection); + + connection.as_mut().graceful_shutdown(); + + let connection_error = tokio::time::timeout(Duration::from_millis(200), connection) + .await + .expect("Connection should have finished in a timely manner after graceful shutdown.") + .expect_err("Connection should have been interrupted."); + + let connection_error = connection_error + .downcast_ref::() + .expect("The error should have been `std::io::Error`."); + assert_eq!(connection_error.kind(), std::io::ErrorKind::Interrupted); + } + + async fn connect_h1(addr: SocketAddr) -> client::conn::http1::SendRequest + where + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, + { + let stream = TokioIo::new(TcpStream::connect(addr).await.unwrap()); + let (sender, connection) = client::conn::http1::handshake(stream).await.unwrap(); + + tokio::spawn(connection); + + sender + } + + async fn connect_h2(addr: SocketAddr) -> client::conn::http2::SendRequest + where + B: Body + Unpin + Send + 'static, + B::Data: Send, + B::Error: Into>, + { + let stream = TokioIo::new(TcpStream::connect(addr).await.unwrap()); + 
let (sender, connection) = client::conn::http2::Builder::new(TokioExecutor::new()) + .handshake(stream) + .await + .unwrap(); + + tokio::spawn(connection); + + sender + } + + async fn start_server(h1_only: bool, h2_only: bool) -> SocketAddr { + let addr: SocketAddr = ([127, 0, 0, 1], 0).into(); + let listener = TcpListener::bind(addr).await.unwrap(); + + let local_addr = listener.local_addr().unwrap(); + + tokio::spawn(async move { + loop { + let (stream, _) = listener.accept().await.unwrap(); + let stream = TokioIo::new(stream); + tokio::task::spawn(async move { + let mut builder = auto::Builder::new(TokioExecutor::new()); + if h1_only { + builder = builder.http1_only(); + builder.serve_connection(stream, service_fn(hello)).await + } else if h2_only { + builder = builder.http2_only(); + builder.serve_connection(stream, service_fn(hello)).await + } else { + builder + .http2() + .max_header_list_size(4096) + .serve_connection_with_upgrades(stream, service_fn(hello)) + .await + } + .unwrap(); + }); + } + }); + + local_addr + } + + async fn hello(_req: Request) -> Result>, Infallible> { + Ok(Response::new(Full::new(Bytes::from(BODY)))) + } +} diff --git a/.cargo-vendor/hyper-util/src/server/conn/mod.rs b/.cargo-vendor/hyper-util/src/server/conn/mod.rs new file mode 100644 index 0000000000..b23503a12c --- /dev/null +++ b/.cargo-vendor/hyper-util/src/server/conn/mod.rs @@ -0,0 +1,4 @@ +//! Connection utilities. + +#[cfg(any(feature = "http1", feature = "http2"))] +pub mod auto; diff --git a/.cargo-vendor/hyper-util/src/server/graceful.rs b/.cargo-vendor/hyper-util/src/server/graceful.rs new file mode 100644 index 0000000000..dcd9f0640c --- /dev/null +++ b/.cargo-vendor/hyper-util/src/server/graceful.rs @@ -0,0 +1,447 @@ +//! Utility to gracefully shutdown a server. +//! +//! This module provides a [`GracefulShutdown`] type, +//! which can be used to gracefully shutdown a server. +//! +//! See +//! for an example of how to use this. 
+ +use std::{ + fmt::{self, Debug}, + future::Future, + pin::Pin, + task::{self, Poll}, +}; + +use pin_project_lite::pin_project; +use tokio::sync::watch; + +/// A graceful shutdown utility +pub struct GracefulShutdown { + tx: watch::Sender<()>, +} + +impl GracefulShutdown { + /// Create a new graceful shutdown helper. + pub fn new() -> Self { + let (tx, _) = watch::channel(()); + Self { tx } + } + + /// Wrap a future for graceful shutdown watching. + pub fn watch(&self, conn: C) -> impl Future { + let mut rx = self.tx.subscribe(); + GracefulConnectionFuture::new(conn, async move { + let _ = rx.changed().await; + // hold onto the rx until the watched future is completed + rx + }) + } + + /// Signal shutdown for all watched connections. + /// + /// This returns a `Future` which will complete once all watched + /// connections have shutdown. + pub async fn shutdown(self) { + let Self { tx } = self; + + // signal all the watched futures about the change + let _ = tx.send(()); + // and then wait for all of them to complete + tx.closed().await; + } +} + +impl Debug for GracefulShutdown { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GracefulShutdown").finish() + } +} + +impl Default for GracefulShutdown { + fn default() -> Self { + Self::new() + } +} + +pin_project! { + struct GracefulConnectionFuture { + #[pin] + conn: C, + #[pin] + cancel: F, + #[pin] + // If cancelled, this is held until the inner conn is done. 
+ cancelled_guard: Option, + } +} + +impl GracefulConnectionFuture { + fn new(conn: C, cancel: F) -> Self { + Self { + conn, + cancel, + cancelled_guard: None, + } + } +} + +impl Debug for GracefulConnectionFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GracefulConnectionFuture").finish() + } +} + +impl Future for GracefulConnectionFuture +where + C: GracefulConnection, + F: Future, +{ + type Output = C::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + let mut this = self.project(); + if this.cancelled_guard.is_none() { + if let Poll::Ready(guard) = this.cancel.poll(cx) { + this.cancelled_guard.set(Some(guard)); + this.conn.as_mut().graceful_shutdown(); + } + } + this.conn.poll(cx) + } +} + +/// An internal utility trait as an umbrella target for all (hyper) connection +/// types that the [`GracefulShutdown`] can watch. +pub trait GracefulConnection: Future> + private::Sealed { + /// The error type returned by the connection when used as a future. + type Error; + + /// Start a graceful shutdown process for this connection. 
+ fn graceful_shutdown(self: Pin<&mut Self>); +} + +#[cfg(feature = "http1")] +impl GracefulConnection for hyper::server::conn::http1::Connection +where + S: hyper::service::HttpService, + S::Error: Into>, + I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, +{ + type Error = hyper::Error; + + fn graceful_shutdown(self: Pin<&mut Self>) { + hyper::server::conn::http1::Connection::graceful_shutdown(self); + } +} + +#[cfg(feature = "http2")] +impl GracefulConnection for hyper::server::conn::http2::Connection +where + S: hyper::service::HttpService, + S::Error: Into>, + I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + E: hyper::rt::bounds::Http2ServerConnExec, +{ + type Error = hyper::Error; + + fn graceful_shutdown(self: Pin<&mut Self>) { + hyper::server::conn::http2::Connection::graceful_shutdown(self); + } +} + +#[cfg(feature = "server-auto")] +impl<'a, I, B, S, E> GracefulConnection for crate::server::conn::auto::Connection<'a, I, S, E> +where + S: hyper::service::Service, Response = http::Response>, + S::Error: Into>, + S::Future: 'static, + I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + E: hyper::rt::bounds::Http2ServerConnExec, +{ + type Error = Box; + + fn graceful_shutdown(self: Pin<&mut Self>) { + crate::server::conn::auto::Connection::graceful_shutdown(self); + } +} + +#[cfg(feature = "server-auto")] +impl<'a, I, B, S, E> GracefulConnection + for crate::server::conn::auto::UpgradeableConnection<'a, I, S, E> +where + S: hyper::service::Service, Response = http::Response>, + S::Error: Into>, + S::Future: 'static, + I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + E: hyper::rt::bounds::Http2ServerConnExec, +{ + type Error = Box; + + fn graceful_shutdown(self: Pin<&mut Self>) { + 
crate::server::conn::auto::UpgradeableConnection::graceful_shutdown(self); + } +} + +mod private { + pub trait Sealed {} + + #[cfg(feature = "http1")] + impl Sealed for hyper::server::conn::http1::Connection + where + S: hyper::service::HttpService, + S::Error: Into>, + I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + { + } + + #[cfg(feature = "http1")] + impl Sealed for hyper::server::conn::http1::UpgradeableConnection + where + S: hyper::service::HttpService, + S::Error: Into>, + I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + { + } + + #[cfg(feature = "http2")] + impl Sealed for hyper::server::conn::http2::Connection + where + S: hyper::service::HttpService, + S::Error: Into>, + I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + E: hyper::rt::bounds::Http2ServerConnExec, + { + } + + #[cfg(feature = "server-auto")] + impl<'a, I, B, S, E> Sealed for crate::server::conn::auto::Connection<'a, I, S, E> + where + S: hyper::service::Service< + http::Request, + Response = http::Response, + >, + S::Error: Into>, + S::Future: 'static, + I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + E: hyper::rt::bounds::Http2ServerConnExec, + { + } + + #[cfg(feature = "server-auto")] + impl<'a, I, B, S, E> Sealed for crate::server::conn::auto::UpgradeableConnection<'a, I, S, E> + where + S: hyper::service::Service< + http::Request, + Response = http::Response, + >, + S::Error: Into>, + S::Future: 'static, + I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static, + B: hyper::body::Body + 'static, + B::Error: Into>, + E: hyper::rt::bounds::Http2ServerConnExec, + { + } +} + +#[cfg(test)] +mod test { + use super::*; + use pin_project_lite::pin_project; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + + 
pin_project! { + #[derive(Debug)] + struct DummyConnection { + #[pin] + future: F, + shutdown_counter: Arc, + } + } + + impl private::Sealed for DummyConnection {} + + impl GracefulConnection for DummyConnection { + type Error = (); + + fn graceful_shutdown(self: Pin<&mut Self>) { + self.shutdown_counter.fetch_add(1, Ordering::SeqCst); + } + } + + impl Future for DummyConnection { + type Output = Result<(), ()>; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match self.project().future.poll(cx) { + Poll::Ready(_) => Poll::Ready(Ok(())), + Poll::Pending => Poll::Pending, + } + } + } + + #[cfg(not(miri))] + #[tokio::test] + async fn test_graceful_shutdown_ok() { + let graceful = GracefulShutdown::new(); + let shutdown_counter = Arc::new(AtomicUsize::new(0)); + let (dummy_tx, _) = tokio::sync::broadcast::channel(1); + + for i in 1..=3 { + let mut dummy_rx = dummy_tx.subscribe(); + let shutdown_counter = shutdown_counter.clone(); + + let future = async move { + tokio::time::sleep(std::time::Duration::from_millis(i * 10)).await; + let _ = dummy_rx.recv().await; + }; + let dummy_conn = DummyConnection { + future, + shutdown_counter, + }; + let conn = graceful.watch(dummy_conn); + tokio::spawn(async move { + conn.await.unwrap(); + }); + } + + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); + let _ = dummy_tx.send(()); + + tokio::select! 
{ + _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => { + panic!("timeout") + }, + _ = graceful.shutdown() => { + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); + } + } + } + + #[cfg(not(miri))] + #[tokio::test] + async fn test_graceful_shutdown_delayed_ok() { + let graceful = GracefulShutdown::new(); + let shutdown_counter = Arc::new(AtomicUsize::new(0)); + + for i in 1..=3 { + let shutdown_counter = shutdown_counter.clone(); + + //tokio::time::sleep(std::time::Duration::from_millis(i * 5)).await; + let future = async move { + tokio::time::sleep(std::time::Duration::from_millis(i * 50)).await; + }; + let dummy_conn = DummyConnection { + future, + shutdown_counter, + }; + let conn = graceful.watch(dummy_conn); + tokio::spawn(async move { + conn.await.unwrap(); + }); + } + + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); + + tokio::select! { + _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => { + panic!("timeout") + }, + _ = graceful.shutdown() => { + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); + } + } + } + + #[cfg(not(miri))] + #[tokio::test] + async fn test_graceful_shutdown_multi_per_watcher_ok() { + let graceful = GracefulShutdown::new(); + let shutdown_counter = Arc::new(AtomicUsize::new(0)); + + for i in 1..=3 { + let shutdown_counter = shutdown_counter.clone(); + + let mut futures = Vec::new(); + for u in 1..=i { + let future = tokio::time::sleep(std::time::Duration::from_millis(u * 50)); + let dummy_conn = DummyConnection { + future, + shutdown_counter: shutdown_counter.clone(), + }; + let conn = graceful.watch(dummy_conn); + futures.push(conn); + } + tokio::spawn(async move { + futures_util::future::join_all(futures).await; + }); + } + + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); + + tokio::select! 
{ + _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => { + panic!("timeout") + }, + _ = graceful.shutdown() => { + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 6); + } + } + } + + #[cfg(not(miri))] + #[tokio::test] + async fn test_graceful_shutdown_timeout() { + let graceful = GracefulShutdown::new(); + let shutdown_counter = Arc::new(AtomicUsize::new(0)); + + for i in 1..=3 { + let shutdown_counter = shutdown_counter.clone(); + + let future = async move { + if i == 1 { + std::future::pending::<()>().await + } else { + std::future::ready(()).await + } + }; + let dummy_conn = DummyConnection { + future, + shutdown_counter, + }; + let conn = graceful.watch(dummy_conn); + tokio::spawn(async move { + conn.await.unwrap(); + }); + } + + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); + + tokio::select! { + _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => { + assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); + }, + _ = graceful.shutdown() => { + panic!("shutdown should not be completed: as not all our conns finish") + } + } + } +} diff --git a/.cargo-vendor/hyper-util/src/server/mod.rs b/.cargo-vendor/hyper-util/src/server/mod.rs new file mode 100644 index 0000000000..a4838ac5d8 --- /dev/null +++ b/.cargo-vendor/hyper-util/src/server/mod.rs @@ -0,0 +1,6 @@ +//! Server utilities. + +pub mod conn; + +#[cfg(feature = "server-graceful")] +pub mod graceful; diff --git a/.cargo-vendor/hyper-util/src/service.rs b/.cargo-vendor/hyper-util/src/service.rs new file mode 100644 index 0000000000..4652a6ab9e --- /dev/null +++ b/.cargo-vendor/hyper-util/src/service.rs @@ -0,0 +1,62 @@ +//! Service utilities. + +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tower::{util::Oneshot, ServiceExt}; + +/// A tower service converted into a hyper service. 
+#[derive(Debug, Copy, Clone)] +pub struct TowerToHyperService { + service: S, +} + +impl TowerToHyperService { + /// Create a new `TowerToHyperService` from a tower service. + pub fn new(tower_service: S) -> Self { + Self { + service: tower_service, + } + } +} + +impl hyper::service::Service for TowerToHyperService +where + S: tower_service::Service + Clone, +{ + type Response = S::Response; + type Error = S::Error; + type Future = TowerToHyperServiceFuture; + + fn call(&self, req: R) -> Self::Future { + TowerToHyperServiceFuture { + future: self.service.clone().oneshot(req), + } + } +} + +pin_project! { + /// Response future for [`TowerToHyperService`]. + pub struct TowerToHyperServiceFuture + where + S: tower_service::Service, + { + #[pin] + future: Oneshot, + } +} + +impl Future for TowerToHyperServiceFuture +where + S: tower_service::Service, +{ + type Output = Result; + + #[inline] + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.project().future.poll(cx) + } +} diff --git a/.cargo-vendor/hyper-util/tests/legacy_client.rs b/.cargo-vendor/hyper-util/tests/legacy_client.rs new file mode 100644 index 0000000000..f2fd8b3b61 --- /dev/null +++ b/.cargo-vendor/hyper-util/tests/legacy_client.rs @@ -0,0 +1,981 @@ +mod test_utils; + +use std::io::{Read, Write}; +use std::net::{SocketAddr, TcpListener}; +use std::pin::Pin; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::task::Poll; +use std::thread; +use std::time::Duration; + +use futures_channel::{mpsc, oneshot}; +use futures_util::future::{self, FutureExt, TryFutureExt}; +use futures_util::stream::StreamExt; +use futures_util::{self, Stream}; +use http_body_util::BodyExt; +use http_body_util::{Empty, Full, StreamBody}; + +use hyper::body::Bytes; +use hyper::body::Frame; +use hyper::Request; +use hyper_util::client::legacy::connect::{capture_connection, HttpConnector}; +use hyper_util::client::legacy::Client; +use hyper_util::rt::{TokioExecutor, TokioIo}; + +use 
test_utils::{DebugConnector, DebugStream}; + +pub fn runtime() -> tokio::runtime::Runtime { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("new rt") +} + +fn s(buf: &[u8]) -> &str { + std::str::from_utf8(buf).expect("from_utf8") +} + +#[cfg(not(miri))] +#[test] +fn drop_body_before_eof_closes_connection() { + // https://github.com/hyperium/hyper/issues/1353 + let _ = pretty_env_logger::try_init(); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let rt = runtime(); + let (closes_tx, closes) = mpsc::channel::<()>(10); + let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build( + DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), + ); + let (tx1, rx1) = oneshot::channel(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + let body = vec![b'x'; 1024 * 128]; + write!( + sock, + "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n", + body.len() + ) + .expect("write head"); + let _ = sock.write_all(&body); + let _ = tx1.send(()); + }); + + let req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req).map_ok(move |res| { + assert_eq!(res.status(), hyper::StatusCode::OK); + }); + let rx = rx1; + rt.block_on(async move { + let (res, _) = future::join(res, rx).await; + res.unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + }); + rt.block_on(closes.into_future()).0.expect("closes"); +} + +#[cfg(not(miri))] +#[tokio::test] +async fn drop_client_closes_idle_connections() { + let _ = pretty_env_logger::try_init(); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let (closes_tx, mut closes) = 
mpsc::channel(10); + + let (tx1, rx1) = oneshot::channel(); + let (_client_drop_tx, client_drop_rx) = oneshot::channel::<()>(); + + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + let body = [b'x'; 64]; + write!( + sock, + "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n", + body.len() + ) + .expect("write head"); + let _ = sock.write_all(&body); + let _ = tx1.send(()); + + // prevent this thread from closing until end of test, so the connection + // stays open and idle until Client is dropped + runtime().block_on(client_drop_rx.into_future()) + }); + + let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes( + HttpConnector::new(), + closes_tx, + )); + + let req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req).map_ok(move |res| { + assert_eq!(res.status(), hyper::StatusCode::OK); + }); + let rx = rx1; + let (res, _) = future::join(res, rx).await; + res.unwrap(); + + // not closed yet, just idle + future::poll_fn(|ctx| { + assert!(Pin::new(&mut closes).poll_next(ctx).is_pending()); + Poll::Ready(()) + }) + .await; + + // drop to start the connections closing + drop(client); + + // and wait a few ticks for the connections to close + let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); + futures_util::pin_mut!(t); + let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); + future::select(t, close).await; +} + +#[cfg(not(miri))] +#[tokio::test] +async fn drop_response_future_closes_in_progress_connection() { + let _ = pretty_env_logger::try_init(); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let (closes_tx, closes) = 
mpsc::channel(10); + + let (tx1, rx1) = oneshot::channel(); + let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>(); + + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + // we never write a response head + // simulates a slow server operation + let _ = tx1.send(()); + + // prevent this thread from closing until end of test, so the connection + // stays open and idle until Client is dropped + let _ = client_drop_rx.recv(); + }); + + let res = { + let client = Client::builder(TokioExecutor::new()).build( + DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), + ); + + let req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + client.request(req).map(|_| unreachable!()) + }; + + future::select(res, rx1).await; + + // res now dropped + let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); + futures_util::pin_mut!(t); + let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); + future::select(t, close).await; +} + +#[cfg(not(miri))] +#[tokio::test] +async fn drop_response_body_closes_in_progress_connection() { + let _ = pretty_env_logger::try_init(); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let (closes_tx, closes) = mpsc::channel(10); + + let (tx1, rx1) = oneshot::channel(); + let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>(); + + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + write!( + sock, + "HTTP/1.1 200 
OK\r\nTransfer-Encoding: chunked\r\n\r\n" + ) + .expect("write head"); + let _ = tx1.send(()); + + // prevent this thread from closing until end of test, so the connection + // stays open and idle until Client is dropped + let _ = client_drop_rx.recv(); + }); + + let rx = rx1; + let res = { + let client = Client::builder(TokioExecutor::new()).build( + DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), + ); + + let req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + // notably, haven't read body yet + client.request(req) + }; + + let (res, _) = future::join(res, rx).await; + // drop the body + res.unwrap(); + + // and wait a few ticks to see the connection drop + let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); + futures_util::pin_mut!(t); + let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); + future::select(t, close).await; +} + +#[cfg(not(miri))] +#[tokio::test] +async fn no_keep_alive_closes_connection() { + // https://github.com/hyperium/hyper/issues/1383 + let _ = pretty_env_logger::try_init(); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let (closes_tx, closes) = mpsc::channel(10); + + let (tx1, rx1) = oneshot::channel(); + let (_tx2, rx2) = std::sync::mpsc::channel::<()>(); + + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .unwrap(); + let _ = tx1.send(()); + + // prevent this thread from closing until end of test, so the connection + // stays open and idle until Client is dropped + let _ = rx2.recv(); + }); + + let client = Client::builder(TokioExecutor::new()) + .pool_max_idle_per_host(0) + 
.build(DebugConnector::with_http_and_closes( + HttpConnector::new(), + closes_tx, + )); + + let req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req).map_ok(move |res| { + assert_eq!(res.status(), hyper::StatusCode::OK); + }); + let rx = rx1; + let (res, _) = future::join(res, rx).await; + res.unwrap(); + + let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); + futures_util::pin_mut!(t); + let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); + future::select(close, t).await; +} + +#[cfg(not(miri))] +#[tokio::test] +async fn socket_disconnect_closes_idle_conn() { + // notably when keep-alive is enabled + let _ = pretty_env_logger::try_init(); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let (closes_tx, closes) = mpsc::channel(10); + + let (tx1, rx1) = oneshot::channel(); + + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .unwrap(); + let _ = tx1.send(()); + }); + + let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes( + HttpConnector::new(), + closes_tx, + )); + + let req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req).map_ok(move |res| { + assert_eq!(res.status(), hyper::StatusCode::OK); + }); + let rx = rx1; + + let (res, _) = future::join(res, rx).await; + res.unwrap(); + + let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); + futures_util::pin_mut!(t); + let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); + future::select(t, 
close).await; +} + +#[cfg(not(miri))] +#[test] +fn connect_call_is_lazy() { + // We especially don't want connects() triggered if there's + // idle connections that the Checkout would have found + let _ = pretty_env_logger::try_init(); + + let _rt = runtime(); + let connector = DebugConnector::new(); + let connects = connector.connects.clone(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + assert_eq!(connects.load(Ordering::Relaxed), 0); + let req = Request::builder() + .uri("http://hyper.local/a") + .body(Empty::::new()) + .unwrap(); + let _fut = client.request(req); + // internal Connect::connect should have been lazy, and not + // triggered an actual connect yet. + assert_eq!(connects.load(Ordering::Relaxed), 0); +} + +#[cfg(not(miri))] +#[test] +fn client_keep_alive_0() { + let _ = pretty_env_logger::try_init(); + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let rt = runtime(); + let connector = DebugConnector::new(); + let connects = connector.connects.clone(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + //drop(server); + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + let _ = tx1.send(()); + + let n2 = sock.read(&mut buf).expect("read 2"); + assert_ne!(n2, 0); + let second_get = "GET /b HTTP/1.1\r\n"; + assert_eq!(s(&buf[..second_get.len()]), second_get); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 2"); + let _ = tx2.send(()); + }); + + assert_eq!(connects.load(Ordering::SeqCst), 0); + + let rx = rx1; + let req = 
Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req); + rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); + + assert_eq!(connects.load(Ordering::SeqCst), 1); + + // sleep real quick to let the threadpool put connection in ready + // state and back into client pool + thread::sleep(Duration::from_millis(50)); + + let rx = rx2; + let req = Request::builder() + .uri(&*format!("http://{}/b", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req); + rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); + + assert_eq!( + connects.load(Ordering::SeqCst), + 1, + "second request should still only have 1 connect" + ); + drop(client); +} + +#[cfg(not(miri))] +#[test] +fn client_keep_alive_extra_body() { + let _ = pretty_env_logger::try_init(); + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let rt = runtime(); + + let connector = DebugConnector::new(); + let connects = connector.connects.clone(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello") + .expect("write 1"); + // the body "hello", while ignored because its a HEAD request, should mean the connection + // cannot be put back in the pool + let _ = tx1.send(()); + + let mut sock2 = server.accept().unwrap().0; + let n2 = sock2.read(&mut buf).expect("read 2"); + assert_ne!(n2, 0); + let second_get = "GET /b HTTP/1.1\r\n"; + assert_eq!(s(&buf[..second_get.len()]), second_get); + sock2 + .write_all(b"HTTP/1.1 200 
OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 2"); + let _ = tx2.send(()); + }); + + assert_eq!(connects.load(Ordering::Relaxed), 0); + + let rx = rx1; + let req = Request::builder() + .method("HEAD") + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req); + rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); + + assert_eq!(connects.load(Ordering::Relaxed), 1); + + let rx = rx2; + let req = Request::builder() + .uri(&*format!("http://{}/b", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req); + rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); + + assert_eq!(connects.load(Ordering::Relaxed), 2); +} + +#[cfg(not(miri))] +#[tokio::test] +async fn client_keep_alive_when_response_before_request_body_ends() { + let _ = pretty_env_logger::try_init(); + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + + let (closes_tx, mut closes) = mpsc::channel::<()>(10); + let connector = DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx); + let connects = connector.connects.clone(); + let client = Client::builder(TokioExecutor::new()).build(connector.clone()); + + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + let (_tx3, rx3) = std::sync::mpsc::channel::<()>(); + + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + // after writing the response, THEN stream the body + let _ = tx1.send(()); + + sock.read(&mut buf).expect("read 2"); + let _ = tx2.send(()); + + // prevent this thread from closing until end of test, so the connection + // stays open and idle until Client is dropped + let _ = 
rx3.recv(); + }); + + assert_eq!(connects.load(Ordering::Relaxed), 0); + + let delayed_body = rx1 + .then(|_| Box::pin(tokio::time::sleep(Duration::from_millis(200)))) + .map(|_| Ok::<_, ()>(Frame::data(&b"hello a"[..]))) + .map_err(|_| -> hyper::Error { panic!("rx1") }) + .into_stream(); + + let req = Request::builder() + .method("POST") + .uri(&*format!("http://{}/a", addr)) + .body(StreamBody::new(delayed_body)) + .unwrap(); + let res = client.request(req).map_ok(move |res| { + assert_eq!(res.status(), hyper::StatusCode::OK); + }); + + future::join(res, rx2).await.0.unwrap(); + future::poll_fn(|ctx| { + assert!(Pin::new(&mut closes).poll_next(ctx).is_pending()); + Poll::Ready(()) + }) + .await; + + assert_eq!(connects.load(Ordering::Relaxed), 1); + + drop(client); + let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); + futures_util::pin_mut!(t); + let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); + future::select(t, close).await; +} + +#[cfg(not(miri))] +#[tokio::test] +async fn client_keep_alive_eager_when_chunked() { + // If a response body has been read to completion, with completion + // determined by some other factor, like decompression, and thus + // it is in't polled a final time to clear the final 0-len chunk, + // try to eagerly clear it so the connection can still be used. 
+ + let _ = pretty_env_logger::try_init(); + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let connector = DebugConnector::new(); + let connects = connector.connects.clone(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + //drop(server); + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all( + b"\ + HTTP/1.1 200 OK\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + 5\r\n\ + hello\r\n\ + 0\r\n\r\n\ + ", + ) + .expect("write 1"); + let _ = tx1.send(()); + + let n2 = sock.read(&mut buf).expect("read 2"); + assert_ne!(n2, 0, "bytes of second request"); + let second_get = "GET /b HTTP/1.1\r\n"; + assert_eq!(s(&buf[..second_get.len()]), second_get); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 2"); + let _ = tx2.send(()); + }); + + assert_eq!(connects.load(Ordering::SeqCst), 0); + + let rx = rx1; + let req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let fut = client.request(req); + + let resp = future::join(fut, rx).map(|r| r.0).await.unwrap(); + assert_eq!(connects.load(Ordering::SeqCst), 1); + assert_eq!(resp.status(), 200); + assert_eq!(resp.headers()["transfer-encoding"], "chunked"); + + // Read the "hello" chunk... 
+ let chunk = resp.collect().await.unwrap().to_bytes(); + assert_eq!(chunk, "hello"); + + // sleep real quick to let the threadpool put connection in ready + // state and back into client pool + tokio::time::sleep(Duration::from_millis(50)).await; + + let rx = rx2; + let req = Request::builder() + .uri(&*format!("http://{}/b", addr)) + .body(Empty::::new()) + .unwrap(); + let fut = client.request(req); + future::join(fut, rx).map(|r| r.0).await.unwrap(); + + assert_eq!( + connects.load(Ordering::SeqCst), + 1, + "second request should still only have 1 connect" + ); + drop(client); +} + +#[cfg(not(miri))] +#[test] +fn connect_proxy_sends_absolute_uri() { + let _ = pretty_env_logger::try_init(); + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let rt = runtime(); + let connector = DebugConnector::new().proxy(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let (tx1, rx1) = oneshot::channel(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + //drop(server); + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + let n = sock.read(&mut buf).expect("read 1"); + let expected = format!( + "GET http://{addr}/foo/bar HTTP/1.1\r\nhost: {addr}\r\n\r\n", + addr = addr + ); + assert_eq!(s(&buf[..n]), expected); + + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + let _ = tx1.send(()); + }); + + let rx = rx1; + let req = Request::builder() + .uri(&*format!("http://{}/foo/bar", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req); + rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); +} + +#[cfg(not(miri))] +#[test] +fn connect_proxy_http_connect_sends_authority_form() { + let _ = pretty_env_logger::try_init(); + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = 
server.local_addr().unwrap(); + let rt = runtime(); + let connector = DebugConnector::new().proxy(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let (tx1, rx1) = oneshot::channel(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + //drop(server); + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + let n = sock.read(&mut buf).expect("read 1"); + let expected = format!( + "CONNECT {addr} HTTP/1.1\r\nhost: {addr}\r\n\r\n", + addr = addr + ); + assert_eq!(s(&buf[..n]), expected); + + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + let _ = tx1.send(()); + }); + + let rx = rx1; + let req = Request::builder() + .method("CONNECT") + .uri(&*format!("http://{}/useless/path", addr)) + .body(Empty::::new()) + .unwrap(); + let res = client.request(req); + rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); +} + +#[cfg(not(miri))] +#[test] +fn client_upgrade() { + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + + let _ = pretty_env_logger::try_init(); + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let rt = runtime(); + + let connector = DebugConnector::new(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let (tx1, rx1) = oneshot::channel(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all( + b"\ + HTTP/1.1 101 Switching Protocols\r\n\ + Upgrade: foobar\r\n\ + \r\n\ + foobar=ready\ + ", + ) + .unwrap(); + let _ = tx1.send(()); + + let n = sock.read(&mut buf).expect("read 2"); + assert_eq!(&buf[..n], b"foo=bar"); + 
sock.write_all(b"bar=foo").expect("write 2"); + }); + + let rx = rx1; + + let req = Request::builder() + .method("GET") + .uri(&*format!("http://{}/up", addr)) + .body(Empty::::new()) + .unwrap(); + + let res = client.request(req); + let res = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); + + assert_eq!(res.status(), 101); + let upgraded = rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade"); + + let parts = upgraded.downcast::().unwrap(); + assert_eq!(s(&parts.read_buf), "foobar=ready"); + + let mut io = parts.io; + rt.block_on(io.write_all(b"foo=bar")).unwrap(); + let mut vec = vec![]; + rt.block_on(io.read_to_end(&mut vec)).unwrap(); + assert_eq!(vec, b"bar=foo"); +} + +#[cfg(not(miri))] +#[test] +fn alpn_h2() { + use http::Response; + use hyper::service::service_fn; + use tokio::net::TcpListener; + + let _ = pretty_env_logger::try_init(); + let rt = runtime(); + let listener = rt + .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))) + .unwrap(); + let addr = listener.local_addr().unwrap(); + let mut connector = DebugConnector::new(); + connector.alpn_h2 = true; + let connects = connector.connects.clone(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + rt.spawn(async move { + let (stream, _) = listener.accept().await.expect("accept"); + let stream = TokioIo::new(stream); + let _ = hyper::server::conn::http2::Builder::new(TokioExecutor::new()) + .serve_connection( + stream, + service_fn(|req| async move { + assert_eq!(req.headers().get("host"), None); + Ok::<_, hyper::Error>(Response::new(Full::::from("Hello, world"))) + }), + ) + .await + .expect("server"); + }); + + assert_eq!(connects.load(Ordering::SeqCst), 0); + + let url = format!("http://{}/a", addr) + .parse::<::hyper::Uri>() + .unwrap(); + let res1 = client.get(url.clone()); + let res2 = client.get(url.clone()); + let res3 = client.get(url.clone()); + rt.block_on(future::try_join3(res1, res2, res3)).unwrap(); + + // Since the client doesn't 
know it can ALPN at first, it will have + // started 3 connections. But, the server above will only handle 1, + // so the unwrapped responses futures show it still worked. + assert_eq!(connects.load(Ordering::SeqCst), 3); + + let res4 = client.get(url.clone()); + rt.block_on(res4).unwrap(); + + // HTTP/2 request allowed + let res5 = client.request( + Request::builder() + .uri(url) + .version(hyper::Version::HTTP_2) + .body(Empty::::new()) + .unwrap(), + ); + rt.block_on(res5).unwrap(); + + assert_eq!( + connects.load(Ordering::SeqCst), + 3, + "after ALPN, no more connects" + ); + drop(client); +} + +#[cfg(not(miri))] +#[test] +fn capture_connection_on_client() { + let _ = pretty_env_logger::try_init(); + + let rt = runtime(); + let connector = DebugConnector::new(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + }); + let mut req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap(); + let captured_conn = capture_connection(&mut req); + rt.block_on(client.request(req)).expect("200 OK"); + assert!(captured_conn.connection_metadata().is_some()); +} + +#[cfg(not(miri))] +#[test] +fn connection_poisoning() { + use std::sync::atomic::AtomicUsize; + + let _ = pretty_env_logger::try_init(); + + let rt = runtime(); + let connector = DebugConnector::new(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + let num_conns: Arc = 
Default::default(); + let num_requests: Arc = Default::default(); + let num_requests_tracker = num_requests.clone(); + let num_conns_tracker = num_conns.clone(); + thread::spawn(move || loop { + let mut sock = server.accept().unwrap().0; + num_conns_tracker.fetch_add(1, Ordering::Relaxed); + let num_requests_tracker = num_requests_tracker.clone(); + thread::spawn(move || { + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + loop { + if sock.read(&mut buf).expect("read 1") > 0 { + num_requests_tracker.fetch_add(1, Ordering::Relaxed); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + } + } + }); + }); + let make_request = || { + Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Empty::::new()) + .unwrap() + }; + let mut req = make_request(); + let captured_conn = capture_connection(&mut req); + rt.block_on(client.request(req)).expect("200 OK"); + assert_eq!(num_conns.load(Ordering::SeqCst), 1); + assert_eq!(num_requests.load(Ordering::SeqCst), 1); + + rt.block_on(client.request(make_request())).expect("200 OK"); + rt.block_on(client.request(make_request())).expect("200 OK"); + // Before poisoning the connection is reused + assert_eq!(num_conns.load(Ordering::SeqCst), 1); + assert_eq!(num_requests.load(Ordering::SeqCst), 3); + captured_conn + .connection_metadata() + .as_ref() + .unwrap() + .poison(); + + rt.block_on(client.request(make_request())).expect("200 OK"); + + // After poisoning, a new connection is established + assert_eq!(num_conns.load(Ordering::SeqCst), 2); + assert_eq!(num_requests.load(Ordering::SeqCst), 4); + + rt.block_on(client.request(make_request())).expect("200 OK"); + // another request can still reuse: + assert_eq!(num_conns.load(Ordering::SeqCst), 2); + assert_eq!(num_requests.load(Ordering::SeqCst), 5); +} diff --git a/.cargo-vendor/hyper-util/tests/test_utils/mod.rs 
b/.cargo-vendor/hyper-util/tests/test_utils/mod.rs new file mode 100644 index 0000000000..df3a65d469 --- /dev/null +++ b/.cargo-vendor/hyper-util/tests/test_utils/mod.rs @@ -0,0 +1,175 @@ +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +use futures_channel::mpsc; +use futures_util::task::{Context, Poll}; +use futures_util::Future; +use futures_util::TryFutureExt; +use hyper::Uri; +use tokio::io::{self, AsyncRead, AsyncWrite, ReadBuf}; +use tokio::net::TcpStream; + +use hyper::rt::ReadBufCursor; + +use hyper_util::client::legacy::connect::HttpConnector; +use hyper_util::client::legacy::connect::{Connected, Connection}; +use hyper_util::rt::TokioIo; + +#[derive(Clone)] +pub struct DebugConnector { + pub http: HttpConnector, + pub closes: mpsc::Sender<()>, + pub connects: Arc, + pub is_proxy: bool, + pub alpn_h2: bool, +} + +impl DebugConnector { + pub fn new() -> DebugConnector { + let http = HttpConnector::new(); + let (tx, _) = mpsc::channel(10); + DebugConnector::with_http_and_closes(http, tx) + } + + pub fn with_http_and_closes(http: HttpConnector, closes: mpsc::Sender<()>) -> DebugConnector { + DebugConnector { + http, + closes, + connects: Arc::new(AtomicUsize::new(0)), + is_proxy: false, + alpn_h2: false, + } + } + + pub fn proxy(mut self) -> Self { + self.is_proxy = true; + self + } +} + +impl tower_service::Service for DebugConnector { + type Response = DebugStream; + type Error = >::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // don't forget to check inner service is ready :) + tower_service::Service::::poll_ready(&mut self.http, cx) + } + + fn call(&mut self, dst: Uri) -> Self::Future { + self.connects.fetch_add(1, Ordering::SeqCst); + let closes = self.closes.clone(); + let is_proxy = self.is_proxy; + let is_alpn_h2 = self.alpn_h2; + Box::pin(self.http.call(dst).map_ok(move |tcp| DebugStream { + tcp, + on_drop: closes, + is_alpn_h2, + is_proxy, + })) + } 
+} + +pub struct DebugStream { + tcp: TokioIo, + on_drop: mpsc::Sender<()>, + is_alpn_h2: bool, + is_proxy: bool, +} + +impl Drop for DebugStream { + fn drop(&mut self) { + let _ = self.on_drop.try_send(()); + } +} + +impl Connection for DebugStream { + fn connected(&self) -> Connected { + let connected = self.tcp.connected().proxy(self.is_proxy); + + if self.is_alpn_h2 { + connected.negotiated_h2() + } else { + connected + } + } +} + +impl hyper::rt::Read for DebugStream { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: ReadBufCursor<'_>, + ) -> Poll> { + hyper::rt::Read::poll_read(Pin::new(&mut self.tcp), cx, buf) + } +} + +impl hyper::rt::Write for DebugStream { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + hyper::rt::Write::poll_write(Pin::new(&mut self.tcp), cx, buf) + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + hyper::rt::Write::poll_flush(Pin::new(&mut self.tcp), cx) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + hyper::rt::Write::poll_shutdown(Pin::new(&mut self.tcp), cx) + } + + fn is_write_vectored(&self) -> bool { + hyper::rt::Write::is_write_vectored(&self.tcp) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + hyper::rt::Write::poll_write_vectored(Pin::new(&mut self.tcp), cx, bufs) + } +} + +impl AsyncWrite for DebugStream { + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(self.tcp.inner_mut()).poll_shutdown(cx) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(self.tcp.inner_mut()).poll_flush(cx) + } + + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(self.tcp.inner_mut()).poll_write(cx, buf) + } +} + +impl AsyncRead for DebugStream { + fn poll_read( 
+ mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + Pin::new(self.tcp.inner_mut()).poll_read(cx, buf) + } +} diff --git a/.cargo-vendor/hyper/.cargo-checksum.json b/.cargo-vendor/hyper/.cargo-checksum.json index 5c8b448d2f..d6e409ced9 100644 --- a/.cargo-vendor/hyper/.cargo-checksum.json +++ b/.cargo-vendor/hyper/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"5287a86e0a967152d6578b885bcfdf76786f09f7fa2d478f295e79e22c9af437","LICENSE":"25dfd9ec24ebbee73dc93e687526cd4c26deae17bc2179ea0fe3e5dc96105b9b","src/body/aggregate.rs":"a4b05d775a7ef7807ce4eb3ccd0592f542398c7e14c876cb07298dc792b076e3","src/body/body.rs":"1fafdc91f9a48c19ec3eaeede79adc1b7c0162bca03efb40f85e9d7f7ed9eb3f","src/body/length.rs":"2d186f50f05b0562db92d3e1d272521c60c9ad259222ccb1a9310b1ff380a64e","src/body/mod.rs":"8098a367c239f05ba443a9b5394449142c6f144ad187f87f8d9f9cde74405b44","src/body/to_bytes.rs":"517077ed79a987c96a89f70a09eba776db5e8b867768da2ca291d28c2c0d70d2","src/cfg.rs":"de5fee5bba45a982c10e8f09fc24677be5494bf6da0b39a3a132f0f6eb3fe41e","src/client/client.rs":"00df0ef99e35d7b3b60c2da6aabee3418f2d35c45a0127ce64893331db040972","src/client/conn.rs":"1358a7b7b43588bd9863f2243ac78381d20018e530e3f8efc58fe4d7df239d04","src/client/conn/http1.rs":"9e4a9f2565f5a4bb8201d5a7321b89b6597b0a05f899da1326b72a9668639e15","src/client/conn/http2.rs":"95f2ae02b8297d48560049a5fd557d374fa6a325ac563996e88e2ae5fff0b4ec","src/client/connect/dns.rs":"98830a9163eae71cbf8d240c6e531ae98d6b51653c0d158fc1c5bddad4f7321e","src/client/connect/http.rs":"6d5b6e0d4f83ecfe66e79d1d4eb72597588af0ba70576faab2e658e3b781c09e","src/client/connect/mod.rs":"83ef7a4d8e8376bfd69321d4646ac439158f3d2c228acbc74dba208bfa0ae938","src/client/dispatch.rs":"39cac7daaf4b473c030d338e48c6fd4e4133742a06a0047ea7153e56def3cbdc","src/client/mod.rs":"d5580cda5e7dc9c5240c72a9ea0005d34b5412c28067ab3fa2301ba4cf49d9fa","src/client/pool.rs":"584f473408059b230adc1a74e80795d27b402e5de16224017157ed06694f7eab","sr
c/client/service.rs":"e61baf9c206da67265c4fefe48fc037d65d93eebfecca68e3dc2215896ef4bd0","src/client/tests.rs":"f7eb2d1dba18e1bd69990f59e61b73a0477db5cc4de68fd64bd7cd36a9406072","src/common/buf.rs":"c762dc2688828ffc88f0549ceddeef65e57d404c245118bcacf3dd8d402bc9cc","src/common/date.rs":"f9a1a63aa7e8f5d6f5800cd56e7e6182cf07e781089930afb529263c1d8adf14","src/common/drain.rs":"058bbcf26dfeb96d7646c69e03b5a5f363b3bcee9afe0a9fe30ea52a9eb995ff","src/common/exec.rs":"c9e24d6c624b9c237bcdea7c59c60c9b8539510ac053cad50e861567cea3e17b","src/common/io/mod.rs":"6f8e4518df7f24d81fc59b46a2deb61557e8d92700bdc62539fe0f034066fc89","src/common/io/rewind.rs":"a708a373d96e7b1444d8497d57f0fe1902456fda8eb3dc42ade3a8f305880625","src/common/lazy.rs":"5cee776d65f28282e556567b433bddb908947df6601218872496ba088c2a7d12","src/common/mod.rs":"363cbf3853ffe6c4258010b17e67babdb8f518fc3cad39dc6588e7ba75587405","src/common/sync_wrapper.rs":"76206c2a52eeb62cdba279d620a4aef52134c6ac782a9f61e741edc8b653cb50","src/common/task.rs":"5a7c2b9255ab023cceedb8e42bd26db7ba8f11498d2f14d4b23a798618cbc920","src/common/watch.rs":"eb6db13fbb266ec11200ffa1f5e7a9a132d7e6555373895efde1e2daea428e03","src/error.rs":"d3f3c8e2303c964904e84a4bf8b93ff6b036c8918bac2bd66edac5fd5967c7e3","src/ext.rs":"19a65a25be9b821ad4088583f712d4d7f388096ec9d69b33f40129c9945afe2c","src/ext/h1_reason_phrase.rs":"e1d18088f942b52dbee766d81e11a9caeadaf368ff12b58d53b97fc62d08876c","src/ffi/body.rs":"941fb8b79097e5a4eec0c611a1cd5db24bed3479f1a14cf754e33d19f6d25854","src/ffi/client.rs":"6b35700e9dec4a9cb40ad3155327bd6fe11165e0cef1874a3916cf96d8b0c7a6","src/ffi/error.rs":"de3d8c1eb3818b438ed28a9dea800dfdac47bf2dd21a7c3e5fc10cb331b6e39f","src/ffi/http_types.rs":"ae25e0fd07ec80e90d5b4f488ce972fe7858f009261fdf16a35b2bd0b1bbdad3","src/ffi/io.rs":"ab176e866c179a589d22e3aa7410612615e60117019d1e589f9732c36a2282da","src/ffi/macros.rs":"8e1fe58244295db1d19aceeb0e9a777fe484ccc20194fae88b54208e0cbeb515","src/ffi/mod.rs":"0e52ae3586c6a960ae68e862561aabcee690a1e23c6d
5d1045fcdc3c74b7fc96","src/ffi/task.rs":"f348cdbe1f1d4e26b339cd9382bb739b0f37aaceb2aa85627b7fda0c6326de56","src/headers.rs":"4d76596bfc90f88fe8b48bb8d0552a215a20c452197ea982b37ba30fa496e007","src/lib.rs":"cff8e513cb2d9611ba30a7a7787fe5220b848819e9f46267a8fe2acaf465ec28","src/mock.rs":"8b455312be74af6c4a27b3beba85a9493c86d43097947c5aad8a5e9dc0dcdbb3","src/proto/h1/conn.rs":"ba7d5bb4875dbd11f622986034cab8eaa2a751235324bf7cf03bea20c66f9f00","src/proto/h1/decode.rs":"ac06e4fb3b0bf07907581949ad361e4ba05559fd8add253b90bd183cfb09e34f","src/proto/h1/dispatch.rs":"da3a986e8e0d255bedac48109a31921b3faf575c6821d5f0f60dd06a24900f75","src/proto/h1/encode.rs":"3a360356431ff3f011226cf81fb4eeb63cfe9ca25140e41e2e3907f6d64725f9","src/proto/h1/io.rs":"321d845a497eb4761dbd4eedb994ae9d6e5aca7faabf3b05e83eb35cb4ebf214","src/proto/h1/mod.rs":"61ec22d19567116aadc365ca424c958744b058b55d2f064b9a74ee88b126c7be","src/proto/h1/role.rs":"f672ed78abda4605cd27cc390ff16ce715093af0c4edba9bb221ea0aedddcfe4","src/proto/h2/client.rs":"5862ca7bc2847f58ed5f57464f8eb74abe3fe89afe4bd632e575a3c51b8a8744","src/proto/h2/mod.rs":"1f3f157aaef6d91b7af8abea7f76ab1c49ee2038b71027c83f83a2648786fafc","src/proto/h2/ping.rs":"1ea4daea2317a72958879a89baecdea02fb7ab238083606461400ed9e3df0c83","src/proto/h2/server.rs":"705f8ecea99dbf5fe74188ba4f5fa2ea22c252fc443eed51171a89f845dc729d","src/proto/mod.rs":"1a935a3da830131f848a6a64c049c559ce07e6b0012fd6e4002bb365f562ebeb","src/rt.rs":"1ef7d4bb3ad6637c6f37189e30d740c061a3c98ca008d266a99f865130909427","src/server/accept.rs":"07b9b520fbf7d8f472455412f359afdd7713fb408f88dbc050585249023fc075","src/server/conn.rs":"25e1b5dfbd74fd62b41800c49a9492f0f74bc20285c3783a6a2f8a1a9ffbb4cd","src/server/conn/http1.rs":"af364abcd92aa78e05af83c9c606de66447170dc73276896294c4257e903a047","src/server/conn/http2.rs":"73bb19450798d804bf1b76e8ac495f9cdbad3453c009756b66f06c1e47f2f664","src/server/mod.rs":"2375370854ac8ae5b80378aa1e29bc781c50aad1d1150c32515393a3316b745c","src/server/server.rs":"5294facd
d9abae7a2bc9a7eb7ce1521437780a0f6505fee44e8aa8a2dd909e5e","src/server/server_stub.rs":"ab443f51ede637e0b0c08f36fbc143a34935102af2921edcc257660eeaad4537","src/server/shutdown.rs":"45bf03fc9314873572775fb4ea336230340108239c88f2cd2b435759ad8c693c","src/server/tcp.rs":"8ed09df1ccfb59d0c9ff2561acd0f3d5e2a03929c960f6923e208e08fb5f1806","src/service/http.rs":"ac930efc71bcecc904fa65a44af254501ce8abd6f7d36e591b907eee45e77979","src/service/make.rs":"ee11adc469796427c7b694551d0bbda69f732536079aee53e3ef7f9be4385b2d","src/service/mod.rs":"92c05f08a175fb847868a02e7aca96176df1237458d40a17a7a6aa377476df90","src/service/oneshot.rs":"3ac3f0c7c20fcc3790cef868ca7a70c87a36687ae28c44e0c9978d2e514e4b22","src/service/util.rs":"67f5b4373a4d705a7277cda717b5f3a3ebd00365446e2beb60c1d6193d071a85","src/upgrade.rs":"d6c68680ad74ebbd6ff74ea28b52704c14d4547c67520048f7f84cfe03d11f94"},"package":"a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9"} \ No newline at end of file +{"files":{"Cargo.toml":"5ff0cd4d724294a8e56215c298efbf590edbc087c688b1e9330ee3c9d329a035","LICENSE":"25dfd9ec24ebbee73dc93e687526cd4c26deae17bc2179ea0fe3e5dc96105b9b","src/body/incoming.rs":"6860ea00a996aa54b3745cffecad0d426f83548fbb70215f5c704e656e6fe2e1","src/body/length.rs":"fcf245cd9d46d5742c5745db3f643ac2f4e67a5165aed60550689ed0731d4ffc","src/body/mod.rs":"6e029d258d08b35a2f825235842da2f4d98fe90a9543851516382f019cfa4b8e","src/cfg.rs":"de5fee5bba45a982c10e8f09fc24677be5494bf6da0b39a3a132f0f6eb3fe41e","src/client/conn/http1.rs":"31501a15a7e26122ab8524b78d3a7f4a16de76af42a3ff75c1bca4050cbad493","src/client/conn/http2.rs":"f9165e1b6b74595853efd5d9f583c6799e3d742fe4071f0678e98caa17ef1ff2","src/client/conn/mod.rs":"9a3a11f287ac5f8a2eb2f27af4d66cf201f92dc580f773f9cb5495f32756ee28","src/client/dispatch.rs":"179b95de203f9546bcb3e9e2e4852b2bf61128800cd76c300ab923670148eb70","src/client/mod.rs":"3428a487d81d2a46742493fb07b0b852d0868acf6136c77cc4a5c1aeeda7c311","src/client/tests.rs":"de0001609ef0b82d34d6e152b20d19f0b2f79
ab6708bc4b3748a40acd3325238","src/common/buf.rs":"6ffe7941d14edbdd4d20e18e4c1da225161eb0d89ae807476d7b4282519bac7c","src/common/date.rs":"3fc169163f142a17f3bc883861bec65333710d478275881d1a9b792f832dbf91","src/common/io/compat.rs":"e3e9333b8c1862c61239fef6fc8aae6b39eebcfe85393e8b9906bca8b75326a0","src/common/io/mod.rs":"1f9002411f8a74be746e86d7a6afa5a7c3bdfe7c75943bed5ac752b802b1594d","src/common/io/rewind.rs":"1e15c9b837bd96753bbaf6470681d032a3e324c80ce8bbbab16a8d67b54654ec","src/common/mod.rs":"5827c350a2ba5716ae2a30a3b1658db552c7ff974c9a8e372ebf171e974fb0a4","src/common/task.rs":"09d2f914586f8ad42f92ba8149a8757c8cbd094467a1519f2d26906b977616f7","src/common/time.rs":"cc87655c71eb6ddfaeb90cb3efcbdc3aa08cff1bcbe588a6cfde442b76b7417a","src/common/watch.rs":"eb6db13fbb266ec11200ffa1f5e7a9a132d7e6555373895efde1e2daea428e03","src/error.rs":"106944bc1acb12667cf0275a406beb1d5a39c21bdc349d33458abfe3e55dd922","src/ext/h1_reason_phrase.rs":"2de78dddac10e712f06785a2455e350e7ec38dd104761d1c56a0863681ed85b4","src/ext/mod.rs":"417f83d532c890d1ac93c13d0f3893dffcb48362205f73f0bdd645878f45f253","src/ffi/body.rs":"bc328c59bad8f472529ab8187574b2ae13a2dbdba18261b5b3d2fb8a5ef6ae06","src/ffi/client.rs":"562c61a9cfa8949dc956b963db9fb6c68ca94a63ec0b54663cba52eb56f1c6a5","src/ffi/error.rs":"164096784cdb0cef22e668beef520a32ec80c0e59142574e04b83a2505568f12","src/ffi/http_types.rs":"f561abc00c1cc938568911e143722885e1f6e565063ab263ea83722ec21fc8ab","src/ffi/io.rs":"12c170a10cdc8c3142ffad096344d5356b060793beec87bc0db327cf8e79545d","src/ffi/macros.rs":"8e1fe58244295db1d19aceeb0e9a777fe484ccc20194fae88b54208e0cbeb515","src/ffi/mod.rs":"57fda6d935cc6f2e24b59d974a7178096521dfaaa89af8b93ec7bb98644e3bcb","src/ffi/task.rs":"1c920bc332115ed7bd985be6ca1e4dfaf120a37916ff077870f98c0ab045a6a3","src/headers.rs":"43305ee388d59a3712e78e0c108c788999448600d0f5284e9361afc6f11eb679","src/lib.rs":"54f5228c9857e1af3d3ba278e749880c3129c41be0d28c16dcd9885de48632fb","src/mock.rs":"8b455312be74af6c4a27b3beba85a9493c86d4
3097947c5aad8a5e9dc0dcdbb3","src/proto/h1/conn.rs":"99a4ae046ff5fec70217b449f1c95d794d4a50ef7dbd1d9d011aea66c3beadab","src/proto/h1/decode.rs":"5c6da9fd282c7d7a464d3bbc2bf8c48bc42ff0528a2a1e603d054bbc374fc3b2","src/proto/h1/dispatch.rs":"5df77db15782f925c42e748584fb8db7a9795a5188fb4d08f1904d3ed56ec457","src/proto/h1/encode.rs":"545298eb173871d1dca34b2ada2b602415b4409e9fbac4a92c1d04a8c7dee697","src/proto/h1/io.rs":"7d4d5411a9c38de972aa77171603c3ee3d68df99e00f49016e3ab34fb51edc0a","src/proto/h1/mod.rs":"e4ed9051b65892e1dc37ea630f7ab765caebbfbca757d032c4360442b3223c37","src/proto/h1/role.rs":"5081b558996cabca00d396756b299853cfb099529ada30dae46cb023b1ab98ca","src/proto/h2/client.rs":"b22905f3d8ec213d3c8fcf9208a5169db0b39b98de4f900f92b9fa092210eee3","src/proto/h2/mod.rs":"bc92681b36a5666208fb5c4110412b9948ec21ff482c0a3e2d7f049add44efce","src/proto/h2/ping.rs":"505074c096c8edc85fd1da567b0ffb5036ecf742189cb20b936f620754c93f75","src/proto/h2/server.rs":"8b676c941e1f27b5f981d56f278d53e8d3e4e953e06d31e5ab592ddddfeb6c6b","src/proto/mod.rs":"075880551f7ad6daf9a863d6707d116ff22c37bd18bcfa2d272d185c1853a9c3","src/rt/bounds.rs":"3c75b9039a57d31bb92b12c3aa4597beb09273103e1759ed2a28ad133fa72444","src/rt/io.rs":"7a7a02dd96091e6e48e0594bd80b9b76ef5e0d850cd5535a16dd8edae8d29937","src/rt/mod.rs":"1452a0b001d5895c9a1d3c57002877ba583c988199e54f84a8b3d9cbbc80e5c3","src/rt/timer.rs":"7c9ed432ef5b727ef1963dcf90ceed502044411fa705355740c39ad0f33520a0","src/server/conn/http1.rs":"d336d2f77d12450921548c0b6c7ff55143bf81d01ef5b631e202ee16cfe432ee","src/server/conn/http2.rs":"2bd02bec68a527f3f9dd58d57fac745b1ce7350cfd9392324c4fc9321ed0c110","src/server/conn/mod.rs":"b2393dc0d68c09b560f7c9bcc61ed3bf99fce4d746eda9e1ad158891ee56f2be","src/server/mod.rs":"ffe7729eba251a89d24d605d889dfdb32020203f337e88de0bacb19317e0ea9c","src/service/http.rs":"a1cae019c6f542ac7ce84f3324a3fe4de3daee43fda53eca0a7ba3e77e73d13b","src/service/mod.rs":"de143e994e0021c1d54a7214f1748712e166d37a6496e783ee589956677ce034","src/ser
vice/service.rs":"b8af271802094a72ef07148433bec24c154b32cc575fca12b6e43bacf0ec0c4a","src/service/util.rs":"7d2fcf8701722456979edf7e8431efee235f838c6f70f66c28ce8e3a36d514b6","src/trace.rs":"a766c472433c7569440862e978eceeea78220af170220e9fdc90b71bab731344","src/upgrade.rs":"3cbe17514801d6850a2d4d9a5f2d30fa8781d6c84541a78128d56d65cf9d2589"},"package":"50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05"} \ No newline at end of file diff --git a/.cargo-vendor/hyper/Cargo.toml b/.cargo-vendor/hyper/Cargo.toml index 95bca19bb3..831c1fb83e 100644 --- a/.cargo-vendor/hyper/Cargo.toml +++ b/.cargo-vendor/hyper/Cargo.toml @@ -10,9 +10,10 @@ # See Cargo.toml.orig for the original contents. [package] -edition = "2018" +edition = "2021" +rust-version = "1.63" name = "hyper" -version = "0.14.30" +version = "1.4.1" authors = ["Sean McArthur "] build = false include = [ @@ -45,12 +46,13 @@ repository = "https://github.com/hyperium/hyper" features = [ "ffi", "full", + "tracing", ] rustdoc-args = [ - "--cfg", - "docsrs", "--cfg", "hyper_unstable_ffi", + "--cfg", + "hyper_unstable_tracing", ] [package.metadata.playground] @@ -69,37 +71,42 @@ name = "hyper" path = "src/lib.rs" [dependencies.bytes] -version = "1" +version = "1.2" [dependencies.futures-channel] version = "0.3" - -[dependencies.futures-core] -version = "0.3" -default-features = false +optional = true [dependencies.futures-util] version = "0.3" +optional = true default-features = false [dependencies.h2] -version = "0.3.24" +version = "0.4.2" optional = true [dependencies.http] -version = "0.2" +version = "1" [dependencies.http-body] -version = "0.4" +version = "1" + +[dependencies.http-body-util] +version = "0.1" +optional = true [dependencies.httparse] version = "1.8" +optional = true [dependencies.httpdate] version = "1.0" +optional = true [dependencies.itoa] version = "1" +optional = true [dependencies.libc] version = "0.2" @@ -107,40 +114,50 @@ optional = true [dependencies.pin-project-lite] version = 
"0.2.4" +optional = true -[dependencies.socket2] -version = ">=0.4.7, <0.6.0" -features = ["all"] +[dependencies.smallvec] +version = "1.12" +features = [ + "const_generics", + "const_new", +] optional = true [dependencies.tokio] -version = "1.27" +version = "1" features = ["sync"] -[dependencies.tower-service] -version = "0.3" - [dependencies.tracing] version = "0.1" features = ["std"] +optional = true default-features = false [dependencies.want] version = "0.3" +optional = true + +[dev-dependencies.form_urlencoded] +version = "1" + +[dev-dependencies.futures-channel] +version = "0.3" +features = ["sink"] [dev-dependencies.futures-util] version = "0.3" -features = ["alloc"] +features = [ + "alloc", + "sink", +] default-features = false -[dev-dependencies.matches] +[dev-dependencies.http-body-util] version = "0.1" -[dev-dependencies.num_cpus] -version = "1.0" - [dev-dependencies.pretty_env_logger] -version = "0.4" +version = "0.5" [dev-dependencies.serde] version = "1.0" @@ -153,10 +170,11 @@ version = "1.0" version = "0.3" [dev-dependencies.tokio] -version = "1.27" +version = "1" features = [ "fs", "macros", + "net", "io-std", "io-util", "rt", @@ -170,51 +188,49 @@ features = [ version = "0.4" [dev-dependencies.tokio-util] -version = "0.7" -features = ["codec"] - -[dev-dependencies.tower] -version = "0.4" -features = [ - "make", - "util", -] -default-features = false - -[dev-dependencies.url] -version = "2.2" +version = "0.7.10" [features] -__internal_happy_eyeballs_tests = [] -backports = [] -client = [] +client = [ + "dep:want", + "dep:pin-project-lite", + "dep:smallvec", +] default = [] -deprecated = [] -ffi = ["libc"] +ffi = [ + "dep:libc", + "dep:http-body-util", + "futures-util?/alloc", +] full = [ "client", "http1", "http2", "server", - "stream", - "runtime", ] -http1 = [] -http2 = ["h2"] +http1 = [ + "dep:futures-channel", + "dep:futures-util", + "dep:httparse", + "dep:itoa", +] +http2 = [ + "dep:futures-channel", + "dep:futures-util", + "dep:h2", +] 
nightly = [] -runtime = [ - "tcp", - "tokio/rt", - "tokio/time", +server = [ + "dep:httpdate", + "dep:pin-project-lite", + "dep:smallvec", ] -server = [] -stream = [] -tcp = [ - "socket2", - "tokio/net", - "tokio/rt", - "tokio/time", +tracing = ["dep:tracing"] + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = [ + "cfg(hyper_unstable_tracing)", + "cfg(hyper_unstable_ffi)", ] - -[target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies.pnet_datalink] -version = "0.27.2" diff --git a/.cargo-vendor/hyper/src/body/incoming.rs b/.cargo-vendor/hyper/src/body/incoming.rs new file mode 100644 index 0000000000..dcfb71d53a --- /dev/null +++ b/.cargo-vendor/hyper/src/body/incoming.rs @@ -0,0 +1,617 @@ +use std::fmt; +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use bytes::Bytes; +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +use futures_channel::{mpsc, oneshot}; +#[cfg(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") +))] +use futures_util::ready; +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +use futures_util::{stream::FusedStream, Stream}; // for mpsc::Receiver +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +use http::HeaderMap; +use http_body::{Body, Frame, SizeHint}; + +#[cfg(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") +))] +use super::DecodedLength; +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +use crate::common::watch; +#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] +use crate::proto::h2::ping; + +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +type BodySender = mpsc::Sender>; +#[cfg(all(feature = "http1", any(feature = "client", feature = 
"server")))] +type TrailersSender = oneshot::Sender; + +/// A stream of `Bytes`, used when receiving bodies from the network. +/// +/// Note that Users should not instantiate this struct directly. When working with the hyper client, +/// `Incoming` is returned to you in responses. Similarly, when operating with the hyper server, +/// it is provided within requests. +/// +/// # Examples +/// +/// ```rust,ignore +/// async fn echo( +/// req: Request, +/// ) -> Result>, hyper::Error> { +/// //Here, you can process `Incoming` +/// } +/// ``` +#[must_use = "streams do nothing unless polled"] +pub struct Incoming { + kind: Kind, +} + +enum Kind { + Empty, + #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] + Chan { + content_length: DecodedLength, + want_tx: watch::Sender, + data_rx: mpsc::Receiver>, + trailers_rx: oneshot::Receiver, + }, + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] + H2 { + content_length: DecodedLength, + data_done: bool, + ping: ping::Recorder, + recv: h2::RecvStream, + }, + #[cfg(feature = "ffi")] + Ffi(crate::ffi::UserBody), +} + +/// A sender half created through [`Body::channel()`]. +/// +/// Useful when wanting to stream chunks from another thread. +/// +/// ## Body Closing +/// +/// Note that the request body will always be closed normally when the sender is dropped (meaning +/// that the empty terminating chunk will be sent to the remote). If you desire to close the +/// connection with an incomplete response (e.g. in the case of an error during asynchronous +/// processing), call the [`Sender::abort()`] method to abort the body in an abnormal fashion. 
+/// +/// [`Body::channel()`]: struct.Body.html#method.channel +/// [`Sender::abort()`]: struct.Sender.html#method.abort +#[must_use = "Sender does nothing unless sent on"] +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +pub(crate) struct Sender { + want_rx: watch::Receiver, + data_tx: BodySender, + trailers_tx: Option, +} + +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +const WANT_PENDING: usize = 1; +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +const WANT_READY: usize = 2; + +impl Incoming { + /// Create a `Body` stream with an associated sender half. + /// + /// Useful when wanting to stream chunks from another thread. + #[inline] + #[cfg(test)] + pub(crate) fn channel() -> (Sender, Incoming) { + Self::new_channel(DecodedLength::CHUNKED, /*wanter =*/ false) + } + + #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] + pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Incoming) { + let (data_tx, data_rx) = mpsc::channel(0); + let (trailers_tx, trailers_rx) = oneshot::channel(); + + // If wanter is true, `Sender::poll_ready()` won't becoming ready + // until the `Body` has been polled for data once. 
+ let want = if wanter { WANT_PENDING } else { WANT_READY }; + + let (want_tx, want_rx) = watch::channel(want); + + let tx = Sender { + want_rx, + data_tx, + trailers_tx: Some(trailers_tx), + }; + let rx = Incoming::new(Kind::Chan { + content_length, + want_tx, + data_rx, + trailers_rx, + }); + + (tx, rx) + } + + fn new(kind: Kind) -> Incoming { + Incoming { kind } + } + + #[allow(dead_code)] + pub(crate) fn empty() -> Incoming { + Incoming::new(Kind::Empty) + } + + #[cfg(feature = "ffi")] + pub(crate) fn ffi() -> Incoming { + Incoming::new(Kind::Ffi(crate::ffi::UserBody::new())) + } + + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] + pub(crate) fn h2( + recv: h2::RecvStream, + mut content_length: DecodedLength, + ping: ping::Recorder, + ) -> Self { + // If the stream is already EOS, then the "unknown length" is clearly + // actually ZERO. + if !content_length.is_exact() && recv.is_end_stream() { + content_length = DecodedLength::ZERO; + } + + Incoming::new(Kind::H2 { + data_done: false, + ping, + content_length, + recv, + }) + } + + #[cfg(feature = "ffi")] + pub(crate) fn as_ffi_mut(&mut self) -> &mut crate::ffi::UserBody { + match self.kind { + Kind::Ffi(ref mut body) => return body, + _ => { + self.kind = Kind::Ffi(crate::ffi::UserBody::new()); + } + } + + match self.kind { + Kind::Ffi(ref mut body) => body, + _ => unreachable!(), + } + } +} + +impl Body for Incoming { + type Data = Bytes; + type Error = crate::Error; + + fn poll_frame( + #[cfg_attr( + not(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") + )), + allow(unused_mut) + )] + mut self: Pin<&mut Self>, + #[cfg_attr( + not(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") + )), + allow(unused_variables) + )] + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.kind { + Kind::Empty => Poll::Ready(None), + #[cfg(all(feature = "http1", any(feature = "client", feature = 
"server")))] + Kind::Chan { + content_length: ref mut len, + ref mut data_rx, + ref mut want_tx, + ref mut trailers_rx, + } => { + want_tx.send(WANT_READY); + + if !data_rx.is_terminated() { + if let Some(chunk) = ready!(Pin::new(data_rx).poll_next(cx)?) { + len.sub_if(chunk.len() as u64); + return Poll::Ready(Some(Ok(Frame::data(chunk)))); + } + } + + // check trailers after data is terminated + match ready!(Pin::new(trailers_rx).poll(cx)) { + Ok(t) => Poll::Ready(Some(Ok(Frame::trailers(t)))), + Err(_) => Poll::Ready(None), + } + } + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] + Kind::H2 { + ref mut data_done, + ref ping, + recv: ref mut h2, + content_length: ref mut len, + } => { + if !*data_done { + match ready!(h2.poll_data(cx)) { + Some(Ok(bytes)) => { + let _ = h2.flow_control().release_capacity(bytes.len()); + len.sub_if(bytes.len() as u64); + ping.record_data(bytes.len()); + return Poll::Ready(Some(Ok(Frame::data(bytes)))); + } + Some(Err(e)) => { + return match e.reason() { + // These reasons should cause the body reading to stop, but not fail it. + // The same logic as for `Read for H2Upgraded` is applied here. + Some(h2::Reason::NO_ERROR) | Some(h2::Reason::CANCEL) => { + Poll::Ready(None) + } + _ => Poll::Ready(Some(Err(crate::Error::new_body(e)))), + }; + } + None => { + *data_done = true; + // fall through to trailers + } + } + } + + // after data, check trailers + match ready!(h2.poll_trailers(cx)) { + Ok(t) => { + ping.record_non_data(); + Poll::Ready(Ok(t.map(Frame::trailers)).transpose()) + } + Err(e) => Poll::Ready(Some(Err(crate::Error::new_h2(e)))), + } + } + + #[cfg(feature = "ffi")] + Kind::Ffi(ref mut body) => body.poll_data(cx), + } + } + + fn is_end_stream(&self) -> bool { + match self.kind { + Kind::Empty => true, + #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] + Kind::Chan { content_length, .. 
} => content_length == DecodedLength::ZERO, + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] + Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(), + #[cfg(feature = "ffi")] + Kind::Ffi(..) => false, + } + } + + fn size_hint(&self) -> SizeHint { + #[cfg(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") + ))] + fn opt_len(decoded_length: DecodedLength) -> SizeHint { + if let Some(content_length) = decoded_length.into_opt() { + SizeHint::with_exact(content_length) + } else { + SizeHint::default() + } + } + + match self.kind { + Kind::Empty => SizeHint::with_exact(0), + #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] + Kind::Chan { content_length, .. } => opt_len(content_length), + #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] + Kind::H2 { content_length, .. } => opt_len(content_length), + #[cfg(feature = "ffi")] + Kind::Ffi(..) => SizeHint::default(), + } + } +} + +impl fmt::Debug for Incoming { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + #[cfg(any( + all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") + ), + feature = "ffi" + ))] + #[derive(Debug)] + struct Streaming; + #[derive(Debug)] + struct Empty; + + let mut builder = f.debug_tuple("Body"); + match self.kind { + Kind::Empty => builder.field(&Empty), + #[cfg(any( + all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") + ), + feature = "ffi" + ))] + _ => builder.field(&Streaming), + }; + + builder.finish() + } +} + +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +impl Sender { + /// Check to see if this `Sender` can send more data. 
+ pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // Check if the receiver end has tried polling for the body yet + ready!(self.poll_want(cx)?); + self.data_tx + .poll_ready(cx) + .map_err(|_| crate::Error::new_closed()) + } + + fn poll_want(&mut self, cx: &mut Context<'_>) -> Poll> { + match self.want_rx.load(cx) { + WANT_READY => Poll::Ready(Ok(())), + WANT_PENDING => Poll::Pending, + watch::CLOSED => Poll::Ready(Err(crate::Error::new_closed())), + unexpected => unreachable!("want_rx value: {}", unexpected), + } + } + + #[cfg(test)] + async fn ready(&mut self) -> crate::Result<()> { + futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await + } + + /// Send data on data channel when it is ready. + #[cfg(test)] + #[allow(unused)] + pub(crate) async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> { + self.ready().await?; + self.data_tx + .try_send(Ok(chunk)) + .map_err(|_| crate::Error::new_closed()) + } + + /// Send trailers on trailers channel. + #[allow(unused)] + pub(crate) async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> { + let tx = match self.trailers_tx.take() { + Some(tx) => tx, + None => return Err(crate::Error::new_closed()), + }; + tx.send(trailers).map_err(|_| crate::Error::new_closed()) + } + + /// Try to send data on this channel. + /// + /// # Errors + /// + /// Returns `Err(Bytes)` if the channel could not (currently) accept + /// another `Bytes`. + /// + /// # Note + /// + /// This is mostly useful for when trying to send from some other thread + /// that doesn't have an async context. If in an async context, prefer + /// `send_data()` instead. 
+ #[cfg(feature = "http1")] + pub(crate) fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> { + self.data_tx + .try_send(Ok(chunk)) + .map_err(|err| err.into_inner().expect("just sent Ok")) + } + + #[cfg(feature = "http1")] + pub(crate) fn try_send_trailers( + &mut self, + trailers: HeaderMap, + ) -> Result<(), Option> { + let tx = match self.trailers_tx.take() { + Some(tx) => tx, + None => return Err(None), + }; + + tx.send(trailers).map_err(Some) + } + + #[cfg(test)] + pub(crate) fn abort(mut self) { + self.send_error(crate::Error::new_body_write_aborted()); + } + + pub(crate) fn send_error(&mut self, err: crate::Error) { + let _ = self + .data_tx + // clone so the send works even if buffer is full + .clone() + .try_send(Err(err)); + } +} + +#[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] +impl fmt::Debug for Sender { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + #[derive(Debug)] + struct Open; + #[derive(Debug)] + struct Closed; + + let mut builder = f.debug_tuple("Sender"); + match self.want_rx.peek() { + watch::CLOSED => builder.field(&Closed), + _ => builder.field(&Open), + }; + + builder.finish() + } +} + +#[cfg(test)] +mod tests { + use std::mem; + use std::task::Poll; + + use super::{Body, DecodedLength, Incoming, Sender, SizeHint}; + use http_body_util::BodyExt; + + #[test] + fn test_size_of() { + // These are mostly to help catch *accidentally* increasing + // the size by too much. 
+ + let body_size = mem::size_of::(); + let body_expected_size = mem::size_of::() * 5; + assert!( + body_size <= body_expected_size, + "Body size = {} <= {}", + body_size, + body_expected_size, + ); + + //assert_eq!(body_size, mem::size_of::>(), "Option"); + + assert_eq!( + mem::size_of::(), + mem::size_of::() * 5, + "Sender" + ); + + assert_eq!( + mem::size_of::(), + mem::size_of::>(), + "Option" + ); + } + + #[test] + fn size_hint() { + fn eq(body: Incoming, b: SizeHint, note: &str) { + let a = body.size_hint(); + assert_eq!(a.lower(), b.lower(), "lower for {:?}", note); + assert_eq!(a.upper(), b.upper(), "upper for {:?}", note); + } + + eq(Incoming::empty(), SizeHint::with_exact(0), "empty"); + + eq(Incoming::channel().1, SizeHint::new(), "channel"); + + eq( + Incoming::new_channel(DecodedLength::new(4), /*wanter =*/ false).1, + SizeHint::with_exact(4), + "channel with length", + ); + } + + #[cfg(not(miri))] + #[tokio::test] + async fn channel_abort() { + let (tx, mut rx) = Incoming::channel(); + + tx.abort(); + + let err = rx.frame().await.unwrap().unwrap_err(); + assert!(err.is_body_write_aborted(), "{:?}", err); + } + + #[cfg(all(not(miri), feature = "http1"))] + #[tokio::test] + async fn channel_abort_when_buffer_is_full() { + let (mut tx, mut rx) = Incoming::channel(); + + tx.try_send_data("chunk 1".into()).expect("send 1"); + // buffer is full, but can still send abort + tx.abort(); + + let chunk1 = rx + .frame() + .await + .expect("item 1") + .expect("chunk 1") + .into_data() + .unwrap(); + assert_eq!(chunk1, "chunk 1"); + + let err = rx.frame().await.unwrap().unwrap_err(); + assert!(err.is_body_write_aborted(), "{:?}", err); + } + + #[cfg(feature = "http1")] + #[test] + fn channel_buffers_one() { + let (mut tx, _rx) = Incoming::channel(); + + tx.try_send_data("chunk 1".into()).expect("send 1"); + + // buffer is now full + let chunk2 = tx.try_send_data("chunk 2".into()).expect_err("send 2"); + assert_eq!(chunk2, "chunk 2"); + } + + #[cfg(not(miri))] + 
#[tokio::test] + async fn channel_empty() { + let (_, mut rx) = Incoming::channel(); + + assert!(rx.frame().await.is_none()); + } + + #[test] + fn channel_ready() { + let (mut tx, _rx) = Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ false); + + let mut tx_ready = tokio_test::task::spawn(tx.ready()); + + assert!(tx_ready.poll().is_ready(), "tx is ready immediately"); + } + + #[test] + fn channel_wanter() { + let (mut tx, mut rx) = + Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ true); + + let mut tx_ready = tokio_test::task::spawn(tx.ready()); + let mut rx_data = tokio_test::task::spawn(rx.frame()); + + assert!( + tx_ready.poll().is_pending(), + "tx isn't ready before rx has been polled" + ); + + assert!(rx_data.poll().is_pending(), "poll rx.data"); + assert!(tx_ready.is_woken(), "rx poll wakes tx"); + + assert!( + tx_ready.poll().is_ready(), + "tx is ready after rx has been polled" + ); + } + + #[test] + fn channel_notices_closure() { + let (mut tx, rx) = Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ true); + + let mut tx_ready = tokio_test::task::spawn(tx.ready()); + + assert!( + tx_ready.poll().is_pending(), + "tx isn't ready before rx has been polled" + ); + + drop(rx); + assert!(tx_ready.is_woken(), "dropping rx wakes tx"); + + match tx_ready.poll() { + Poll::Ready(Err(ref e)) if e.is_closed() => (), + unexpected => panic!("tx poll ready unexpected: {:?}", unexpected), + } + } +} diff --git a/.cargo-vendor/hyper/src/body/length.rs b/.cargo-vendor/hyper/src/body/length.rs index e2bbee8039..e5eab7449f 100644 --- a/.cargo-vendor/hyper/src/body/length.rs +++ b/.cargo-vendor/hyper/src/body/length.rs @@ -15,11 +15,11 @@ impl From> for DecodedLength { } #[cfg(any(feature = "http1", feature = "http2", test))] -const MAX_LEN: u64 = std::u64::MAX - 2; +const MAX_LEN: u64 = u64::MAX - 2; impl DecodedLength { - pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(::std::u64::MAX); - pub(crate) const CHUNKED: 
DecodedLength = DecodedLength(::std::u64::MAX - 1); + pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(u64::MAX); + pub(crate) const CHUNKED: DecodedLength = DecodedLength(u64::MAX - 1); pub(crate) const ZERO: DecodedLength = DecodedLength(0); #[cfg(test)] @@ -33,13 +33,17 @@ impl DecodedLength { /// Should only be called if previously confirmed this isn't /// CLOSE_DELIMITED or CHUNKED. #[inline] - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(crate) fn danger_len(self) -> u64 { debug_assert!(self.0 < Self::CHUNKED.0); self.0 } /// Converts to an Option representing a Known or Unknown length. + #[cfg(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") + ))] pub(crate) fn into_opt(self) -> Option { match self { DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None, @@ -50,8 +54,6 @@ impl DecodedLength { /// Checks the `u64` is within the maximum allowed for content-length. #[cfg(any(feature = "http1", feature = "http2"))] pub(crate) fn checked_new(len: u64) -> Result { - use tracing::warn; - if len <= MAX_LEN { Ok(DecodedLength(len)) } else { @@ -60,6 +62,10 @@ impl DecodedLength { } } + #[cfg(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") + ))] pub(crate) fn sub_if(&mut self, amt: u64) { match *self { DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (), @@ -74,7 +80,7 @@ impl DecodedLength { /// This includes 0, which of course is an exact known length. /// /// It would return false if "chunked" or otherwise size-unknown. 
- #[cfg(feature = "http2")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] pub(crate) fn is_exact(&self) -> bool { self.0 <= MAX_LEN } diff --git a/.cargo-vendor/hyper/src/body/mod.rs b/.cargo-vendor/hyper/src/body/mod.rs index 109b1e6b72..7b71d98be4 100644 --- a/.cargo-vendor/hyper/src/body/mod.rs +++ b/.cargo-vendor/hyper/src/body/mod.rs @@ -7,61 +7,44 @@ //! //! There are two pieces to this in hyper: //! -//! - **The [`HttpBody`](HttpBody) trait** describes all possible bodies. -//! hyper allows any body type that implements `HttpBody`, allowing +//! - **The [`Body`] trait** describes all possible bodies. +//! hyper allows any body type that implements `Body`, allowing //! applications to have fine-grained control over their streaming. -//! - **The [`Body`](Body) concrete type**, which is an implementation of -//! `HttpBody`, and returned by hyper as a "receive stream" (so, for server -//! requests and client responses). It is also a decent default implementation -//! if you don't have very custom needs of your send streams. +//! - **The [`Incoming`] concrete type**, which is an implementation +//! of `Body`, and returned by hyper as a "receive stream" (so, for server +//! requests and client responses). +//! +//! There are additional implementations available in [`http-body-util`][], +//! such as a `Full` or `Empty` body. +//! +//! 
[`http-body-util`]: https://docs.rs/http-body-util pub use bytes::{Buf, Bytes}; -pub use http_body::Body as HttpBody; +pub use http_body::Body; +pub use http_body::Frame; pub use http_body::SizeHint; -#[cfg_attr(feature = "deprecated", allow(deprecated))] -pub use self::aggregate::aggregate; -pub use self::body::{Body, Sender}; +pub use self::incoming::Incoming; + +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] +pub(crate) use self::incoming::Sender; +#[cfg(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") +))] pub(crate) use self::length::DecodedLength; -#[cfg_attr(feature = "deprecated", allow(deprecated))] -pub use self::to_bytes::to_bytes; -mod aggregate; -mod body; +mod incoming; +#[cfg(all( + any(feature = "http1", feature = "http2"), + any(feature = "client", feature = "server") +))] mod length; -mod to_bytes; - -/// An optimization to try to take a full body if immediately available. -/// -/// This is currently limited to *only* `hyper::Body`s. -#[cfg(feature = "http1")] -pub(crate) fn take_full_data(body: &mut T) -> Option { - use std::any::{Any, TypeId}; - - // This static type check can be optimized at compile-time. - if TypeId::of::() == TypeId::of::() { - let mut full = (body as &mut dyn Any) - .downcast_mut::() - .expect("must be Body") - .take_full_data(); - // This second cast is required to make the type system happy. - // Without it, the compiler cannot reason that the type is actually - // `T::Data`. Oh wells. - // - // It's still a measurable win! 
- (&mut full as &mut dyn Any) - .downcast_mut::>() - .expect("must be T::Data") - .take() - } else { - None - } -} fn _assert_send_sync() { fn _assert_send() {} fn _assert_sync() {} - _assert_send::(); - _assert_sync::(); + _assert_send::(); + _assert_sync::(); } diff --git a/.cargo-vendor/hyper/src/client/conn/http1.rs b/.cargo-vendor/hyper/src/client/conn/http1.rs index 37eda04067..647171e764 100644 --- a/.cargo-vendor/hyper/src/client/conn/http1.rs +++ b/.cargo-vendor/hyper/src/client/conn/http1.rs @@ -3,19 +3,18 @@ use std::error::Error as StdError; use std::fmt; use std::future::Future; -use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; +use crate::rt::{Read, Write}; use bytes::Bytes; +use futures_util::ready; use http::{Request, Response}; use httparse::ParserConfig; -use tokio::io::{AsyncRead, AsyncWrite}; -use super::super::dispatch; -use crate::body::{Body as IncomingBody, HttpBody as Body}; +use super::super::dispatch::{self, TrySendError}; +use crate::body::{Body, Incoming as IncomingBody}; use crate::proto; -use crate::upgrade::Upgraded; type Dispatcher = proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; @@ -30,6 +29,7 @@ pub struct SendRequest { /// This allows taking apart a `Connection` at a later time, in order to /// reclaim the IO object, and additional related pieces. #[derive(Debug)] +#[non_exhaustive] pub struct Parts { /// The original IO object used in the handshake. pub io: T, @@ -42,7 +42,6 @@ pub struct Parts { /// You will want to check for any existing bytes if you plan to continue /// communicating on the IO object. pub read_buf: Bytes, - _inner: (), } /// A future that processes all HTTP state for the IO object. 
@@ -52,15 +51,15 @@ pub struct Parts { #[must_use = "futures do nothing unless polled"] pub struct Connection where - T: AsyncRead + AsyncWrite + Send + 'static, + T: Read + Write, B: Body + 'static, { - inner: Option>, + inner: Dispatcher, } impl Connection where - T: AsyncRead + AsyncWrite + Send + Unpin + 'static, + T: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, { @@ -68,12 +67,8 @@ where /// /// Only works for HTTP/1 connections. HTTP/2 connections will panic. pub fn into_parts(self) -> Parts { - let (io, read_buf, _) = self.inner.expect("already upgraded").into_inner(); - Parts { - io, - read_buf, - _inner: (), - } + let (io, read_buf, _) = self.inner.into_inner(); + Parts { io, read_buf } } /// Poll the connection for completion, but without calling `shutdown` @@ -88,26 +83,27 @@ where /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) /// to work with this function; or use the `without_shutdown` wrapper. pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner - .as_mut() - .expect("algready upgraded") - .poll_without_shutdown(cx) + self.inner.poll_without_shutdown(cx) } /// Prevent shutdown of the underlying IO object at the end of service the request, /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. - pub fn without_shutdown(self) -> impl Future>> { + pub async fn without_shutdown(self) -> crate::Result> { let mut conn = Some(self); futures_util::future::poll_fn(move |cx| -> Poll>> { ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; Poll::Ready(Ok(conn.take().unwrap().into_parts())) }) + .await } } /// A builder to configure an HTTP connection. /// /// After setting options, the builder is used to create a handshake future. +/// +/// **Note**: The default values of options are *not considered stable*. They +/// are subject to change at any time. 
#[derive(Clone, Debug)] pub struct Builder { h09_responses: bool, @@ -115,6 +111,7 @@ pub struct Builder { h1_writev: Option, h1_title_case_headers: bool, h1_preserve_header_case: bool, + h1_max_headers: Option, #[cfg(feature = "ffi")] h1_preserve_header_order: bool, h1_read_buf_exact_size: Option, @@ -127,7 +124,7 @@ pub struct Builder { /// See [`client::conn`](crate::client::conn) for more. pub async fn handshake(io: T) -> crate::Result<(SendRequest, Connection)> where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin, B: Body + 'static, B::Data: Send, B::Error: Into>, @@ -152,24 +149,21 @@ impl SendRequest { futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await } - /* - pub(super) async fn when_ready(self) -> crate::Result { - let mut me = Some(self); - future::poll_fn(move |cx| { - ready!(me.as_mut().unwrap().poll_ready(cx))?; - Poll::Ready(Ok(me.take().unwrap())) - }) - .await - } - - pub(super) fn is_ready(&self) -> bool { + /// Checks if the connection is currently ready to send a request. + /// + /// # Note + /// + /// This is mostly a hint. Due to inherent latency of networks, it is + /// possible that even after checking this is ready, sending a request + /// may still fail because the connection was closed in the meantime. + pub fn is_ready(&self) -> bool { self.dispatch.is_ready() } - pub(super) fn is_closed(&self) -> bool { + /// Checks if the connection side has been closed. + pub fn is_closed(&self) -> bool { self.dispatch.is_closed() } - */ } impl SendRequest @@ -180,18 +174,18 @@ where /// /// Returns a future that if successful, yields the `Response`. /// - /// # Note + /// `req` must have a `Host` header. + /// + /// # Uri /// - /// There are some key differences in what automatic things the `Client` - /// does for you that will not be done here: + /// The `Uri` of the request is serialized as-is. /// - /// - `Client` requires absolute-form `Uri`s, since the scheme and - /// authority are needed to connect. 
They aren't required here. - /// - Since the `Client` requires absolute-form `Uri`s, it can add - /// the `Host` header based on it. You must add a `Host` header yourself - /// before calling this method. - /// - Since absolute-form `Uri`s are not required, if received, they will - /// be serialized as-is. + /// - Usually you want origin-form (`/path?query`). + /// - For sending to an HTTP proxy, you want to send in absolute-form + /// (`https://hyper.rs/guides`). + /// + /// This is however not enforced or validated and it is up to the user + /// of this method to ensure the `Uri` is correct for their intended purpose. pub fn send_request( &mut self, req: Request, @@ -207,41 +201,45 @@ where Err(_canceled) => panic!("dispatch dropped without returning error"), }, Err(_req) => { - tracing::debug!("connection was not ready"); - + debug!("connection was not ready"); Err(crate::Error::new_canceled().with("connection was not ready")) } } } } - /* - pub(super) fn send_request_retryable( + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Error + /// + /// If there was an error before trying to serialize the request to the + /// connection, the message will be returned as part of this error. + pub fn try_send_request( &mut self, req: Request, - ) -> impl Future, (crate::Error, Option>)>> + Unpin - where - B: Send, - { - match self.dispatch.try_send(req) { - Ok(rx) => { - Either::Left(rx.then(move |res| { - match res { - Ok(Ok(res)) => future::ok(res), - Ok(Err(err)) => future::err(err), - // this is definite bug if it happens, but it shouldn't happen! 
- Err(_) => panic!("dispatch dropped without returning error"), - } - })) - } - Err(req) => { - tracing::debug!("connection was not ready"); - let err = crate::Error::new_canceled().with("connection was not ready"); - Either::Right(future::err((err, Some(req)))) + ) -> impl Future, TrySendError>>> { + let sent = self.dispatch.try_send(req); + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(res)) => Ok(res), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_) => panic!("dispatch dropped without returning error"), + }, + Err(req) => { + debug!("connection was not ready"); + let error = crate::Error::new_canceled().with("connection was not ready"); + Err(TrySendError { + error, + message: Some(req), + }) + } } } } - */ } impl fmt::Debug for SendRequest { @@ -252,9 +250,23 @@ impl fmt::Debug for SendRequest { // ===== impl Connection +impl Connection +where + T: Read + Write + Unpin + Send, + B: Body + 'static, + B::Error: Into>, +{ + /// Enable this connection to support higher-level HTTP upgrades. + /// + /// See [the `upgrade` module](crate::upgrade) for more. + pub fn with_upgrades(self) -> upgrades::UpgradeableConnection { + upgrades::UpgradeableConnection { inner: Some(self) } + } +} + impl fmt::Debug for Connection where - T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + T: Read + Write + fmt::Debug, B: Body + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -264,27 +276,24 @@ where impl Future for Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Send + 'static, + T: Read + Write + Unpin, + B: Body + 'static, B::Data: Send, B::Error: Into>, { type Output = crate::Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? { + match ready!(Pin::new(&mut self.inner).poll(cx))? 
{ proto::Dispatched::Shutdown => Poll::Ready(Ok(())), - proto::Dispatched::Upgrade(pending) => match self.inner.take() { - Some(h1) => { - let (io, buf, _) = h1.into_inner(); - pending.fulfill(Upgraded::new(io, buf)); - Poll::Ready(Ok(())) - } - _ => { - drop(pending); - unreachable!("Upgraded twice"); - } - }, + proto::Dispatched::Upgrade(pending) => { + // With no `Send` bound on `I`, we can't try to do + // upgrades here. In case a user was trying to use + // `upgrade` with this API, send a special + // error letting them know about that. + pending.manual(); + Poll::Ready(Ok(())) + } } } } @@ -302,6 +311,7 @@ impl Builder { h1_parser_config: Default::default(), h1_title_case_headers: false, h1_preserve_header_case: false, + h1_max_headers: None, #[cfg(feature = "ffi")] h1_preserve_header_order: false, h1_max_buf_size: None, @@ -330,8 +340,6 @@ impl Builder { /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a /// > response message before forwarding the message downstream. /// - /// Note that this setting does not affect HTTP/2. - /// /// Default is false. /// /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 @@ -381,7 +389,7 @@ impl Builder { /// Set whether HTTP/1 connections will silently ignored malformed header lines. /// - /// If this is enabled and and a header line does not start with a valid header + /// If this is enabled and a header line does not start with a valid header /// name, or does not include a colon at all, the line will be silently ignored /// and no error will be reported. /// @@ -434,6 +442,24 @@ impl Builder { self } + /// Set the maximum number of headers. + /// + /// When a response is received, the parser will reserve a buffer to store headers for optimal + /// performance. + /// + /// If client receives more headers than the buffer size, the error "message header too large" + /// is returned. 
+ /// + /// Note that headers is allocated on the stack by default, which has higher performance. After + /// setting this value, headers will be allocated in heap memory, that is, heap memory + /// allocation will occur for each response, and there will be a performance drop of about 5%. + /// + /// Default is 100. + pub fn max_headers(&mut self, val: usize) -> &mut Self { + self.h1_max_headers = Some(val); + self + } + /// Set whether to support preserving original header order. /// /// Currently, this will record the order in which headers are received, and store this @@ -488,7 +514,7 @@ impl Builder { io: T, ) -> impl Future, Connection)>> where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin, B: Body + 'static, B::Data: Send, B::Error: Into>, @@ -496,7 +522,7 @@ impl Builder { let opts = self.clone(); async move { - tracing::trace!("client handshake HTTP/1"); + trace!("client handshake HTTP/1"); let (tx, rx) = dispatch::channel(); let mut conn = proto::Conn::new(io); @@ -514,6 +540,9 @@ impl Builder { if opts.h1_preserve_header_case { conn.set_preserve_header_case(); } + if let Some(max_headers) = opts.h1_max_headers { + conn.set_http1_max_headers(max_headers); + } #[cfg(feature = "ffi")] if opts.h1_preserve_header_order { conn.set_preserve_header_order(); @@ -532,10 +561,49 @@ impl Builder { let cd = proto::h1::dispatch::Client::new(rx); let proto = proto::h1::Dispatcher::new(cd, conn); - Ok(( - SendRequest { dispatch: tx }, - Connection { inner: Some(proto) }, - )) + Ok((SendRequest { dispatch: tx }, Connection { inner: proto })) + } + } +} + +mod upgrades { + use crate::upgrade::Upgraded; + + use super::*; + + // A future binding a connection with a Service with Upgrade support. + // + // This type is unnameable outside the crate. 
+ #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct UpgradeableConnection + where + T: Read + Write + Unpin + Send + 'static, + B: Body + 'static, + B::Error: Into>, + { + pub(super) inner: Option>, + } + + impl Future for UpgradeableConnection + where + I: Read + Write + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, + { + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.inner.as_mut().unwrap().inner).poll(cx)) { + Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())), + Ok(proto::Dispatched::Upgrade(pending)) => { + let Parts { io, read_buf } = self.inner.take().unwrap().into_parts(); + pending.fulfill(Upgraded::new(io, read_buf)); + Poll::Ready(Ok(())) + } + Err(e) => Poll::Ready(Err(e)), + } } } } diff --git a/.cargo-vendor/hyper/src/client/conn/http2.rs b/.cargo-vendor/hyper/src/client/conn/http2.rs index 5697e9ee47..aee135f672 100644 --- a/.cargo-vendor/hyper/src/client/conn/http2.rs +++ b/.cargo-vendor/hyper/src/client/conn/http2.rs @@ -1,23 +1,24 @@ //! HTTP/2 client connections -use std::error::Error as StdError; +use std::error::Error; use std::fmt; use std::future::Future; use std::marker::PhantomData; -use std::marker::Unpin; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; +use crate::rt::{Read, Write}; +use futures_util::ready; use http::{Request, Response}; -use tokio::io::{AsyncRead, AsyncWrite}; -use super::super::dispatch; -use crate::body::{Body as IncomingBody, HttpBody as Body}; -use crate::common::exec::{BoxSendFuture, Exec}; +use super::super::dispatch::{self, TrySendError}; +use crate::body::{Body, Incoming as IncomingBody}; +use crate::common::time::Time; use crate::proto; -use crate::rt::Executor; +use crate::rt::bounds::Http2ClientConnExec; +use crate::rt::Timer; /// The sender side of an established connection. 
pub struct SendRequest { @@ -37,34 +38,43 @@ impl Clone for SendRequest { /// In most cases, this should just be spawned into an executor, so that it /// can process incoming and outgoing messages, notice hangups, and the like. #[must_use = "futures do nothing unless polled"] -pub struct Connection +pub struct Connection where - T: AsyncRead + AsyncWrite + Send + 'static, + T: Read + Write + Unpin, B: Body + 'static, + E: Http2ClientConnExec + Unpin, + B::Error: Into>, { - inner: (PhantomData, proto::h2::ClientTask), + inner: (PhantomData, proto::h2::ClientTask), } /// A builder to configure an HTTP connection. /// /// After setting options, the builder is used to create a handshake future. +/// +/// **Note**: The default values of options are *not considered stable*. They +/// are subject to change at any time. #[derive(Clone, Debug)] -pub struct Builder { - pub(super) exec: Exec, +pub struct Builder { + pub(super) exec: Ex, + pub(super) timer: Time, h2_builder: proto::h2::client::Config, } /// Returns a handshake future over some IO. /// -/// This is a shortcut for `Builder::new().handshake(io)`. +/// This is a shortcut for `Builder::new(exec).handshake(io)`. /// See [`client::conn`](crate::client::conn) for more. 
-pub async fn handshake(exec: E, io: T) -> crate::Result<(SendRequest, Connection)> +pub async fn handshake( + exec: E, + io: T, +) -> crate::Result<(SendRequest, Connection)> where - E: Executor + Send + Sync + 'static, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin, B: Body + 'static, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + E: Http2ClientConnExec + Unpin + Clone, { Builder::new(exec).handshake(io).await } @@ -90,22 +100,19 @@ impl SendRequest { futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await } - /* - pub(super) async fn when_ready(self) -> crate::Result { - let mut me = Some(self); - future::poll_fn(move |cx| { - ready!(me.as_mut().unwrap().poll_ready(cx))?; - Poll::Ready(Ok(me.take().unwrap())) - }) - .await - } - - pub(super) fn is_ready(&self) -> bool { + /// Checks if the connection is currently ready to send a request. + /// + /// # Note + /// + /// This is mostly a hint. Due to inherent latency of networks, it is + /// possible that even after checking this is ready, sending a request + /// may still fail because the connection was closed in the meantime. + pub fn is_ready(&self) -> bool { self.dispatch.is_ready() } - */ - pub(super) fn is_closed(&self) -> bool { + /// Checks if the connection side has been closed. + pub fn is_closed(&self) -> bool { self.dispatch.is_closed() } } @@ -118,18 +125,10 @@ where /// /// Returns a future that if successful, yields the `Response`. /// - /// # Note - /// - /// There are some key differences in what automatic things the `Client` - /// does for you that will not be done here: + /// `req` must have a `Host` header. /// - /// - `Client` requires absolute-form `Uri`s, since the scheme and - /// authority are needed to connect. They aren't required here. - /// - Since the `Client` requires absolute-form `Uri`s, it can add - /// the `Host` header based on it. You must add a `Host` header yourself - /// before calling this method. 
- /// - Since absolute-form `Uri`s are not required, if received, they will - /// be serialized as-is. + /// Absolute-form `Uri`s are not required. If received, they will be serialized + /// as-is. pub fn send_request( &mut self, req: Request, @@ -145,7 +144,7 @@ where Err(_canceled) => panic!("dispatch dropped without returning error"), }, Err(_req) => { - tracing::debug!("connection was not ready"); + debug!("connection was not ready"); Err(crate::Error::new_canceled().with("connection was not ready")) } @@ -153,33 +152,38 @@ where } } - /* - pub(super) fn send_request_retryable( + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Error + /// + /// If there was an error before trying to serialize the request to the + /// connection, the message will be returned as part of this error. + pub fn try_send_request( &mut self, req: Request, - ) -> impl Future, (crate::Error, Option>)>> + Unpin - where - B: Send, - { - match self.dispatch.try_send(req) { - Ok(rx) => { - Either::Left(rx.then(move |res| { - match res { - Ok(Ok(res)) => future::ok(res), - Ok(Err(err)) => future::err(err), - // this is definite bug if it happens, but it shouldn't happen! - Err(_) => panic!("dispatch dropped without returning error"), - } - })) - } - Err(req) => { - tracing::debug!("connection was not ready"); - let err = crate::Error::new_canceled().with("connection was not ready"); - Either::Right(future::err((err, Some(req)))) + ) -> impl Future, TrySendError>>> { + let sent = self.dispatch.try_send(req); + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(res)) => Ok(res), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! 
+ Err(_) => panic!("dispatch dropped without returning error"), + }, + Err(req) => { + debug!("connection was not ready"); + let error = crate::Error::new_canceled().with("connection was not ready"); + Err(TrySendError { + error, + message: Some(req), + }) + } } } } - */ } impl fmt::Debug for SendRequest { @@ -190,12 +194,13 @@ impl fmt::Debug for SendRequest { // ===== impl Connection -impl Connection +impl Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Unpin + Send + 'static, + T: Read + Write + Unpin + 'static, + B: Body + Unpin + 'static, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + E: Http2ClientConnExec + Unpin, { /// Returns whether the [extended CONNECT protocol][1] is enabled or not. /// @@ -211,22 +216,26 @@ where } } -impl fmt::Debug for Connection +impl fmt::Debug for Connection where - T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + T: Read + Write + fmt::Debug + 'static + Unpin, B: Body + 'static, + E: Http2ClientConnExec + Unpin, + B::Error: Into>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Connection").finish() } } -impl Future for Connection +impl Future for Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Send + 'static, + T: Read + Write + Unpin + 'static, + B: Body + 'static + Unpin, B::Data: Send, - B::Error: Into>, + E: Unpin, + B::Error: Into>, + E: Http2ClientConnExec + Unpin, { type Output = crate::Result<()>; @@ -241,26 +250,26 @@ where // ===== impl Builder -impl Builder { +impl Builder +where + Ex: Clone, +{ /// Creates a new connection builder. #[inline] - pub fn new(exec: E) -> Builder - where - E: Executor + Send + Sync + 'static, - { - use std::sync::Arc; + pub fn new(exec: Ex) -> Builder { Builder { - exec: Exec::Executor(Arc::new(exec)), + exec, + timer: Time::Empty, h2_builder: Default::default(), } } - /// Provide an executor to execute background HTTP2 tasks. 
- pub fn executor(&mut self, exec: E) -> &mut Builder + /// Provide a timer to execute background HTTP2 tasks. + pub fn timer(&mut self, timer: M) -> &mut Builder where - E: Executor + Send + Sync + 'static, + M: Timer + Send + Sync + 'static, { - self.exec = Exec::Executor(Arc::new(exec)); + self.timer = Time::Timer(Arc::new(timer)); self } @@ -271,7 +280,7 @@ impl Builder { /// /// If not set, hyper will use a default. /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; @@ -293,6 +302,23 @@ impl Builder { self } + /// Sets the initial maximum of locally initiated (send) streams. + /// + /// This value will be overwritten by the value included in the initial + /// SETTINGS frame received from the peer as part of a [connection preface]. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface + pub fn initial_max_send_streams(&mut self, initial: impl Into>) -> &mut Self { + if let Some(initial) = initial.into() { + self.h2_builder.initial_max_send_streams = initial; + } + self + } + /// Sets whether to use an adaptive flow control. /// /// Enabling this will override the limits set in @@ -321,13 +347,20 @@ impl Builder { self } + /// Sets the max size of received header frames. + /// + /// Default is currently 16KB, but can change. + pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { + self.h2_builder.max_header_list_size = max; + self + } + /// Sets an interval for HTTP2 Ping frames should be sent to keep a /// connection alive. /// /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. 
- #[cfg(feature = "runtime")] pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { self.h2_builder.keep_alive_interval = interval.into(); self @@ -339,7 +372,6 @@ impl Builder { /// be closed. Does nothing if `keep_alive_interval` is disabled. /// /// Default is 20 seconds. - #[cfg(feature = "runtime")] pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.h2_builder.keep_alive_timeout = timeout; self @@ -353,7 +385,6 @@ impl Builder { /// disabled. /// /// Default is `false`. - #[cfg(feature = "runtime")] pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { self.h2_builder.keep_alive_while_idle = enabled; self @@ -380,11 +411,22 @@ impl Builder { /// /// The value must be no larger than `u32::MAX`. pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); + assert!(max <= u32::MAX as usize); self.h2_builder.max_send_buffer_size = max; self } + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.4.0, it is 20. + /// + /// See for more information. + pub fn max_pending_accept_reset_streams(&mut self, max: impl Into>) -> &mut Self { + self.h2_builder.max_pending_accept_reset_streams = max.into(); + self + } + /// Constructs a connection with the configured options and IO. /// See [`client::conn`](crate::client::conn) for more. 
/// @@ -393,20 +435,22 @@ impl Builder { pub fn handshake( &self, io: T, - ) -> impl Future, Connection)>> + ) -> impl Future, Connection)>> where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin, B: Body + 'static, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + Ex: Http2ClientConnExec + Unpin, { let opts = self.clone(); async move { - tracing::trace!("client handshake HTTP/1"); + trace!("client handshake HTTP/2"); let (tx, rx) = dispatch::channel(); - let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec).await?; + let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec, opts.timer) + .await?; Ok(( SendRequest { dispatch: tx.unbound(), @@ -418,3 +462,219 @@ impl Builder { } } } + +#[cfg(test)] +mod tests { + + #[tokio::test] + #[ignore] // only compilation is checked + async fn send_sync_executor_of_non_send_futures() { + #[derive(Clone)] + struct LocalTokioExecutor; + + impl crate::rt::Executor for LocalTokioExecutor + where + F: std::future::Future + 'static, // not requiring `Send` + { + fn execute(&self, fut: F) { + // This will spawn into the currently running `LocalSet`. + tokio::task::spawn_local(fut); + } + } + + #[allow(unused)] + async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) { + let (_sender, conn) = crate::client::conn::http2::handshake::< + _, + _, + http_body_util::Empty, + >(LocalTokioExecutor, io) + .await + .unwrap(); + + tokio::task::spawn_local(async move { + conn.await.unwrap(); + }); + } + } + + #[tokio::test] + #[ignore] // only compilation is checked + async fn not_send_not_sync_executor_of_not_send_futures() { + #[derive(Clone)] + struct LocalTokioExecutor { + _x: std::marker::PhantomData>, + } + + impl crate::rt::Executor for LocalTokioExecutor + where + F: std::future::Future + 'static, // not requiring `Send` + { + fn execute(&self, fut: F) { + // This will spawn into the currently running `LocalSet`. 
+ tokio::task::spawn_local(fut); + } + } + + #[allow(unused)] + async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) { + let (_sender, conn) = + crate::client::conn::http2::handshake::<_, _, http_body_util::Empty>( + LocalTokioExecutor { + _x: Default::default(), + }, + io, + ) + .await + .unwrap(); + + tokio::task::spawn_local(async move { + conn.await.unwrap(); + }); + } + } + + #[tokio::test] + #[ignore] // only compilation is checked + async fn send_not_sync_executor_of_not_send_futures() { + #[derive(Clone)] + struct LocalTokioExecutor { + _x: std::marker::PhantomData>, + } + + impl crate::rt::Executor for LocalTokioExecutor + where + F: std::future::Future + 'static, // not requiring `Send` + { + fn execute(&self, fut: F) { + // This will spawn into the currently running `LocalSet`. + tokio::task::spawn_local(fut); + } + } + + #[allow(unused)] + async fn run(io: impl crate::rt::Read + crate::rt::Write + Unpin + 'static) { + let (_sender, conn) = + crate::client::conn::http2::handshake::<_, _, http_body_util::Empty>( + LocalTokioExecutor { + _x: Default::default(), + }, + io, + ) + .await + .unwrap(); + + tokio::task::spawn_local(async move { + conn.await.unwrap(); + }); + } + } + + #[tokio::test] + #[ignore] // only compilation is checked + async fn send_sync_executor_of_send_futures() { + #[derive(Clone)] + struct TokioExecutor; + + impl crate::rt::Executor for TokioExecutor + where + F: std::future::Future + 'static + Send, + F::Output: Send + 'static, + { + fn execute(&self, fut: F) { + tokio::task::spawn(fut); + } + } + + #[allow(unused)] + async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) { + let (_sender, conn) = crate::client::conn::http2::handshake::< + _, + _, + http_body_util::Empty, + >(TokioExecutor, io) + .await + .unwrap(); + + tokio::task::spawn(async move { + conn.await.unwrap(); + }); + } + } + + #[tokio::test] + #[ignore] // only compilation is checked + async fn 
not_send_not_sync_executor_of_send_futures() { + #[derive(Clone)] + struct TokioExecutor { + // !Send, !Sync + _x: std::marker::PhantomData>, + } + + impl crate::rt::Executor for TokioExecutor + where + F: std::future::Future + 'static + Send, + F::Output: Send + 'static, + { + fn execute(&self, fut: F) { + tokio::task::spawn(fut); + } + } + + #[allow(unused)] + async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) { + let (_sender, conn) = + crate::client::conn::http2::handshake::<_, _, http_body_util::Empty>( + TokioExecutor { + _x: Default::default(), + }, + io, + ) + .await + .unwrap(); + + tokio::task::spawn_local(async move { + // can't use spawn here because when executor is !Send + conn.await.unwrap(); + }); + } + } + + #[tokio::test] + #[ignore] // only compilation is checked + async fn send_not_sync_executor_of_send_futures() { + #[derive(Clone)] + struct TokioExecutor { + // !Sync + _x: std::marker::PhantomData>, + } + + impl crate::rt::Executor for TokioExecutor + where + F: std::future::Future + 'static + Send, + F::Output: Send + 'static, + { + fn execute(&self, fut: F) { + tokio::task::spawn(fut); + } + } + + #[allow(unused)] + async fn run(io: impl crate::rt::Read + crate::rt::Write + Send + Unpin + 'static) { + let (_sender, conn) = + crate::client::conn::http2::handshake::<_, _, http_body_util::Empty>( + TokioExecutor { + _x: Default::default(), + }, + io, + ) + .await + .unwrap(); + + tokio::task::spawn_local(async move { + // can't use spawn here because when executor is !Send + conn.await.unwrap(); + }); + } + } +} diff --git a/.cargo-vendor/hyper/src/client/conn/mod.rs b/.cargo-vendor/hyper/src/client/conn/mod.rs new file mode 100644 index 0000000000..f982ae6ddb --- /dev/null +++ b/.cargo-vendor/hyper/src/client/conn/mod.rs @@ -0,0 +1,22 @@ +//! Lower-level client connection API. +//! +//! The types in this module are to provide a lower-level API based around a +//! single connection. 
Connecting to a host, pooling connections, and the like +//! are not handled at this level. This module provides the building blocks to +//! customize those things externally. +//! +//! If you are looking for a convenient HTTP client, then you may wish to +//! consider [reqwest](https://github.com/seanmonstar/reqwest) for a high level +//! client or [`hyper-util`'s client](https://docs.rs/hyper-util/latest/hyper_util/client/index.html) +//! if you want to keep it more low level / basic. +//! +//! ## Example +//! +//! See the [client guide](https://hyper.rs/guides/1/client/basic/). + +#[cfg(feature = "http1")] +pub mod http1; +#[cfg(feature = "http2")] +pub mod http2; + +pub use super::dispatch::TrySendError; diff --git a/.cargo-vendor/hyper/src/client/dispatch.rs b/.cargo-vendor/hyper/src/client/dispatch.rs index a1a93ea964..b52da60e72 100644 --- a/.cargo-vendor/hyper/src/client/dispatch.rs +++ b/.cargo-vendor/hyper/src/client/dispatch.rs @@ -1,20 +1,38 @@ -#[cfg(feature = "http2")] -use std::future::Future; -use std::marker::Unpin; -#[cfg(feature = "http2")] -use std::pin::Pin; use std::task::{Context, Poll}; +#[cfg(feature = "http2")] +use std::{future::Future, pin::Pin}; -use futures_util::FutureExt; +#[cfg(feature = "http2")] +use http::{Request, Response}; +#[cfg(feature = "http2")] +use http_body::Body; +#[cfg(feature = "http2")] +use pin_project_lite::pin_project; use tokio::sync::{mpsc, oneshot}; -pub(crate) type RetryPromise = oneshot::Receiver)>>; +#[cfg(feature = "http2")] +use crate::{body::Incoming, proto::h2::client::ResponseFutMap}; + +pub(crate) type RetryPromise = oneshot::Receiver>>; pub(crate) type Promise = oneshot::Receiver>; +/// An error when calling `try_send_request`. +/// +/// There is a possibility of an error occuring on a connection in-between the +/// time that a request is queued and when it is actually written to the IO +/// transport. 
If that happens, it is safe to return the request back to the +/// caller, as it was never fully sent. +#[derive(Debug)] +pub struct TrySendError { + pub(crate) error: crate::Error, + pub(crate) message: Option, +} + pub(crate) fn channel() -> (Sender, Receiver) { let (tx, rx) = mpsc::unbounded_channel(); let (giver, taker) = want::new(); let tx = Sender { + #[cfg(feature = "http1")] buffered_once: false, giver, inner: tx, @@ -31,8 +49,9 @@ pub(crate) struct Sender { /// One message is always allowed, even if the Receiver hasn't asked /// for it yet. This boolean keeps track of whether we've sent one /// without notice. + #[cfg(feature = "http1")] buffered_once: bool, - /// The Giver helps watch that the the Receiver side has been polled + /// The Giver helps watch that the Receiver side has been polled /// when the queue is empty. This helps us know when a request and /// response have been fully processed, and a connection is ready /// for more. @@ -53,20 +72,24 @@ pub(crate) struct UnboundedSender { } impl Sender { + #[cfg(feature = "http1")] pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.giver .poll_want(cx) .map_err(|_| crate::Error::new_closed()) } + #[cfg(feature = "http1")] pub(crate) fn is_ready(&self) -> bool { self.giver.is_wanting() } + #[cfg(feature = "http1")] pub(crate) fn is_closed(&self) -> bool { self.giver.is_canceled() } + #[cfg(feature = "http1")] fn can_send(&mut self) -> bool { if self.giver.give() || !self.buffered_once { // If the receiver is ready *now*, then of course we can send. 
@@ -80,6 +103,7 @@ impl Sender { } } + #[cfg(feature = "http1")] pub(crate) fn try_send(&mut self, val: T) -> Result, T> { if !self.can_send() { return Err(val); @@ -91,6 +115,7 @@ impl Sender { .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } + #[cfg(feature = "http1")] pub(crate) fn send(&mut self, val: T) -> Result, T> { if !self.can_send() { return Err(val); @@ -129,7 +154,6 @@ impl UnboundedSender { .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } - #[cfg(all(feature = "backports", feature = "http2"))] pub(crate) fn send(&mut self, val: T) -> Result, T> { let (tx, rx) = oneshot::channel(); self.inner @@ -175,6 +199,7 @@ impl Receiver { #[cfg(feature = "http1")] pub(crate) fn try_recv(&mut self) -> Option<(T, Callback)> { + use futures_util::FutureExt; match self.inner.recv().now_or_never() { Some(Some(mut env)) => env.0.take(), _ => None, @@ -195,43 +220,50 @@ struct Envelope(Option<(T, Callback)>); impl Drop for Envelope { fn drop(&mut self) { if let Some((val, cb)) = self.0.take() { - cb.send(Err(( - crate::Error::new_canceled().with("connection closed"), - Some(val), - ))); + cb.send(Err(TrySendError { + error: crate::Error::new_canceled().with("connection closed"), + message: Some(val), + })); } } } pub(crate) enum Callback { - Retry(Option)>>>), + #[allow(unused)] + Retry(Option>>>), NoRetry(Option>>), } impl Drop for Callback { fn drop(&mut self) { - // FIXME(nox): What errors do we want here? 
- let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() { - "user code panicked" - } else { - "runtime dropped the dispatch task" - }); - match self { Callback::Retry(tx) => { if let Some(tx) = tx.take() { - let _ = tx.send(Err((error, None))); + let _ = tx.send(Err(TrySendError { + error: dispatch_gone(), + message: None, + })); } } Callback::NoRetry(tx) => { if let Some(tx) = tx.take() { - let _ = tx.send(Err(error)); + let _ = tx.send(Err(dispatch_gone())); } } } } } +#[cold] +fn dispatch_gone() -> crate::Error { + // FIXME(nox): What errors do we want here? + crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() { + "user code panicked" + } else { + "runtime dropped the dispatch task" + }) +} + impl Callback { #[cfg(feature = "http2")] pub(crate) fn is_canceled(&self) -> bool { @@ -250,47 +282,83 @@ impl Callback { } } - pub(crate) fn send(mut self, val: Result)>) { + pub(crate) fn send(mut self, val: Result>) { match self { Callback::Retry(ref mut tx) => { let _ = tx.take().unwrap().send(val); } Callback::NoRetry(ref mut tx) => { - let _ = tx.take().unwrap().send(val.map_err(|e| e.0)); + let _ = tx.take().unwrap().send(val.map_err(|e| e.error)); } } } +} - #[cfg(feature = "http2")] - pub(crate) async fn send_when( - self, - mut when: impl Future)>> + Unpin, - ) { - use futures_util::future; - use tracing::trace; - - let mut cb = Some(self); - - // "select" on this callback being canceled, and the future completing - future::poll_fn(move |cx| { - match Pin::new(&mut when).poll(cx) { - Poll::Ready(Ok(res)) => { - cb.take().expect("polled after complete").send(Ok(res)); - Poll::Ready(()) - } - Poll::Pending => { - // check if the callback is canceled - ready!(cb.as_mut().unwrap().poll_canceled(cx)); - trace!("send_when canceled"); - Poll::Ready(()) - } - Poll::Ready(Err(err)) => { - cb.take().expect("polled after complete").send(Err(err)); - Poll::Ready(()) - } +impl TrySendError { + /// Take the message from this 
error. + /// + /// The message will not always have been recovered. If an error occurs + /// after the message has been serialized onto the connection, it will not + /// be available here. + pub fn take_message(&mut self) -> Option { + self.message.take() + } + + /// Consumes this to return the inner error. + pub fn into_error(self) -> crate::Error { + self.error + } +} + +#[cfg(feature = "http2")] +pin_project! { + pub struct SendWhen + where + B: Body, + B: 'static, + { + #[pin] + pub(crate) when: ResponseFutMap, + #[pin] + pub(crate) call_back: Option, Response>>, + } +} + +#[cfg(feature = "http2")] +impl Future for SendWhen +where + B: Body + 'static, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + let mut call_back = this.call_back.take().expect("polled after complete"); + + match Pin::new(&mut this.when).poll(cx) { + Poll::Ready(Ok(res)) => { + call_back.send(Ok(res)); + Poll::Ready(()) } - }) - .await + Poll::Pending => { + // check if the callback is canceled + match call_back.poll_canceled(cx) { + Poll::Ready(v) => v, + Poll::Pending => { + // Move call_back back to struct before return + this.call_back.set(Some(call_back)); + return Poll::Pending; + } + }; + trace!("send_when canceled"); + Poll::Ready(()) + } + Poll::Ready(Err((error, message))) => { + call_back.send(Err(TrySendError { error, message })); + Poll::Ready(()) + } + } } } @@ -306,7 +374,7 @@ mod tests { use super::{channel, Callback, Receiver}; #[derive(Debug)] - struct Custom(i32); + struct Custom(#[allow(dead_code)] i32); impl Future for Receiver { type Output = Option<(T, Callback)>; @@ -333,6 +401,7 @@ mod tests { } } + #[cfg(not(miri))] #[tokio::test] async fn drop_receiver_sends_cancel_errors() { let _ = pretty_env_logger::try_init(); @@ -349,12 +418,13 @@ mod tests { let err = fulfilled .expect("fulfilled") .expect_err("promise should error"); - match (err.0.kind(), err.1) { - (&crate::error::Kind::Canceled, 
Some(_)) => (), + match (err.error.is_canceled(), err.message) { + (true, Some(_)) => (), e => panic!("expected Error::Cancel(_), found {:?}", e), } } + #[cfg(not(miri))] #[tokio::test] async fn sender_checks_for_want_on_send() { let (mut tx, mut rx) = channel::(); @@ -392,16 +462,15 @@ mod tests { #[cfg(feature = "nightly")] #[bench] fn giver_queue_throughput(b: &mut test::Bencher) { - use crate::{Body, Request, Response}; + use crate::{body::Incoming, Request, Response}; let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() .build() .unwrap(); - let (mut tx, mut rx) = channel::, Response>(); + let (mut tx, mut rx) = channel::, Response>(); b.iter(move || { - let _ = tx.send(Request::default()).unwrap(); + let _ = tx.send(Request::new(Incoming::empty())).unwrap(); rt.block_on(async { loop { let poll_once = PollOnce(&mut rx); @@ -418,7 +487,6 @@ mod tests { #[bench] fn giver_queue_not_ready(b: &mut test::Bencher) { let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() .build() .unwrap(); let (_tx, mut rx) = channel::(); diff --git a/.cargo-vendor/hyper/src/client/mod.rs b/.cargo-vendor/hyper/src/client/mod.rs index 734bda8819..86e3897388 100644 --- a/.cargo-vendor/hyper/src/client/mod.rs +++ b/.cargo-vendor/hyper/src/client/mod.rs @@ -1,68 +1,22 @@ //! HTTP Client //! -//! There are two levels of APIs provided for construct HTTP clients: +//! hyper provides HTTP over a single connection. See the [`conn`] module. //! -//! - The higher-level [`Client`](Client) type. -//! - The lower-level [`conn`](conn) module. +//! ## Examples //! -//! # Client +//! * [`client`] - A simple CLI http client that requests the url passed in parameters and outputs the response content and details to the stdout, reading content chunk-by-chunk. //! -//! The [`Client`](Client) is the main way to send HTTP requests to a server. -//! The default `Client` provides these things on top of the lower-level API: +//! 
* [`client_json`] - A simple program that GETs some json, reads the body asynchronously, parses it with serde and outputs the result. //! -//! - A default **connector**, able to resolve hostnames and connect to -//! destinations over plain-text TCP. -//! - A **pool** of existing connections, allowing better performance when -//! making multiple requests to the same hostname. -//! - Automatic setting of the `Host` header, based on the request `Uri`. -//! - Automatic request **retries** when a pooled connection is closed by the -//! server before any bytes have been written. -//! -//! Many of these features can configured, by making use of -//! [`Client::builder`](Client::builder). -//! -//! ## Example -//! -//! For a small example program simply fetching a URL, take a look at the -//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs). -//! -//! ``` -//! # #[cfg(all(feature = "tcp", feature = "client", any(feature = "http1", feature = "http2")))] -//! # async fn fetch_httpbin() -> hyper::Result<()> { -//! use hyper::{body::HttpBody as _, Client, Uri}; -//! -//! let client = Client::new(); -//! -//! // Make a GET /ip to 'http://httpbin.org' -//! let res = client.get(Uri::from_static("http://httpbin.org/ip")).await?; -//! -//! // And then, if the request gets a response... -//! println!("status: {}", res.status()); -//! -//! // Concatenate the body stream into a single buffer... -//! let buf = hyper::body::to_bytes(res).await?; -//! -//! println!("body: {:?}", buf); -//! # Ok(()) -//! # } -//! # fn main () {} -//! ``` +//! [`client`]: https://github.com/hyperium/hyper/blob/master/examples/client.rs +//! [`client_json`]: https://github.com/hyperium/hyper/blob/master/examples/client_json.rs -#[cfg(feature = "tcp")] -pub use self::connect::HttpConnector; - -pub mod connect; -#[cfg(all(test, feature = "runtime"))] +#[cfg(test)] mod tests; cfg_feature! 
{ #![any(feature = "http1", feature = "http2")] - pub use self::client::{Builder, Client, ResponseFuture}; - - mod client; pub mod conn; pub(super) mod dispatch; - mod pool; - pub mod service; } diff --git a/.cargo-vendor/hyper/src/client/tests.rs b/.cargo-vendor/hyper/src/client/tests.rs index 0a281a637d..144349e5d7 100644 --- a/.cargo-vendor/hyper/src/client/tests.rs +++ b/.cargo-vendor/hyper/src/client/tests.rs @@ -1,28 +1,3 @@ -use std::io; - -use futures_util::future; -use tokio::net::TcpStream; - -use super::Client; - -#[tokio::test] -async fn client_connect_uri_argument() { - let connector = tower::service_fn(|dst: http::Uri| { - assert_eq!(dst.scheme(), Some(&http::uri::Scheme::HTTP)); - assert_eq!(dst.host(), Some("example.local")); - assert_eq!(dst.port(), None); - assert_eq!(dst.path(), "/", "path should be removed"); - - future::err::(io::Error::new(io::ErrorKind::Other, "expect me")) - }); - - let client = Client::builder().build::<_, crate::Body>(connector); - let _ = client - .get("http://example.local/and/a/path".parse().unwrap()) - .await - .expect_err("response should fail"); -} - /* // FIXME: re-implement tests with `async/await` #[test] diff --git a/.cargo-vendor/hyper/src/common/buf.rs b/.cargo-vendor/hyper/src/common/buf.rs index 64e9333ead..d00071551b 100644 --- a/.cargo-vendor/hyper/src/common/buf.rs +++ b/.cargo-vendor/hyper/src/common/buf.rs @@ -21,7 +21,6 @@ impl BufList { } #[inline] - #[cfg(feature = "http1")] pub(crate) fn bufs_cnt(&self) -> usize { self.bufs.len() } diff --git a/.cargo-vendor/hyper/src/common/date.rs b/.cargo-vendor/hyper/src/common/date.rs index a436fc07c0..6eae674695 100644 --- a/.cargo-vendor/hyper/src/common/date.rs +++ b/.cargo-vendor/hyper/src/common/date.rs @@ -29,13 +29,15 @@ pub(crate) fn update_and_header_value() -> HeaderValue { CACHED.with(|cache| { let mut cache = cache.borrow_mut(); cache.check(); - HeaderValue::from_bytes(cache.buffer()).expect("Date format should be valid HeaderValue") + 
cache.header_value.clone() }) } struct CachedDate { bytes: [u8; DATE_VALUE_LENGTH], pos: usize, + #[cfg(feature = "http2")] + header_value: HeaderValue, next_update: SystemTime, } @@ -46,6 +48,8 @@ impl CachedDate { let mut cache = CachedDate { bytes: [0; DATE_VALUE_LENGTH], pos: 0, + #[cfg(feature = "http2")] + header_value: HeaderValue::from_static(""), next_update: SystemTime::now(), }; cache.update(cache.next_update); @@ -72,7 +76,17 @@ impl CachedDate { self.pos = 0; let _ = write!(self, "{}", HttpDate::from(now)); debug_assert!(self.pos == DATE_VALUE_LENGTH); + self.render_http2(); } + + #[cfg(feature = "http2")] + fn render_http2(&mut self) { + self.header_value = HeaderValue::from_bytes(self.buffer()) + .expect("Date format should be valid HeaderValue"); + } + + #[cfg(not(feature = "http2"))] + fn render_http2(&mut self) {} } impl fmt::Write for CachedDate { diff --git a/.cargo-vendor/hyper/src/common/io/compat.rs b/.cargo-vendor/hyper/src/common/io/compat.rs new file mode 100644 index 0000000000..d026b6d38b --- /dev/null +++ b/.cargo-vendor/hyper/src/common/io/compat.rs @@ -0,0 +1,150 @@ +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// This adapts from `hyper` IO traits to the ones in Tokio. +/// +/// This is currently used by `h2`, and by hyper internal unit tests. +#[derive(Debug)] +pub(crate) struct Compat(pub(crate) T); + +impl Compat { + pub(crate) fn new(io: T) -> Self { + Compat(io) + } + + fn p(self: Pin<&mut Self>) -> Pin<&mut T> { + // SAFETY: The simplest of projections. This is just + // a wrapper, we don't do anything that would undo the projection. 
+ unsafe { self.map_unchecked_mut(|me| &mut me.0) } + } +} + +impl tokio::io::AsyncRead for Compat +where + T: crate::rt::Read, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + tbuf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + let init = tbuf.initialized().len(); + let filled = tbuf.filled().len(); + let (new_init, new_filled) = unsafe { + let mut buf = crate::rt::ReadBuf::uninit(tbuf.inner_mut()); + buf.set_init(init); + buf.set_filled(filled); + + match crate::rt::Read::poll_read(self.p(), cx, buf.unfilled()) { + Poll::Ready(Ok(())) => (buf.init_len(), buf.len()), + other => return other, + } + }; + + let n_init = new_init - init; + unsafe { + tbuf.assume_init(n_init); + tbuf.set_filled(new_filled); + } + + Poll::Ready(Ok(())) + } +} + +impl tokio::io::AsyncWrite for Compat +where + T: crate::rt::Write, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + crate::rt::Write::poll_write(self.p(), cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + crate::rt::Write::poll_flush(self.p(), cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + crate::rt::Write::poll_shutdown(self.p(), cx) + } + + fn is_write_vectored(&self) -> bool { + crate::rt::Write::is_write_vectored(&self.0) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + crate::rt::Write::poll_write_vectored(self.p(), cx, bufs) + } +} + +#[cfg(test)] +impl crate::rt::Read for Compat +where + T: tokio::io::AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut buf: crate::rt::ReadBufCursor<'_>, + ) -> Poll> { + let n = unsafe { + let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut()); + match tokio::io::AsyncRead::poll_read(self.p(), cx, &mut tbuf) { + Poll::Ready(Ok(())) => tbuf.filled().len(), + other => return other, + } + }; + + unsafe { + buf.advance(n); + } 
+ Poll::Ready(Ok(())) + } +} + +#[cfg(test)] +impl crate::rt::Write for Compat +where + T: tokio::io::AsyncWrite, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write(self.p(), cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + tokio::io::AsyncWrite::poll_flush(self.p(), cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + tokio::io::AsyncWrite::poll_shutdown(self.p(), cx) + } + + fn is_write_vectored(&self) -> bool { + tokio::io::AsyncWrite::is_write_vectored(&self.0) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write_vectored(self.p(), cx, bufs) + } +} diff --git a/.cargo-vendor/hyper/src/common/io/mod.rs b/.cargo-vendor/hyper/src/common/io/mod.rs index 2e6d506153..98c297ca14 100644 --- a/.cargo-vendor/hyper/src/common/io/mod.rs +++ b/.cargo-vendor/hyper/src/common/io/mod.rs @@ -1,3 +1,7 @@ +#[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] +mod compat; mod rewind; +#[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] +pub(crate) use self::compat::Compat; pub(crate) use self::rewind::Rewind; diff --git a/.cargo-vendor/hyper/src/common/io/rewind.rs b/.cargo-vendor/hyper/src/common/io/rewind.rs index 9ed7c42fea..c2556f013d 100644 --- a/.cargo-vendor/hyper/src/common/io/rewind.rs +++ b/.cargo-vendor/hyper/src/common/io/rewind.rs @@ -1,10 +1,10 @@ -use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; use std::{cmp, io}; use bytes::{Buf, Bytes}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; + +use crate::rt::{Read, ReadBufCursor, Write}; /// Combine a buffer with an IO, rewinding reads to use the buffer. 
#[derive(Debug)] @@ -14,7 +14,7 @@ pub(crate) struct Rewind { } impl Rewind { - #[cfg(any(all(feature = "http2", feature = "server"), test))] + #[cfg(test)] pub(crate) fn new(io: T) -> Self { Rewind { pre: None, @@ -29,14 +29,14 @@ impl Rewind { } } - #[cfg(any(all(feature = "http1", feature = "http2", feature = "server"), test))] + #[cfg(test)] pub(crate) fn rewind(&mut self, bs: Bytes) { debug_assert!(self.pre.is_none()); self.pre = Some(bs); } pub(crate) fn into_inner(self) -> (T, Bytes) { - (self.inner, self.pre.unwrap_or_else(Bytes::new)) + (self.inner, self.pre.unwrap_or_default()) } // pub(crate) fn get_mut(&mut self) -> &mut T { @@ -44,14 +44,14 @@ impl Rewind { // } } -impl AsyncRead for Rewind +impl Read for Rewind where - T: AsyncRead + Unpin, + T: Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, + mut buf: ReadBufCursor<'_>, ) -> Poll> { if let Some(mut prefix) = self.pre.take() { // If there are no remaining bytes, let the bytes get dropped. 
@@ -72,9 +72,9 @@ where } } -impl AsyncWrite for Rewind +impl Write for Rewind where - T: AsyncWrite + Unpin, + T: Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, @@ -105,28 +105,32 @@ where } } +#[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2"), +))] #[cfg(test)] mod tests { - // FIXME: re-implement tests with `async/await`, this import should - // trigger a warning to remind us + use super::super::Compat; use super::Rewind; use bytes::Bytes; use tokio::io::AsyncReadExt; + #[cfg(not(miri))] #[tokio::test] async fn partial_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); - let mut stream = Rewind::new(mock); + let mut stream = Compat::new(Rewind::new(Compat::new(mock))); // Read off some bytes, ensure we filled o1 let mut buf = [0; 2]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. - stream.rewind(Bytes::copy_from_slice(&buf[..])); + stream.0.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); @@ -135,19 +139,20 @@ mod tests { assert_eq!(&buf, &underlying); } + #[cfg(not(miri))] #[tokio::test] async fn full_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); - let mut stream = Rewind::new(mock); + let mut stream = Compat::new(Rewind::new(Compat::new(mock))); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. 
- stream.rewind(Bytes::copy_from_slice(&buf[..])); + stream.0.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); diff --git a/.cargo-vendor/hyper/src/common/mod.rs b/.cargo-vendor/hyper/src/common/mod.rs index 3d83946243..a0c71385cb 100644 --- a/.cargo-vendor/hyper/src/common/mod.rs +++ b/.cargo-vendor/hyper/src/common/mod.rs @@ -1,30 +1,14 @@ -macro_rules! ready { - ($e:expr) => { - match $e { - std::task::Poll::Ready(v) => v, - std::task::Poll::Pending => return std::task::Poll::Pending, - } - }; -} - +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(crate) mod buf; #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) mod date; -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -pub(crate) mod drain; -#[cfg(any(feature = "http1", feature = "http2", feature = "server"))] -pub(crate) mod exec; pub(crate) mod io; -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -mod lazy; +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] +pub(crate) mod task; #[cfg(any( - feature = "stream", - all(feature = "client", any(feature = "http1", feature = "http2")) + all(feature = "server", feature = "http1"), + all(any(feature = "client", feature = "server"), feature = "http2"), ))] -pub(crate) mod sync_wrapper; -#[cfg(feature = "http1")] -pub(crate) mod task; +pub(crate) mod time; +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(crate) mod watch; - -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -pub(crate) use self::lazy::{lazy, Started as Lazy}; diff --git a/.cargo-vendor/hyper/src/common/task.rs b/.cargo-vendor/hyper/src/common/task.rs index 0ac047a462..41671b1453 100644 --- a/.cargo-vendor/hyper/src/common/task.rs +++ b/.cargo-vendor/hyper/src/common/task.rs @@ -1,12 +1,9 @@ -use std::{ - convert::Infallible, - 
task::{Context, Poll}, -}; +use std::task::{Context, Poll}; /// A function to help "yield" a future, such that it is re-scheduled immediately. /// /// Useful for spin counts, so a future doesn't hog too much time. -pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { +pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { cx.waker().wake_by_ref(); Poll::Pending } diff --git a/.cargo-vendor/hyper/src/common/time.rs b/.cargo-vendor/hyper/src/common/time.rs new file mode 100644 index 0000000000..a8d3cc9c85 --- /dev/null +++ b/.cargo-vendor/hyper/src/common/time.rs @@ -0,0 +1,79 @@ +#[cfg(any( + all(any(feature = "client", feature = "server"), feature = "http2"), + all(feature = "server", feature = "http1"), +))] +use std::time::Duration; +use std::{fmt, sync::Arc}; +use std::{pin::Pin, time::Instant}; + +use crate::rt::Sleep; +use crate::rt::Timer; + +/// A user-provided timer to time background tasks. +#[derive(Clone)] +pub(crate) enum Time { + Timer(Arc), + Empty, +} + +#[cfg(all(feature = "server", feature = "http1"))] +#[derive(Clone, Copy, Debug)] +pub(crate) enum Dur { + Default(Option), + Configured(Option), +} + +impl fmt::Debug for Time { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Time").finish() + } +} + +impl Time { + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] + pub(crate) fn sleep(&self, duration: Duration) -> Pin> { + match *self { + Time::Empty => { + panic!("You must supply a timer.") + } + Time::Timer(ref t) => t.sleep(duration), + } + } + + #[cfg(feature = "http1")] + pub(crate) fn sleep_until(&self, deadline: Instant) -> Pin> { + match *self { + Time::Empty => { + panic!("You must supply a timer.") + } + Time::Timer(ref t) => t.sleep_until(deadline), + } + } + + pub(crate) fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { + match *self { + Time::Empty => { + panic!("You must supply a timer.") + } + Time::Timer(ref t) => t.reset(sleep, new_deadline), + } + } + + 
#[cfg(all(feature = "server", feature = "http1"))] + pub(crate) fn check(&self, dur: Dur, name: &'static str) -> Option { + match dur { + Dur::Default(Some(dur)) => match self { + Time::Empty => { + warn!("timeout `{}` has default, but no timer set", name,); + None + } + Time::Timer(..) => Some(dur), + }, + Dur::Configured(Some(dur)) => match self { + Time::Empty => panic!("timeout `{}` set, but no timer set", name,), + Time::Timer(..) => Some(dur), + }, + Dur::Default(None) | Dur::Configured(None) => None, + } + } +} diff --git a/.cargo-vendor/hyper/src/error.rs b/.cargo-vendor/hyper/src/error.rs index 5beedeb8b2..9ad4c0e5b3 100644 --- a/.cargo-vendor/hyper/src/error.rs +++ b/.cargo-vendor/hyper/src/error.rs @@ -1,7 +1,4 @@ //! Error and Result module. - -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -use crate::client::connect::Connected; use std::error::Error as StdError; use std::fmt; @@ -11,6 +8,26 @@ pub type Result = std::result::Result; type Cause = Box; /// Represents errors that can occur handling HTTP streams. +/// +/// # Formatting +/// +/// The `Display` implementation of this type will only print the details of +/// this level of error, even though it may have been caused by another error +/// and contain that error in its source. To print all the relevant +/// information, including the source chain, using something like +/// `std::error::Report`, or equivalent 3rd party types. +/// +/// The contents of the formatted error message of this specific `Error` type +/// is unspecified. **You must not depend on it.** The wording and details may +/// change in any version, with the goal of improving error messages. +/// +/// # Source +/// +/// A `hyper::Error` may be caused by another error. To aid in debugging, +/// those are exposed in `Error::source()` as erased types. While it is +/// possible to check the exact type of the sources, they **can not be depended +/// on**. 
They may come from private internal dependencies, and are subject to +/// change at any moment. pub struct Error { inner: Box, } @@ -18,8 +35,6 @@ pub struct Error { struct ErrorImpl { kind: Kind, cause: Option, - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - connect_info: Option, } #[derive(Debug)] @@ -27,86 +42,100 @@ pub(super) enum Kind { Parse(Parse), User(User), /// A message reached EOF, but is not complete. - #[allow(unused)] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] IncompleteMessage, /// A connection received a message (or bytes) when not waiting for one. - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] UnexpectedMessage, /// A pending item was dropped before ever being processed. Canceled, /// Indicates a channel (client or body sender) is closed. + #[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + all(feature = "http2", feature = "client") + ))] ChannelClosed, /// An `io::Error` that occurred while trying to read or write to a network stream. - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] Io, - /// Error occurred while connecting. - #[allow(unused)] - Connect, - /// Error creating a TcpListener. - #[cfg(all(feature = "tcp", feature = "server"))] - Listen, - /// Error accepting on an Incoming stream. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - Accept, /// User took too long to send headers - #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + #[cfg(all(feature = "http1", feature = "server"))] HeaderTimeout, /// Error while reading a body from connection. 
- #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] Body, /// Error while writing a body to connection. - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] BodyWrite, /// Error calling AsyncWrite::shutdown() - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Shutdown, /// A general error from h2. - #[cfg(feature = "http2")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] Http2, } #[derive(Debug)] pub(super) enum Parse { Method, - Version, #[cfg(feature = "http1")] + Version, + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] VersionH2, Uri, - #[cfg_attr(not(all(feature = "http1", feature = "server")), allow(unused))] + #[cfg(all(feature = "http1", feature = "server"))] UriTooLong, + #[cfg(feature = "http1")] Header(Header), + #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg_attr(feature = "http2", allow(unused))] TooLarge, Status, - #[cfg_attr(debug_assertions, allow(unused))] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Internal, } #[derive(Debug)] +#[cfg(feature = "http1")] pub(super) enum Header { Token, - #[cfg(feature = "http1")] + #[cfg(any(feature = "client", feature = "server"))] ContentLengthInvalid, - #[cfg(all(feature = "http1", feature = "server"))] + #[cfg(feature = "server")] TransferEncodingInvalid, - #[cfg(feature = "http1")] + #[cfg(any(feature = "client", feature = "server"))] TransferEncodingUnexpected, } #[derive(Debug)] pub(super) enum User { - /// Error calling user's HttpBody::poll_data(). - #[cfg(any(feature = "http1", feature = "http2"))] + /// Error calling user's Body::poll_data(). 
+ #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] Body, /// The user aborted writing of the outgoing body. + #[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + feature = "ffi" + ))] BodyWriteAborted, - /// Error calling user's MakeService. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - MakeService, /// Error from future of user's Service. - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(any( + all(any(feature = "client", feature = "server"), feature = "http1"), + all(feature = "server", feature = "http2") + ))] Service, /// User tried to send a certain header in an unexpected context. /// @@ -114,36 +143,20 @@ pub(super) enum User { #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] UnexpectedHeader, - /// User tried to create a Request with bad version. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - UnsupportedVersion, - /// User tried to create a CONNECT Request with the Client. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - UnsupportedRequestMethod, /// User tried to respond with a 1xx (not 101) response code. #[cfg(feature = "http1")] #[cfg(feature = "server")] UnsupportedStatusCode, - /// User tried to send a Request with Client with non-absolute URI. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - AbsoluteUriRequired, /// User tried polling for an upgrade that doesn't exist. NoUpgrade, /// User polled for an upgrade, but low-level API is not using upgrades. - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] ManualUpgrade, - /// User called `server::Connection::without_shutdown()` on an HTTP/2 conn. - #[cfg(feature = "server")] - WithoutShutdownNonHttp1, - /// The dispatch task is gone. 
- #[cfg(feature = "client")] + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] DispatchGone, /// User aborted in an FFI callback. @@ -162,6 +175,7 @@ impl Error { } /// Returns true if this was an HTTP parse error caused by a message that was too large. + #[cfg(all(feature = "http1", feature = "server"))] pub fn is_parse_too_large(&self) -> bool { matches!( self.inner.kind, @@ -187,21 +201,40 @@ impl Error { /// Returns true if a sender's channel is closed. pub fn is_closed(&self) -> bool { + #[cfg(not(any( + all(feature = "http1", any(feature = "client", feature = "server")), + all(feature = "http2", feature = "client") + )))] + return false; + + #[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + all(feature = "http2", feature = "client") + ))] matches!(self.inner.kind, Kind::ChannelClosed) } - /// Returns true if this was an error from `Connect`. - pub fn is_connect(&self) -> bool { - matches!(self.inner.kind, Kind::Connect) - } - /// Returns true if the connection closed before a message could complete. pub fn is_incomplete_message(&self) -> bool { + #[cfg(not(all(any(feature = "client", feature = "server"), feature = "http1")))] + return false; + + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] matches!(self.inner.kind, Kind::IncompleteMessage) } /// Returns true if the body write was aborted. pub fn is_body_write_aborted(&self) -> bool { + #[cfg(not(any( + all(feature = "http1", any(feature = "client", feature = "server")), + feature = "ffi" + )))] + return false; + + #[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + feature = "ffi" + ))] matches!(self.inner.kind, Kind::User(User::BodyWriteAborted)) } @@ -210,25 +243,9 @@ impl Error { self.find_source::().is_some() } - /// Consumes the error, returning its cause. 
- pub fn into_cause(self) -> Option> { - self.inner.cause - } - - /// Returns the info of the client connection on which this error occurred. - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - pub fn client_connect_info(&self) -> Option<&Connected> { - self.inner.connect_info.as_ref() - } - pub(super) fn new(kind: Kind) -> Error { Error { - inner: Box::new(ErrorImpl { - kind, - cause: None, - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - connect_info: None, - }), + inner: Box::new(ErrorImpl { kind, cause: None }), } } @@ -237,12 +254,6 @@ impl Error { self } - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - pub(super) fn with_client_connect_info(mut self, connect_info: Connected) -> Error { - self.inner.connect_info = Some(connect_info); - self - } - #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))] pub(super) fn kind(&self) -> &Kind { &self.inner.kind @@ -251,7 +262,7 @@ impl Error { pub(crate) fn find_source(&self) -> Option<&E> { let mut cause = self.source(); while let Some(err) = cause { - if let Some(ref typed) = err.downcast_ref() { + if let Some(typed) = err.downcast_ref() { return Some(typed); } cause = err.source(); @@ -261,7 +272,7 @@ impl Error { None } - #[cfg(feature = "http2")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] pub(super) fn h2_reason(&self) -> h2::Reason { // Find an h2::Reason somewhere in the cause stack, if it exists, // otherwise assume an INTERNAL_ERROR. 
@@ -274,62 +285,62 @@ impl Error { Error::new(Kind::Canceled) } - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(super) fn new_incomplete() -> Error { Error::new(Kind::IncompleteMessage) } - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(super) fn new_too_large() -> Error { Error::new(Kind::Parse(Parse::TooLarge)) } - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(super) fn new_version_h2() -> Error { Error::new(Kind::Parse(Parse::VersionH2)) } - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(super) fn new_unexpected_message() -> Error { Error::new(Kind::UnexpectedMessage) } - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] pub(super) fn new_io(cause: std::io::Error) -> Error { Error::new(Kind::Io).with(cause) } - #[cfg(all(feature = "server", feature = "tcp"))] - pub(super) fn new_listen>(cause: E) -> Error { - Error::new(Kind::Listen).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - pub(super) fn new_accept>(cause: E) -> Error { - Error::new(Kind::Accept).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_connect>(cause: E) -> Error { - Error::new(Kind::Connect).with(cause) - } - + #[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + all(feature = "http2", feature = "client") + ))] pub(super) fn new_closed() -> Error { Error::new(Kind::ChannelClosed) } - #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] pub(super) fn new_body>(cause: E) 
-> Error { Error::new(Kind::Body).with(cause) } - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] pub(super) fn new_body_write>(cause: E) -> Error { Error::new(Kind::BodyWrite).with(cause) } + #[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + feature = "ffi" + ))] pub(super) fn new_body_write_aborted() -> Error { Error::new(Kind::User(User::BodyWriteAborted)) } @@ -344,66 +355,43 @@ impl Error { Error::new_user(User::UnexpectedHeader) } - #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + #[cfg(all(feature = "http1", feature = "server"))] pub(super) fn new_header_timeout() -> Error { Error::new(Kind::HeaderTimeout) } - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_user_unsupported_version() -> Error { - Error::new_user(User::UnsupportedVersion) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_user_unsupported_request_method() -> Error { - Error::new_user(User::UnsupportedRequestMethod) - } - #[cfg(feature = "http1")] #[cfg(feature = "server")] pub(super) fn new_user_unsupported_status_code() -> Error { Error::new_user(User::UnsupportedStatusCode) } - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_user_absolute_uri_required() -> Error { - Error::new_user(User::AbsoluteUriRequired) - } - pub(super) fn new_user_no_upgrade() -> Error { Error::new_user(User::NoUpgrade) } - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(super) fn new_user_manual_upgrade() -> Error { Error::new_user(User::ManualUpgrade) } - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - pub(super) fn new_user_make_service>(cause: E) -> Error { - 
Error::new_user(User::MakeService).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(any( + all(any(feature = "client", feature = "server"), feature = "http1"), + all(feature = "server", feature = "http2") + ))] pub(super) fn new_user_service>(cause: E) -> Error { Error::new_user(User::Service).with(cause) } - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] pub(super) fn new_user_body>(cause: E) -> Error { Error::new_user(User::Body).with(cause) } - #[cfg(feature = "server")] - pub(super) fn new_without_shutdown_not_h1() -> Error { - Error::new(Kind::User(User::WithoutShutdownNonHttp1)) - } - - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(super) fn new_shutdown(cause: std::io::Error) -> Error { Error::new(Kind::Shutdown).with(cause) } @@ -413,12 +401,12 @@ impl Error { Error::new_user(User::AbortedByCallback) } - #[cfg(feature = "client")] + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] pub(super) fn new_user_dispatch_gone() -> Error { Error::new(Kind::User(User::DispatchGone)) } - #[cfg(feature = "http2")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] pub(super) fn new_h2(cause: ::h2::Error) -> Error { if cause.is_io() { Error::new_io(cause.into_io().expect("h2::Error::is_io")) @@ -427,21 +415,19 @@ impl Error { } } - /// The error's standalone message, without the message from the source. 
- pub fn message(&self) -> impl fmt::Display + '_ { - self.description() - } - fn description(&self) -> &str { match self.inner.kind { Kind::Parse(Parse::Method) => "invalid HTTP method parsed", - Kind::Parse(Parse::Version) => "invalid HTTP version parsed", #[cfg(feature = "http1")] + Kind::Parse(Parse::Version) => "invalid HTTP version parsed", + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)", Kind::Parse(Parse::Uri) => "invalid URI", + #[cfg(all(feature = "http1", feature = "server"))] Kind::Parse(Parse::UriTooLong) => "URI too long", - Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed", #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed", + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => { "invalid content-length parsed" } @@ -449,72 +435,76 @@ impl Error { Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => { "invalid transfer-encoding parsed" } - #[cfg(feature = "http1")] + #[cfg(all(feature = "http1", any(feature = "client", feature = "server")))] Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => { "unexpected transfer-encoding parsed" } + #[cfg(any(feature = "http1", feature = "http2"))] Kind::Parse(Parse::TooLarge) => "message head is too large", Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed", + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Kind::Parse(Parse::Internal) => { "internal error inside Hyper and/or its dependencies, please report" } + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Kind::IncompleteMessage => "connection closed before message completed", - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] 
Kind::UnexpectedMessage => "received unexpected message from connection", + #[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + all(feature = "http2", feature = "client") + ))] Kind::ChannelClosed => "channel closed", - Kind::Connect => "error trying to connect", Kind::Canceled => "operation was canceled", - #[cfg(all(feature = "server", feature = "tcp"))] - Kind::Listen => "error creating server listener", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - Kind::Accept => "error accepting connection", - #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] + #[cfg(all(feature = "http1", feature = "server"))] Kind::HeaderTimeout => "read header from client timeout", - #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] Kind::Body => "error reading a body from connection", - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] Kind::BodyWrite => "error writing a body to connection", - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Kind::Shutdown => "error shutting down connection", - #[cfg(feature = "http2")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] Kind::Http2 => "http2 error", - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] Kind::Io => "connection error", - #[cfg(any(feature = "http1", feature = "http2"))] - Kind::User(User::Body) => "error from user's HttpBody stream", + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] + Kind::User(User::Body) => "error from user's Body stream", + 
#[cfg(any( + all(feature = "http1", any(feature = "client", feature = "server")), + feature = "ffi" + ))] Kind::User(User::BodyWriteAborted) => "user body write aborted", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - Kind::User(User::MakeService) => "error from user's MakeService", - #[cfg(any(feature = "http1", feature = "http2"))] + #[cfg(any( + all(any(feature = "client", feature = "server"), feature = "http1"), + all(feature = "server", feature = "http2") + ))] Kind::User(User::Service) => "error from user's Service", #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] Kind::User(User::UnexpectedHeader) => "user sent unexpected header", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Kind::User(User::UnsupportedVersion) => "request has unsupported HTTP version", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Kind::User(User::UnsupportedRequestMethod) => "request has unsupported HTTP method", #[cfg(feature = "http1")] #[cfg(feature = "server")] Kind::User(User::UnsupportedStatusCode) => { "response has 1xx status code, not supported by server" } - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Kind::User(User::AbsoluteUriRequired) => "client requires absolute-form URIs", Kind::User(User::NoUpgrade) => "no upgrade available", - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use", - #[cfg(feature = "server")] - Kind::User(User::WithoutShutdownNonHttp1) => { - "without_shutdown() called on a non-HTTP/1 connection" - } - #[cfg(feature = "client")] + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] Kind::User(User::DispatchGone) => "dispatch task is gone", #[cfg(feature = "ffi")] Kind::User(User::AbortedByCallback) => "operation aborted by an 
application callback", @@ -535,11 +525,7 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ref cause) = self.inner.cause { - write!(f, "{}: {}", self.description(), cause) - } else { - f.write_str(self.description()) - } + f.write_str(self.description()) } } @@ -561,20 +547,23 @@ impl From for Error { #[cfg(feature = "http1")] impl Parse { + #[cfg(any(feature = "client", feature = "server"))] pub(crate) fn content_length_invalid() -> Self { Parse::Header(Header::ContentLengthInvalid) } - #[cfg(all(feature = "http1", feature = "server"))] + #[cfg(feature = "server")] pub(crate) fn transfer_encoding_invalid() -> Self { Parse::Header(Header::TransferEncodingInvalid) } + #[cfg(any(feature = "client", feature = "server"))] pub(crate) fn transfer_encoding_unexpected() -> Self { Parse::Header(Header::TransferEncodingUnexpected) } } +#[cfg(feature = "http1")] impl From for Parse { fn from(err: httparse::Error) -> Parse { match err { @@ -613,11 +602,6 @@ impl From for Parse { } } -#[doc(hidden)] -trait AssertSendSync: Send + Sync + 'static {} -#[doc(hidden)] -impl AssertSendSync for Error {} - // ===== impl TimedOut ==== impl fmt::Display for TimedOut { @@ -633,6 +617,13 @@ mod tests { use super::*; use std::mem; + fn assert_send_sync() {} + + #[test] + fn error_satisfies_send_sync() { + assert_send_sync::() + } + #[test] fn error_size_of() { assert_eq!(mem::size_of::(), mem::size_of::()); diff --git a/.cargo-vendor/hyper/src/ext/h1_reason_phrase.rs b/.cargo-vendor/hyper/src/ext/h1_reason_phrase.rs index 021b632b6d..c6f5233345 100644 --- a/.cargo-vendor/hyper/src/ext/h1_reason_phrase.rs +++ b/.cargo-vendor/hyper/src/ext/h1_reason_phrase.rs @@ -1,5 +1,3 @@ -use std::convert::TryFrom; - use bytes::Bytes; /// A reason phrase in an HTTP/1 response. @@ -42,7 +40,7 @@ impl ReasonPhrase { } /// Converts a static byte slice to a reason phrase. 
- pub fn from_static(reason: &'static [u8]) -> Self { + pub const fn from_static(reason: &'static [u8]) -> Self { // TODO: this can be made const once MSRV is >= 1.57.0 if find_invalid_byte(reason).is_some() { panic!("invalid byte in static reason phrase"); @@ -50,11 +48,13 @@ impl ReasonPhrase { Self(Bytes::from_static(reason)) } + // Not public on purpose. /// Converts a `Bytes` directly into a `ReasonPhrase` without validating. /// /// Use with care; invalid bytes in a reason phrase can cause serious security problems if /// emitted in a response. - pub unsafe fn from_bytes_unchecked(reason: Bytes) -> Self { + #[cfg(feature = "client")] + pub(crate) fn from_bytes_unchecked(reason: Bytes) -> Self { Self(reason) } } @@ -107,9 +107,9 @@ impl TryFrom for ReasonPhrase { } } -impl Into for ReasonPhrase { - fn into(self) -> Bytes { - self.0 +impl From for Bytes { + fn from(reason: ReasonPhrase) -> Self { + reason.0 } } @@ -147,7 +147,7 @@ const fn is_valid_byte(b: u8) -> bool { // // The 0xFF comparison is technically redundant, but it matches the text of the spec more // clearly and will be optimized away. - #[allow(unused_comparisons)] + #[allow(unused_comparisons, clippy::absurd_extreme_comparisons)] const fn is_obs_text(b: u8) -> bool { 0x80 <= b && b <= 0xFF } diff --git a/.cargo-vendor/hyper/src/ext/mod.rs b/.cargo-vendor/hyper/src/ext/mod.rs new file mode 100644 index 0000000000..7728f705de --- /dev/null +++ b/.cargo-vendor/hyper/src/ext/mod.rs @@ -0,0 +1,237 @@ +//! HTTP extensions. 
+ +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] +use bytes::Bytes; +#[cfg(any( + all(any(feature = "client", feature = "server"), feature = "http1"), + feature = "ffi" +))] +use http::header::HeaderName; +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] +use http::header::{HeaderMap, IntoHeaderName, ValueIter}; +#[cfg(feature = "ffi")] +use std::collections::HashMap; +#[cfg(feature = "http2")] +use std::fmt; + +#[cfg(any(feature = "http1", feature = "ffi"))] +mod h1_reason_phrase; +#[cfg(any(feature = "http1", feature = "ffi"))] +pub use h1_reason_phrase::ReasonPhrase; + +#[cfg(feature = "http2")] +/// Represents the `:protocol` pseudo-header used by +/// the [Extended CONNECT Protocol]. +/// +/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 +#[derive(Clone, Eq, PartialEq)] +pub struct Protocol { + inner: h2::ext::Protocol, +} + +#[cfg(feature = "http2")] +impl Protocol { + /// Converts a static string to a protocol name. + pub const fn from_static(value: &'static str) -> Self { + Self { + inner: h2::ext::Protocol::from_static(value), + } + } + + /// Returns a str representation of the header. 
+ pub fn as_str(&self) -> &str { + self.inner.as_str() + } + + #[cfg(feature = "server")] + pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self { + Self { inner } + } + + #[cfg(all(feature = "client", feature = "http2"))] + pub(crate) fn into_inner(self) -> h2::ext::Protocol { + self.inner + } +} + +#[cfg(feature = "http2")] +impl<'a> From<&'a str> for Protocol { + fn from(value: &'a str) -> Self { + Self { + inner: h2::ext::Protocol::from(value), + } + } +} + +#[cfg(feature = "http2")] +impl AsRef<[u8]> for Protocol { + fn as_ref(&self) -> &[u8] { + self.inner.as_ref() + } +} + +#[cfg(feature = "http2")] +impl fmt::Debug for Protocol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// A map from header names to their original casing as received in an HTTP message. +/// +/// If an HTTP/1 response `res` is parsed on a connection whose option +/// [`preserve_header_case`] was set to true and the response included +/// the following headers: +/// +/// ```ignore +/// x-Bread: Baguette +/// X-BREAD: Pain +/// x-bread: Ficelle +/// ``` +/// +/// Then `res.extensions().get::()` will return a map with: +/// +/// ```ignore +/// HeaderCaseMap({ +/// "x-bread": ["x-Bread", "X-BREAD", "x-bread"], +/// }) +/// ``` +/// +/// [`preserve_header_case`]: /client/struct.Client.html#method.preserve_header_case +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] +#[derive(Clone, Debug)] +pub(crate) struct HeaderCaseMap(HeaderMap); + +#[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] +impl HeaderCaseMap { + /// Returns a view of all spellings associated with that header name, + /// in the order they were found. + #[cfg(feature = "client")] + pub(crate) fn get_all<'a>( + &'a self, + name: &HeaderName, + ) -> impl Iterator + 'a> + 'a { + self.get_all_internal(name) + } + + /// Returns a view of all spellings associated with that header name, + /// in the order they were found. 
+ #[cfg(any(feature = "client", feature = "server"))] + pub(crate) fn get_all_internal(&self, name: &HeaderName) -> ValueIter<'_, Bytes> { + self.0.get_all(name).into_iter() + } + + #[cfg(any(feature = "client", feature = "server"))] + pub(crate) fn default() -> Self { + Self(Default::default()) + } + + #[cfg(any(test, feature = "ffi"))] + pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) { + self.0.insert(name, orig); + } + + #[cfg(any(feature = "client", feature = "server"))] + pub(crate) fn append(&mut self, name: N, orig: Bytes) + where + N: IntoHeaderName, + { + self.0.append(name, orig); + } +} + +#[cfg(feature = "ffi")] +#[derive(Clone, Debug)] +/// Hashmap +pub(crate) struct OriginalHeaderOrder { + /// Stores how many entries a Headername maps to. This is used + /// for accounting. + num_entries: HashMap, + /// Stores the ordering of the headers. ex: `vec[i] = (headerName, idx)`, + /// The vector is ordered such that the ith element + /// represents the ith header that came in off the line. + /// The `HeaderName` and `idx` are then used elsewhere to index into + /// the multi map that stores the header values. 
+ entry_order: Vec<(HeaderName, usize)>, +} + +#[cfg(all(feature = "http1", feature = "ffi"))] +impl OriginalHeaderOrder { + pub(crate) fn default() -> Self { + OriginalHeaderOrder { + num_entries: HashMap::new(), + entry_order: Vec::new(), + } + } + + pub(crate) fn insert(&mut self, name: HeaderName) { + if !self.num_entries.contains_key(&name) { + let idx = 0; + self.num_entries.insert(name.clone(), 1); + self.entry_order.push((name, idx)); + } + // Replacing an already existing element does not + // change ordering, so we only care if its the first + // header name encountered + } + + pub(crate) fn append(&mut self, name: N) + where + N: IntoHeaderName + Into + Clone, + { + let name: HeaderName = name.into(); + let idx; + if self.num_entries.contains_key(&name) { + idx = self.num_entries[&name]; + *self.num_entries.get_mut(&name).unwrap() += 1; + } else { + idx = 0; + self.num_entries.insert(name.clone(), 1); + } + self.entry_order.push((name, idx)); + } + + // No doc test is run here because `RUSTFLAGS='--cfg hyper_unstable_ffi'` + // is needed to compile. Once ffi is stablized `no_run` should be removed + // here. + /// This returns an iterator that provides header names and indexes + /// in the original order received. 
+ /// + /// # Examples + /// ```no_run + /// use hyper::ext::OriginalHeaderOrder; + /// use hyper::header::{HeaderName, HeaderValue, HeaderMap}; + /// + /// let mut h_order = OriginalHeaderOrder::default(); + /// let mut h_map = Headermap::new(); + /// + /// let name1 = b"Set-CookiE"; + /// let value1 = b"a=b"; + /// h_map.append(name1); + /// h_order.append(name1); + /// + /// let name2 = b"Content-Encoding"; + /// let value2 = b"gzip"; + /// h_map.append(name2, value2); + /// h_order.append(name2); + /// + /// let name3 = b"SET-COOKIE"; + /// let value3 = b"c=d"; + /// h_map.append(name3, value3); + /// h_order.append(name3) + /// + /// let mut iter = h_order.get_in_order() + /// + /// let (name, idx) = iter.next(); + /// assert_eq!(b"a=b", h_map.get_all(name).nth(idx).unwrap()); + /// + /// let (name, idx) = iter.next(); + /// assert_eq!(b"gzip", h_map.get_all(name).nth(idx).unwrap()); + /// + /// let (name, idx) = iter.next(); + /// assert_eq!(b"c=d", h_map.get_all(name).nth(idx).unwrap()); + /// ``` + pub(crate) fn get_in_order(&self) -> impl Iterator { + self.entry_order.iter() + } +} diff --git a/.cargo-vendor/hyper/src/ffi/body.rs b/.cargo-vendor/hyper/src/ffi/body.rs index 39ba5beffb..c412f219f6 100644 --- a/.cargo-vendor/hyper/src/ffi/body.rs +++ b/.cargo-vendor/hyper/src/ffi/body.rs @@ -3,17 +3,47 @@ use std::mem::ManuallyDrop; use std::ptr; use std::task::{Context, Poll}; -use http::HeaderMap; +use http_body_util::BodyExt as _; use libc::{c_int, size_t}; use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType}; use super::{UserDataPointer, HYPER_ITER_CONTINUE}; -use crate::body::{Body, Bytes, HttpBody as _}; +use crate::body::{Bytes, Frame, Incoming as IncomingBody}; /// A streaming HTTP body. -pub struct hyper_body(pub(super) Body); +/// +/// This is used both for sending requests (with `hyper_request_set_body`) and +/// for receiving responses (with `hyper_response_body`). 
+/// +/// For outgoing request bodies, call `hyper_body_set_data_func` to provide the +/// data. +/// +/// For incoming response bodies, call `hyper_body_data` to get a task that will +/// yield a chunk of data each time it is polled. That task must be then be +/// added to the executor with `hyper_executor_push`. +/// +/// Methods: +/// +/// - hyper_body_new: Create a new “empty” body. +/// - hyper_body_set_userdata: Set userdata on this body, which will be passed to callback functions. +/// - hyper_body_set_data_func: Set the data callback for this body. +/// - hyper_body_data: Creates a task that will poll a response body for the next buffer of data. +/// - hyper_body_foreach: Creates a task to execute the callback with each body chunk received. +/// - hyper_body_free: Free a body. +pub struct hyper_body(pub(super) IncomingBody); /// A buffer of bytes that is sent or received on a `hyper_body`. +/// +/// Obtain one of these in the callback of `hyper_body_foreach` or by receiving +/// a task of type `HYPER_TASK_BUF` from `hyper_executor_poll` (after calling +/// `hyper_body_data` and pushing the resulting task). +/// +/// Methods: +/// +/// - hyper_buf_bytes: Get a pointer to the bytes in this buffer. +/// - hyper_buf_copy: Create a new hyper_buf * by copying the provided bytes. +/// - hyper_buf_free: Free this buffer. +/// - hyper_buf_len: Get the length of the bytes this buffer contains. pub struct hyper_buf(pub(crate) Bytes); pub(crate) struct UserBody { @@ -29,51 +59,90 @@ type hyper_body_data_callback = extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut *mut hyper_buf) -> c_int; ffi_fn! { - /// Create a new "empty" body. + /// Creates a new "empty" body. /// /// If not configured, this body acts as an empty payload. + /// + /// To avoid a memory leak, the body must eventually be consumed by + /// `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`. 
fn hyper_body_new() -> *mut hyper_body { - Box::into_raw(Box::new(hyper_body(Body::empty()))) + Box::into_raw(Box::new(hyper_body(IncomingBody::ffi()))) } ?= ptr::null_mut() } ffi_fn! { - /// Free a `hyper_body *`. + /// Free a body. + /// + /// This should only be used if the request isn't consumed by + /// `hyper_body_foreach` or `hyper_request_set_body`. fn hyper_body_free(body: *mut hyper_body) { drop(non_null!(Box::from_raw(body) ?= ())); } } ffi_fn! { - /// Return a task that will poll the body for the next buffer of data. + /// Creates a task that will poll a response body for the next buffer of data. /// - /// The task value may have different types depending on the outcome: + /// The task may have different types depending on the outcome: /// /// - `HYPER_TASK_BUF`: Success, and more data was received. /// - `HYPER_TASK_ERROR`: An error retrieving the data. /// - `HYPER_TASK_EMPTY`: The body has finished streaming data. /// - /// This does not consume the `hyper_body *`, so it may be used to again. - /// However, it MUST NOT be used or freed until the related task completes. + /// When the application receives the task from `hyper_executor_poll`, + /// if the task type is `HYPER_TASK_BUF`, it should cast the task to + /// `hyper_buf *` and consume all the bytes in the buffer. Then + /// the application should call `hyper_body_data` again for the same + /// `hyper_body *`, to create a task for the next buffer of data. + /// Repeat until the polled task type is `HYPER_TASK_ERROR` or + /// `HYPER_TASK_EMPTY`. + /// + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. + /// + /// This does not consume the `hyper_body *`, so it may be used again. + /// However, the `hyper_body *` MUST NOT be used or freed until the + /// related task is returned from `hyper_executor_poll`. 
+ /// + /// For a more convenient method, see also `hyper_body_foreach`. fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task { // This doesn't take ownership of the Body, so don't allow destructor let mut body = ManuallyDrop::new(non_null!(Box::from_raw(body) ?= ptr::null_mut())); Box::into_raw(hyper_task::boxed(async move { - body.0.data().await.map(|res| res.map(hyper_buf)) + loop { + match body.0.frame().await { + Some(Ok(frame)) => { + if let Ok(data) = frame.into_data() { + return Ok(Some(hyper_buf(data))); + } else { + continue; + } + }, + Some(Err(e)) => return Err(e), + None => return Ok(None), + } + } })) } ?= ptr::null_mut() } ffi_fn! { - /// Return a task that will poll the body and execute the callback with each - /// body chunk that is received. + /// Creates a task to execute the callback with each body chunk received. /// - /// The `hyper_buf` pointer is only a borrowed reference, it cannot live outside - /// the execution of the callback. You must make a copy to retain it. + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. + /// + /// The `hyper_buf` pointer is only a borrowed reference. It cannot live outside + /// the execution of the callback. You must make a copy of the bytes to retain them. /// /// The callback should return `HYPER_ITER_CONTINUE` to continue iterating - /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. + /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. Each + /// invocation of the callback must consume all the bytes it is provided. + /// There is no mechanism to signal to Hyper that only a subset of bytes were + /// consumed. /// /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it. 
fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut hyper_task { @@ -81,10 +150,13 @@ ffi_fn! { let userdata = UserDataPointer(userdata); Box::into_raw(hyper_task::boxed(async move { - while let Some(item) = body.0.data().await { - let chunk = item?; - if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) { - return Err(crate::Error::new_user_aborted_by_callback()); + let _ = &userdata; + while let Some(item) = body.0.frame().await { + let frame = item?; + if let Ok(chunk) = frame.into_data() { + if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) { + return Err(crate::Error::new_user_aborted_by_callback()); + } } } Ok(()) @@ -101,7 +173,7 @@ ffi_fn! { } ffi_fn! { - /// Set the data callback for this body. + /// Set the outgoing data callback for this body. /// /// The callback is called each time hyper needs to send more data for the /// body. It is passed the value from `hyper_body_set_userdata`. @@ -136,7 +208,10 @@ impl UserBody { } } - pub(crate) fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { + pub(crate) fn poll_data( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>>> { let mut out = std::ptr::null_mut(); match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) { super::task::HYPER_POLL_READY => { @@ -144,7 +219,7 @@ impl UserBody { Poll::Ready(None) } else { let buf = unsafe { Box::from_raw(out) }; - Poll::Ready(Some(Ok(buf.0))) + Poll::Ready(Some(Ok(Frame::data(buf.0)))) } } super::task::HYPER_POLL_PENDING => Poll::Pending, @@ -157,13 +232,6 @@ impl UserBody { ))))), } } - - pub(crate) fn poll_trailers( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(Ok(None)) - } } /// cbindgen:ignore @@ -184,7 +252,10 @@ ffi_fn! { /// Create a new `hyper_buf *` by copying the provided bytes. /// /// This makes an owned copy of the bytes, so the `buf` argument can be - /// freed or changed afterwards. 
+ /// freed (with `hyper_buf_free`) or changed afterwards. + /// + /// To avoid a memory leak, the copy must eventually be consumed by + /// `hyper_buf_free`. /// /// This returns `NULL` if allocating a new buffer fails. fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf { @@ -217,6 +288,8 @@ ffi_fn! { ffi_fn! { /// Free this buffer. + /// + /// This should be used for any buffer once it is no longer needed. fn hyper_buf_free(buf: *mut hyper_buf) { drop(unsafe { Box::from_raw(buf) }); } diff --git a/.cargo-vendor/hyper/src/ffi/client.rs b/.cargo-vendor/hyper/src/ffi/client.rs index 670f77d141..975314b9be 100644 --- a/.cargo-vendor/hyper/src/ffi/client.rs +++ b/.cargo-vendor/hyper/src/ffi/client.rs @@ -12,60 +12,146 @@ use super::io::hyper_io; use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType, WeakExec}; /// An options builder to configure an HTTP client connection. +/// +/// Methods: +/// +/// - hyper_clientconn_options_new: Creates a new set of HTTP clientconn options to be used in a handshake. +/// - hyper_clientconn_options_exec: Set the client background task executor. +/// - hyper_clientconn_options_http2: Set whether to use HTTP2. +/// - hyper_clientconn_options_set_preserve_header_case: Set whether header case is preserved. +/// - hyper_clientconn_options_set_preserve_header_order: Set whether header order is preserved. +/// - hyper_clientconn_options_http1_allow_multiline_headers: Set whether HTTP/1 connections accept obsolete line folding for header values. +/// - hyper_clientconn_options_free: Free a set of HTTP clientconn options. pub struct hyper_clientconn_options { - builder: conn::Builder, + http1_allow_obsolete_multiline_headers_in_responses: bool, + http1_preserve_header_case: bool, + http1_preserve_header_order: bool, + http2: bool, /// Use a `Weak` to prevent cycles. exec: WeakExec, } /// An HTTP client connection handle. /// -/// These are used to send a request on a single connection. 
It's possible to -/// send multiple requests on a single connection, such as when HTTP/1 -/// keep-alive or HTTP/2 is used. +/// These are used to send one or more requests on a single connection. +/// +/// It's possible to send multiple requests on a single connection, such +/// as when HTTP/1 keep-alive or HTTP/2 is used. +/// +/// To create a `hyper_clientconn`: +/// +/// 1. Create a `hyper_io` with `hyper_io_new`. +/// 2. Create a `hyper_clientconn_options` with `hyper_clientconn_options_new`. +/// 3. Call `hyper_clientconn_handshake` with the `hyper_io` and `hyper_clientconn_options`. +/// This creates a `hyper_task`. +/// 5. Call `hyper_task_set_userdata` to assign an application-specific pointer to the task. +/// This allows keeping track of multiple connections that may be handshaking +/// simultaneously. +/// 4. Add the `hyper_task` to an executor with `hyper_executor_push`. +/// 5. Poll that executor until it yields a task of type `HYPER_TASK_CLIENTCONN`. +/// 6. Extract the `hyper_clientconn` from the task with `hyper_task_value`. +/// This will require a cast from `void *` to `hyper_clientconn *`. +/// +/// This process results in a `hyper_clientconn` that permanently owns the +/// `hyper_io`. Because the `hyper_io` in turn owns a TCP or TLS connection, that means +/// the `hyper_clientconn` owns the connection for both the clientconn's lifetime +/// and the connection's lifetime. +/// +/// In other words, each connection (`hyper_io`) must have exactly one `hyper_clientconn` +/// associated with it. That's because `hyper_clientconn_handshake` sends the +/// [HTTP/2 Connection Preface] (for HTTP/2 connections). Since that preface can't +/// be sent twice, handshake can't be called twice. +/// +/// [HTTP/2 Connection Preface]: https://datatracker.ietf.org/doc/html/rfc9113#name-http-2-connection-preface +/// +/// Methods: +/// +/// - hyper_clientconn_handshake: Creates an HTTP client handshake task. 
+/// - hyper_clientconn_send: Creates a task to send a request on the client connection. +/// - hyper_clientconn_free: Free a hyper_clientconn *. pub struct hyper_clientconn { - tx: conn::SendRequest, + tx: Tx, +} + +enum Tx { + #[cfg(feature = "http1")] + Http1(conn::http1::SendRequest), + #[cfg(feature = "http2")] + Http2(conn::http2::SendRequest), } // ===== impl hyper_clientconn ===== ffi_fn! { - /// Starts an HTTP client connection handshake using the provided IO transport - /// and options. + /// Creates an HTTP client handshake task. /// /// Both the `io` and the `options` are consumed in this function call. + /// They should not be used or freed afterwards. + /// + /// The returned task must be polled with an executor until the handshake + /// completes, at which point the value can be taken. /// - /// The returned `hyper_task *` must be polled with an executor until the - /// handshake completes, at which point the value can be taken. + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task { let options = non_null! { Box::from_raw(options) ?= ptr::null_mut() }; let io = non_null! 
{ Box::from_raw(io) ?= ptr::null_mut() }; Box::into_raw(hyper_task::boxed(async move { - options.builder.handshake::<_, crate::Body>(io) + #[cfg(feature = "http2")] + { + if options.http2 { + return conn::http2::Builder::new(options.exec.clone()) + .handshake::<_, crate::body::Incoming>(io) + .await + .map(|(tx, conn)| { + options.exec.execute(Box::pin(async move { + let _ = conn.await; + })); + hyper_clientconn { tx: Tx::Http2(tx) } + }); + } + } + + conn::http1::Builder::new() + .allow_obsolete_multiline_headers_in_responses(options.http1_allow_obsolete_multiline_headers_in_responses) + .preserve_header_case(options.http1_preserve_header_case) + .preserve_header_order(options.http1_preserve_header_order) + .handshake::<_, crate::body::Incoming>(io) .await .map(|(tx, conn)| { options.exec.execute(Box::pin(async move { let _ = conn.await; })); - hyper_clientconn { tx } + hyper_clientconn { tx: Tx::Http1(tx) } }) })) } ?= std::ptr::null_mut() } ffi_fn! { - /// Send a request on the client connection. + /// Creates a task to send a request on the client connection. + /// + /// This consumes the request. You should not use or free the request + /// afterwards. /// /// Returns a task that needs to be polled until it is ready. When ready, the /// task yields a `hyper_response *`. + /// + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task { let mut req = non_null! { Box::from_raw(req) ?= ptr::null_mut() }; // Update request with original-case map of headers req.finalize_request(); - let fut = non_null! { &mut *conn ?= ptr::null_mut() }.tx.send_request(req.0); + let fut = match non_null! 
{ &mut *conn ?= ptr::null_mut() }.tx { + Tx::Http1(ref mut tx) => futures_util::future::Either::Left(tx.send_request(req.0)), + Tx::Http2(ref mut tx) => futures_util::future::Either::Right(tx.send_request(req.0)), + }; let fut = async move { fut.await.map(hyper_response::wrap) @@ -77,6 +163,8 @@ ffi_fn! { ffi_fn! { /// Free a `hyper_clientconn *`. + /// + /// This should be used for any connection once it is no longer needed. fn hyper_clientconn_free(conn: *mut hyper_clientconn) { drop(non_null! { Box::from_raw(conn) ?= () }); } @@ -92,39 +180,45 @@ unsafe impl AsTaskType for hyper_clientconn { ffi_fn! { /// Creates a new set of HTTP clientconn options to be used in a handshake. + /// + /// To avoid a memory leak, the options must eventually be consumed by + /// `hyper_clientconn_options_free` or `hyper_clientconn_handshake`. fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { - #[allow(deprecated)] - let builder = conn::Builder::new(); - Box::into_raw(Box::new(hyper_clientconn_options { - builder, + http1_allow_obsolete_multiline_headers_in_responses: false, + http1_preserve_header_case: false, + http1_preserve_header_order: false, + http2: false, exec: WeakExec::new(), })) } ?= std::ptr::null_mut() } ffi_fn! { - /// Set the whether or not header case is preserved. + /// Set whether header case is preserved. /// /// Pass `0` to allow lowercase normalization (default), `1` to retain original case. fn hyper_clientconn_options_set_preserve_header_case(opts: *mut hyper_clientconn_options, enabled: c_int) { let opts = non_null! { &mut *opts ?= () }; - opts.builder.http1_preserve_header_case(enabled != 0); + opts.http1_preserve_header_case = enabled != 0; } } ffi_fn! { - /// Set the whether or not header order is preserved. + /// Set whether header order is preserved. /// /// Pass `0` to allow reordering (default), `1` to retain original ordering. 
fn hyper_clientconn_options_set_preserve_header_order(opts: *mut hyper_clientconn_options, enabled: c_int) { let opts = non_null! { &mut *opts ?= () }; - opts.builder.http1_preserve_header_order(enabled != 0); + opts.http1_preserve_header_order = enabled != 0; } } ffi_fn! { - /// Free a `hyper_clientconn_options *`. + /// Free a set of HTTP clientconn options. + /// + /// This should only be used if the options aren't consumed by + /// `hyper_clientconn_handshake`. fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { drop(non_null! { Box::from_raw(opts) ?= () }); } @@ -141,20 +235,19 @@ ffi_fn! { let weak_exec = hyper_executor::downgrade(&exec); std::mem::forget(exec); - opts.builder.executor(weak_exec.clone()); opts.exec = weak_exec; } } ffi_fn! { - /// Set the whether to use HTTP2. + /// Set whether to use HTTP2. /// /// Pass `0` to disable, `1` to enable. fn hyper_clientconn_options_http2(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { #[cfg(feature = "http2")] { let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; - opts.builder.http2_only(enabled != 0); + opts.http2 = enabled != 0; hyper_code::HYPERE_OK } @@ -168,15 +261,15 @@ ffi_fn! { } ffi_fn! { - /// Set the whether to include a copy of the raw headers in responses - /// received on this connection. + /// Set whether HTTP/1 connections accept obsolete line folding for header values. + /// + /// Newline codepoints (\r and \n) will be transformed to spaces when parsing. /// /// Pass `0` to disable, `1` to enable. /// - /// If enabled, see `hyper_response_headers_raw()` for usage. - fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { + fn hyper_clientconn_options_http1_allow_multiline_headers(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { let opts = non_null! 
{ &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; - opts.builder.http1_headers_raw(enabled != 0); + opts.http1_allow_obsolete_multiline_headers_in_responses = enabled != 0; hyper_code::HYPERE_OK } } diff --git a/.cargo-vendor/hyper/src/ffi/error.rs b/.cargo-vendor/hyper/src/ffi/error.rs index 015e595aee..b103b2f053 100644 --- a/.cargo-vendor/hyper/src/ffi/error.rs +++ b/.cargo-vendor/hyper/src/ffi/error.rs @@ -1,6 +1,15 @@ use libc::size_t; /// A more detailed error object returned by some hyper functions. +/// +/// Compare with `hyper_code`, which is a simpler error returned from +/// some hyper functions. +/// +/// Methods: +/// +/// - hyper_error_code: Get an equivalent hyper_code from this error. +/// - hyper_error_print: Print the details of this error to a buffer. +/// - hyper_error_free: Frees a hyper_error. pub struct hyper_error(crate::Error); /// A return code for many of hyper's methods. @@ -57,6 +66,8 @@ impl hyper_error { ffi_fn! { /// Frees a `hyper_error`. + /// + /// This should be used for any error once it is no longer needed. fn hyper_error_free(err: *mut hyper_error) { drop(non_null!(Box::from_raw(err) ?= ())); } diff --git a/.cargo-vendor/hyper/src/ffi/http_types.rs b/.cargo-vendor/hyper/src/ffi/http_types.rs index ea10f139cb..2779cb191f 100644 --- a/.cargo-vendor/hyper/src/ffi/http_types.rs +++ b/.cargo-vendor/hyper/src/ffi/http_types.rs @@ -2,31 +2,73 @@ use bytes::Bytes; use libc::{c_int, size_t}; use std::ffi::c_void; -use super::body::{hyper_body, hyper_buf}; +use super::body::hyper_body; use super::error::hyper_code; use super::task::{hyper_task_return_type, AsTaskType}; use super::{UserDataPointer, HYPER_ITER_CONTINUE}; +use crate::body::Incoming as IncomingBody; use crate::ext::{HeaderCaseMap, OriginalHeaderOrder, ReasonPhrase}; use crate::header::{HeaderName, HeaderValue}; -use crate::{Body, HeaderMap, Method, Request, Response, Uri}; +use crate::{HeaderMap, Method, Request, Response, Uri}; /// An HTTP request. 
-pub struct hyper_request(pub(super) Request); +/// +/// Once you've finished constructing a request, you can send it with +/// `hyper_clientconn_send`. +/// +/// Methods: +/// +/// - hyper_request_new: Construct a new HTTP request. +/// - hyper_request_headers: Gets a mutable reference to the HTTP headers of this request +/// - hyper_request_set_body: Set the body of the request. +/// - hyper_request_set_method: Set the HTTP Method of the request. +/// - hyper_request_set_uri: Set the URI of the request. +/// - hyper_request_set_uri_parts: Set the URI of the request with separate scheme, authority, and path/query strings. +/// - hyper_request_set_version: Set the preferred HTTP version of the request. +/// - hyper_request_on_informational: Set an informational (1xx) response callback. +/// - hyper_request_free: Free an HTTP request. +pub struct hyper_request(pub(super) Request); /// An HTTP response. -pub struct hyper_response(pub(super) Response); +/// +/// Obtain one of these by making a request with `hyper_clientconn_send`, then +/// polling the executor unntil you get a `hyper_task` of type +/// `HYPER_TASK_RESPONSE`. To figure out which request this response +/// corresponds to, check the userdata of the task, which you should +/// previously have set to an application-specific identifier for the +/// request. +/// +/// Methods: +/// +/// - hyper_response_status: Get the HTTP-Status code of this response. +/// - hyper_response_version: Get the HTTP version used by this response. +/// - hyper_response_reason_phrase: Get a pointer to the reason-phrase of this response. +/// - hyper_response_reason_phrase_len: Get the length of the reason-phrase of this response. +/// - hyper_response_headers: Gets a reference to the HTTP headers of this response. +/// - hyper_response_body: Take ownership of the body of this response. +/// - hyper_response_free: Free an HTTP response. +pub struct hyper_response(pub(super) Response); /// An HTTP header map. 
/// /// These can be part of a request or response. +/// +/// Obtain a pointer to read or modify these from `hyper_request_headers` +/// or `hyper_response_headers`. +/// +/// Methods: +/// +/// - hyper_headers_add: Adds the provided value to the list of the provided name. +/// - hyper_headers_foreach: Iterates the headers passing each name and value pair to the callback. +/// - hyper_headers_set: Sets the header with the provided name to the provided value. +#[derive(Clone)] pub struct hyper_headers { pub(super) headers: HeaderMap, orig_casing: HeaderCaseMap, orig_order: OriginalHeaderOrder, } -pub(crate) struct RawHeaders(pub(crate) hyper_buf); - +#[derive(Clone)] pub(crate) struct OnInformational { func: hyper_request_on_informational_callback, data: UserDataPointer, @@ -38,13 +80,22 @@ type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut h ffi_fn! { /// Construct a new HTTP request. + /// + /// The default request has an empty body. To send a body, call `hyper_request_set_body`. + /// + /// + /// To avoid a memory leak, the request must eventually be consumed by + /// `hyper_request_free` or `hyper_clientconn_send`. fn hyper_request_new() -> *mut hyper_request { - Box::into_raw(Box::new(hyper_request(Request::new(Body::empty())))) + Box::into_raw(Box::new(hyper_request(Request::new(IncomingBody::empty())))) } ?= std::ptr::null_mut() } ffi_fn! { - /// Free an HTTP request if not going to send it on a client. + /// Free an HTTP request. + /// + /// This should only be used if the request isn't consumed by + /// `hyper_clientconn_send`. fn hyper_request_free(req: *mut hyper_request) { drop(non_null!(Box::from_raw(req) ?= ())); } @@ -175,7 +226,7 @@ ffi_fn! { } ffi_fn! { - /// Gets a reference to the HTTP headers of this request + /// Gets a mutable reference to the HTTP headers of this request /// /// This is not an owned reference, so it should not be accessed after the /// `hyper_request` has been consumed. @@ -187,7 +238,7 @@ ffi_fn! 
{ ffi_fn! { /// Set the body of the request. /// - /// The default is an empty body. + /// You can get a `hyper_body` by calling `hyper_body_new`. /// /// This takes ownership of the `hyper_body *`, you must not use it or /// free it after setting it on the request. @@ -239,7 +290,9 @@ impl hyper_request { // ===== impl hyper_response ===== ffi_fn! { - /// Free an HTTP response after using it. + /// Free an HTTP response. + /// + /// This should be used for any response once it is no longer needed. fn hyper_response_free(resp: *mut hyper_response) { drop(non_null!(Box::from_raw(resp) ?= ())); } @@ -278,27 +331,6 @@ ffi_fn! { } } -ffi_fn! { - /// Get a reference to the full raw headers of this response. - /// - /// You must have enabled `hyper_clientconn_options_headers_raw()`, or this - /// will return NULL. - /// - /// The returned `hyper_buf *` is just a reference, owned by the response. - /// You need to make a copy if you wish to use it after freeing the - /// response. - /// - /// The buffer is not null-terminated, see the `hyper_buf` functions for - /// getting the bytes and length. - fn hyper_response_headers_raw(resp: *const hyper_response) -> *const hyper_buf { - let resp = non_null!(&*resp ?= std::ptr::null()); - match resp.0.extensions().get::() { - Some(raw) => &raw.0, - None => std::ptr::null(), - } - } ?= std::ptr::null() -} - ffi_fn! { /// Get the HTTP version used by this response. /// @@ -334,14 +366,17 @@ ffi_fn! { /// Take ownership of the body of this response. /// /// It is safe to free the response even after taking ownership of its body. + /// + /// To avoid a memory leak, the body must eventually be consumed by + /// `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`. 
fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { - let body = std::mem::take(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut()); + let body = std::mem::replace(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut(), IncomingBody::empty()); Box::into_raw(Box::new(hyper_body(body))) } ?= std::ptr::null_mut() } impl hyper_response { - pub(super) fn wrap(mut resp: Response) -> hyper_response { + pub(super) fn wrap(mut resp: Response) -> hyper_response { let headers = std::mem::take(resp.headers_mut()); let orig_casing = resp .extensions_mut() @@ -532,7 +567,7 @@ unsafe fn raw_name_value( // ===== impl OnInformational ===== impl OnInformational { - pub(crate) fn call(&mut self, resp: Response) { + pub(crate) fn call(&mut self, resp: Response) { let mut resp = hyper_response::wrap(resp); (self.func)(self.data.0, &mut resp); } diff --git a/.cargo-vendor/hyper/src/ffi/io.rs b/.cargo-vendor/hyper/src/ffi/io.rs index bff666dbcf..1bf9aa7a97 100644 --- a/.cargo-vendor/hyper/src/ffi/io.rs +++ b/.cargo-vendor/hyper/src/ffi/io.rs @@ -2,8 +2,8 @@ use std::ffi::c_void; use std::pin::Pin; use std::task::{Context, Poll}; +use crate::rt::{Read, Write}; use libc::size_t; -use tokio::io::{AsyncRead, AsyncWrite}; use super::task::hyper_context; @@ -19,7 +19,20 @@ type hyper_io_read_callback = type hyper_io_write_callback = extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t; -/// An IO object used to represent a socket or similar concept. +/// A read/write handle for a specific connection. +/// +/// This owns a specific TCP or TLS connection for the lifetime of +/// that connection. It contains a read and write callback, as well as a +/// void *userdata. Typically the userdata will point to a struct +/// containing a file descriptor and a TLS context. +/// +/// Methods: +/// +/// - hyper_io_new: Create a new IO type used to represent a transport. +/// - hyper_io_set_read: Set the read function for this IO transport. 
+/// - hyper_io_set_write: Set the write function for this IO transport. +/// - hyper_io_set_userdata: Set the user data pointer for this IO to some value. +/// - hyper_io_free: Free an IO handle. pub struct hyper_io { read: hyper_io_read_callback, write: hyper_io_write_callback, @@ -31,6 +44,14 @@ ffi_fn! { /// /// The read and write functions of this transport should be set with /// `hyper_io_set_read` and `hyper_io_set_write`. + /// + /// It is expected that the underlying transport is non-blocking. When + /// a read or write callback can't make progress because there is no + /// data available yet, it should use the `hyper_waker` mechanism to + /// arrange to be called again when data is available. + /// + /// To avoid a memory leak, the IO handle must eventually be consumed by + /// `hyper_io_free` or `hyper_clientconn_handshake`. fn hyper_io_new() -> *mut hyper_io { Box::into_raw(Box::new(hyper_io { read: read_noop, @@ -41,10 +62,10 @@ ffi_fn! { } ffi_fn! { - /// Free an unused `hyper_io *`. + /// Free an IO handle. /// - /// This is typically only useful if you aren't going to pass ownership - /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. + /// This should only be used if the request isn't consumed by + /// `hyper_clientconn_handshake`. fn hyper_io_free(io: *mut hyper_io) { drop(non_null!(Box::from_raw(io) ?= ())); } @@ -69,10 +90,11 @@ ffi_fn! { /// unless you have already written them yourself. It is also undefined behavior /// to return that more bytes have been written than actually set on the `buf`. /// - /// If there is no data currently available, a waker should be claimed from - /// the `ctx` and registered with whatever polling mechanism is used to signal - /// when data is available later on. The return value should be - /// `HYPER_IO_PENDING`. 
+ /// If there is no data currently available, the callback should create a + /// `hyper_waker` from its `hyper_context` argument and register the waker + /// with whatever polling mechanism is used to signal when data is available + /// later on. The return value should be `HYPER_IO_PENDING`. See the + /// documentation for `hyper_waker`. /// /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` /// should be the return value. @@ -87,11 +109,11 @@ ffi_fn! { /// Data from the `buf` pointer should be written to the transport, up to /// `buf_len` bytes. The number of bytes written should be the return value. /// - /// If no data can currently be written, the `waker` should be cloned and - /// registered with whatever polling mechanism is used to signal when data - /// is available later on. The return value should be `HYPER_IO_PENDING`. - /// - /// Yeet. + /// If there is no data currently available, the callback should create a + /// `hyper_waker` from its `hyper_context` argument and register the waker + /// with whatever polling mechanism is used to signal when data is available + /// later on. The return value should be `HYPER_IO_PENDING`. See the documentation + /// for `hyper_waker`. /// /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` /// should be the return value. @@ -120,13 +142,13 @@ extern "C" fn write_noop( 0 } -impl AsyncRead for hyper_io { +impl Read for hyper_io { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, + mut buf: crate::rt::ReadBufCursor<'_>, ) -> Poll> { - let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8; + let buf_ptr = unsafe { buf.as_mut() }.as_mut_ptr() as *mut u8; let buf_len = buf.remaining(); match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) { @@ -138,15 +160,14 @@ impl AsyncRead for hyper_io { ok => { // We have to trust that the user's read callback actually // filled in that many bytes... 
:( - unsafe { buf.assume_init(ok) }; - buf.advance(ok); + unsafe { buf.advance(ok) }; Poll::Ready(Ok(())) } } } } -impl AsyncWrite for hyper_io { +impl Write for hyper_io { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, diff --git a/.cargo-vendor/hyper/src/ffi/mod.rs b/.cargo-vendor/hyper/src/ffi/mod.rs index fd67a880a6..664b6439d6 100644 --- a/.cargo-vendor/hyper/src/ffi/mod.rs +++ b/.cargo-vendor/hyper/src/ffi/mod.rs @@ -22,10 +22,10 @@ //! ## Building //! //! The C API is part of the Rust library, but isn't compiled by default. Using -//! `cargo`, it can be compiled with the following command: +//! `cargo`, staring with `1.64.0`, it can be compiled with the following command: //! //! ```notrust -//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo build --features client,http1,http2,ffi +//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo rustc --crate-type cdylib --features client,http1,http2,ffi //! ``` // We may eventually allow the FFI to be enabled without `client` or `http1`, @@ -76,6 +76,7 @@ pub const HYPER_HTTP_VERSION_1_1: libc::c_int = 11; /// The HTTP/2 version. pub const HYPER_HTTP_VERSION_2: libc::c_int = 20; +#[derive(Clone)] struct UserDataPointer(*mut std::ffi::c_void); // We don't actually know anything about this pointer, it's up to the user diff --git a/.cargo-vendor/hyper/src/ffi/task.rs b/.cargo-vendor/hyper/src/ffi/task.rs index ef54fe408f..f53a7b1f5a 100644 --- a/.cargo-vendor/hyper/src/ffi/task.rs +++ b/.cargo-vendor/hyper/src/ffi/task.rs @@ -28,6 +28,28 @@ pub const HYPER_POLL_PENDING: c_int = 1; pub const HYPER_POLL_ERROR: c_int = 3; /// A task executor for `hyper_task`s. +/// +/// A task is a unit of work that may be blocked on IO, and can be polled to +/// make progress on that work. +/// +/// An executor can hold many tasks, included from unrelated HTTP connections. +/// An executor is single threaded. Typically you might have one executor per +/// thread. Or, for simplicity, you may choose one executor per connection. 
+/// +/// Progress on tasks happens only when `hyper_executor_poll` is called, and only +/// on tasks whose corresponding `hyper_waker` has been called to indicate they +/// are ready to make progress (for instance, because the OS has indicated there +/// is more data to read or more buffer space available to write). +/// +/// Deadlock potential: `hyper_executor_poll` must not be called from within a task's +/// callback. Doing so will result in a deadlock. +/// +/// Methods: +/// +/// - hyper_executor_new: Creates a new task executor. +/// - hyper_executor_push: Push a task onto the executor. +/// - hyper_executor_poll: Polls the executor, trying to make progress on any tasks that have notified that they are ready again. +/// - hyper_executor_free: Frees an executor and any incomplete tasks still part of it. pub struct hyper_executor { /// The executor of all task futures. /// @@ -55,6 +77,40 @@ pub(crate) struct WeakExec(Weak); struct ExecWaker(AtomicBool); /// An async task. +/// +/// A task represents a chunk of work that will eventually yield exactly one +/// `hyper_task_value`. Tasks are pushed onto an executor, and that executor is +/// responsible for calling the necessary private functions on the task to make +/// progress. In most cases those private functions will eventually cause read +/// or write callbacks on a `hyper_io` object to be called. +/// +/// Tasks are created by various functions: +/// +/// - hyper_clientconn_handshake: Creates an HTTP client handshake task. +/// - hyper_clientconn_send: Creates a task to send a request on the client connection. +/// - hyper_body_data: Creates a task that will poll a response body for the next buffer of data. +/// - hyper_body_foreach: Creates a task to execute the callback with each body chunk received. +/// +/// Tasks then have a userdata associated with them using `hyper_task_set_userdata``. This +/// is important, for instance, to associate a request id with a given request. 
When multiple +/// tasks are running on the same executor, this allows distinguishing tasks for different +/// requests. +/// +/// Tasks are then pushed onto an executor, and eventually yielded from hyper_executor_poll: +/// +/// - hyper_executor_push: Push a task onto the executor. +/// - hyper_executor_poll: Polls the executor, trying to make progress on any tasks that have notified that they are ready again. +/// +/// Once a task is yielded from poll, retrieve its userdata, check its type, +/// and extract its value. This will require a case from void* to the appropriate type. +/// +/// Methods on hyper_task: +/// +/// - hyper_task_type: Query the return type of this task. +/// - hyper_task_value: Takes the output value of this task. +/// - hyper_task_set_userdata: Set a user data pointer to be associated with this task. +/// - hyper_task_userdata: Retrieve the userdata that has been set via hyper_task_set_userdata. +/// - hyper_task_free: Free a task. pub struct hyper_task { future: BoxFuture, output: Option, @@ -66,9 +122,36 @@ struct TaskFuture { } /// An async context for a task that contains the related waker. +/// +/// This is provided to `hyper_io`'s read and write callbacks. Currently +/// its only purpose is to provide access to the waker. See `hyper_waker`. +/// +/// Corresponding Rust type: pub struct hyper_context<'a>(Context<'a>); /// A waker that is saved and used to waken a pending task. +/// +/// This is provided to `hyper_io`'s read and write callbacks via `hyper_context` +/// and `hyper_context_waker`. +/// +/// When nonblocking I/O in one of those callbacks can't make progress (returns +/// `EAGAIN` or `EWOULDBLOCK`), the callback has to return to avoid blocking the +/// executor. But it also has to arrange to get called in the future when more +/// data is available. That's the role of the async context and the waker. The +/// waker can be used to tell the executor "this task is ready to make progress." 
+/// +/// The read or write callback, upon finding it can't make progress, must get a +/// waker from the context (`hyper_context_waker`), arrange for that waker to be +/// called in the future, and then return `HYPER_POLL_PENDING`. +/// +/// The arrangements for the waker to be called in the future are up to the +/// application, but usually it will involve one big `select(2)` loop that checks which +/// FDs are ready, and a correspondence between FDs and waker objects. For each +/// FD that is ready, the corresponding waker must be called. Then `hyper_executor_poll` +/// must be called. That will cause the executor to attempt to make progress on each +/// woken task. +/// +/// Corresponding Rust type: pub struct hyper_waker { waker: std::task::Waker, } @@ -126,27 +209,35 @@ impl hyper_executor { let mut cx = Context::from_waker(&waker); loop { - match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) { - Poll::Ready(val) => return val, - Poll::Pending => { - // Check if any of the pending tasks tried to spawn - // some new tasks. If so, drain into the driver and loop. - if self.drain_queue() { - continue; - } - - // If the driver called `wake` while we were polling, - // we should poll again immediately! - if self.is_woken.0.swap(false, Ordering::SeqCst) { - continue; - } - - return None; - } + { + // Scope the lock on the driver to ensure it is dropped before + // calling drain_queue below. + let mut driver = self.driver.lock().unwrap(); + match Pin::new(&mut *driver).poll_next(&mut cx) { + Poll::Ready(val) => return val, + Poll::Pending => {} + }; + } + + // poll_next returned Pending. + // Check if any of the pending tasks tried to spawn + // some new tasks. If so, drain into the driver and loop. + if self.drain_queue() { + continue; } + + // If the driver called `wake` while we were polling, + // we should poll again immediately! 
+ if self.is_woken.0.swap(false, Ordering::SeqCst) { + continue; + } + + return None; } } + /// drain_queue locks both self.spawn_queue and self.driver, so it requires + /// that neither of them be locked already. fn drain_queue(&self) -> bool { let mut queue = self.spawn_queue.lock().unwrap(); if queue.is_empty() { @@ -177,8 +268,12 @@ impl WeakExec { } } -impl crate::rt::Executor> for WeakExec { - fn execute(&self, fut: BoxFuture<()>) { +impl crate::rt::Executor for WeakExec +where + F: Future + Send + 'static, + F::Output: Send + Sync + AsTaskType, +{ + fn execute(&self, fut: F) { if let Some(exec) = self.0.upgrade() { exec.spawn(hyper_task::boxed(fut)); } @@ -187,6 +282,9 @@ impl crate::rt::Executor> for WeakExec { ffi_fn! { /// Creates a new task executor. + /// + /// To avoid a memory leak, the executor must eventually be consumed by + /// `hyper_executor_free`. fn hyper_executor_new() -> *const hyper_executor { Arc::into_raw(hyper_executor::new()) } ?= ptr::null() @@ -194,6 +292,8 @@ ffi_fn! { ffi_fn! { /// Frees an executor and any incomplete tasks still part of it. + /// + /// This should be used for any executor once it is no longer needed. fn hyper_executor_free(exec: *const hyper_executor) { drop(non_null!(Arc::from_raw(exec) ?= ())); } @@ -202,8 +302,14 @@ ffi_fn! { ffi_fn! { /// Push a task onto the executor. /// - /// The executor takes ownership of the task, it should not be accessed - /// again unless returned back to the user with `hyper_executor_poll`. + /// The executor takes ownership of the task, which must not be accessed + /// again. + /// + /// Ownership of the task will eventually be returned to the user from + /// `hyper_executor_poll`. + /// + /// To distinguish multiple tasks running on the same executor, use + /// hyper_task_set_userdata. 
fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code { let exec = non_null!(&*exec ?= hyper_code::HYPERE_INVALID_ARG); let task = non_null!(Box::from_raw(task) ?= hyper_code::HYPERE_INVALID_ARG); @@ -213,10 +319,14 @@ ffi_fn! { } ffi_fn! { - /// Polls the executor, trying to make progress on any tasks that have notified - /// that they are ready again. + /// Polls the executor, trying to make progress on any tasks that can do so. + /// + /// If any task from the executor is ready, returns one of them. The way + /// tasks signal being finished is internal to Hyper. The order in which tasks + /// are returned is not guaranteed. Use userdata to distinguish between tasks. /// - /// If ready, returns a task from the executor that has completed. + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`. /// /// If there are no ready tasks, this returns `NULL`. fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task { @@ -268,6 +378,10 @@ impl Future for TaskFuture { ffi_fn! { /// Free a task. + /// + /// This should only be used if the task isn't consumed by + /// `hyper_clientconn_handshake` or taken ownership of by + /// `hyper_executor_push`. fn hyper_task_free(task: *mut hyper_task) { drop(non_null!(Box::from_raw(task) ?= ())); } @@ -280,6 +394,11 @@ ffi_fn! { /// this task. /// /// Use `hyper_task_type` to determine the type of the `void *` return value. + /// + /// To avoid a memory leak, a non-empty return value must eventually be + /// consumed by a function appropriate for its type, one of + /// `hyper_error_free`, `hyper_clientconn_free`, `hyper_response_free`, or + /// `hyper_buf_free`. fn hyper_task_value(task: *mut hyper_task) -> *mut c_void { let task = non_null!(&mut *task ?= ptr::null_mut()); @@ -311,6 +430,9 @@ ffi_fn! { /// /// This value will be passed to task callbacks, and can be checked later /// with `hyper_task_userdata`. 
+ /// + /// This is useful for telling apart tasks for different requests that are + /// running on the same executor. fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) { if task.is_null() { return; @@ -384,7 +506,16 @@ impl hyper_context<'_> { } ffi_fn! { - /// Copies a waker out of the task context. + /// Creates a waker associated with the task context. + /// + /// The waker can be used to inform the task's executor that the task is + /// ready to make progress (using `hyper_waker_wake``). + /// + /// Typically this only needs to be called once, but it can be called + /// multiple times, returning a new waker each time. + /// + /// To avoid a memory leak, the waker must eventually be consumed by + /// `hyper_waker_free` or `hyper_waker_wake`. fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { let waker = non_null!(&mut *cx ?= ptr::null_mut()).0.waker().clone(); Box::into_raw(Box::new(hyper_waker { waker })) @@ -394,7 +525,10 @@ ffi_fn! { // ===== impl hyper_waker ===== ffi_fn! { - /// Free a waker that hasn't been woken. + /// Free a waker. + /// + /// This should only be used if the request isn't consumed by + /// `hyper_waker_wake`. fn hyper_waker_free(waker: *mut hyper_waker) { drop(non_null!(Box::from_raw(waker) ?= ())); } @@ -403,6 +537,11 @@ ffi_fn! { ffi_fn! { /// Wake up the task associated with a waker. /// + /// This does not do work towards associated task. Instead, it signals + /// to the task's executor that the task is ready to make progress. The + /// application is responsible for calling hyper_executor_poll, which + /// will in turn do work on all tasks that are ready to make progress. + /// /// NOTE: This consumes the waker. You should not use or free the waker afterwards. 
fn hyper_waker_wake(waker: *mut hyper_waker) { let waker = non_null!(Box::from_raw(waker) ?= ()); diff --git a/.cargo-vendor/hyper/src/headers.rs b/.cargo-vendor/hyper/src/headers.rs index 2e5e5db0f2..8bebdb9bfa 100644 --- a/.cargo-vendor/hyper/src/headers.rs +++ b/.cargo-vendor/hyper/src/headers.rs @@ -1,10 +1,13 @@ -#[cfg(feature = "http1")] +#[cfg(all(feature = "client", feature = "http1"))] use bytes::BytesMut; -use http::header::CONTENT_LENGTH; -use http::header::{HeaderValue, ValueIter}; -use http::HeaderMap; +use http::header::HeaderValue; #[cfg(all(feature = "http2", feature = "client"))] use http::Method; +#[cfg(any(feature = "client", all(feature = "server", feature = "http2")))] +use http::{ + header::{ValueIter, CONTENT_LENGTH}, + HeaderMap, +}; #[cfg(feature = "http1")] pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool { @@ -33,10 +36,12 @@ pub(super) fn content_length_parse(value: &HeaderValue) -> Option { from_digits(value.as_bytes()) } +#[cfg(any(feature = "client", all(feature = "server", feature = "http2")))] pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option { content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter()) } +#[cfg(any(feature = "client", all(feature = "server", feature = "http2")))] pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { // If multiple Content-Length headers were sent, everything can still // be alright if they all contain the same value, and all parse @@ -93,10 +98,10 @@ fn from_digits(bytes: &[u8]) -> Option { #[cfg(all(feature = "http2", feature = "client"))] pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { - match *method { - Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false, - _ => true, - } + !matches!( + *method, + Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT + ) } #[cfg(feature = "http2")] @@ -106,12 +111,12 @@ pub(super) fn 
set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { .or_insert_with(|| HeaderValue::from(len)); } -#[cfg(feature = "http1")] +#[cfg(all(feature = "client", feature = "http1"))] pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter()) } -#[cfg(feature = "http1")] +#[cfg(all(feature = "client", feature = "http1"))] pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { // chunked must always be the last encoding, according to spec if let Some(line) = encodings.next_back() { @@ -133,7 +138,7 @@ pub(super) fn is_chunked_(value: &HeaderValue) -> bool { false } -#[cfg(feature = "http1")] +#[cfg(all(feature = "client", feature = "http1"))] pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) { const CHUNKED: &str = "chunked"; diff --git a/.cargo-vendor/hyper/src/lib.rs b/.cargo-vendor/hyper/src/lib.rs index 064a18ec30..23087a0efe 100644 --- a/.cargo-vendor/hyper/src/lib.rs +++ b/.cargo-vendor/hyper/src/lib.rs @@ -2,8 +2,7 @@ #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] #![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))] -// 0.14.x is not actively developed, new warnings just get in the way. -//#![cfg_attr(all(test, feature = "full", not(feature = "nightly")), deny(warnings))] +#![cfg_attr(all(test, feature = "full"), deny(warnings))] #![cfg_attr(all(test, feature = "nightly"), feature(test))] #![cfg_attr(docsrs, feature(doc_cfg))] @@ -20,7 +19,7 @@ //! - Extensive production use //! - [Client](client/index.html) and [Server](server/index.html) APIs //! -//! If just starting out, **check out the [Guides](https://hyper.rs/guides) +//! If just starting out, **check out the [Guides](https://hyper.rs/guides/1/) //! first.** //! //! ## "Low-level" @@ -50,15 +49,44 @@ //! - `http2`: Enables HTTP/2 support. //! - `client`: Enables the HTTP `client`. //! 
- `server`: Enables the HTTP `server`. -//! - `runtime`: Enables convenient integration with `tokio`, providing -//! connectors and acceptors for TCP, and a default executor. -//! - `tcp`: Enables convenient implementations over TCP (using tokio). -//! - `stream`: Provides `futures::Stream` capabilities. -//! - `backports`: 1.0 functionality backported to 0.14. -//! - `deprecated`: opt-in to deprecation warnings to prepare you for 1.0. //! //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section - +//! +//! # Unstable Features +//! +//! hyper includes a set of unstable optional features that can be enabled through the use of a +//! feature flag and a [configuration flag]. +//! +//! The following is a list of feature flags and their corresponding `RUSTFLAG`: +//! - `ffi`: Enables C API for hyper `hyper_unstable_ffi`. +//! - `tracing`: Enables debug logging with `hyper_unstable_tracing`. +//! +//! Enabling an unstable feature is possible with the following `cargo` command, as of version `1.64.0`: +//! ```notrust +//! RUSTFLAGS="--cfg hyper_unstable_tracing" cargo rustc --features client,http1,http2,tracing --crate-type cdylib +//!``` +//! [configuration flag]: https://doc.rust-lang.org/reference/conditional-compilation.html +//! +//! # Stability +//! +//! It's worth talking a bit about the stability of hyper. hyper's API follows +//! [SemVer](https://semver.org). Breaking changes will only be introduced in +//! major versions, if ever. New additions to the API, such as new types, +//! methods, or traits will only be added in minor versions. +//! +//! Some parts of hyper are documented as NOT being part of the stable API. The +//! following is a brief list, you can read more about each one in the relevant +//! part of the documentation. +//! +//! - Downcasting error types from `Error::source()` is not considered stable. +//! - Private dependencies use of global variables is not considered stable. +//! 
So, if a dependency uses `log` or `tracing`, hyper doesn't promise it +//! will continue to do so. +//! - Behavior from default options is not stable. hyper reserves the right to +//! add new options that are enabled by default which might alter the +//! behavior, for the purposes of protection. It is also possible to _change_ +//! what the default options are set to, also in efforts to protect the +//! most people possible. #[doc(hidden)] pub use http; @@ -70,14 +98,16 @@ pub use crate::http::{header, Method, Request, Response, StatusCode, Uri, Versio #[doc(no_inline)] pub use crate::http::HeaderMap; -pub use crate::body::Body; pub use crate::error::{Error, Result}; #[macro_use] mod cfg; + #[macro_use] -mod common; +mod trace; + pub mod body; +mod common; mod error; pub mod ext; #[cfg(test)] @@ -87,6 +117,7 @@ pub mod service; pub mod upgrade; #[cfg(feature = "ffi")] +#[cfg_attr(docsrs, doc(cfg(all(feature = "ffi", hyper_unstable_ffi))))] pub mod ffi; cfg_proto! { @@ -98,15 +129,10 @@ cfg_feature! { #![feature = "client"] pub mod client; - #[cfg(any(feature = "http1", feature = "http2"))] - #[doc(no_inline)] - pub use crate::client::Client; } cfg_feature! 
{ #![feature = "server"] pub mod server; - #[doc(no_inline)] - pub use crate::server::Server; } diff --git a/.cargo-vendor/hyper/src/proto/h1/conn.rs b/.cargo-vendor/hyper/src/proto/h1/conn.rs index 5ab72f264e..744e1b2ae5 100644 --- a/.cargo-vendor/hyper/src/proto/h1/conn.rs +++ b/.cargo-vendor/hyper/src/proto/h1/conn.rs @@ -1,31 +1,35 @@ use std::fmt; +#[cfg(feature = "server")] +use std::future::Future; use std::io; -use std::marker::PhantomData; -use std::marker::Unpin; +use std::marker::{PhantomData, Unpin}; use std::pin::Pin; use std::task::{Context, Poll}; -#[cfg(all(feature = "server", feature = "runtime"))] -use std::time::Duration; +#[cfg(feature = "server")] +use std::time::{Duration, Instant}; +use crate::rt::{Read, Write}; use bytes::{Buf, Bytes}; -use http::header::{HeaderValue, CONNECTION}; +use futures_util::ready; +use http::header::{HeaderValue, CONNECTION, TE}; use http::{HeaderMap, Method, Version}; +use http_body::Frame; use httparse::ParserConfig; -use tokio::io::{AsyncRead, AsyncWrite}; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Sleep; -use tracing::{debug, error, trace}; use super::io::Buffered; use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants}; use crate::body::DecodedLength; +#[cfg(feature = "server")] +use crate::common::time::Time; use crate::headers::connection_keep_alive; use crate::proto::{BodyLength, MessageHead}; +#[cfg(feature = "server")] +use crate::rt::Sleep; const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// This handles a connection, which will have been established over an -/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple +/// `Read + Write` (like a socket), and will likely include multiple /// `Transaction`s over HTTP. 
/// /// The connection will determine when a message begins and ends as well as @@ -39,7 +43,7 @@ pub(crate) struct Conn { impl Conn where - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, B: Buf, T: Http1Transaction, { @@ -53,12 +57,17 @@ where keep_alive: KA::Busy, method: None, h1_parser_config: ParserConfig::default(), - #[cfg(all(feature = "server", feature = "runtime"))] + h1_max_headers: None, + #[cfg(feature = "server")] h1_header_read_timeout: None, - #[cfg(all(feature = "server", feature = "runtime"))] + #[cfg(feature = "server")] h1_header_read_timeout_fut: None, - #[cfg(all(feature = "server", feature = "runtime"))] + #[cfg(feature = "server")] h1_header_read_timeout_running: false, + #[cfg(feature = "server")] + date_header: true, + #[cfg(feature = "server")] + timer: Time::Empty, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, @@ -66,8 +75,6 @@ where h09_responses: false, #[cfg(feature = "ffi")] on_informational: None, - #[cfg(feature = "ffi")] - raw_headers: false, notify_read: false, reading: Reading::Init, writing: Writing::Init, @@ -75,11 +82,17 @@ where // We assume a modern world where the remote speaks HTTP/1.1. // If they tell us otherwise, we'll downgrade in `read_head`. 
version: Version::HTTP_11, + allow_trailer_fields: false, }, _marker: PhantomData, } } + #[cfg(feature = "server")] + pub(crate) fn set_timer(&mut self, timer: Time) { + self.state.timer = timer; + } + #[cfg(feature = "server")] pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { self.io.set_flush_pipeline(enabled); @@ -125,7 +138,11 @@ where self.state.h09_responses = true; } - #[cfg(all(feature = "server", feature = "runtime"))] + pub(crate) fn set_http1_max_headers(&mut self, val: usize) { + self.state.h1_max_headers = Some(val); + } + + #[cfg(feature = "server")] pub(crate) fn set_http1_header_read_timeout(&mut self, val: Duration) { self.state.h1_header_read_timeout = Some(val); } @@ -135,11 +152,6 @@ where self.state.allow_half_close = true; } - #[cfg(feature = "ffi")] - pub(crate) fn set_raw_headers(&mut self, enabled: bool) { - self.state.raw_headers = enabled; - } - pub(crate) fn into_inner(self) -> (I, Bytes) { self.io.into_inner() } @@ -169,10 +181,17 @@ where } pub(crate) fn can_read_body(&self) -> bool { - match self.state.reading { - Reading::Body(..) | Reading::Continue(..) => true, - _ => false, - } + matches!( + self.state.reading, + Reading::Body(..) | Reading::Continue(..) 
+ ) + } + + #[cfg(feature = "server")] + pub(crate) fn has_initial_read_write_state(&self) -> bool { + matches!(self.state.reading, Reading::Init) + && matches!(self.state.writing, Writing::Init) + && self.io.read_buf().is_empty() } fn should_error_on_eof(&self) -> bool { @@ -192,32 +211,67 @@ where debug_assert!(self.can_read_head()); trace!("Conn::read_head"); - let msg = match ready!(self.io.parse::( + #[cfg(feature = "server")] + if !self.state.h1_header_read_timeout_running { + if let Some(h1_header_read_timeout) = self.state.h1_header_read_timeout { + let deadline = Instant::now() + h1_header_read_timeout; + self.state.h1_header_read_timeout_running = true; + match self.state.h1_header_read_timeout_fut { + Some(ref mut h1_header_read_timeout_fut) => { + trace!("resetting h1 header read timeout timer"); + self.state.timer.reset(h1_header_read_timeout_fut, deadline); + } + None => { + trace!("setting h1 header read timeout timer"); + self.state.h1_header_read_timeout_fut = + Some(self.state.timer.sleep_until(deadline)); + } + } + } + } + + let msg = match self.io.parse::( cx, ParseContext { cached_headers: &mut self.state.cached_headers, req_method: &mut self.state.method, h1_parser_config: self.state.h1_parser_config.clone(), - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: self.state.h1_header_read_timeout, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running, + h1_max_headers: self.state.h1_max_headers, preserve_header_case: self.state.preserve_header_case, #[cfg(feature = "ffi")] preserve_header_order: self.state.preserve_header_order, h09_responses: self.state.h09_responses, #[cfg(feature = "ffi")] on_informational: &mut self.state.on_informational, - #[cfg(feature = "ffi")] - raw_headers: self.state.raw_headers, + 
}, + ) { + Poll::Ready(Ok(msg)) => msg, + Poll::Ready(Err(e)) => return self.on_read_head_error(e), + Poll::Pending => { + #[cfg(feature = "server")] + if self.state.h1_header_read_timeout_running { + if let Some(ref mut h1_header_read_timeout_fut) = + self.state.h1_header_read_timeout_fut + { + if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() { + self.state.h1_header_read_timeout_running = false; + + warn!("read header from client timeout"); + return Poll::Ready(Some(Err(crate::Error::new_header_timeout()))); + } + } + } + + return Poll::Pending; } - )) { - Ok(msg) => msg, - Err(e) => return self.on_read_head_error(e), }; + #[cfg(feature = "server")] + { + self.state.h1_header_read_timeout_running = false; + self.state.h1_header_read_timeout_fut = None; + } + // Note: don't deconstruct `msg` into local variables, it appears // the optimizer doesn't remove the extra copies. @@ -250,13 +304,29 @@ where if !T::should_read_first() { self.try_keep_alive(cx); } - } else if msg.expect_continue { - self.state.reading = Reading::Continue(Decoder::new(msg.decode)); + } else if msg.expect_continue && msg.head.version.gt(&Version::HTTP_10) { + let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support + self.state.reading = Reading::Continue(Decoder::new( + msg.decode, + self.state.h1_max_headers, + h1_max_header_size, + )); wants = wants.add(Wants::EXPECT); } else { - self.state.reading = Reading::Body(Decoder::new(msg.decode)); + let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support + self.state.reading = Reading::Body(Decoder::new( + msg.decode, + self.state.h1_max_headers, + h1_max_header_size, + )); } + self.state.allow_trailer_fields = msg + .head + .headers + .get(TE) + .map_or(false, |te_header| te_header == "trailers"); + Poll::Ready(Some(Ok((msg.head, msg.decode, wants)))) } @@ -289,33 +359,41 @@ where pub(crate) fn poll_read_body( &mut self, cx: &mut Context<'_>, - ) -> Poll>> { + ) 
-> Poll>>> { debug_assert!(self.can_read_body()); let (reading, ret) = match self.state.reading { Reading::Body(ref mut decoder) => { match ready!(decoder.decode(cx, &mut self.io)) { - Ok(slice) => { - let (reading, chunk) = if decoder.is_eof() { - debug!("incoming body completed"); - ( - Reading::KeepAlive, - if !slice.is_empty() { - Some(Ok(slice)) - } else { - None - }, - ) - } else if slice.is_empty() { - error!("incoming body unexpectedly ended"); - // This should be unreachable, since all 3 decoders - // either set eof=true or return an Err when reading - // an empty slice... - (Reading::Closed, None) + Ok(frame) => { + if frame.is_data() { + let slice = frame.data_ref().unwrap_or_else(|| unreachable!()); + let (reading, maybe_frame) = if decoder.is_eof() { + debug!("incoming body completed"); + ( + Reading::KeepAlive, + if !slice.is_empty() { + Some(Ok(frame)) + } else { + None + }, + ) + } else if slice.is_empty() { + error!("incoming body unexpectedly ended"); + // This should be unreachable, since all 3 decoders + // either set eof=true or return an Err when reading + // an empty slice... 
+ (Reading::Closed, None) + } else { + return Poll::Ready(Some(Ok(frame))); + }; + (reading, Poll::Ready(maybe_frame)) + } else if frame.is_trailers() { + (Reading::Closed, Poll::Ready(Some(Ok(frame)))) } else { - return Poll::Ready(Some(Ok(slice))); - }; - (reading, Poll::Ready(chunk)) + trace!("discarding unknown frame"); + (Reading::Closed, Poll::Ready(None)) + } } Err(e) => { debug!("incoming body decode error: {}", e); @@ -429,7 +507,7 @@ where let result = ready!(self.io.poll_read_from_io(cx)); Poll::Ready(result.map_err(|e| { - trace!("force_io_read; io error = {:?}", e); + trace!(error = %e, "force_io_read; io error"); self.state.close(); e })) @@ -521,24 +599,6 @@ where } } - pub(crate) fn write_full_msg(&mut self, head: MessageHead, body: B) { - if let Some(encoder) = - self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64))) - { - let is_last = encoder.is_last(); - // Make sure we don't write a body if we weren't actually allowed - // to do so, like because its a HEAD request. 
- if !encoder.is_eof() { - encoder.danger_full_buf(body, self.io.write_buf()); - } - self.state.writing = if is_last { - Writing::Closed - } else { - Writing::KeepAlive - } - } - } - fn encode_head( &mut self, mut head: MessageHead, @@ -561,6 +621,8 @@ where keep_alive: self.state.wants_keep_alive(), req_method: &mut self.state.method, title_case_headers: self.state.title_case_headers, + #[cfg(feature = "server")] + date_header: self.state.date_header, }, buf, ) { @@ -590,8 +652,7 @@ where let outgoing_is_keep_alive = head .headers .get(CONNECTION) - .map(connection_keep_alive) - .unwrap_or(false); + .map_or(false, connection_keep_alive); if !outgoing_is_keep_alive { match head.version { @@ -651,6 +712,31 @@ where self.state.writing = state; } + pub(crate) fn write_trailers(&mut self, trailers: HeaderMap) { + if T::is_server() && !self.state.allow_trailer_fields { + debug!("trailers not allowed to be sent"); + return; + } + debug_assert!(self.can_write_body() && self.can_buffer_body()); + + match self.state.writing { + Writing::Body(ref encoder) => { + if let Some(enc_buf) = + encoder.encode_trailers(trailers, self.state.title_case_headers) + { + self.io.buffer(enc_buf); + + self.state.writing = if encoder.is_last() || encoder.is_close_delimited() { + Writing::Closed + } else { + Writing::KeepAlive + }; + } + } + _ => unreachable!("write_trailers invalid state: {:?}", self.state.writing), + } + } + pub(crate) fn write_body_and_end(&mut self, chunk: B) { debug_assert!(self.can_write_body() && self.can_buffer_body()); // empty chunks should be discarded at Dispatcher level @@ -757,7 +843,9 @@ where // If still in Reading::Body, just give up match self.state.reading { - Reading::Init | Reading::KeepAlive => trace!("body drained"), + Reading::Init | Reading::KeepAlive => { + trace!("body drained") + } _ => self.close_read(), } } @@ -822,12 +910,17 @@ struct State { /// a body or not. 
method: Option, h1_parser_config: ParserConfig, - #[cfg(all(feature = "server", feature = "runtime"))] + h1_max_headers: Option, + #[cfg(feature = "server")] h1_header_read_timeout: Option, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: Option>>, - #[cfg(all(feature = "server", feature = "runtime"))] + #[cfg(feature = "server")] + h1_header_read_timeout_fut: Option>>, + #[cfg(feature = "server")] h1_header_read_timeout_running: bool, + #[cfg(feature = "server")] + date_header: bool, + #[cfg(feature = "server")] + timer: Time, preserve_header_case: bool, #[cfg(feature = "ffi")] preserve_header_order: bool, @@ -838,8 +931,6 @@ struct State { /// received. #[cfg(feature = "ffi")] on_informational: Option, - #[cfg(feature = "ffi")] - raw_headers: bool, /// Set to true when the Dispatcher should poll read operations /// again. See the `maybe_notify` method for more. notify_read: bool, @@ -851,6 +942,8 @@ struct State { upgrade: Option, /// Either HTTP/1.0 or 1.1 connection version: Version, + /// Flag to track if trailer fields are allowed to be sent + allow_trailer_fields: bool, } #[derive(Debug)] @@ -912,19 +1005,14 @@ impl std::ops::BitAndAssign for KA { } } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Default)] enum KA { Idle, + #[default] Busy, Disabled, } -impl Default for KA { - fn default() -> KA { - KA::Busy - } -} - impl KA { fn idle(&mut self) { *self = KA::Idle; @@ -964,11 +1052,7 @@ impl State { } fn wants_keep_alive(&self) -> bool { - if let KA::Disabled = self.keep_alive.status() { - false - } else { - true - } + !matches!(self.keep_alive.status(), KA::Disabled) } fn try_keep_alive(&mut self) { @@ -1048,16 +1132,17 @@ impl State { #[cfg(test)] mod tests { - #[cfg(feature = "nightly")] + #[cfg(all(feature = "nightly", not(miri)))] #[bench] fn bench_read_head_short(b: &mut ::test::Bencher) { use super::*; + use crate::common::io::Compat; let s = b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"; let len 
= s.len(); b.bytes = len as u64; // an empty IO, we'll be skipping and using the read buffer anyways - let io = tokio_test::io::Builder::new().build(); + let io = Compat(tokio_test::io::Builder::new().build()); let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io); *conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]); conn.state.cached_headers = Some(HeaderMap::with_capacity(2)); diff --git a/.cargo-vendor/hyper/src/proto/h1/decode.rs b/.cargo-vendor/hyper/src/proto/h1/decode.rs index 3206863530..2e196c36a5 100644 --- a/.cargo-vendor/hyper/src/proto/h1/decode.rs +++ b/.cargo-vendor/hyper/src/proto/h1/decode.rs @@ -2,12 +2,14 @@ use std::error::Error as StdError; use std::fmt; use std::io; use std::task::{Context, Poll}; -use std::usize; -use bytes::Bytes; -use tracing::{debug, trace}; +use bytes::{BufMut, Bytes, BytesMut}; +use futures_util::ready; +use http::{HeaderMap, HeaderName, HeaderValue}; +use http_body::Frame; use super::io::MemRead; +use super::role::DEFAULT_MAX_HEADERS; use super::DecodedLength; use self::Kind::{Chunked, Eof, Length}; @@ -17,6 +19,11 @@ use self::Kind::{Chunked, Eof, Length}; /// This limit is currentlty applied for the entire body, not per chunk. const CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16; +/// Maximum number of bytes allowed for all trailer fields. +/// +/// TODO: remove this when we land h1_max_header_size support +const TRAILER_LIMIT: usize = 1024 * 16; + /// Decoders to handle different Transfer-Encodings. /// /// If a message body does not include a Transfer-Encoding, it *should* @@ -26,7 +33,7 @@ pub(crate) struct Decoder { kind: Kind, } -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, PartialEq)] enum Kind { /// A Reader used when a Content-Length header is passed with a positive integer. 
Length(u64), @@ -35,6 +42,10 @@ enum Kind { state: ChunkedState, chunk_len: u64, extensions_cnt: u64, + trailers_buf: Option, + trailers_cnt: usize, + h1_max_headers: Option, + h1_max_header_size: Option, }, /// A Reader used for responses that don't indicate a length or chunked. /// @@ -81,12 +92,19 @@ impl Decoder { } } - pub(crate) fn chunked() -> Decoder { + pub(crate) fn chunked( + h1_max_headers: Option, + h1_max_header_size: Option, + ) -> Decoder { Decoder { kind: Kind::Chunked { state: ChunkedState::new(), chunk_len: 0, extensions_cnt: 0, + trailers_buf: None, + trailers_cnt: 0, + h1_max_headers, + h1_max_header_size, }, } } @@ -97,9 +115,13 @@ impl Decoder { } } - pub(super) fn new(len: DecodedLength) -> Self { + pub(super) fn new( + len: DecodedLength, + h1_max_headers: Option, + h1_max_header_size: Option, + ) -> Self { match len { - DecodedLength::CHUNKED => Decoder::chunked(), + DecodedLength::CHUNKED => Decoder::chunked(h1_max_headers, h1_max_header_size), DecodedLength::CLOSE_DELIMITED => Decoder::eof(), length => Decoder::length(length.danger_len()), } @@ -123,12 +145,12 @@ impl Decoder { &mut self, cx: &mut Context<'_>, body: &mut R, - ) -> Poll> { + ) -> Poll, io::Error>> { trace!("decode; state={:?}", self.kind); match self.kind { Length(ref mut remaining) => { if *remaining == 0 { - Poll::Ready(Ok(Bytes::new())) + Poll::Ready(Ok(Frame::data(Bytes::new()))) } else { let to_read = *remaining as usize; let buf = ready!(body.read_mem(cx, to_read))?; @@ -143,37 +165,77 @@ impl Decoder { } else { *remaining -= num; } - Poll::Ready(Ok(buf)) + Poll::Ready(Ok(Frame::data(buf))) } } Chunked { ref mut state, ref mut chunk_len, ref mut extensions_cnt, + ref mut trailers_buf, + ref mut trailers_cnt, + ref h1_max_headers, + ref h1_max_header_size, } => { + let h1_max_headers = h1_max_headers.unwrap_or(DEFAULT_MAX_HEADERS); + let h1_max_header_size = h1_max_header_size.unwrap_or(TRAILER_LIMIT); loop { let mut buf = None; // advances the chunked state - *state 
= ready!(state.step(cx, body, chunk_len, extensions_cnt, &mut buf))?; + *state = ready!(state.step( + cx, + body, + chunk_len, + extensions_cnt, + &mut buf, + trailers_buf, + trailers_cnt, + h1_max_headers, + h1_max_header_size + ))?; if *state == ChunkedState::End { trace!("end of chunked"); - return Poll::Ready(Ok(Bytes::new())); + + if trailers_buf.is_some() { + trace!("found possible trailers"); + + // decoder enforces that trailers count will not exceed h1_max_headers + if *trailers_cnt >= h1_max_headers { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "chunk trailers count overflow", + ))); + } + match decode_trailers( + &mut trailers_buf.take().expect("Trailer is None"), + *trailers_cnt, + ) { + Ok(headers) => { + return Poll::Ready(Ok(Frame::trailers(headers))); + } + Err(e) => { + return Poll::Ready(Err(e)); + } + } + } + + return Poll::Ready(Ok(Frame::data(Bytes::new()))); } if let Some(buf) = buf { - return Poll::Ready(Ok(buf)); + return Poll::Ready(Ok(Frame::data(buf))); } } } Eof(ref mut is_eof) => { if *is_eof { - Poll::Ready(Ok(Bytes::new())) + Poll::Ready(Ok(Frame::data(Bytes::new()))) } else { // 8192 chosen because its about 2 packets, there probably // won't be that much available, so don't have MemReaders // allocate buffers to big body.read_mem(cx, 8192).map_ok(|slice| { *is_eof = slice.is_empty(); - slice + Frame::data(slice) }) } } @@ -181,7 +243,7 @@ impl Decoder { } #[cfg(test)] - async fn decode_fut(&mut self, body: &mut R) -> Result { + async fn decode_fut(&mut self, body: &mut R) -> Result, io::Error> { futures_util::future::poll_fn(move |cx| self.decode(cx, body)).await } } @@ -216,6 +278,19 @@ macro_rules! or_overflow { ) } +macro_rules! 
put_u8 { + ($trailers_buf:expr, $byte:expr, $limit:expr) => { + $trailers_buf.put_u8($byte); + + if $trailers_buf.len() >= $limit { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "chunk trailers bytes over limit", + ))); + } + }; +} + impl ChunkedState { fn new() -> ChunkedState { ChunkedState::Start @@ -227,6 +302,10 @@ impl ChunkedState { size: &mut u64, extensions_cnt: &mut u64, buf: &mut Option, + trailers_buf: &mut Option, + trailers_cnt: &mut usize, + h1_max_headers: usize, + h1_max_header_size: usize, ) -> Poll> { use self::ChunkedState::*; match *self { @@ -238,10 +317,17 @@ impl ChunkedState { Body => ChunkedState::read_body(cx, body, size, buf), BodyCr => ChunkedState::read_body_cr(cx, body), BodyLf => ChunkedState::read_body_lf(cx, body), - Trailer => ChunkedState::read_trailer(cx, body), - TrailerLf => ChunkedState::read_trailer_lf(cx, body), - EndCr => ChunkedState::read_end_cr(cx, body), - EndLf => ChunkedState::read_end_lf(cx, body), + Trailer => ChunkedState::read_trailer(cx, body, trailers_buf, h1_max_header_size), + TrailerLf => ChunkedState::read_trailer_lf( + cx, + body, + trailers_buf, + trailers_cnt, + h1_max_headers, + h1_max_header_size, + ), + EndCr => ChunkedState::read_end_cr(cx, body, trailers_buf, h1_max_header_size), + EndLf => ChunkedState::read_end_lf(cx, body, trailers_buf, h1_max_header_size), End => Poll::Ready(Ok(ChunkedState::End)), } } @@ -442,19 +528,51 @@ impl ChunkedState { fn read_trailer( cx: &mut Context<'_>, rdr: &mut R, + trailers_buf: &mut Option, + h1_max_header_size: usize, ) -> Poll> { trace!("read_trailer"); - match byte!(rdr, cx) { + let byte = byte!(rdr, cx); + + put_u8!( + trailers_buf.as_mut().expect("trailers_buf is None"), + byte, + h1_max_header_size + ); + + match byte { b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)), _ => Poll::Ready(Ok(ChunkedState::Trailer)), } } + fn read_trailer_lf( cx: &mut Context<'_>, rdr: &mut R, + trailers_buf: &mut Option, + trailers_cnt: &mut usize, 
+ h1_max_headers: usize, + h1_max_header_size: usize, ) -> Poll> { - match byte!(rdr, cx) { - b'\n' => Poll::Ready(Ok(ChunkedState::EndCr)), + let byte = byte!(rdr, cx); + match byte { + b'\n' => { + if *trailers_cnt >= h1_max_headers { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "chunk trailers count overflow", + ))); + } + *trailers_cnt += 1; + + put_u8!( + trailers_buf.as_mut().expect("trailers_buf is None"), + byte, + h1_max_header_size + ); + + Poll::Ready(Ok(ChunkedState::EndCr)) + } _ => Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidInput, "Invalid trailer end LF", @@ -465,18 +583,48 @@ impl ChunkedState { fn read_end_cr( cx: &mut Context<'_>, rdr: &mut R, + trailers_buf: &mut Option, + h1_max_header_size: usize, ) -> Poll> { - match byte!(rdr, cx) { - b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)), - _ => Poll::Ready(Ok(ChunkedState::Trailer)), + let byte = byte!(rdr, cx); + match byte { + b'\r' => { + if let Some(trailers_buf) = trailers_buf { + put_u8!(trailers_buf, byte, h1_max_header_size); + } + Poll::Ready(Ok(ChunkedState::EndLf)) + } + byte => { + match trailers_buf { + None => { + // 64 will fit a single Expires header without reallocating + let mut buf = BytesMut::with_capacity(64); + buf.put_u8(byte); + *trailers_buf = Some(buf); + } + Some(ref mut trailers_buf) => { + put_u8!(trailers_buf, byte, h1_max_header_size); + } + } + + Poll::Ready(Ok(ChunkedState::Trailer)) + } } } fn read_end_lf( cx: &mut Context<'_>, rdr: &mut R, + trailers_buf: &mut Option, + h1_max_header_size: usize, ) -> Poll> { - match byte!(rdr, cx) { - b'\n' => Poll::Ready(Ok(ChunkedState::End)), + let byte = byte!(rdr, cx); + match byte { + b'\n' => { + if let Some(trailers_buf) = trailers_buf { + put_u8!(trailers_buf, byte, h1_max_header_size); + } + Poll::Ready(Ok(ChunkedState::End)) + } _ => Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidInput, "Invalid chunk end LF", @@ -485,6 +633,48 @@ impl ChunkedState { } } +// TODO: disallow 
Transfer-Encoding, Content-Length, Trailer, etc in trailers ?? +fn decode_trailers(buf: &mut BytesMut, count: usize) -> Result { + let mut trailers = HeaderMap::new(); + let mut headers = vec![httparse::EMPTY_HEADER; count]; + let res = httparse::parse_headers(buf, &mut headers); + match res { + Ok(httparse::Status::Complete((_, headers))) => { + for header in headers.iter() { + use std::convert::TryFrom; + let name = match HeaderName::try_from(header.name) { + Ok(name) => name, + Err(_) => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("Invalid header name: {:?}", &header), + )); + } + }; + + let value = match HeaderValue::from_bytes(header.value) { + Ok(value) => value, + Err(_) => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("Invalid header value: {:?}", &header), + )); + } + }; + + trailers.insert(name, value); + } + + Ok(trailers) + } + Ok(httparse::Status::Partial) => Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Partial header", + )), + Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)), + } +} + #[derive(Debug)] struct IncompleteBody; @@ -499,9 +689,9 @@ impl StdError for IncompleteBody {} #[cfg(test)] mod tests { use super::*; + use crate::rt::{Read, ReadBuf}; use std::pin::Pin; use std::time::Duration; - use tokio::io::{AsyncRead, ReadBuf}; impl<'a> MemRead for &'a [u8] { fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll> { @@ -517,11 +707,11 @@ mod tests { } } - impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) { + impl<'a> MemRead for &'a mut (dyn Read + Unpin) { fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> { let mut v = vec![0; len]; let mut buf = ReadBuf::new(&mut v); - ready!(Pin::new(self).poll_read(cx, &mut buf)?); + ready!(Pin::new(self).poll_read(cx, buf.unfilled())?); Poll::Ready(Ok(Bytes::copy_from_slice(&buf.filled()))) } } @@ -544,6 +734,7 @@ mod tests { use crate::mock::AsyncIo; */ + #[cfg(not(miri))] #[tokio::test] async fn 
test_read_chunk_size() { use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof}; @@ -553,9 +744,20 @@ mod tests { let rdr = &mut s.as_bytes(); let mut size = 0; let mut ext_cnt = 0; + let mut trailers_cnt = 0; loop { let result = futures_util::future::poll_fn(|cx| { - state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None) + state.step( + cx, + rdr, + &mut size, + &mut ext_cnt, + &mut None, + &mut None, + &mut trailers_cnt, + DEFAULT_MAX_HEADERS, + TRAILER_LIMIT, + ) }) .await; let desc = format!("read_size failed for {:?}", s); @@ -572,9 +774,20 @@ mod tests { let rdr = &mut s.as_bytes(); let mut size = 0; let mut ext_cnt = 0; + let mut trailers_cnt = 0; loop { let result = futures_util::future::poll_fn(|cx| { - state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None) + state.step( + cx, + rdr, + &mut size, + &mut ext_cnt, + &mut None, + &mut None, + &mut trailers_cnt, + DEFAULT_MAX_HEADERS, + TRAILER_LIMIT, + ) }) .await; state = match result { @@ -633,34 +846,57 @@ mod tests { read_err("f0000000000000003\r\n", InvalidData).await; } + #[cfg(not(miri))] #[tokio::test] async fn test_read_sized_early_eof() { let mut bytes = &b"foo bar"[..]; let mut decoder = Decoder::length(10); - assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7); + assert_eq!( + decoder + .decode_fut(&mut bytes) + .await + .unwrap() + .data_ref() + .unwrap() + .len(), + 7 + ); let e = decoder.decode_fut(&mut bytes).await.unwrap_err(); assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof); } + #[cfg(not(miri))] #[tokio::test] async fn test_read_chunked_early_eof() { let mut bytes = &b"\ 9\r\n\ foo bar\ "[..]; - let mut decoder = Decoder::chunked(); - assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7); + let mut decoder = Decoder::chunked(None, None); + assert_eq!( + decoder + .decode_fut(&mut bytes) + .await + .unwrap() + .data_ref() + .unwrap() + .len(), + 7 + ); let e = decoder.decode_fut(&mut bytes).await.unwrap_err(); assert_eq!(e.kind(), 
io::ErrorKind::UnexpectedEof); } + #[cfg(not(miri))] #[tokio::test] async fn test_read_chunked_single_read() { let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..]; - let buf = Decoder::chunked() + let buf = Decoder::chunked(None, None) .decode_fut(&mut mock_buf) .await - .expect("decode"); + .expect("decode") + .into_data() + .expect("unknown frame type"); assert_eq!(16, buf.len()); let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); assert_eq!("1234567890abcdef", &result); @@ -670,8 +906,13 @@ mod tests { async fn test_read_chunked_with_missing_zero_digit() { // After reading a valid chunk, the ending is missing a zero. let mut mock_buf = &b"1\r\nZ\r\n\r\n\r\n"[..]; - let mut decoder = Decoder::chunked(); - let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); + let mut decoder = Decoder::chunked(None, None); + let buf = decoder + .decode_fut(&mut mock_buf) + .await + .expect("decode") + .into_data() + .expect("unknown frame type"); assert_eq!("Z", buf); let err = decoder @@ -695,8 +936,13 @@ mod tests { scratch.extend(b"0\r\n\r\n"); let mut mock_buf = Bytes::from(scratch); - let mut decoder = Decoder::chunked(); - let buf1 = decoder.decode_fut(&mut mock_buf).await.expect("decode1"); + let mut decoder = Decoder::chunked(None, None); + let buf1 = decoder + .decode_fut(&mut mock_buf) + .await + .expect("decode1") + .into_data() + .expect("unknown frame type"); assert_eq!(&buf1[..], b"A"); let err = decoder @@ -711,29 +957,45 @@ mod tests { #[tokio::test] async fn test_read_chunked_trailer_with_missing_lf() { let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..]; - let mut decoder = Decoder::chunked(); + let mut decoder = Decoder::chunked(None, None); decoder.decode_fut(&mut mock_buf).await.expect("decode"); let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err(); assert_eq!(e.kind(), io::ErrorKind::InvalidInput); } + #[cfg(not(miri))] #[tokio::test] async fn test_read_chunked_after_eof() { let mut 
mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..]; - let mut decoder = Decoder::chunked(); + let mut decoder = Decoder::chunked(None, None); // normal read - let buf = decoder.decode_fut(&mut mock_buf).await.unwrap(); + let buf = decoder + .decode_fut(&mut mock_buf) + .await + .unwrap() + .into_data() + .expect("unknown frame type"); assert_eq!(16, buf.len()); let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); assert_eq!("1234567890abcdef", &result); // eof read - let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); + let buf = decoder + .decode_fut(&mut mock_buf) + .await + .expect("decode") + .into_data() + .expect("unknown frame type"); assert_eq!(0, buf.len()); // ensure read after eof also returns eof - let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); + let buf = decoder + .decode_fut(&mut mock_buf) + .await + .expect("decode") + .into_data() + .expect("unknown frame type"); assert_eq!(0, buf.len()); } @@ -742,7 +1004,7 @@ mod tests { async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String { let mut outs = Vec::new(); - let mut ins = if block_at == 0 { + let mut ins = crate::common::io::Compat::new(if block_at == 0 { tokio_test::io::Builder::new() .wait(Duration::from_millis(10)) .read(content) @@ -753,15 +1015,17 @@ mod tests { .wait(Duration::from_millis(10)) .read(&content[block_at..]) .build() - }; + }); - let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin); + let mut ins = &mut ins as &mut (dyn Read + Unpin); loop { let buf = decoder .decode_fut(&mut ins) .await - .expect("unexpected decode error"); + .expect("unexpected decode error") + .into_data() + .expect("unexpected frame type"); if buf.is_empty() { break; // eof } @@ -781,26 +1045,29 @@ mod tests { } } + #[cfg(not(miri))] #[tokio::test] async fn test_read_length_async() { let content = "foobar"; all_async_cases(content, content, Decoder::length(content.len() as u64)).await; } + #[cfg(not(miri))] 
#[tokio::test] async fn test_read_chunked_async() { let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n"; let expected = "foobar"; - all_async_cases(content, expected, Decoder::chunked()).await; + all_async_cases(content, expected, Decoder::chunked(None, None)).await; } + #[cfg(not(miri))] #[tokio::test] async fn test_read_eof_async() { let content = "foobar"; all_async_cases(content, content, Decoder::eof()).await; } - #[cfg(feature = "nightly")] + #[cfg(all(feature = "nightly", not(miri)))] #[bench] fn bench_decode_chunked_1kb(b: &mut test::Bencher) { let rt = new_runtime(); @@ -815,16 +1082,21 @@ mod tests { b.bytes = LEN as u64; b.iter(|| { - let mut decoder = Decoder::chunked(); + let mut decoder = Decoder::chunked(None, None); rt.block_on(async { let mut raw = content.clone(); - let chunk = decoder.decode_fut(&mut raw).await.unwrap(); + let chunk = decoder + .decode_fut(&mut raw) + .await + .unwrap() + .into_data() + .unwrap(); assert_eq!(chunk.len(), LEN); }); }); } - #[cfg(feature = "nightly")] + #[cfg(all(feature = "nightly", not(miri)))] #[bench] fn bench_decode_length_1kb(b: &mut test::Bencher) { let rt = new_runtime(); @@ -837,7 +1109,12 @@ mod tests { let mut decoder = Decoder::length(LEN as u64); rt.block_on(async { let mut raw = content.clone(); - let chunk = decoder.decode_fut(&mut raw).await.unwrap(); + let chunk = decoder + .decode_fut(&mut raw) + .await + .unwrap() + .into_data() + .unwrap(); assert_eq!(chunk.len(), LEN); }); }); @@ -850,4 +1127,110 @@ mod tests { .build() .expect("rt build") } + + #[test] + fn test_decode_trailers() { + let mut buf = BytesMut::new(); + buf.extend_from_slice( + b"Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\nX-Stream-Error: failed to decode\r\n\r\n", + ); + let headers = decode_trailers(&mut buf, 2).expect("decode_trailers"); + assert_eq!(headers.len(), 2); + assert_eq!( + headers.get("Expires").unwrap(), + "Wed, 21 Oct 2015 07:28:00 GMT" + ); + assert_eq!(headers.get("X-Stream-Error").unwrap(), "failed to decode"); + } 
+ + #[tokio::test] + async fn test_trailer_max_headers_enforced() { + let h1_max_headers = 10; + let mut scratch = vec![]; + scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n"); + for i in 0..h1_max_headers { + scratch.extend(format!("trailer{}: {}\r\n", i, i).as_bytes()); + } + scratch.extend(b"\r\n"); + let mut mock_buf = Bytes::from(scratch); + + let mut decoder = Decoder::chunked(Some(h1_max_headers), None); + + // ready chunked body + let buf = decoder + .decode_fut(&mut mock_buf) + .await + .unwrap() + .into_data() + .expect("unknown frame type"); + assert_eq!(16, buf.len()); + + // eof read + let err = decoder + .decode_fut(&mut mock_buf) + .await + .expect_err("trailer fields over limit"); + assert_eq!(err.kind(), io::ErrorKind::InvalidData); + } + + #[tokio::test] + async fn test_trailer_max_header_size_huge_trailer() { + let max_header_size = 1024; + let mut scratch = vec![]; + scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n"); + scratch.extend(format!("huge_trailer: {}\r\n", "x".repeat(max_header_size)).as_bytes()); + scratch.extend(b"\r\n"); + let mut mock_buf = Bytes::from(scratch); + + let mut decoder = Decoder::chunked(None, Some(max_header_size)); + + // ready chunked body + let buf = decoder + .decode_fut(&mut mock_buf) + .await + .unwrap() + .into_data() + .expect("unknown frame type"); + assert_eq!(16, buf.len()); + + // eof read + let err = decoder + .decode_fut(&mut mock_buf) + .await + .expect_err("trailers over limit"); + assert_eq!(err.kind(), io::ErrorKind::InvalidData); + } + + #[tokio::test] + async fn test_trailer_max_header_size_many_small_trailers() { + let max_headers = 10; + let header_size = 64; + let mut scratch = vec![]; + scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n"); + + for i in 0..max_headers { + scratch.extend(format!("trailer{}: {}\r\n", i, "x".repeat(header_size)).as_bytes()); + } + + scratch.extend(b"\r\n"); + let mut mock_buf = Bytes::from(scratch); + + let mut decoder = Decoder::chunked(None, Some(max_headers * 
header_size)); + + // ready chunked body + let buf = decoder + .decode_fut(&mut mock_buf) + .await + .unwrap() + .into_data() + .expect("unknown frame type"); + assert_eq!(16, buf.len()); + + // eof read + let err = decoder + .decode_fut(&mut mock_buf) + .await + .expect_err("trailers over limit"); + assert_eq!(err.kind(), io::ErrorKind::InvalidData); + } } diff --git a/.cargo-vendor/hyper/src/proto/h1/dispatch.rs b/.cargo-vendor/hyper/src/proto/h1/dispatch.rs index 3516d7ad21..79ea48be9f 100644 --- a/.cargo-vendor/hyper/src/proto/h1/dispatch.rs +++ b/.cargo-vendor/hyper/src/proto/h1/dispatch.rs @@ -1,21 +1,25 @@ -use std::error::Error as StdError; -use std::future::Future; -use std::marker::Unpin; -use std::pin::Pin; -use std::task::{Context, Poll}; - +use std::{ + error::Error as StdError, + future::Future, + marker::Unpin, + pin::Pin, + task::{Context, Poll}, +}; + +use crate::rt::{Read, Write}; use bytes::{Buf, Bytes}; +use futures_util::ready; use http::Request; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace}; use super::{Http1Transaction, Wants}; -use crate::body::{Body, DecodedLength, HttpBody}; -use crate::common; +use crate::body::{Body, DecodedLength, Incoming as IncomingBody}; +#[cfg(feature = "client")] +use crate::client::dispatch::TrySendError; +use crate::common::task; use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead}; use crate::upgrade::OnUpgrade; -pub(crate) struct Dispatcher { +pub(crate) struct Dispatcher { conn: Conn, dispatch: D, body_tx: Option, @@ -32,7 +36,8 @@ pub(crate) trait Dispatch { self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>>; - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>; + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) + -> crate::Result<()>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; fn should_poll(&self) -> bool; } @@ -49,14 +54,14 @@ cfg_server! { cfg_client! 
{ pin_project_lite::pin_project! { pub(crate) struct Client { - callback: Option, http::Response>>, + callback: Option, http::Response>>, #[pin] rx: ClientRx, rx_closed: bool, } } - type ClientRx = crate::client::dispatch::Receiver, http::Response>; + type ClientRx = crate::client::dispatch::Receiver, http::Response>; } impl Dispatcher @@ -67,9 +72,9 @@ where RecvItem = MessageHead, > + Unpin, D::PollError: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, T: Http1Transaction + Unpin, - Bs: HttpBody + 'static, + Bs: Body + 'static, Bs::Error: Into>, { pub(crate) fn new(dispatch: D, conn: Conn) -> Self { @@ -85,7 +90,11 @@ where #[cfg(feature = "server")] pub(crate) fn disable_keep_alive(&mut self) { self.conn.disable_keep_alive(); - if self.conn.is_write_closed() { + + // If keep alive has been disabled and no read or write has been seen on + // the connection yet, we must be in a state where the server is being asked to + // shut down before any data has been seen on the connection + if self.conn.is_write_closed() || self.conn.has_initial_read_write_state() { self.close(); } } @@ -96,14 +105,14 @@ where } /// Run this dispatcher until HTTP says this connection is done, - /// but don't call `AsyncWrite::shutdown` on the underlying IO. + /// but don't call `Write::shutdown` on the underlying IO. /// /// This is useful for old-style HTTP upgrades, but ignores /// newer-style upgrade API. 
- pub(crate) fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> - where - Self: Unpin, - { + pub(crate) fn poll_without_shutdown( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { Pin::new(self).poll_catch(cx, false).map_ok(|ds| { if let Dispatched::Upgrade(pending) = ds { pending.manual(); @@ -180,7 +189,7 @@ where trace!("poll_loop yielding (self = {:p})", self); - common::task::yield_now(cx).map(|never| match never {}) + task::yield_now(cx).map(|never| match never {}) } fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll> { @@ -206,17 +215,39 @@ where } } match self.conn.poll_read_body(cx) { - Poll::Ready(Some(Ok(chunk))) => match body.try_send_data(chunk) { - Ok(()) => { - self.body_tx = Some(body); - } - Err(_canceled) => { - if self.conn.can_read_body() { - trace!("body receiver dropped before eof, closing"); - self.conn.close_read(); + Poll::Ready(Some(Ok(frame))) => { + if frame.is_data() { + let chunk = frame.into_data().unwrap_or_else(|_| unreachable!()); + match body.try_send_data(chunk) { + Ok(()) => { + self.body_tx = Some(body); + } + Err(_canceled) => { + if self.conn.can_read_body() { + trace!("body receiver dropped before eof, closing"); + self.conn.close_read(); + } + } } + } else if frame.is_trailers() { + let trailers = + frame.into_trailers().unwrap_or_else(|_| unreachable!()); + match body.try_send_trailers(trailers) { + Ok(()) => { + self.body_tx = Some(body); + } + Err(_canceled) => { + if self.conn.can_read_body() { + trace!("body receiver dropped before eof, closing"); + self.conn.close_read(); + } + } + } + } else { + // we should have dropped all unknown frames in poll_read_body + error!("unexpected frame"); } - }, + } Poll::Ready(None) => { // just drop, the body will close automatically } @@ -238,7 +269,7 @@ where } fn poll_read_head(&mut self, cx: &mut Context<'_>) -> Poll> { - // can dispatch receive, or does it still care about, an incoming message? 
+ // can dispatch receive, or does it still care about other incoming message? match ready!(self.dispatch.poll_ready(cx)) { Ok(()) => (), Err(()) => { @@ -247,13 +278,15 @@ where return Poll::Ready(Ok(())); } } + // dispatch is ready for a message, try to read one match ready!(self.conn.poll_read_head(cx)) { Some(Ok((mut head, body_len, wants))) => { let body = match body_len { - DecodedLength::ZERO => Body::empty(), + DecodedLength::ZERO => IncomingBody::empty(), other => { - let (tx, rx) = Body::new_channel(other, wants.contains(Wants::EXPECT)); + let (tx, rx) = + IncomingBody::new_channel(other, wants.contains(Wants::EXPECT)); self.body_tx = Some(tx); rx } @@ -301,16 +334,7 @@ where && self.dispatch.should_poll() { if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) { - let (head, mut body) = msg.map_err(crate::Error::new_user_service)?; - - // Check if the body knows its full data immediately. - // - // If so, we can skip a bit of bookkeeping that streaming - // bodies need to do. 
- if let Some(full) = crate::body::take_full_data(&mut body) { - self.conn.write_full_msg(head, full); - return Poll::Ready(Ok(())); - } + let (head, body) = msg.map_err(crate::Error::new_user_service)?; let body_type = if body.is_end_stream() { self.body_rx.set(None); @@ -320,7 +344,7 @@ where .size_hint() .exact() .map(BodyLength::Known) - .or_else(|| Some(BodyLength::Unknown)); + .or(Some(BodyLength::Unknown)); self.body_rx.set(Some(body)); btype }; @@ -346,27 +370,39 @@ where continue; } - let item = ready!(body.as_mut().poll_data(cx)); + let item = ready!(body.as_mut().poll_frame(cx)); if let Some(item) = item { - let chunk = item.map_err(|e| { + let frame = item.map_err(|e| { *clear_body = true; crate::Error::new_user_body(e) })?; - let eos = body.is_end_stream(); - if eos { - *clear_body = true; - if chunk.remaining() == 0 { - trace!("discarding empty chunk"); - self.conn.end_body()?; + + if frame.is_data() { + let chunk = frame.into_data().unwrap_or_else(|_| unreachable!()); + let eos = body.is_end_stream(); + if eos { + *clear_body = true; + if chunk.remaining() == 0 { + trace!("discarding empty chunk"); + self.conn.end_body()?; + } else { + self.conn.write_body_and_end(chunk); + } } else { - self.conn.write_body_and_end(chunk); + if chunk.remaining() == 0 { + trace!("discarding empty chunk"); + continue; + } + self.conn.write_body(chunk); } + } else if frame.is_trailers() { + *clear_body = true; + self.conn.write_trailers( + frame.into_trailers().unwrap_or_else(|_| unreachable!()), + ); } else { - if chunk.remaining() == 0 { - trace!("discarding empty chunk"); - continue; - } - self.conn.write_body(chunk); + trace!("discarding unknown frame"); + continue; } } else { *clear_body = true; @@ -423,9 +459,9 @@ where RecvItem = MessageHead, > + Unpin, D::PollError: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, T: Http1Transaction + Unpin, - Bs: HttpBody + 'static, + Bs: Body + 'static, Bs::Error: Into>, { type Output = crate::Result; @@ 
-482,11 +518,11 @@ cfg_server! { // Service is never pinned impl, B> Unpin for Server {} - impl Dispatch for Server + impl Dispatch for Server where - S: HttpService, + S: HttpService, S::Error: Into>, - Bs: HttpBody, + Bs: Body, { type PollItem = MessageHead; type PollBody = Bs; @@ -517,7 +553,7 @@ cfg_server! { ret } - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()> { let (msg, body) = msg?; let mut req = Request::new(body); *req.method_mut() = msg.subject.0; @@ -530,14 +566,11 @@ cfg_server! { Ok(()) } - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { if self.in_flight.is_some() { Poll::Pending } else { - self.service.poll_ready(cx).map_err(|_e| { - // FIXME: return error value. - trace!("service closed"); - }) + Poll::Ready(Ok(())) } } @@ -550,6 +583,8 @@ cfg_server! { // ===== impl Client ===== cfg_client! { + use std::convert::Infallible; + impl Client { pub(crate) fn new(rx: ClientRx) -> Client { Client { @@ -562,17 +597,17 @@ cfg_client! { impl Dispatch for Client where - B: HttpBody, + B: Body, { type PollItem = RequestHead; type PollBody = B; - type PollError = std::convert::Infallible; + type PollError = Infallible; type RecvItem = crate::proto::ResponseHead; fn poll_msg( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll>> { let mut this = self.as_mut(); debug_assert!(!this.rx_closed); match this.rx.poll_recv(cx) { @@ -606,7 +641,7 @@ cfg_client! { } } - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()> { match msg { Ok((msg, body)) => { if let Some(cb) = self.callback.take() { @@ -622,7 +657,10 @@ cfg_client! 
{ } Err(err) => { if let Some(cb) = self.callback.take() { - cb.send(Err((err, None))); + cb.send(Err(TrySendError { + error: err, + message: None, + })); Ok(()) } else if !self.rx_closed { self.rx.close(); @@ -630,7 +668,10 @@ cfg_client! { trace!("canceling queued request with connection error: {}", err); // in this case, the message was never even started, so it's safe to tell // the user that the request was completely canceled - cb.send(Err((crate::Error::new_canceled().with(err), Some(req)))); + cb.send(Err(TrySendError { + error: crate::Error::new_canceled().with(err), + message: Some(req), + })); Ok(()) } else { Err(err) @@ -664,6 +705,7 @@ cfg_client! { #[cfg(test)] mod tests { use super::*; + use crate::common::io::Compat; use crate::proto::h1::ClientTransaction; use std::time::Duration; @@ -677,7 +719,7 @@ mod tests { // Block at 0 for now, but we will release this response before // the request is ready to write later... let (mut tx, rx) = crate::client::dispatch::channel(); - let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(Compat::new(io)); let mut dispatcher = Dispatcher::new(Client::new(rx), conn); // First poll is needed to allow tx to send... 
@@ -688,20 +730,21 @@ mod tests { handle.read(b"HTTP/1.1 200 OK\r\n\r\n"); let mut res_rx = tx - .try_send(crate::Request::new(crate::Body::empty())) + .try_send(crate::Request::new(IncomingBody::empty())) .unwrap(); tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx)); let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx)) .expect_err("callback should send error"); - match (err.0.kind(), err.1) { - (&crate::error::Kind::Canceled, Some(_)) => (), - other => panic!("expected Canceled, got {:?}", other), + match (err.error.is_canceled(), err.message.as_ref()) { + (true, Some(_)) => (), + _ => panic!("expected Canceled, got {:?}", err), } }); } + #[cfg(not(miri))] #[tokio::test] async fn client_flushing_is_not_ready_for_next_request() { let _ = pretty_env_logger::try_init(); @@ -713,16 +756,19 @@ mod tests { .build_with_handle(); let (mut tx, rx) = crate::client::dispatch::channel(); - let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(Compat::new(io)); conn.set_write_strategy_queue(); let dispatcher = Dispatcher::new(Client::new(rx), conn); let _dispatcher = tokio::spawn(async move { dispatcher.await }); - let req = crate::Request::builder() - .method("POST") - .body(crate::Body::from("reee")) - .unwrap(); + let body = { + let (mut tx, body) = IncomingBody::new_channel(DecodedLength::new(4), false); + tx.try_send_data("reee".into()).unwrap(); + body + }; + + let req = crate::Request::builder().method("POST").body(body).unwrap(); let res = tx.try_send(req).unwrap().await.expect("response"); drop(res); @@ -730,6 +776,7 @@ mod tests { assert!(!tx.is_ready()); } + #[cfg(not(miri))] #[tokio::test] async fn body_empty_chunks_ignored() { let _ = pretty_env_logger::try_init(); @@ -740,14 +787,14 @@ mod tests { .build(); let (mut tx, rx) = crate::client::dispatch::channel(); - let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let conn = Conn::<_, 
bytes::Bytes, ClientTransaction>::new(Compat::new(io)); let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn)); // First poll is needed to allow tx to send... assert!(dispatcher.poll().is_pending()); let body = { - let (mut tx, body) = crate::Body::channel(); + let (mut tx, body) = IncomingBody::channel(); tx.try_send_data("".into()).unwrap(); body }; diff --git a/.cargo-vendor/hyper/src/proto/h1/encode.rs b/.cargo-vendor/hyper/src/proto/h1/encode.rs index f0aa261a4f..2f24a8a3b1 100644 --- a/.cargo-vendor/hyper/src/proto/h1/encode.rs +++ b/.cargo-vendor/hyper/src/proto/h1/encode.rs @@ -1,11 +1,19 @@ +use std::collections::HashMap; use std::fmt; use std::io::IoSlice; use bytes::buf::{Chain, Take}; -use bytes::Buf; -use tracing::trace; +use bytes::{Buf, Bytes}; +use http::{ + header::{ + AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE, + CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING, + }, + HeaderMap, HeaderName, HeaderValue, +}; use super::io::WriteBuf; +use super::role::{write_headers, write_headers_title_case}; type StaticBuf = &'static [u8]; @@ -27,7 +35,7 @@ pub(crate) struct NotEof(u64); #[derive(Debug, PartialEq, Clone)] enum Kind { /// An Encoder for when Transfer-Encoding includes `chunked`. - Chunked, + Chunked(Option>), /// An Encoder for when Content-Length is set. /// /// Enforces that the body is not longer than the Content-Length header. 
@@ -46,6 +54,7 @@ enum BufKind { Limited(Take), Chunked(Chain, StaticBuf>), ChunkedEnd(StaticBuf), + Trailers(Chain, StaticBuf>), } impl Encoder { @@ -56,7 +65,7 @@ impl Encoder { } } pub(crate) fn chunked() -> Encoder { - Encoder::new(Kind::Chunked) + Encoder::new(Kind::Chunked(None)) } pub(crate) fn length(len: u64) -> Encoder { @@ -68,6 +77,16 @@ impl Encoder { Encoder::new(Kind::CloseDelimited) } + pub(crate) fn into_chunked_with_trailing_fields(self, trailers: Vec) -> Encoder { + match self.kind { + Kind::Chunked(_) => Encoder { + kind: Kind::Chunked(Some(trailers)), + is_last: self.is_last, + }, + _ => self, + } + } + pub(crate) fn is_eof(&self) -> bool { matches!(self.kind, Kind::Length(0)) } @@ -90,10 +109,14 @@ impl Encoder { } } + pub(crate) fn is_chunked(&self) -> bool { + matches!(self.kind, Kind::Chunked(_)) + } + pub(crate) fn end(&self) -> Result>, NotEof> { match self.kind { Kind::Length(0) => Ok(None), - Kind::Chunked => Ok(Some(EncodedBuf { + Kind::Chunked(_) => Ok(Some(EncodedBuf { kind: BufKind::ChunkedEnd(b"0\r\n\r\n"), })), #[cfg(feature = "server")] @@ -110,7 +133,7 @@ impl Encoder { debug_assert!(len > 0, "encode() called with empty buf"); let kind = match self.kind { - Kind::Chunked => { + Kind::Chunked(_) => { trace!("encoding chunked {}B", len); let buf = ChunkSize::new(len) .chain(msg) @@ -137,6 +160,62 @@ impl Encoder { EncodedBuf { kind } } + pub(crate) fn encode_trailers( + &self, + trailers: HeaderMap, + title_case_headers: bool, + ) -> Option> { + trace!("encoding trailers"); + match &self.kind { + Kind::Chunked(Some(allowed_trailer_fields)) => { + let allowed_trailer_field_map = allowed_trailer_field_map(allowed_trailer_fields); + + let mut cur_name = None; + let mut allowed_trailers = HeaderMap::new(); + + for (opt_name, value) in trailers { + if let Some(n) = opt_name { + cur_name = Some(n); + } + let name = cur_name.as_ref().expect("current header name"); + + if allowed_trailer_field_map.contains_key(name.as_str()) { + if 
is_valid_trailer_field(name) { + allowed_trailers.insert(name, value); + } else { + debug!("trailer field is not valid: {}", &name); + } + } else { + debug!("trailer header name not found in trailer header: {}", &name); + } + } + + let mut buf = Vec::new(); + if title_case_headers { + write_headers_title_case(&allowed_trailers, &mut buf); + } else { + write_headers(&allowed_trailers, &mut buf); + } + + if buf.is_empty() { + return None; + } + + Some(EncodedBuf { + kind: BufKind::Trailers(b"0\r\n".chain(Bytes::from(buf)).chain(b"\r\n")), + }) + } + Kind::Chunked(None) => { + debug!("attempted to encode trailers, but the trailer header is not set"); + None + } + _ => { + debug!("attempted to encode trailers for non-chunked response"); + None + } + } + } + pub(super) fn encode_and_end(&self, msg: B, dst: &mut WriteBuf>) -> bool where B: Buf, @@ -145,7 +224,7 @@ impl Encoder { debug_assert!(len > 0, "encode() called with empty buf"); match self.kind { - Kind::Chunked => { + Kind::Chunked(_) => { trace!("encoding chunked {}B", len); let buf = ChunkSize::new(len) .chain(msg) @@ -180,39 +259,40 @@ impl Encoder { } } } +} - /// Encodes the full body, without verifying the remaining length matches. - /// - /// This is used in conjunction with HttpBody::__hyper_full_data(), which - /// means we can trust that the buf has the correct size (the buf itself - /// was checked to make the headers). 
- pub(super) fn danger_full_buf(self, msg: B, dst: &mut WriteBuf>) - where - B: Buf, - { - debug_assert!(msg.remaining() > 0, "encode() called with empty buf"); - debug_assert!( - match self.kind { - Kind::Length(len) => len == msg.remaining() as u64, - _ => true, - }, - "danger_full_buf length mismatches" - ); +fn is_valid_trailer_field(name: &HeaderName) -> bool { + !matches!( + *name, + AUTHORIZATION + | CACHE_CONTROL + | CONTENT_ENCODING + | CONTENT_LENGTH + | CONTENT_RANGE + | CONTENT_TYPE + | HOST + | MAX_FORWARDS + | SET_COOKIE + | TRAILER + | TRANSFER_ENCODING + | TE + ) +} - match self.kind { - Kind::Chunked => { - let len = msg.remaining(); - trace!("encoding chunked {}B", len); - let buf = ChunkSize::new(len) - .chain(msg) - .chain(b"\r\n0\r\n\r\n" as &'static [u8]); - dst.buffer(buf); - } - _ => { - dst.buffer(msg); +fn allowed_trailer_field_map(allowed_trailer_fields: &Vec) -> HashMap { + let mut trailer_map = HashMap::new(); + + for header_value in allowed_trailer_fields { + if let Ok(header_str) = header_value.to_str() { + let items: Vec<&str> = header_str.split(',').map(|item| item.trim()).collect(); + + for item in items { + trailer_map.entry(item.to_string()).or_insert(()); } } } + + trailer_map } impl Buf for EncodedBuf @@ -226,6 +306,7 @@ where BufKind::Limited(ref b) => b.remaining(), BufKind::Chunked(ref b) => b.remaining(), BufKind::ChunkedEnd(ref b) => b.remaining(), + BufKind::Trailers(ref b) => b.remaining(), } } @@ -236,6 +317,7 @@ where BufKind::Limited(ref b) => b.chunk(), BufKind::Chunked(ref b) => b.chunk(), BufKind::ChunkedEnd(ref b) => b.chunk(), + BufKind::Trailers(ref b) => b.chunk(), } } @@ -246,6 +328,7 @@ where BufKind::Limited(ref mut b) => b.advance(cnt), BufKind::Chunked(ref mut b) => b.advance(cnt), BufKind::ChunkedEnd(ref mut b) => b.advance(cnt), + BufKind::Trailers(ref mut b) => b.advance(cnt), } } @@ -256,6 +339,7 @@ where BufKind::Limited(ref b) => b.chunks_vectored(dst), BufKind::Chunked(ref b) => 
b.chunks_vectored(dst), BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst), + BufKind::Trailers(ref b) => b.chunks_vectored(dst), } } } @@ -362,6 +446,13 @@ impl std::error::Error for NotEof {} #[cfg(test)] mod tests { use bytes::BufMut; + use http::{ + header::{ + AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE, + CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING, + }, + HeaderMap, HeaderName, HeaderValue, + }; use super::super::io::Cursor; use super::Encoder; @@ -436,4 +527,145 @@ mod tests { assert!(!encoder.is_eof()); encoder.end::<()>().unwrap(); } + + #[test] + fn chunked_with_valid_trailers() { + let encoder = Encoder::chunked(); + let trailers = vec![HeaderValue::from_static("chunky-trailer")]; + let encoder = encoder.into_chunked_with_trailing_fields(trailers); + + let headers = HeaderMap::from_iter( + vec![ + ( + HeaderName::from_static("chunky-trailer"), + HeaderValue::from_static("header data"), + ), + ( + HeaderName::from_static("should-not-be-included"), + HeaderValue::from_static("oops"), + ), + ] + .into_iter(), + ); + + let buf1 = encoder.encode_trailers::<&[u8]>(headers, false).unwrap(); + + let mut dst = Vec::new(); + dst.put(buf1); + assert_eq!(dst, b"0\r\nchunky-trailer: header data\r\n\r\n"); + } + + #[test] + fn chunked_with_multiple_trailer_headers() { + let encoder = Encoder::chunked(); + let trailers = vec![ + HeaderValue::from_static("chunky-trailer"), + HeaderValue::from_static("chunky-trailer-2"), + ]; + let encoder = encoder.into_chunked_with_trailing_fields(trailers); + + let headers = HeaderMap::from_iter( + vec![ + ( + HeaderName::from_static("chunky-trailer"), + HeaderValue::from_static("header data"), + ), + ( + HeaderName::from_static("chunky-trailer-2"), + HeaderValue::from_static("more header data"), + ), + ] + .into_iter(), + ); + + let buf1 = encoder.encode_trailers::<&[u8]>(headers, false).unwrap(); + + let mut dst = Vec::new(); + dst.put(buf1); + assert_eq!( + dst, 
+ b"0\r\nchunky-trailer: header data\r\nchunky-trailer-2: more header data\r\n\r\n" + ); + } + + #[test] + fn chunked_with_no_trailer_header() { + let encoder = Encoder::chunked(); + + let headers = HeaderMap::from_iter( + vec![( + HeaderName::from_static("chunky-trailer"), + HeaderValue::from_static("header data"), + )] + .into_iter(), + ); + + assert!(encoder + .encode_trailers::<&[u8]>(headers.clone(), false) + .is_none()); + + let trailers = vec![]; + let encoder = encoder.into_chunked_with_trailing_fields(trailers); + + assert!(encoder.encode_trailers::<&[u8]>(headers, false).is_none()); + } + + #[test] + fn chunked_with_invalid_trailers() { + let encoder = Encoder::chunked(); + + let trailers = format!( + "{},{},{},{},{},{},{},{},{},{},{},{}", + AUTHORIZATION, + CACHE_CONTROL, + CONTENT_ENCODING, + CONTENT_LENGTH, + CONTENT_RANGE, + CONTENT_TYPE, + HOST, + MAX_FORWARDS, + SET_COOKIE, + TRAILER, + TRANSFER_ENCODING, + TE, + ); + let trailers = vec![HeaderValue::from_str(&trailers).unwrap()]; + let encoder = encoder.into_chunked_with_trailing_fields(trailers); + + let mut headers = HeaderMap::new(); + headers.insert(AUTHORIZATION, HeaderValue::from_static("header data")); + headers.insert(CACHE_CONTROL, HeaderValue::from_static("header data")); + headers.insert(CONTENT_ENCODING, HeaderValue::from_static("header data")); + headers.insert(CONTENT_LENGTH, HeaderValue::from_static("header data")); + headers.insert(CONTENT_RANGE, HeaderValue::from_static("header data")); + headers.insert(CONTENT_TYPE, HeaderValue::from_static("header data")); + headers.insert(HOST, HeaderValue::from_static("header data")); + headers.insert(MAX_FORWARDS, HeaderValue::from_static("header data")); + headers.insert(SET_COOKIE, HeaderValue::from_static("header data")); + headers.insert(TRAILER, HeaderValue::from_static("header data")); + headers.insert(TRANSFER_ENCODING, HeaderValue::from_static("header data")); + headers.insert(TE, HeaderValue::from_static("header data")); + + 
assert!(encoder.encode_trailers::<&[u8]>(headers, true).is_none()); + } + + #[test] + fn chunked_with_title_case_headers() { + let encoder = Encoder::chunked(); + let trailers = vec![HeaderValue::from_static("chunky-trailer")]; + let encoder = encoder.into_chunked_with_trailing_fields(trailers); + + let headers = HeaderMap::from_iter( + vec![( + HeaderName::from_static("chunky-trailer"), + HeaderValue::from_static("header data"), + )] + .into_iter(), + ); + let buf1 = encoder.encode_trailers::<&[u8]>(headers, true).unwrap(); + + let mut dst = Vec::new(); + dst.put(buf1); + assert_eq!(dst, b"0\r\nChunky-Trailer: header data\r\n\r\n"); + } } diff --git a/.cargo-vendor/hyper/src/proto/h1/io.rs b/.cargo-vendor/hyper/src/proto/h1/io.rs index 02d8a4a9ec..4ad2fca1f4 100644 --- a/.cargo-vendor/hyper/src/proto/h1/io.rs +++ b/.cargo-vendor/hyper/src/proto/h1/io.rs @@ -1,20 +1,12 @@ use std::cmp; use std::fmt; -#[cfg(all(feature = "server", feature = "runtime"))] -use std::future::Future; use std::io::{self, IoSlice}; -use std::marker::Unpin; -use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; -#[cfg(all(feature = "server", feature = "runtime"))] -use std::time::Duration; +use crate::rt::{Read, ReadBuf, Write}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Instant; -use tracing::{debug, trace}; +use futures_util::ready; use super::{Http1Transaction, ParseContext, ParsedMessage}; use crate::common::buf::BufList; @@ -60,7 +52,7 @@ where impl Buffered where - T: AsyncRead + AsyncWrite + Unpin, + T: Read + Write + Unpin, B: Buf, { pub(crate) fn new(io: T) -> Buffered { @@ -188,38 +180,17 @@ where cached_headers: parse_ctx.cached_headers, req_method: parse_ctx.req_method, h1_parser_config: parse_ctx.h1_parser_config.clone(), - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: parse_ctx.h1_header_read_timeout, 
- #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running, + h1_max_headers: parse_ctx.h1_max_headers, preserve_header_case: parse_ctx.preserve_header_case, #[cfg(feature = "ffi")] preserve_header_order: parse_ctx.preserve_header_order, h09_responses: parse_ctx.h09_responses, #[cfg(feature = "ffi")] on_informational: parse_ctx.on_informational, - #[cfg(feature = "ffi")] - raw_headers: parse_ctx.raw_headers, }, )? { Some(msg) => { debug!("parsed {} headers", msg.head.headers.len()); - - #[cfg(all(feature = "server", feature = "runtime"))] - { - *parse_ctx.h1_header_read_timeout_running = false; - - if let Some(h1_header_read_timeout_fut) = - parse_ctx.h1_header_read_timeout_fut - { - // Reset the timer in order to avoid woken up when the timeout finishes - h1_header_read_timeout_fut - .as_mut() - .reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60)); - } - } return Poll::Ready(Ok(msg)); } None => { @@ -228,20 +199,6 @@ where debug!("max_buf_size ({}) reached, closing", max); return Poll::Ready(Err(crate::Error::new_too_large())); } - - #[cfg(all(feature = "server", feature = "runtime"))] - if *parse_ctx.h1_header_read_timeout_running { - if let Some(h1_header_read_timeout_fut) = - parse_ctx.h1_header_read_timeout_fut - { - if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() { - *parse_ctx.h1_header_read_timeout_running = false; - - tracing::warn!("read header from client timeout"); - return Poll::Ready(Err(crate::Error::new_header_timeout())); - } - } - } } } if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? 
== 0 { @@ -258,10 +215,11 @@ where self.read_buf.reserve(next); } - let dst = self.read_buf.chunk_mut(); - let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; + // SAFETY: ReadBuf and poll_read promise not to set any uninitialized + // bytes onto `dst`. + let dst = unsafe { self.read_buf.chunk_mut().as_uninit_slice_mut() }; let mut buf = ReadBuf::uninit(dst); - match Pin::new(&mut self.io).poll_read(cx, &mut buf) { + match Pin::new(&mut self.io).poll_read(cx, buf.unfilled()) { Poll::Ready(Ok(_)) => { let n = buf.filled().len(); trace!("received {} bytes", n); @@ -369,7 +327,7 @@ pub(crate) trait MemRead { impl MemRead for Buffered where - T: AsyncRead + AsyncWrite + Unpin, + T: Read + Write + Unpin, B: Buf, { fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> { @@ -462,7 +420,7 @@ fn prev_power_of_two(n: usize) -> usize { // Only way this shift can underflow is if n is less than 4. // (Which would means `usize::MAX >> 64` and underflowed!) debug_assert!(n >= 4); - (::std::usize::MAX >> (n.leading_zeros() + 2)) + 1 + (usize::MAX >> (n.leading_zeros() + 2)) + 1 } impl Default for ReadStrategy { @@ -673,6 +631,7 @@ enum WriteStrategy { #[cfg(test)] mod tests { use super::*; + use crate::common::io::Compat; use std::time::Duration; use tokio_test::io::Builder as Mock; @@ -711,6 +670,7 @@ mod tests { // io_buf.flush().await.expect("should short-circuit flush"); } + #[cfg(not(miri))] #[tokio::test] async fn parse_reads_until_blocked() { use crate::proto::h1::ClientTransaction; @@ -724,7 +684,7 @@ mod tests { .wait(Duration::from_secs(1)) .build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(Compat::new(mock)); // We expect a `parse` to be not ready, and so can't await it directly. // Rather, this `poll_fn` will wrap the `Poll` result. 
@@ -733,20 +693,13 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; assert!(buffered .parse::(cx, parse_ctx) @@ -774,7 +727,7 @@ mod tests { assert_eq!(strategy.next(), 32768); // Enormous records still increment at same rate - strategy.record(::std::usize::MAX); + strategy.record(usize::MAX); assert_eq!(strategy.next(), 65536); let max = strategy.max(); @@ -844,7 +797,7 @@ mod tests { fn fuzz(max: usize) { let mut strategy = ReadStrategy::with_max(max); while strategy.next() < max { - strategy.record(::std::usize::MAX); + strategy.record(usize::MAX); } let mut next = strategy.next(); while next > 8192 { @@ -865,7 +818,7 @@ mod tests { fuzz(max); max = (max / 2).saturating_mul(3); } - fuzz(::std::usize::MAX); + fuzz(usize::MAX); } #[test] @@ -873,7 +826,7 @@ mod tests { #[cfg(debug_assertions)] // needs to trigger a debug_assert fn write_buf_requires_non_empty_bufs() { let mock = Mock::new().build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(Compat::new(mock)); buffered.buffer(Cursor::new(Vec::new())); } @@ -901,13 +854,14 @@ mod tests { } */ + #[cfg(not(miri))] #[tokio::test] async fn write_buf_flatten() { let _ = pretty_env_logger::try_init(); let mock = Mock::new().write(b"hello world, it's hyper!").build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(Compat::new(mock)); buffered.write_buf.set_strategy(WriteStrategy::Flatten); 
buffered.headers_buf().extend(b"hello "); @@ -954,6 +908,7 @@ mod tests { assert_eq!(write_buf.headers.pos, 0); } + #[cfg(not(miri))] #[tokio::test] async fn write_buf_queue_disable_auto() { let _ = pretty_env_logger::try_init(); @@ -965,7 +920,7 @@ mod tests { .write(b"hyper!") .build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(Compat::new(mock)); buffered.write_buf.set_strategy(WriteStrategy::Queue); // we have 4 buffers, and vec IO disabled, but explicitly said diff --git a/.cargo-vendor/hyper/src/proto/h1/mod.rs b/.cargo-vendor/hyper/src/proto/h1/mod.rs index 5a2587a843..017b8671fb 100644 --- a/.cargo-vendor/hyper/src/proto/h1/mod.rs +++ b/.cargo-vendor/hyper/src/proto/h1/mod.rs @@ -1,11 +1,6 @@ -#[cfg(all(feature = "server", feature = "runtime"))] -use std::{pin::Pin, time::Duration}; - use bytes::BytesMut; use http::{HeaderMap, Method}; use httparse::ParserConfig; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Sleep; use crate::body::DecodedLength; use crate::proto::{BodyLength, MessageHead}; @@ -35,6 +30,7 @@ cfg_server! 
{ pub(crate) trait Http1Transaction { type Incoming; type Outgoing: Default; + #[cfg(feature = "tracing")] const LOG: &'static str; fn parse(bytes: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult; fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result; @@ -76,20 +72,13 @@ pub(crate) struct ParseContext<'a> { cached_headers: &'a mut Option, req_method: &'a mut Option, h1_parser_config: ParserConfig, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: Option, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: &'a mut Option>>, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: &'a mut bool, + h1_max_headers: Option, preserve_header_case: bool, #[cfg(feature = "ffi")] preserve_header_order: bool, h09_responses: bool, #[cfg(feature = "ffi")] on_informational: &'a mut Option, - #[cfg(feature = "ffi")] - raw_headers: bool, } /// Passed to Http1Transaction::encode @@ -100,6 +89,8 @@ pub(crate) struct Encode<'a, T> { keep_alive: bool, req_method: &'a mut Option, title_case_headers: bool, + #[cfg(feature = "server")] + date_header: bool, } /// Extra flags that a request "wants", like expect-continue or upgrades. 
diff --git a/.cargo-vendor/hyper/src/proto/h1/role.rs b/.cargo-vendor/hyper/src/proto/h1/role.rs index 7a4544d989..e5a8872111 100644 --- a/.cargo-vendor/hyper/src/proto/h1/role.rs +++ b/.cargo-vendor/hyper/src/proto/h1/role.rs @@ -1,15 +1,17 @@ -use std::fmt::{self, Write}; use std::mem::MaybeUninit; +#[cfg(feature = "client")] +use std::fmt::{self, Write as _}; + use bytes::Bytes; use bytes::BytesMut; +#[cfg(feature = "client")] +use http::header::Entry; #[cfg(feature = "server")] use http::header::ValueIter; -use http::header::{self, Entry, HeaderName, HeaderValue}; -use http::{HeaderMap, Method, StatusCode, Version}; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Instant; -use tracing::{debug, error, trace, trace_span, warn}; +use http::header::{self, HeaderMap, HeaderName, HeaderValue}; +use http::{Method, StatusCode, Version}; +use smallvec::{smallvec, smallvec_inline, SmallVec}; use crate::body::DecodedLength; #[cfg(feature = "server")] @@ -22,9 +24,11 @@ use crate::headers; use crate::proto::h1::{ Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage, }; -use crate::proto::{BodyLength, MessageHead, RequestHead, RequestLine}; +#[cfg(feature = "client")] +use crate::proto::RequestHead; +use crate::proto::{BodyLength, MessageHead, RequestLine}; -const MAX_HEADERS: usize = 100; +pub(crate) const DEFAULT_MAX_HEADERS: usize = 100; const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific #[cfg(feature = "server")] const MAX_URI_LEN: usize = (u16::MAX - 1) as usize; @@ -67,35 +71,12 @@ pub(super) fn parse_headers( where T: Http1Transaction, { - #[cfg(all(feature = "server", feature = "runtime"))] - if !*ctx.h1_header_read_timeout_running { - if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout { - let span = trace_span!("parse_headers"); - let _s = span.enter(); - - let deadline = Instant::now() + h1_header_read_timeout; - *ctx.h1_header_read_timeout_running = true; - match ctx.h1_header_read_timeout_fut 
{ - Some(h1_header_read_timeout_fut) => { - debug!("resetting h1 header read timeout timer"); - h1_header_read_timeout_fut.as_mut().reset(deadline); - } - None => { - debug!("setting h1 header read timeout timer"); - *ctx.h1_header_read_timeout_fut = - Some(Box::pin(tokio::time::sleep_until(deadline))); - } - } - } - } - // If the buffer is empty, don't bother entering the span, it's just noise. if bytes.is_empty() { return Ok(None); } - let span = trace_span!("parse_headers"); - let _s = span.enter(); + let _entered = trace_span!("parse_headers"); T::parse(bytes, ctx) } @@ -107,8 +88,7 @@ pub(super) fn encode_headers( where T: Http1Transaction, { - let span = trace_span!("encode_headers"); - let _s = span.enter(); + let _entered = trace_span!("encode_headers"); T::encode(enc, dst) } @@ -124,6 +104,7 @@ pub(crate) enum Server {} impl Http1Transaction for Server { type Incoming = RequestLine; type Outgoing = StatusCode; + #[cfg(feature = "tracing")] const LOG: &'static str = "{role=server}"; fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { @@ -135,19 +116,24 @@ impl Http1Transaction for Server { let version; let len; let headers_len; + let method; + let path_range; - // Unsafe: both headers_indices and headers are using uninitialized memory, + // Both headers_indices and headers are using uninitialized memory, // but we *never* read any of it until after httparse has assigned // values into it. By not zeroing out the stack memory, this saves // a good ~5% on pipeline benchmarks. 
- let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { - // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit - MaybeUninit::uninit().assume_init() - }; + let mut headers_indices: SmallVec<[MaybeUninit; DEFAULT_MAX_HEADERS]> = + match ctx.h1_max_headers { + Some(cap) => smallvec![MaybeUninit::uninit(); cap], + None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS], + }; { - /* SAFETY: it is safe to go from MaybeUninit array to array of MaybeUninit */ - let mut headers: [MaybeUninit>; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; + let mut headers: SmallVec<[MaybeUninit>; DEFAULT_MAX_HEADERS]> = + match ctx.h1_max_headers { + Some(cap) => smallvec![MaybeUninit::uninit(); cap], + None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS], + }; trace!(bytes = buf.len(), "Request.parse"); let mut req = httparse::Request::new(&mut []); let bytes = buf.as_ref(); @@ -159,10 +145,8 @@ impl Http1Transaction for Server { if uri.len() > MAX_URI_LEN { return Err(Parse::UriTooLong); } - subject = RequestLine( - Method::from_bytes(req.method.unwrap().as_bytes())?, - uri.parse()?, - ); + method = Method::from_bytes(req.method.unwrap().as_bytes())?; + path_range = Server::record_path_range(bytes, uri); version = if req.version.unwrap() == 1 { keep_alive = true; is_http_11 = true; @@ -173,7 +157,7 @@ impl Http1Transaction for Server { Version::HTTP_10 }; - record_header_indices(bytes, &req.headers, &mut headers_indices)?; + record_header_indices(bytes, req.headers, &mut headers_indices)?; headers_len = req.headers.len(); } Ok(httparse::Status::Partial) => return Ok(None), @@ -195,6 +179,12 @@ impl Http1Transaction for Server { }; let slice = buf.split_to(len).freeze(); + let uri = { + let uri_bytes = slice.slice_ref(&slice[path_range]); + // TODO(lucab): switch to `Uri::from_shared()` once public. + http::Uri::from_maybe_shared(uri_bytes)? 
+ }; + subject = RequestLine(method, uri); // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 // 1. (irrelevant to Request) @@ -225,13 +215,13 @@ impl Http1Transaction for Server { None }; - let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); + let mut headers = ctx.cached_headers.take().unwrap_or_default(); headers.reserve(headers_len); for header in &headers_indices[..headers_len] { // SAFETY: array is valid up to `headers_len` - let header = unsafe { &*header.as_ptr() }; + let header = unsafe { header.assume_init_ref() }; let name = header_name!(&slice[header.name.0..header.name.1]); let value = header_value!(slice.slice(header.value.0..header.value.1)); @@ -456,8 +446,10 @@ impl Http1Transaction for Server { }; debug!("sending automatic response ({}) for parse error", status); - let mut msg = MessageHead::default(); - msg.subject = status; + let msg = MessageHead { + subject: status, + ..Default::default() + }; Some(msg) } @@ -477,16 +469,13 @@ impl Server { } fn can_chunked(method: &Option, status: StatusCode) -> bool { - if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() + if method == &Some(Method::HEAD) + || method == &Some(Method::CONNECT) && status.is_success() + || status.is_informational() { false - } else if status.is_informational() { - false } else { - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, - _ => true, - } + !matches!(status, StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED) } } @@ -494,10 +483,7 @@ impl Server { if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() { false } else { - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, - _ => true, - } + !matches!(status, StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED) } } @@ -635,6 +621,7 @@ impl Server { }; let mut encoder = Encoder::length(0); + let mut allowed_trailer_fields: Option> = None; let mut wrote_date = 
false; let mut cur_name = None; let mut is_name_written = false; @@ -676,7 +663,7 @@ impl Server { } match msg.body { Some(BodyLength::Known(known_len)) => { - // The HttpBody claims to know a length, and + // The Body claims to know a length, and // the headers are already set. For performance // reasons, we are just going to trust that // the values match. @@ -709,7 +696,7 @@ impl Server { continue 'headers; } Some(BodyLength::Unknown) => { - // The HttpBody impl didn't know how long the + // The Body impl didn't know how long the // body is, but a length header was included. // We have to parse the value to return our // Encoder... @@ -821,6 +808,38 @@ impl Server { header::DATE => { wrote_date = true; } + header::TRAILER => { + // check that we actually can send a chunked body... + if msg.head.version == Version::HTTP_10 + || !Server::can_chunked(msg.req_method, msg.head.subject) + { + continue; + } + + if !is_name_written { + is_name_written = true; + header_name_writer.write_header_name_with_colon( + dst, + "trailer: ", + header::TRAILER, + ); + extend(dst, value.as_bytes()); + } else { + extend(dst, b", "); + extend(dst, value.as_bytes()); + } + + match allowed_trailer_fields { + Some(ref mut allowed_trailer_fields) => { + allowed_trailer_fields.push(value); + } + None => { + allowed_trailer_fields = Some(vec![value]); + } + } + + continue 'headers; + } _ => (), } //TODO: this should perhaps instead combine them into @@ -896,7 +915,8 @@ impl Server { } // cached date is much faster than formatting every request - if !wrote_date { + // don't force the write if disabled + if !wrote_date && msg.date_header { dst.reserve(date::DATE_VALUE_LENGTH + 8); header_name_writer.write_header_name_with_colon(dst, "date: ", header::DATE); date::extend(dst); @@ -905,8 +925,23 @@ impl Server { extend(dst, b"\r\n"); } + if encoder.is_chunked() { + if let Some(allowed_trailer_fields) = allowed_trailer_fields { + encoder = 
encoder.into_chunked_with_trailing_fields(allowed_trailer_fields); + } + } + Ok(encoder.set_last(is_last)) } + + /// Helper for zero-copy parsing of request path URI. + #[inline] + fn record_path_range(bytes: &[u8], req_path: &str) -> std::ops::Range { + let bytes_ptr = bytes.as_ptr() as usize; + let start = req_path.as_ptr() as usize - bytes_ptr; + let end = start + req_path.len(); + std::ops::Range { start, end } + } } #[cfg(feature = "server")] @@ -930,6 +965,7 @@ trait HeaderNameWriter { impl Http1Transaction for Client { type Incoming = StatusCode; type Outgoing = RequestLine; + #[cfg(feature = "tracing")] const LOG: &'static str = "{role=client}"; fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { @@ -937,15 +973,18 @@ impl Http1Transaction for Client { // Loop to skip information status code headers (100 Continue, etc). loop { - // Unsafe: see comment in Server Http1Transaction, above. - let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { - // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit - MaybeUninit::uninit().assume_init() - }; + let mut headers_indices: SmallVec<[MaybeUninit; DEFAULT_MAX_HEADERS]> = + match ctx.h1_max_headers { + Some(cap) => smallvec![MaybeUninit::uninit(); cap], + None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS], + }; let (len, status, reason, version, headers_len) = { - // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit - let mut headers: [MaybeUninit>; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; + let mut headers: SmallVec< + [MaybeUninit>; DEFAULT_MAX_HEADERS], + > = match ctx.h1_max_headers { + Some(cap) => smallvec![MaybeUninit::uninit(); cap], + None => smallvec_inline![MaybeUninit::uninit(); DEFAULT_MAX_HEADERS], + }; trace!(bytes = buf.len(), "Response.parse"); let mut res = httparse::Response::new(&mut []); let bytes = buf.as_ref(); @@ -973,7 +1012,7 @@ impl Http1Transaction for Client { } else { 
Version::HTTP_10 }; - record_header_indices(bytes, &res.headers, &mut headers_indices)?; + record_header_indices(bytes, res.headers, &mut headers_indices)?; let headers_len = res.headers.len(); (len, status, reason, version, headers_len) } @@ -993,20 +1032,16 @@ impl Http1Transaction for Client { .h1_parser_config .obsolete_multiline_headers_in_responses_are_allowed() { - for header in &headers_indices[..headers_len] { + for header in &mut headers_indices[..headers_len] { // SAFETY: array is valid up to `headers_len` - let header = unsafe { &*header.as_ptr() }; - for b in &mut slice[header.value.0..header.value.1] { - if *b == b'\r' || *b == b'\n' { - *b = b' '; - } - } + let header = unsafe { header.assume_init_mut() }; + Client::obs_fold_line(&mut slice, header); } } let slice = slice.freeze(); - let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); + let mut headers = ctx.cached_headers.take().unwrap_or_default(); let mut keep_alive = version == Version::HTTP_11; @@ -1026,7 +1061,7 @@ impl Http1Transaction for Client { headers.reserve(headers_len); for header in &headers_indices[..headers_len] { // SAFETY: array is valid up to `headers_len` - let header = unsafe { &*header.as_ptr() }; + let header = unsafe { header.assume_init_ref() }; let name = header_name!(&slice[header.name.0..header.name.1]); let value = header_value!(slice.slice(header.value.0..header.value.1)); @@ -1067,15 +1102,10 @@ impl Http1Transaction for Client { if let Some(reason) = reason { // Safety: httparse ensures that only valid reason phrase bytes are present in this // field. 
- let reason = unsafe { crate::ext::ReasonPhrase::from_bytes_unchecked(reason) }; + let reason = crate::ext::ReasonPhrase::from_bytes_unchecked(reason); extensions.insert(reason); } - #[cfg(feature = "ffi")] - if ctx.raw_headers { - extensions.insert(crate::ffi::RawHeaders(crate::ffi::hyper_buf(slice))); - } - let head = MessageHead { version, subject: status, @@ -1097,7 +1127,7 @@ impl Http1Transaction for Client { #[cfg(feature = "ffi")] if head.subject.is_informational() { if let Some(callback) = ctx.on_informational { - callback.call(head.into_response(crate::Body::empty())); + callback.call(head.into_response(crate::body::Incoming::empty())); } } @@ -1249,7 +1279,7 @@ impl Client { let headers = &mut head.headers; // If the user already set specific headers, we should respect them, regardless - // of what the HttpBody knows about itself. They set them for a reason. + // of what the Body knows about itself. They set them for a reason. // Because of the borrow checker, we can't check the for an existing // Content-Length header while holding an `Entry` for the Transfer-Encoding @@ -1327,6 +1357,19 @@ impl Client { } }; + let encoder = encoder.map(|enc| { + if enc.is_chunked() { + let allowed_trailer_fields: Vec = + headers.get_all(header::TRAILER).iter().cloned().collect(); + + if !allowed_trailer_fields.is_empty() { + return enc.into_chunked_with_trailing_fields(allowed_trailer_fields); + } + } + + enc + }); + // This is because we need a second mutable borrow to remove // content-length header. if let Some(encoder) = encoder { @@ -1347,8 +1390,68 @@ impl Client { set_content_length(headers, len) } + + fn obs_fold_line(all: &mut [u8], idx: &mut HeaderIndices) { + // If the value has obs-folded text, then in-place shift the bytes out + // of here. 
+ // + // https://httpwg.org/specs/rfc9112.html#line.folding + // + // > A user agent that receives an obs-fold MUST replace each received + // > obs-fold with one or more SP octets prior to interpreting the + // > field value. + // + // This means strings like "\r\n\t foo" must replace the "\r\n\t " with + // a single space. + + let buf = &mut all[idx.value.0..idx.value.1]; + + // look for a newline, otherwise bail out + let first_nl = match buf.iter().position(|b| *b == b'\n') { + Some(i) => i, + None => return, + }; + + // not on standard slices because whatever, sigh + fn trim_start(mut s: &[u8]) -> &[u8] { + while let [first, rest @ ..] = s { + if first.is_ascii_whitespace() { + s = rest; + } else { + break; + } + } + s + } + + fn trim_end(mut s: &[u8]) -> &[u8] { + while let [rest @ .., last] = s { + if last.is_ascii_whitespace() { + s = rest; + } else { + break; + } + } + s + } + + fn trim(s: &[u8]) -> &[u8] { + trim_start(trim_end(s)) + } + + // TODO(perf): we could do the moves in-place, but this is so uncommon + // that it shouldn't matter. + let mut unfolded = trim_end(&buf[..first_nl]).to_vec(); + for line in buf[first_nl + 1..].split(|b| *b == b'\n') { + unfolded.push(b' '); + unfolded.extend_from_slice(trim(line)); + } + buf[..unfolded.len()].copy_from_slice(&unfolded); + idx.value.1 = idx.value.0 + unfolded.len(); + } } +#[cfg(feature = "client")] fn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder { // At this point, there should not be a valid Content-Length // header. 
However, since we'll be indexing in anyways, we can @@ -1405,16 +1508,10 @@ fn record_header_indices( let value_start = header.value.as_ptr() as usize - bytes_ptr; let value_end = value_start + header.value.len(); - // FIXME(maybe_uninit_extra) - // FIXME(addr_of) - // Currently we don't have `ptr::addr_of_mut` in stable rust or - // MaybeUninit::write, so this is some way of assigning into a MaybeUninit - // safely - let new_header_indices = HeaderIndices { + indices.write(HeaderIndices { name: (name_start, name_end), value: (value_start, value_end), - }; - *indices = MaybeUninit::new(new_header_indices); + }); } Ok(()) @@ -1435,7 +1532,7 @@ fn title_case(dst: &mut Vec, name: &[u8]) { } } -fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec) { +pub(crate) fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec) { for (name, value) in headers { title_case(dst, name.as_str().as_bytes()); extend(dst, b": "); @@ -1444,7 +1541,7 @@ fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec) { } } -fn write_headers(headers: &HeaderMap, dst: &mut Vec) { +pub(crate) fn write_headers(headers: &HeaderMap, dst: &mut Vec) { for (name, value) in headers { extend(dst, name.as_str().as_bytes()); extend(dst, b": "); @@ -1454,6 +1551,7 @@ fn write_headers(headers: &HeaderMap, dst: &mut Vec) { } #[cold] +#[cfg(feature = "client")] fn write_headers_original_case( headers: &HeaderMap, orig_case: &HeaderCaseMap, @@ -1489,8 +1587,10 @@ fn write_headers_original_case( } } +#[cfg(feature = "client")] struct FastWrite<'a>(&'a mut Vec); +#[cfg(feature = "client")] impl<'a> fmt::Write for FastWrite<'a> { #[inline] fn write_str(&mut self, s: &str) -> fmt::Result { @@ -1526,20 +1626,13 @@ mod tests { cached_headers: &mut None, req_method: &mut method, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - 
h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }, ) .unwrap() @@ -1561,20 +1654,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw.len(), 0); @@ -1591,20 +1677,13 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; Server::parse(&mut raw, ctx).unwrap_err(); } @@ -1619,20 +1698,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] 
preserve_header_order: false, h09_responses: true, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw, H09_RESPONSE); @@ -1649,20 +1721,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; Client::parse(&mut raw, ctx).unwrap_err(); assert_eq!(raw, H09_RESPONSE); @@ -1683,20 +1748,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config, - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw.len(), 0); @@ -1714,20 +1772,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] 
preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; Client::parse(&mut raw, ctx).unwrap_err(); } @@ -1740,20 +1791,13 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: true, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }; let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap(); let orig_headers = parsed_message @@ -1787,20 +1831,13 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }, ) .expect("parse ok") @@ -1815,20 +1852,13 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - 
raw_headers: false, }, ) .expect_err(comment) @@ -2052,20 +2082,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, } ) .expect("parse ok") @@ -2080,20 +2103,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(m), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }, ) .expect("parse ok") @@ -2108,20 +2124,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }, ) .expect_err("parse should err") @@ -2387,6 +2396,24 @@ mod tests { ); } + #[cfg(feature = "client")] + #[test] + fn test_client_obs_fold_line() { + fn unfold(src: &str) -> String { 
+ let mut buf = src.as_bytes().to_vec(); + let mut idx = HeaderIndices { + name: (0, 0), + value: (0, buf.len()), + }; + Client::obs_fold_line(&mut buf, &mut idx); + String::from_utf8(buf[idx.value.0..idx.value.1].to_vec()).unwrap() + } + + assert_eq!(unfold("a normal line"), "a normal line",); + + assert_eq!(unfold("obs\r\n fold\r\n\t line"), "obs fold line",); + } + #[test] fn test_client_request_encode_title_case() { use crate::proto::BodyLength; @@ -2407,6 +2434,7 @@ mod tests { keep_alive: true, req_method: &mut None, title_case_headers: true, + date_header: true, }, &mut vec, ) @@ -2438,6 +2466,7 @@ mod tests { keep_alive: true, req_method: &mut None, title_case_headers: false, + date_header: true, }, &mut vec, ) @@ -2472,6 +2501,7 @@ mod tests { keep_alive: true, req_method: &mut None, title_case_headers: true, + date_header: true, }, &mut vec, ) @@ -2496,6 +2526,7 @@ mod tests { keep_alive: true, req_method: &mut Some(Method::CONNECT), title_case_headers: false, + date_header: true, }, &mut vec, ) @@ -2525,6 +2556,7 @@ mod tests { keep_alive: true, req_method: &mut None, title_case_headers: true, + date_header: true, }, &mut vec, ) @@ -2559,6 +2591,7 @@ mod tests { keep_alive: true, req_method: &mut None, title_case_headers: false, + date_header: true, }, &mut vec, ) @@ -2593,17 +2626,54 @@ mod tests { keep_alive: true, req_method: &mut None, title_case_headers: true, + date_header: true, }, &mut vec, ) .unwrap(); + // this will also test that the date does exist let expected_response = b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\nDate: "; assert_eq!(&vec[..expected_response.len()], &expected_response[..]); } + #[test] + fn test_disabled_date_header() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", 
HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + date_header: false, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n"; + + assert_eq!(&vec, &expected_response); + } + #[test] fn parse_header_htabs() { let mut bytes = BytesMut::from("HTTP/1.1 200 OK\r\nserver: hello\tworld\r\n\r\n"); @@ -2613,20 +2683,13 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }, ) .expect("parse ok") @@ -2635,6 +2698,135 @@ mod tests { assert_eq!(parsed.head.headers["server"], "hello\tworld"); } + #[test] + fn parse_too_large_headers() { + fn gen_req_with_headers(num: usize) -> String { + let mut req = String::from("GET / HTTP/1.1\r\n"); + for i in 0..num { + req.push_str(&format!("key{i}: val{i}\r\n")); + } + req.push_str("\r\n"); + req + } + fn gen_resp_with_headers(num: usize) -> String { + let mut req = String::from("HTTP/1.1 200 OK\r\n"); + for i in 0..num { + req.push_str(&format!("key{i}: val{i}\r\n")); + } + req.push_str("\r\n"); + req + } + fn parse(max_headers: Option, gen_size: usize, should_success: bool) { + { + // server side + let mut bytes = 
BytesMut::from(gen_req_with_headers(gen_size).as_str()); + let result = Server::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + h1_max_headers: max_headers, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + }, + ); + if should_success { + result.expect("parse ok").expect("parse complete"); + } else { + result.expect_err("parse should err"); + } + } + { + // client side + let mut bytes = BytesMut::from(gen_resp_with_headers(gen_size).as_str()); + let result = Client::parse( + &mut bytes, + ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + h1_max_headers: max_headers, + preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, + h09_responses: false, + #[cfg(feature = "ffi")] + on_informational: &mut None, + }, + ); + if should_success { + result.expect("parse ok").expect("parse complete"); + } else { + result.expect_err("parse should err"); + } + } + } + + // check generator + assert_eq!( + gen_req_with_headers(0), + String::from("GET / HTTP/1.1\r\n\r\n") + ); + assert_eq!( + gen_req_with_headers(1), + String::from("GET / HTTP/1.1\r\nkey0: val0\r\n\r\n") + ); + assert_eq!( + gen_req_with_headers(2), + String::from("GET / HTTP/1.1\r\nkey0: val0\r\nkey1: val1\r\n\r\n") + ); + assert_eq!( + gen_req_with_headers(3), + String::from("GET / HTTP/1.1\r\nkey0: val0\r\nkey1: val1\r\nkey2: val2\r\n\r\n") + ); + + // default max_headers is 100, so + // + // - less than or equal to 100, accepted + // + parse(None, 0, true); + parse(None, 1, true); + parse(None, 50, true); + parse(None, 99, true); + parse(None, 100, true); + // + // - more than 100, rejected + // + parse(None, 101, false); + parse(None, 102, false); + parse(None, 200, false); + + // max_headers is 0, parser will reject 
any headers + // + // - without header, accepted + // + parse(Some(0), 0, true); + // + // - with header(s), rejected + // + parse(Some(0), 1, false); + parse(Some(0), 100, false); + + // max_headers is 200 + // + // - less than or equal to 200, accepted + // + parse(Some(200), 0, true); + parse(Some(200), 1, true); + parse(Some(200), 100, true); + parse(Some(200), 200, true); + // + // - more than 200, rejected + // + parse(Some(200), 201, false); + parse(Some(200), 210, false); + } + #[test] fn test_write_headers_orig_case_empty_value() { let mut headers = HeaderMap::new(); @@ -2705,27 +2897,24 @@ mod tests { cached_headers: &mut headers, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }, ) .unwrap() .unwrap(); ::test::black_box(&msg); + + // Remove all references pointing into BytesMut. 
msg.head.headers.clear(); headers = Some(msg.head.headers); + std::mem::take(&mut msg.head.subject); + restart(&mut raw, len); }); @@ -2753,20 +2942,13 @@ mod tests { cached_headers: &mut headers, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, + h1_max_headers: None, preserve_header_case: false, #[cfg(feature = "ffi")] preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, }, ) .unwrap() @@ -2809,6 +2991,7 @@ mod tests { keep_alive: true, req_method: &mut Some(Method::GET), title_case_headers: false, + date_header: true, }, &mut vec, ) @@ -2837,6 +3020,7 @@ mod tests { keep_alive: true, req_method: &mut Some(Method::GET), title_case_headers: false, + date_header: true, }, &mut vec, ) diff --git a/.cargo-vendor/hyper/src/proto/h2/client.rs b/.cargo-vendor/hyper/src/proto/h2/client.rs index 8c2a4d2e0f..679b9dfada 100644 --- a/.cargo-vendor/hyper/src/proto/h2/client.rs +++ b/.cargo-vendor/hyper/src/proto/h2/client.rs @@ -1,35 +1,40 @@ -use std::convert::Infallible; -use std::error::Error as StdError; -use std::future::Future; -use std::marker::Unpin; -use std::pin::Pin; -use std::task::{Context, Poll}; -#[cfg(feature = "runtime")] -use std::time::Duration; - +use std::{ + convert::Infallible, + future::Future, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use crate::rt::{Read, Write}; use bytes::Bytes; +use futures_channel::mpsc::{Receiver, Sender}; use futures_channel::{mpsc, oneshot}; -use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; -use futures_util::stream::StreamExt as _; -use h2::client::{Builder, SendRequest}; +use futures_util::future::{Either, FusedFuture, FutureExt as _}; +use 
futures_util::ready; +use futures_util::stream::{StreamExt as _, StreamFuture}; +use h2::client::{Builder, Connection, SendRequest}; use h2::SendStream; use http::{Method, StatusCode}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace, warn}; +use pin_project_lite::pin_project; +use super::ping::{Ponger, Recorder}; use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; -use crate::body::HttpBody; -use crate::client::dispatch::Callback; -use crate::common::exec::Exec; +use crate::body::{Body, Incoming as IncomingBody}; +use crate::client::dispatch::{Callback, SendWhen, TrySendError}; +use crate::common::io::Compat; +use crate::common::time::Time; use crate::ext::Protocol; use crate::headers; use crate::proto::h2::UpgradedSendStream; use crate::proto::Dispatched; +use crate::rt::bounds::Http2ClientConnExec; use crate::upgrade::Upgraded; -use crate::{Body, Request, Response}; +use crate::{Request, Response}; use h2::client::ResponseFuture; -type ClientRx = crate::client::dispatch::Receiver, Response>; +type ClientRx = crate::client::dispatch::Receiver, Response>; ///// An mpsc channel is used to help notify the `Connection` task when *all* ///// other handles to it have been dropped, so that it can shutdown. @@ -46,21 +51,31 @@ const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb +const DEFAULT_MAX_HEADER_LIST_SIZE: u32 = 1024 * 16; // 16kb + +// The maximum number of concurrent streams that the client is allowed to open +// before it receives the initial SETTINGS frame from the server. +// This default value is derived from what the HTTP/2 spec recommends as the +// minimum value that endpoints advertise to their peers. 
It means that using +// this value will minimize the chance of the failure where the local endpoint +// attempts to open too many streams and gets rejected by the remote peer with +// the `REFUSED_STREAM` error. +const DEFAULT_INITIAL_MAX_SEND_STREAMS: usize = 100; #[derive(Clone, Debug)] pub(crate) struct Config { pub(crate) adaptive_window: bool, pub(crate) initial_conn_window_size: u32, pub(crate) initial_stream_window_size: u32, + pub(crate) initial_max_send_streams: usize, pub(crate) max_frame_size: u32, - #[cfg(feature = "runtime")] + pub(crate) max_header_list_size: u32, pub(crate) keep_alive_interval: Option, - #[cfg(feature = "runtime")] pub(crate) keep_alive_timeout: Duration, - #[cfg(feature = "runtime")] pub(crate) keep_alive_while_idle: bool, pub(crate) max_concurrent_reset_streams: Option, pub(crate) max_send_buffer_size: usize, + pub(crate) max_pending_accept_reset_streams: Option, } impl Default for Config { @@ -69,15 +84,15 @@ impl Default for Config { adaptive_window: false, initial_conn_window_size: DEFAULT_CONN_WINDOW, initial_stream_window_size: DEFAULT_STREAM_WINDOW, + initial_max_send_streams: DEFAULT_INITIAL_MAX_SEND_STREAMS, max_frame_size: DEFAULT_MAX_FRAME_SIZE, - #[cfg(feature = "runtime")] + max_header_list_size: DEFAULT_MAX_HEADER_LIST_SIZE, keep_alive_interval: None, - #[cfg(feature = "runtime")] keep_alive_timeout: Duration::from_secs(20), - #[cfg(feature = "runtime")] keep_alive_while_idle: false, max_concurrent_reset_streams: None, max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, + max_pending_accept_reset_streams: None, } } } @@ -85,14 +100,19 @@ impl Default for Config { fn new_builder(config: &Config) -> Builder { let mut builder = Builder::default(); builder + .initial_max_send_streams(config.initial_max_send_streams) .initial_window_size(config.initial_stream_window_size) .initial_connection_window_size(config.initial_conn_window_size) .max_frame_size(config.max_frame_size) + .max_header_list_size(config.max_header_list_size) 
.max_send_buffer_size(config.max_send_buffer_size) .enable_push(false); if let Some(max) = config.max_concurrent_reset_streams { builder.max_concurrent_reset_streams(max); } + if let Some(max) = config.max_pending_accept_reset_streams { + builder.max_pending_accept_reset_streams(max); + } builder } @@ -103,28 +123,28 @@ fn new_ping_config(config: &Config) -> ping::Config { } else { None }, - #[cfg(feature = "runtime")] keep_alive_interval: config.keep_alive_interval, - #[cfg(feature = "runtime")] keep_alive_timeout: config.keep_alive_timeout, - #[cfg(feature = "runtime")] keep_alive_while_idle: config.keep_alive_while_idle, } } -pub(crate) async fn handshake( +pub(crate) async fn handshake( io: T, req_rx: ClientRx, config: &Config, - exec: Exec, -) -> crate::Result> + mut exec: E, + timer: Time, +) -> crate::Result> where - T: AsyncRead + AsyncWrite + Send + Unpin + 'static, - B: HttpBody, + T: Read + Write + Unpin, + B: Body + 'static, B::Data: Send + 'static, + E: Http2ClientConnExec + Unpin, + B::Error: Into>, { let (h2_tx, mut conn) = new_builder(config) - .handshake::<_, SendBuf>(io) + .handshake::<_, SendBuf>(Compat::new(io)) .await .map_err(crate::Error::new_h2)?; @@ -135,41 +155,27 @@ where let (conn_drop_ref, rx) = mpsc::channel(1); let (cancel_tx, conn_eof) = oneshot::channel(); - let conn_drop_rx = rx.into_future().map(|(item, _rx)| { - if let Some(never) = item { - match never {} - } - }); + let conn_drop_rx = rx.into_future(); - let ping_config = new_ping_config(&config); + let ping_config = new_ping_config(config); let (conn, ping) = if ping_config.is_enabled() { let pp = conn.ping_pong().expect("conn.ping_pong"); - let (recorder, mut ponger) = ping::channel(pp, ping_config); - - let conn = future::poll_fn(move |cx| { - match ponger.poll(cx) { - Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { - conn.set_target_window_size(wnd); - conn.set_initial_window_size(wnd)?; - } - #[cfg(feature = "runtime")] - Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { - 
debug!("connection keep-alive timed out"); - return Poll::Ready(Ok(())); - } - Poll::Pending => {} - } + let (recorder, ponger) = ping::channel(pp, ping_config, timer); - Pin::new(&mut conn).poll(cx) - }); + let conn: Conn<_, B> = Conn::new(ponger, conn); (Either::Left(conn), recorder) } else { (Either::Right(conn), ping::disabled()) }; - let conn = conn.map_err(|e| debug!("connection error: {}", e)); + let conn: ConnMapErr = ConnMapErr { + conn, + is_terminated: false, + }; - exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); + exec.execute_h2_future(H2ClientFuture::Task { + task: ConnTask::new(conn, conn_drop_rx, cancel_tx), + }); Ok(ClientTask { ping, @@ -179,80 +185,306 @@ where h2_tx, req_rx, fut_ctx: None, + marker: PhantomData, }) } -async fn conn_task(conn: C, drop_rx: D, cancel_tx: oneshot::Sender) +pin_project! { + struct Conn + where + B: Body, + { + #[pin] + ponger: Ponger, + #[pin] + conn: Connection, SendBuf<::Data>>, + } +} + +impl Conn +where + B: Body, + T: Read + Write + Unpin, +{ + fn new(ponger: Ponger, conn: Connection, SendBuf<::Data>>) -> Self { + Conn { ponger, conn } + } +} + +impl Future for Conn +where + B: Body, + T: Read + Write + Unpin, +{ + type Output = Result<(), h2::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + match this.ponger.poll(cx) { + Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { + this.conn.set_target_window_size(wnd); + this.conn.set_initial_window_size(wnd)?; + } + Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { + debug!("connection keep-alive timed out"); + return Poll::Ready(Ok(())); + } + Poll::Pending => {} + } + + Pin::new(&mut this.conn).poll(cx) + } +} + +pin_project! 
{ + struct ConnMapErr + where + B: Body, + T: Read, + T: Write, + T: Unpin, + { + #[pin] + conn: Either, Connection, SendBuf<::Data>>>, + #[pin] + is_terminated: bool, + } +} + +impl Future for ConnMapErr +where + B: Body, + T: Read + Write + Unpin, +{ + type Output = Result<(), ()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + if *this.is_terminated { + return Poll::Pending; + } + let polled = this.conn.poll(cx); + if polled.is_ready() { + *this.is_terminated = true; + } + polled.map_err(|_e| { + debug!(error = %_e, "connection error"); + }) + } +} + +impl FusedFuture for ConnMapErr +where + B: Body, + T: Read + Write + Unpin, +{ + fn is_terminated(&self) -> bool { + self.is_terminated + } +} + +pin_project! { + pub struct ConnTask + where + B: Body, + T: Read, + T: Write, + T: Unpin, + { + #[pin] + drop_rx: StreamFuture>, + #[pin] + cancel_tx: Option>, + #[pin] + conn: ConnMapErr, + } +} + +impl ConnTask where - C: Future + Unpin, - D: Future + Unpin, + B: Body, + T: Read + Write + Unpin, { - match future::select(conn, drop_rx).await { - Either::Left(_) => { - // ok or err, the `conn` has finished + fn new( + conn: ConnMapErr, + drop_rx: StreamFuture>, + cancel_tx: oneshot::Sender, + ) -> Self { + Self { + drop_rx, + cancel_tx: Some(cancel_tx), + conn, } - Either::Right(((), conn)) => { + } +} + +impl Future for ConnTask +where + B: Body, + T: Read + Write + Unpin, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + if !this.conn.is_terminated() && this.conn.poll_unpin(cx).is_ready() { + // ok or err, the `conn` has finished. + return Poll::Ready(()); + } + + if !this.drop_rx.is_terminated() && this.drop_rx.poll_unpin(cx).is_ready() { // mpsc has been dropped, hopefully polling // the connection some more should start shutdown - // and then close + // and then close. 
trace!("send_request dropped, starting conn shutdown"); - drop(cancel_tx); - let _ = conn.await; + drop(this.cancel_tx.take().expect("ConnTask Future polled twice")); + } + + Poll::Pending + } +} + +pin_project! { + #[project = H2ClientFutureProject] + pub enum H2ClientFuture + where + B: http_body::Body, + B: 'static, + B::Error: Into>, + T: Read, + T: Write, + T: Unpin, + { + Pipe { + #[pin] + pipe: PipeMap, + }, + Send { + #[pin] + send_when: SendWhen, + }, + Task { + #[pin] + task: ConnTask, + }, + } +} + +impl Future for H2ClientFuture +where + B: http_body::Body + 'static, + B::Error: Into>, + T: Read + Write + Unpin, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll { + let this = self.project(); + + match this { + H2ClientFutureProject::Pipe { pipe } => pipe.poll(cx), + H2ClientFutureProject::Send { send_when } => send_when.poll(cx), + H2ClientFutureProject::Task { task } => task.poll(cx), } } } struct FutCtx where - B: HttpBody, + B: Body, { is_connect: bool, eos: bool, fut: ResponseFuture, body_tx: SendStream>, body: B, - cb: Callback, Response>, + cb: Callback, Response>, } -impl Unpin for FutCtx {} +impl Unpin for FutCtx {} -pub(crate) struct ClientTask +pub(crate) struct ClientTask where - B: HttpBody, + B: Body, + E: Unpin, { ping: ping::Recorder, conn_drop_ref: ConnDropRef, conn_eof: ConnEof, - executor: Exec, + executor: E, h2_tx: SendRequest>, req_rx: ClientRx, fut_ctx: Option>, + marker: PhantomData, } -impl ClientTask +impl ClientTask where - B: HttpBody + 'static, + B: Body + 'static, + E: Http2ClientConnExec + Unpin, + B::Error: Into>, + T: Read + Write + Unpin, { pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { self.h2_tx.is_extended_connect_protocol_enabled() } } -impl ClientTask +pin_project! 
{ + pub struct PipeMap + where + S: Body, + { + #[pin] + pipe: PipeToSendStream, + #[pin] + conn_drop_ref: Option>, + #[pin] + ping: Option, + } +} + +impl Future for PipeMap where - B: HttpBody + Send + 'static, + B: http_body::Body, + B::Error: Into>, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll { + let mut this = self.project(); + + match this.pipe.poll_unpin(cx) { + Poll::Ready(result) => { + if let Err(_e) = result { + debug!("client request body error: {}", _e); + } + drop(this.conn_drop_ref.take().expect("Future polled twice")); + drop(this.ping.take().expect("Future polled twice")); + return Poll::Ready(()); + } + Poll::Pending => (), + }; + Poll::Pending + } +} + +impl ClientTask +where + B: Body + 'static + Unpin, B::Data: Send, - B::Error: Into>, + E: Http2ClientConnExec + Unpin, + B::Error: Into>, + T: Read + Write + Unpin, { fn poll_pipe(&mut self, f: FutCtx, cx: &mut Context<'_>) { let ping = self.ping.clone(); + let send_stream = if !f.is_connect { if !f.eos { - let mut pipe = Box::pin(PipeToSendStream::new(f.body, f.body_tx)).map(|res| { - if let Err(e) = res { - debug!("client request body error: {}", e); - } - }); + let mut pipe = PipeToSendStream::new(f.body, f.body_tx); // eagerly see if the body pipe is ready and // can thus skip allocating in the executor @@ -264,13 +496,15 @@ where // "open stream" alive while this body is // still sending... 
let ping = ping.clone(); - let pipe = pipe.map(move |x| { - drop(conn_drop_ref); - drop(ping); - x - }); + + let pipe = PipeMap { + pipe, + conn_drop_ref: Some(conn_drop_ref), + ping: Some(ping), + }; // Clear send task - self.executor.execute(pipe); + self.executor + .execute_h2_future(H2ClientFuture::Pipe { pipe }); } } } @@ -280,7 +514,49 @@ where Some(f.body_tx) }; - let fut = f.fut.map(move |result| match result { + self.executor.execute_h2_future(H2ClientFuture::Send { + send_when: SendWhen { + when: ResponseFutMap { + fut: f.fut, + ping: Some(ping), + send_stream: Some(send_stream), + }, + call_back: Some(f.cb), + }, + }); + } +} + +pin_project! { + pub(crate) struct ResponseFutMap + where + B: Body, + B: 'static, + { + #[pin] + fut: ResponseFuture, + #[pin] + ping: Option, + #[pin] + send_stream: Option::Data>>>>, + } +} + +impl Future for ResponseFutMap +where + B: Body + 'static, +{ + type Output = Result, (crate::Error, Option>)>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + let result = ready!(this.fut.poll(cx)); + + let ping = this.ping.take().expect("Future polled twice"); + let send_stream = this.send_stream.take().expect("Future polled twice"); + + match result { Ok(res) => { // record that we got the response headers ping.record_non_data(); @@ -291,13 +567,13 @@ where warn!("h2 connect response with non-zero body not supported"); send_stream.send_reset(h2::Reason::INTERNAL_ERROR); - return Err(( + return Poll::Ready(Err(( crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), - None, - )); + None::>, + ))); } let (parts, recv_stream) = res.into_parts(); - let mut res = Response::from_parts(parts, Body::empty()); + let mut res = Response::from_parts(parts, IncomingBody::empty()); let (pending, on_upgrade) = crate::upgrade::pending(); let io = H2Upgraded { @@ -311,31 +587,32 @@ where pending.fulfill(upgraded); res.extensions_mut().insert(on_upgrade); - Ok(res) + Poll::Ready(Ok(res)) } else { 
let res = res.map(|stream| { let ping = ping.for_stream(&stream); - crate::Body::h2(stream, content_length.into(), ping) + IncomingBody::h2(stream, content_length.into(), ping) }); - Ok(res) + Poll::Ready(Ok(res)) } } Err(err) => { ping.ensure_not_timed_out().map_err(|e| (e, None))?; debug!("client response error: {}", err); - Err((crate::Error::new_h2(err), None)) + Poll::Ready(Err((crate::Error::new_h2(err), None::>))) } - }); - self.executor.execute(f.cb.send_when(fut)); + } } } -impl Future for ClientTask +impl Future for ClientTask where - B: HttpBody + Send + 'static, + B: Body + 'static + Unpin, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + E: Http2ClientConnExec + Unpin, + T: Read + Write + Unpin, { type Output = crate::Result; @@ -354,14 +631,11 @@ where } }; - match self.fut_ctx.take() { - // If we were waiting on pending open - // continue where we left off. - Some(f) => { - self.poll_pipe(f, cx); - continue; - } - None => (), + // If we were waiting on pending open + // continue where we left off. 
+ if let Some(f) = self.fut_ctx.take() { + self.poll_pipe(f, cx); + continue; } match self.req_rx.poll_recv(cx) { @@ -383,17 +657,16 @@ where let is_connect = req.method() == Method::CONNECT; let eos = body.is_end_stream(); - if is_connect { - if headers::content_length_parse_all(req.headers()) + if is_connect + && headers::content_length_parse_all(req.headers()) .map_or(false, |len| len != 0) - { - warn!("h2 connect request with non-zero body not supported"); - cb.send(Err(( - crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), - None, - ))); - continue; - } + { + warn!("h2 connect request with non-zero body not supported"); + cb.send(Err(TrySendError { + error: crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + message: None, + })); + continue; } if let Some(protocol) = req.extensions_mut().remove::() { @@ -404,7 +677,10 @@ where Ok(ok) => ok, Err(err) => { debug!("client send request error: {}", err); - cb.send(Err((crate::Error::new_h2(err), None))); + cb.send(Err(TrySendError { + error: crate::Error::new_h2(err), + message: None, + })); continue; } }; @@ -429,7 +705,10 @@ where } Poll::Ready(Ok(())) => (), Poll::Ready(Err(err)) => { - f.cb.send(Err((crate::Error::new_h2(err), None))); + f.cb.send(Err(TrySendError { + error: crate::Error::new_h2(err), + message: None, + })); continue; } } diff --git a/.cargo-vendor/hyper/src/proto/h2/mod.rs b/.cargo-vendor/hyper/src/proto/h2/mod.rs index d50850d0a0..adb6de87f9 100644 --- a/.cargo-vendor/hyper/src/proto/h2/mod.rs +++ b/.cargo-vendor/hyper/src/proto/h2/mod.rs @@ -1,19 +1,20 @@ -use bytes::{Buf, Bytes}; -use h2::{Reason, RecvStream, SendStream}; -use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE}; -use http::HeaderMap; -use pin_project_lite::pin_project; use std::error::Error as StdError; use std::future::Future; -use std::io::{self, Cursor, IoSlice}; +use std::io::{Cursor, IoSlice}; use std::mem; use std::pin::Pin; use std::task::{Context, Poll}; -use 
tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tracing::{debug, trace, warn}; -use crate::body::HttpBody; +use bytes::{Buf, Bytes}; +use futures_util::ready; +use h2::{Reason, RecvStream, SendStream}; +use http::header::{HeaderName, CONNECTION, TE, TRANSFER_ENCODING, UPGRADE}; +use http::HeaderMap; +use pin_project_lite::pin_project; + +use crate::body::Body; use crate::proto::h2::ping::Recorder; +use crate::rt::{Read, ReadBufCursor, Write}; pub(crate) mod ping; @@ -30,21 +31,19 @@ cfg_server! { /// Default initial stream window size defined in HTTP2 spec. pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535; +// List of connection headers from RFC 9110 Section 7.6.1 +// +// TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're +// tested separately. +static CONNECTION_HEADERS: [HeaderName; 4] = [ + HeaderName::from_static("keep-alive"), + HeaderName::from_static("proxy-connection"), + TRANSFER_ENCODING, + UPGRADE, +]; + fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { - // List of connection headers from: - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection - // - // TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're - // tested separately. 
- let connection_headers = [ - HeaderName::from_lowercase(b"keep-alive").unwrap(), - HeaderName::from_lowercase(b"proxy-connection").unwrap(), - TRAILER, - TRANSFER_ENCODING, - UPGRADE, - ]; - - for header in connection_headers.iter() { + for header in &CONNECTION_HEADERS { if headers.remove(header).is_some() { warn!("Connection header illegal in HTTP/2: {}", header.as_str()); } @@ -53,8 +52,7 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { if is_request { if headers .get(TE) - .map(|te_header| te_header != "trailers") - .unwrap_or(false) + .map_or(false, |te_header| te_header != "trailers") { warn!("TE headers not set to \"trailers\" are illegal in HTTP/2 requests"); headers.remove(TE); @@ -86,9 +84,9 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { // body adapters used by both Client and Server pin_project! { - struct PipeToSendStream + pub(crate) struct PipeToSendStream where - S: HttpBody, + S: Body, { body_tx: SendStream>, data_done: bool, @@ -99,7 +97,7 @@ pin_project! { impl PipeToSendStream where - S: HttpBody, + S: Body, { fn new(stream: S, tx: SendStream>) -> PipeToSendStream { PipeToSendStream { @@ -112,7 +110,7 @@ where impl Future for PipeToSendStream where - S: HttpBody, + S: Body, S::Error: Into>, { type Output = crate::Result<()>; @@ -120,43 +118,40 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut me = self.project(); loop { - if !*me.data_done { - // we don't have the next chunk of data yet, so just reserve 1 byte to make - // sure there's some capacity available. h2 will handle the capacity management - // for the actual body chunk. 
- me.body_tx.reserve_capacity(1); - - if me.body_tx.capacity() == 0 { - loop { - match ready!(me.body_tx.poll_capacity(cx)) { - Some(Ok(0)) => {} - Some(Ok(_)) => break, - Some(Err(e)) => { - return Poll::Ready(Err(crate::Error::new_body_write(e))) - } - None => { - // None means the stream is no longer in a - // streaming state, we either finished it - // somehow, or the remote reset us. - return Poll::Ready(Err(crate::Error::new_body_write( - "send stream capacity unexpectedly closed", - ))); - } + // we don't have the next chunk of data yet, so just reserve 1 byte to make + // sure there's some capacity available. h2 will handle the capacity management + // for the actual body chunk. + me.body_tx.reserve_capacity(1); + + if me.body_tx.capacity() == 0 { + loop { + match ready!(me.body_tx.poll_capacity(cx)) { + Some(Ok(0)) => {} + Some(Ok(_)) => break, + Some(Err(e)) => return Poll::Ready(Err(crate::Error::new_body_write(e))), + None => { + // None means the stream is no longer in a + // streaming state, we either finished it + // somehow, or the remote reset us. + return Poll::Ready(Err(crate::Error::new_body_write( + "send stream capacity unexpectedly closed", + ))); } } - } else if let Poll::Ready(reason) = me - .body_tx - .poll_reset(cx) - .map_err(crate::Error::new_body_write)? - { - debug!("stream received RST_STREAM: {:?}", reason); - return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from( - reason, - )))); } + } else if let Poll::Ready(reason) = me + .body_tx + .poll_reset(cx) + .map_err(crate::Error::new_body_write)? 
+ { + debug!("stream received RST_STREAM: {:?}", reason); + return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason)))); + } - match ready!(me.stream.as_mut().poll_data(cx)) { - Some(Ok(chunk)) => { + match ready!(me.stream.as_mut().poll_frame(cx)) { + Some(Ok(frame)) => { + if frame.is_data() { + let chunk = frame.into_data().unwrap_or_else(|_| unreachable!()); let is_eos = me.stream.is_end_stream(); trace!( "send body chunk: {} bytes, eos={}", @@ -172,43 +167,24 @@ where if is_eos { return Poll::Ready(Ok(())); } - } - Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))), - None => { + } else if frame.is_trailers() { + // no more DATA, so give any capacity back me.body_tx.reserve_capacity(0); - let is_eos = me.stream.is_end_stream(); - if is_eos { - return Poll::Ready(me.body_tx.send_eos_frame()); - } else { - *me.data_done = true; - // loop again to poll_trailers - } - } - } - } else { - if let Poll::Ready(reason) = me - .body_tx - .poll_reset(cx) - .map_err(crate::Error::new_body_write)? - { - debug!("stream received RST_STREAM: {:?}", reason); - return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from( - reason, - )))); - } - - match ready!(me.stream.poll_trailers(cx)) { - Ok(Some(trailers)) => { me.body_tx - .send_trailers(trailers) + .send_trailers(frame.into_trailers().unwrap_or_else(|_| unreachable!())) .map_err(crate::Error::new_body_write)?; return Poll::Ready(Ok(())); + } else { + trace!("discarding unknown frame"); + // loop again } - Ok(None) => { - // There were no trailers, so send an empty DATA frame... - return Poll::Ready(me.body_tx.send_eos_frame()); - } - Err(e) => return Poll::Ready(Err(me.body_tx.on_user_err(e))), + } + Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))), + None => { + // no more frames means we're done here + // but at this point, we haven't sent an EOS DATA, or + // any trailers, so send an empty EOS DATA. 
+ return Poll::Ready(me.body_tx.send_eos_frame()); } } } @@ -294,15 +270,15 @@ where buf: Bytes, } -impl AsyncRead for H2Upgraded +impl Read for H2Upgraded where B: Buf, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - read_buf: &mut ReadBuf<'_>, - ) -> Poll> { + mut read_buf: ReadBufCursor<'_>, + ) -> Poll> { if self.buf.is_empty() { self.buf = loop { match ready!(self.recv_stream.poll_data(cx)) { @@ -318,7 +294,7 @@ where return Poll::Ready(match e.reason() { Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()), Some(Reason::STREAM_CLOSED) => { - Err(io::Error::new(io::ErrorKind::BrokenPipe, e)) + Err(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)) } _ => Err(h2_to_io_error(e)), }) @@ -334,7 +310,7 @@ where } } -impl AsyncWrite for H2Upgraded +impl Write for H2Upgraded where B: Buf, { @@ -342,7 +318,7 @@ where mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], - ) -> Poll> { + ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } @@ -367,7 +343,7 @@ where Poll::Ready(Err(h2_to_io_error( match ready!(self.send_stream.poll_reset(cx)) { Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { - return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())) } Ok(reason) => reason.into(), Err(e) => e, @@ -375,14 +351,14 @@ where ))) } - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { if self.send_stream.write(&[], true).is_ok() { return Poll::Ready(Ok(())); } @@ -391,7 +367,7 @@ where match ready!(self.send_stream.poll_reset(cx)) { Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())), Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { - return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + return 
Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())) } Ok(reason) => reason.into(), Err(e) => e, @@ -400,11 +376,11 @@ where } } -fn h2_to_io_error(e: h2::Error) -> io::Error { +fn h2_to_io_error(e: h2::Error) -> std::io::Error { if e.is_io() { e.into_io().unwrap() } else { - io::Error::new(io::ErrorKind::Other, e) + std::io::Error::new(std::io::ErrorKind::Other, e) } } @@ -431,7 +407,7 @@ where unsafe { self.as_inner_unchecked().poll_reset(cx) } } - fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> { + fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), std::io::Error> { let send_buf = SendBuf::Cursor(Cursor::new(buf.into())); unsafe { self.as_inner_unchecked() diff --git a/.cargo-vendor/hyper/src/proto/h2/ping.rs b/.cargo-vendor/hyper/src/proto/h2/ping.rs index d830c93eda..ea2bbb36ad 100644 --- a/.cargo-vendor/hyper/src/proto/h2/ping.rs +++ b/.cargo-vendor/hyper/src/proto/h2/ping.rs @@ -18,23 +18,17 @@ /// 3b. Merge RTT with a running average. /// 3c. Calculate bdp as bytes/rtt. /// 3d. If bdp is over 2/3 max, set new max to bdp and update windows. 
- -#[cfg(feature = "runtime")] use std::fmt; -#[cfg(feature = "runtime")] use std::future::Future; -#[cfg(feature = "runtime")] use std::pin::Pin; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; -use std::time::Duration; -#[cfg(not(feature = "runtime"))] -use std::time::Instant; +use std::time::{Duration, Instant}; use h2::{Ping, PingPong}; -#[cfg(feature = "runtime")] -use tokio::time::{Instant, Sleep}; -use tracing::{debug, trace}; + +use crate::common::time::Time; +use crate::rt::Sleep; type WindowSize = u32; @@ -42,7 +36,7 @@ pub(super) fn disabled() -> Recorder { Recorder { shared: None } } -pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) { +pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Recorder, Ponger) { debug_assert!( config.is_enabled(), "ping channel requires bdp or keep-alive config", @@ -62,23 +56,20 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) (None, None) }; - #[cfg(feature = "runtime")] let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive { interval, timeout: config.keep_alive_timeout, while_idle: config.keep_alive_while_idle, - timer: Box::pin(tokio::time::sleep(interval)), + sleep: __timer.sleep(interval), state: KeepAliveState::Init, + timer: __timer, }); - #[cfg(feature = "runtime")] let last_read_at = keep_alive.as_ref().map(|_| Instant::now()); let shared = Arc::new(Mutex::new(Shared { bytes, - #[cfg(feature = "runtime")] last_read_at, - #[cfg(feature = "runtime")] is_keep_alive_timed_out: false, ping_pong, ping_sent_at: None, @@ -91,7 +82,6 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) }, Ponger { bdp, - #[cfg(feature = "runtime")] keep_alive, shared, }, @@ -102,14 +92,11 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) pub(super) struct Config { pub(super) bdp_initial_window: Option, /// If no frames are received in this amount of time, a PING 
frame is sent. - #[cfg(feature = "runtime")] pub(super) keep_alive_interval: Option, /// After sending a keepalive PING, the connection will be closed if /// a pong is not received in this amount of time. - #[cfg(feature = "runtime")] pub(super) keep_alive_timeout: Duration, /// If true, sends pings even when there are no active streams. - #[cfg(feature = "runtime")] pub(super) keep_alive_while_idle: bool, } @@ -120,7 +107,6 @@ pub(crate) struct Recorder { pub(super) struct Ponger { bdp: Option, - #[cfg(feature = "runtime")] keep_alive: Option, shared: Arc>, } @@ -140,10 +126,8 @@ struct Shared { // keep-alive /// If `Some`, keep-alive is enabled, and the Instant is how long ago /// the connection read the last frame. - #[cfg(feature = "runtime")] last_read_at: Option, - #[cfg(feature = "runtime")] is_keep_alive_timed_out: bool, } @@ -162,7 +146,6 @@ struct Bdp { stable_count: u32, } -#[cfg(feature = "runtime")] struct KeepAlive { /// If no frames are received in this amount of time, a PING frame is sent. interval: Duration, @@ -171,25 +154,22 @@ struct KeepAlive { timeout: Duration, /// If true, sends pings even when there are no active streams. 
while_idle: bool, - state: KeepAliveState, - timer: Pin>, + sleep: Pin>, + timer: Time, } -#[cfg(feature = "runtime")] enum KeepAliveState { Init, - Scheduled, + Scheduled(Instant), PingSent, } pub(super) enum Ponged { SizeUpdate(WindowSize), - #[cfg(feature = "runtime")] KeepAliveTimedOut, } -#[cfg(feature = "runtime")] #[derive(Debug)] pub(super) struct KeepAliveTimedOut; @@ -197,15 +177,7 @@ pub(super) struct KeepAliveTimedOut; impl Config { pub(super) fn is_enabled(&self) -> bool { - #[cfg(feature = "runtime")] - { - self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some() - } - - #[cfg(not(feature = "runtime"))] - { - self.bdp_initial_window.is_some() - } + self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some() } } @@ -221,7 +193,6 @@ impl Recorder { let mut locked = shared.lock().unwrap(); - #[cfg(feature = "runtime")] locked.update_last_read_at(); // are we ready to send another bdp ping? @@ -248,18 +219,15 @@ impl Recorder { } pub(crate) fn record_non_data(&self) { - #[cfg(feature = "runtime")] - { - let shared = if let Some(ref shared) = self.shared { - shared - } else { - return; - }; + let shared = if let Some(ref shared) = self.shared { + shared + } else { + return; + }; - let mut locked = shared.lock().unwrap(); + let mut locked = shared.lock().unwrap(); - locked.update_last_read_at(); - } + locked.update_last_read_at(); } /// If the incoming stream is already closed, convert self into @@ -274,13 +242,10 @@ impl Recorder { } pub(super) fn ensure_not_timed_out(&self) -> crate::Result<()> { - #[cfg(feature = "runtime")] - { - if let Some(ref shared) = self.shared { - let locked = shared.lock().unwrap(); - if locked.is_keep_alive_timed_out { - return Err(KeepAliveTimedOut.crate_error()); - } + if let Some(ref shared) = self.shared { + let locked = shared.lock().unwrap(); + if locked.is_keep_alive_timed_out { + return Err(KeepAliveTimedOut.crate_error()); } } @@ -295,15 +260,11 @@ impl Ponger { pub(super) fn poll(&mut self, 
cx: &mut task::Context<'_>) -> Poll { let now = Instant::now(); let mut locked = self.shared.lock().unwrap(); - #[cfg(feature = "runtime")] let is_idle = self.is_idle(); - #[cfg(feature = "runtime")] - { - if let Some(ref mut ka) = self.keep_alive { - ka.schedule(is_idle, &locked); - ka.maybe_ping(cx, &mut locked); - } + if let Some(ref mut ka) = self.keep_alive { + ka.maybe_schedule(is_idle, &locked); + ka.maybe_ping(cx, is_idle, &mut locked); } if !locked.is_ping_sent() { @@ -320,12 +281,10 @@ impl Ponger { let rtt = now - start; trace!("recv pong"); - #[cfg(feature = "runtime")] - { - if let Some(ref mut ka) = self.keep_alive { - locked.update_last_read_at(); - ka.schedule(is_idle, &locked); - } + if let Some(ref mut ka) = self.keep_alive { + locked.update_last_read_at(); + ka.maybe_schedule(is_idle, &locked); + ka.maybe_ping(cx, is_idle, &mut locked); } if let Some(ref mut bdp) = self.bdp { @@ -340,18 +299,15 @@ impl Ponger { } } } - Poll::Ready(Err(e)) => { - debug!("pong error: {}", e); + Poll::Ready(Err(_e)) => { + debug!("pong error: {}", _e); } Poll::Pending => { - #[cfg(feature = "runtime")] - { - if let Some(ref mut ka) = self.keep_alive { - if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) { - self.keep_alive = None; - locked.is_keep_alive_timed_out = true; - return Poll::Ready(Ponged::KeepAliveTimedOut); - } + if let Some(ref mut ka) = self.keep_alive { + if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) { + self.keep_alive = None; + locked.is_keep_alive_timed_out = true; + return Poll::Ready(Ponged::KeepAliveTimedOut); } } } @@ -361,7 +317,6 @@ impl Ponger { Poll::Pending } - #[cfg(feature = "runtime")] fn is_idle(&self) -> bool { Arc::strong_count(&self.shared) <= 2 } @@ -376,8 +331,8 @@ impl Shared { self.ping_sent_at = Some(Instant::now()); trace!("sent ping"); } - Err(err) => { - debug!("error sending ping: {}", err); + Err(_err) => { + debug!("error sending ping: {}", _err); } } } @@ -386,14 +341,12 @@ impl Shared { 
self.ping_sent_at.is_some() } - #[cfg(feature = "runtime")] fn update_last_read_at(&mut self) { if self.last_read_at.is_some() { self.last_read_at = Some(Instant::now()); } } - #[cfg(feature = "runtime")] fn last_read_at(&self) -> Instant { self.last_read_at.expect("keep_alive expects last_read_at") } @@ -469,49 +422,53 @@ fn seconds(dur: Duration) -> f64 { // ===== impl KeepAlive ===== -#[cfg(feature = "runtime")] impl KeepAlive { - fn schedule(&mut self, is_idle: bool, shared: &Shared) { + fn maybe_schedule(&mut self, is_idle: bool, shared: &Shared) { match self.state { KeepAliveState::Init => { if !self.while_idle && is_idle { return; } - self.state = KeepAliveState::Scheduled; - let interval = shared.last_read_at() + self.interval; - self.timer.as_mut().reset(interval); + self.schedule(shared); } KeepAliveState::PingSent => { if shared.is_ping_sent() { return; } - - self.state = KeepAliveState::Scheduled; - let interval = shared.last_read_at() + self.interval; - self.timer.as_mut().reset(interval); + self.schedule(shared); } - KeepAliveState::Scheduled => (), + KeepAliveState::Scheduled(..) 
=> (), } } - fn maybe_ping(&mut self, cx: &mut task::Context<'_>, shared: &mut Shared) { + fn schedule(&mut self, shared: &Shared) { + let interval = shared.last_read_at() + self.interval; + self.state = KeepAliveState::Scheduled(interval); + self.timer.reset(&mut self.sleep, interval); + } + + fn maybe_ping(&mut self, cx: &mut task::Context<'_>, is_idle: bool, shared: &mut Shared) { match self.state { - KeepAliveState::Scheduled => { - if Pin::new(&mut self.timer).poll(cx).is_pending() { + KeepAliveState::Scheduled(at) => { + if Pin::new(&mut self.sleep).poll(cx).is_pending() { return; } // check if we've received a frame while we were scheduled - if shared.last_read_at() + self.interval > self.timer.deadline() { + if shared.last_read_at() + self.interval > at { self.state = KeepAliveState::Init; cx.waker().wake_by_ref(); // schedule us again return; } + if !self.while_idle && is_idle { + trace!("keep-alive no need to ping when idle and while_idle=false"); + return; + } trace!("keep-alive interval ({:?}) reached", self.interval); shared.send_ping(); self.state = KeepAliveState::PingSent; let timeout = Instant::now() + self.timeout; - self.timer.as_mut().reset(timeout); + self.timer.reset(&mut self.sleep, timeout); } KeepAliveState::Init | KeepAliveState::PingSent => (), } @@ -520,34 +477,31 @@ impl KeepAlive { fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> { match self.state { KeepAliveState::PingSent => { - if Pin::new(&mut self.timer).poll(cx).is_pending() { + if Pin::new(&mut self.sleep).poll(cx).is_pending() { return Ok(()); } trace!("keep-alive timeout ({:?}) reached", self.timeout); Err(KeepAliveTimedOut) } - KeepAliveState::Init | KeepAliveState::Scheduled => Ok(()), + KeepAliveState::Init | KeepAliveState::Scheduled(..) 
=> Ok(()), } } } // ===== impl KeepAliveTimedOut ===== -#[cfg(feature = "runtime")] impl KeepAliveTimedOut { pub(super) fn crate_error(self) -> crate::Error { crate::Error::new(crate::error::Kind::Http2).with(self) } } -#[cfg(feature = "runtime")] impl fmt::Display for KeepAliveTimedOut { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("keep-alive timed out") } } -#[cfg(feature = "runtime")] impl std::error::Error for KeepAliveTimedOut { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { Some(&crate::error::TimedOut) diff --git a/.cargo-vendor/hyper/src/proto/h2/server.rs b/.cargo-vendor/hyper/src/proto/h2/server.rs index b7bff590ff..ee2d08eaaf 100644 --- a/.cargo-vendor/hyper/src/proto/h2/server.rs +++ b/.cargo-vendor/hyper/src/proto/h2/server.rs @@ -1,32 +1,32 @@ use std::error::Error as StdError; use std::future::Future; -use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; -#[cfg(feature = "runtime")] use std::time::Duration; use bytes::Bytes; +use futures_util::ready; use h2::server::{Connection, Handshake, SendResponse}; use h2::{Reason, RecvStream}; use http::{Method, Request}; use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace, warn}; use super::{ping, PipeToSendStream, SendBuf}; -use crate::body::HttpBody; +use crate::body::{Body, Incoming as IncomingBody}; use crate::common::date; -use crate::common::exec::ConnStreamExec; +use crate::common::io::Compat; +use crate::common::time::Time; use crate::ext::Protocol; use crate::headers; use crate::proto::h2::ping::Recorder; use crate::proto::h2::{H2Upgraded, UpgradedSendStream}; use crate::proto::Dispatched; +use crate::rt::bounds::Http2ServerConnExec; +use crate::rt::{Read, Write}; use crate::service::HttpService; use crate::upgrade::{OnUpgrade, Pending, Upgraded}; -use crate::{Body, Response}; +use crate::Response; // Our defaults are chosen for the "majority" case, which usually are not // resource 
constrained, and so the spec default of 64kb can be too limiting @@ -38,7 +38,7 @@ const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb -const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20; // 16 MB "sane default" taken from golang http2 +const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 1024 * 16; // 16kb const DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS: usize = 1024; #[derive(Clone, Debug)] @@ -51,12 +51,11 @@ pub(crate) struct Config { pub(crate) max_concurrent_streams: Option, pub(crate) max_pending_accept_reset_streams: Option, pub(crate) max_local_error_reset_streams: Option, - #[cfg(feature = "runtime")] pub(crate) keep_alive_interval: Option, - #[cfg(feature = "runtime")] pub(crate) keep_alive_timeout: Duration, pub(crate) max_send_buffer_size: usize, pub(crate) max_header_list_size: u32, + pub(crate) date_header: bool, } impl Default for Config { @@ -67,15 +66,14 @@ impl Default for Config { initial_stream_window_size: DEFAULT_STREAM_WINDOW, max_frame_size: DEFAULT_MAX_FRAME_SIZE, enable_connect_protocol: false, - max_concurrent_streams: None, + max_concurrent_streams: Some(200), max_pending_accept_reset_streams: None, max_local_error_reset_streams: Some(DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS), - #[cfg(feature = "runtime")] keep_alive_interval: None, - #[cfg(feature = "runtime")] keep_alive_timeout: Duration::from_secs(20), max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, + date_header: true, } } } @@ -83,22 +81,24 @@ impl Default for Config { pin_project! 
{ pub(crate) struct Server where - S: HttpService, - B: HttpBody, + S: HttpService, + B: Body, { exec: E, + timer: Time, service: S, state: State, + date_header: bool, } } enum State where - B: HttpBody, + B: Body, { Handshaking { ping_config: ping::Config, - hs: Handshake>, + hs: Handshake, SendBuf>, }, Serving(Serving), Closed, @@ -106,29 +106,36 @@ where struct Serving where - B: HttpBody, + B: Body, { ping: Option<(ping::Recorder, ping::Ponger)>, - conn: Connection>, + conn: Connection, SendBuf>, closing: Option, + date_header: bool, } impl Server where - T: AsyncRead + AsyncWrite + Unpin, - S: HttpService, + T: Read + Write + Unpin, + S: HttpService, S::Error: Into>, - B: HttpBody + 'static, - E: ConnStreamExec, + B: Body + 'static, + E: Http2ServerConnExec, { - pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server { + pub(crate) fn new( + io: T, + service: S, + config: &Config, + exec: E, + timer: Time, + ) -> Server { let mut builder = h2::server::Builder::default(); builder .initial_window_size(config.initial_stream_window_size) .initial_connection_window_size(config.initial_conn_window_size) .max_frame_size(config.max_frame_size) .max_header_list_size(config.max_header_list_size) - .max_local_error_reset_streams(config.max_local_error_reset_streams) + .max_local_error_reset_streams(config.max_pending_accept_reset_streams) .max_send_buffer_size(config.max_send_buffer_size); if let Some(max) = config.max_concurrent_streams { builder.max_concurrent_streams(max); @@ -139,7 +146,7 @@ where if config.enable_connect_protocol { builder.enable_connect_protocol(); } - let handshake = builder.handshake(io); + let handshake = builder.handshake(Compat::new(io)); let bdp = if config.adaptive_window { Some(config.initial_stream_window_size) @@ -149,23 +156,22 @@ where let ping_config = ping::Config { bdp_initial_window: bdp, - #[cfg(feature = "runtime")] keep_alive_interval: config.keep_alive_interval, - #[cfg(feature = "runtime")] keep_alive_timeout: 
config.keep_alive_timeout, // If keep-alive is enabled for servers, always enabled while // idle, so it can more aggressively close dead connections. - #[cfg(feature = "runtime")] keep_alive_while_idle: true, }; Server { exec, + timer, state: State::Handshaking { ping_config, hs: handshake, }, service, + date_header: config.date_header, } } @@ -191,11 +197,11 @@ where impl Future for Server where - T: AsyncRead + AsyncWrite + Unpin, - S: HttpService, + T: Read + Write + Unpin, + S: HttpService, S::Error: Into>, - B: HttpBody + 'static, - E: ConnStreamExec, + B: Body + 'static, + E: Http2ServerConnExec, { type Output = crate::Result; @@ -210,7 +216,7 @@ where let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?; let ping = if ping_config.is_enabled() { let pp = conn.ping_pong().expect("conn.ping_pong"); - Some(ping::channel(pp, ping_config.clone())) + Some(ping::channel(pp, ping_config.clone(), me.timer.clone())) } else { None }; @@ -218,6 +224,7 @@ where ping, conn, closing: None, + date_header: me.date_header, }) } State::Serving(ref mut srv) => { @@ -237,8 +244,8 @@ where impl Serving where - T: AsyncRead + AsyncWrite + Unpin, - B: HttpBody + 'static, + T: Read + Write + Unpin, + B: Body + 'static, { fn poll_server( &mut self, @@ -247,46 +254,14 @@ where exec: &mut E, ) -> Poll> where - S: HttpService, + S: HttpService, S::Error: Into>, - E: ConnStreamExec, + E: Http2ServerConnExec, { if self.closing.is_none() { loop { self.poll_ping(cx); - // Check that the service is ready to accept a new request. - // - // - If not, just drive the connection some. - // - If ready, try to accept a new request from the connection. - match service.poll_ready(cx) { - Poll::Ready(Ok(())) => (), - Poll::Pending => { - // use `poll_closed` instead of `poll_accept`, - // in order to avoid accepting a request. 
- ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?; - trace!("incoming connection complete"); - return Poll::Ready(Ok(())); - } - Poll::Ready(Err(err)) => { - let err = crate::Error::new_user_service(err); - debug!("service closed: {}", err); - - let reason = err.h2_reason(); - if reason == Reason::NO_ERROR { - // NO_ERROR is only used for graceful shutdowns... - trace!("interpreting NO_ERROR user error as graceful_shutdown"); - self.conn.graceful_shutdown(); - } else { - trace!("abruptly shutting down with {:?}", reason); - self.conn.abrupt_shutdown(reason); - } - self.closing = Some(err); - break; - } - } - - // When the service is ready, accepts an incoming request. match ready!(self.conn.poll_accept(cx)) { Some(Ok((req, mut respond))) => { trace!("incoming request"); @@ -306,7 +281,7 @@ where ( Request::from_parts( parts, - crate::Body::h2(stream, content_length.into(), ping), + IncomingBody::h2(stream, content_length.into(), ping), ), None, ) @@ -320,7 +295,7 @@ where debug_assert!(parts.extensions.get::().is_none()); parts.extensions.insert(upgrade); ( - Request::from_parts(parts, crate::Body::empty()), + Request::from_parts(parts, IncomingBody::empty()), Some(ConnectParts { pending, ping, @@ -333,7 +308,13 @@ where req.extensions_mut().insert(Protocol::from_inner(protocol)); } - let fut = H2Stream::new(service.call(req), connect_parts, respond); + let fut = H2Stream::new( + service.call(req), + connect_parts, + respond, + self.date_header, + ); + exec.execute_h2stream(fut); } Some(Err(e)) => { @@ -369,7 +350,6 @@ where self.conn.set_target_window_size(wnd); let _ = self.conn.set_initial_window_size(wnd); } - #[cfg(feature = "runtime")] Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { debug!("keep-alive timed out, closing connection"); self.conn.abrupt_shutdown(h2::Reason::NO_ERROR); @@ -384,11 +364,12 @@ pin_project! 
{ #[allow(missing_debug_implementations)] pub struct H2Stream where - B: HttpBody, + B: Body, { reply: SendResponse>, #[pin] state: H2StreamState, + date_header: bool, } } @@ -396,7 +377,7 @@ pin_project! { #[project = H2StreamStateProj] enum H2StreamState where - B: HttpBody, + B: Body, { Service { #[pin] @@ -418,16 +399,18 @@ struct ConnectParts { impl H2Stream where - B: HttpBody, + B: Body, { fn new( fut: F, connect_parts: Option, respond: SendResponse>, + date_header: bool, ) -> H2Stream { H2Stream { reply: respond, state: H2StreamState::Service { fut, connect_parts }, + date_header, } } } @@ -448,7 +431,7 @@ macro_rules! reply { impl H2Stream where F: Future, E>>, - B: HttpBody, + B: Body, B::Data: 'static, B::Error: Into>, E: Into>, @@ -486,10 +469,12 @@ where let mut res = ::http::Response::from_parts(head, ()); super::strip_connection_headers(res.headers_mut(), false); - // set Date header if it isn't already set... - res.headers_mut() - .entry(::http::header::DATE) - .or_insert_with(date::update_and_header_value); + // set Date header if it isn't already set if instructed + if *me.date_header { + res.headers_mut() + .entry(::http::header::DATE) + .or_insert_with(date::update_and_header_value); + } if let Some(connect_parts) = connect_parts.take() { if res.status().is_success() { @@ -541,7 +526,7 @@ where impl Future for H2Stream where F: Future, E>>, - B: HttpBody, + B: Body, B::Data: 'static, B::Error: Into>, E: Into>, @@ -550,8 +535,8 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.poll2(cx).map(|res| { - if let Err(e) = res { - debug!("stream error: {}", e); + if let Err(_e) = res { + debug!("stream error: {}", _e); } }) } diff --git a/.cargo-vendor/hyper/src/proto/mod.rs b/.cargo-vendor/hyper/src/proto/mod.rs index 3628576dc1..fcdf2b97c0 100644 --- a/.cargo-vendor/hyper/src/proto/mod.rs +++ b/.cargo-vendor/hyper/src/proto/mod.rs @@ -17,6 +17,7 @@ cfg_feature! { pub(crate) mod h2; /// An Incoming Message head. 
Includes request/status line, and headers. +#[cfg(feature = "http1")] #[derive(Debug, Default)] pub(crate) struct MessageHead { /// HTTP version of the message. @@ -59,6 +60,7 @@ pub(crate) enum Dispatched { Upgrade(crate::upgrade::Pending), } +#[cfg(all(feature = "client", feature = "http1"))] impl MessageHead { fn into_response(self, body: B) -> http::Response { let mut res = http::Response::new(body); diff --git a/.cargo-vendor/hyper/src/rt/bounds.rs b/.cargo-vendor/hyper/src/rt/bounds.rs new file mode 100644 index 0000000000..aa3075e079 --- /dev/null +++ b/.cargo-vendor/hyper/src/rt/bounds.rs @@ -0,0 +1,109 @@ +//! Trait aliases +//! +//! Traits in this module ease setting bounds and usually automatically +//! implemented by implementing another trait. + +#[cfg(all(feature = "server", feature = "http2"))] +pub use self::h2::Http2ServerConnExec; + +#[cfg(all(feature = "client", feature = "http2"))] +pub use self::h2_client::Http2ClientConnExec; + +#[cfg(all(feature = "client", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "client", feature = "http2"))))] +mod h2_client { + use std::{error::Error, future::Future}; + + use crate::rt::{Read, Write}; + use crate::{proto::h2::client::H2ClientFuture, rt::Executor}; + + /// An executor to spawn http2 futures for the client. + /// + /// This trait is implemented for any type that implements [`Executor`] + /// trait for any future. + /// + /// This trait is sealed and cannot be implemented for types outside this crate. 
+ /// + /// [`Executor`]: crate::rt::Executor + pub trait Http2ClientConnExec: sealed_client::Sealed<(B, T)> + where + B: http_body::Body, + B::Error: Into>, + T: Read + Write + Unpin, + { + #[doc(hidden)] + fn execute_h2_future(&mut self, future: H2ClientFuture); + } + + impl Http2ClientConnExec for E + where + E: Executor>, + B: http_body::Body + 'static, + B::Error: Into>, + H2ClientFuture: Future, + T: Read + Write + Unpin, + { + fn execute_h2_future(&mut self, future: H2ClientFuture) { + self.execute(future) + } + } + + impl sealed_client::Sealed<(B, T)> for E + where + E: Executor>, + B: http_body::Body + 'static, + B::Error: Into>, + H2ClientFuture: Future, + T: Read + Write + Unpin, + { + } + + mod sealed_client { + pub trait Sealed {} + } +} + +#[cfg(all(feature = "server", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "server", feature = "http2"))))] +mod h2 { + use crate::{proto::h2::server::H2Stream, rt::Executor}; + use http_body::Body; + use std::future::Future; + + /// An executor to spawn http2 connections. + /// + /// This trait is implemented for any type that implements [`Executor`] + /// trait for any future. + /// + /// This trait is sealed and cannot be implemented for types outside this crate. 
+ /// + /// [`Executor`]: crate::rt::Executor + pub trait Http2ServerConnExec: sealed::Sealed<(F, B)> + Clone { + #[doc(hidden)] + fn execute_h2stream(&mut self, fut: H2Stream); + } + + #[doc(hidden)] + impl Http2ServerConnExec for E + where + E: Executor> + Clone, + H2Stream: Future, + B: Body, + { + fn execute_h2stream(&mut self, fut: H2Stream) { + self.execute(fut) + } + } + + impl sealed::Sealed<(F, B)> for E + where + E: Executor> + Clone, + H2Stream: Future, + B: Body, + { + } + + mod sealed { + pub trait Sealed {} + } +} diff --git a/.cargo-vendor/hyper/src/rt/io.rs b/.cargo-vendor/hyper/src/rt/io.rs new file mode 100644 index 0000000000..fbe9ecc901 --- /dev/null +++ b/.cargo-vendor/hyper/src/rt/io.rs @@ -0,0 +1,395 @@ +use std::fmt; +use std::mem::MaybeUninit; +use std::ops::DerefMut; +use std::pin::Pin; +use std::task::{Context, Poll}; + +// New IO traits? What?! Why, are you bonkers? +// +// I mean, yes, probably. But, here's the goals: +// +// 1. Supports poll-based IO operations. +// 2. Opt-in vectored IO. +// 3. Can use an optional buffer pool. +// 4. Able to add completion-based (uring) IO eventually. +// +// Frankly, the last point is the entire reason we're doing this. We want to +// have forwards-compatibility with an eventually stable io-uring runtime. We +// don't need that to work right away. But it must be possible to add in here +// without breaking hyper 1.0. +// +// While in here, if there's small tweaks to poll_read or poll_write that would +// allow even the "slow" path to be faster, such as if someone didn't remember +// to forward along an `is_completion` call. + +/// Reads bytes from a source. +/// +/// This trait is similar to `std::io::Read`, but supports asynchronous reads. +pub trait Read { + /// Attempts to read bytes into the `buf`. + /// + /// On success, returns `Poll::Ready(Ok(()))` and places data in the + /// unfilled portion of `buf`. 
If no data was read (`buf.remaining()` is + /// unchanged), it implies that EOF has been reached. + /// + /// If no data is available for reading, the method returns `Poll::Pending` + /// and arranges for the current task (via `cx.waker()`) to receive a + /// notification when the object becomes readable or is closed. + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: ReadBufCursor<'_>, + ) -> Poll>; +} + +/// Write bytes asynchronously. +/// +/// This trait is similar to `std::io::Write`, but for asynchronous writes. +pub trait Write { + /// Attempt to write bytes from `buf` into the destination. + /// + /// On success, returns `Poll::Ready(Ok(num_bytes_written)))`. If + /// successful, it must be guaranteed that `n <= buf.len()`. A return value + /// of `0` means that the underlying object is no longer able to accept + /// bytes, or that the provided buffer is empty. + /// + /// If the object is not ready for writing, the method returns + /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to + /// receive a notification when the object becomes writable or is closed. + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll>; + + /// Attempts to flush the object. + /// + /// On success, returns `Poll::Ready(Ok(()))`. + /// + /// If flushing cannot immediately complete, this method returns + /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to + /// receive a notification when the object can make progress. + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + + /// Attempts to shut down this writer. + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>; + + /// Returns whether this writer has an efficient `poll_write_vectored` + /// implementation. + /// + /// The default implementation returns `false`. 
+ fn is_write_vectored(&self) -> bool { + false + } + + /// Like `poll_write`, except that it writes from a slice of buffers. + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + let buf = bufs + .iter() + .find(|b| !b.is_empty()) + .map_or(&[][..], |b| &**b); + self.poll_write(cx, buf) + } +} + +/// A wrapper around a byte buffer that is incrementally filled and initialized. +/// +/// This type is a sort of "double cursor". It tracks three regions in the +/// buffer: a region at the beginning of the buffer that has been logically +/// filled with data, a region that has been initialized at some point but not +/// yet logically filled, and a region at the end that may be uninitialized. +/// The filled region is guaranteed to be a subset of the initialized region. +/// +/// In summary, the contents of the buffer can be visualized as: +/// +/// ```not_rust +/// [ capacity ] +/// [ filled | unfilled ] +/// [ initialized | uninitialized ] +/// ``` +/// +/// It is undefined behavior to de-initialize any bytes from the uninitialized +/// region, since it is merely unknown whether this region is uninitialized or +/// not, and if part of it turns out to be initialized, it must stay initialized. +pub struct ReadBuf<'a> { + raw: &'a mut [MaybeUninit], + filled: usize, + init: usize, +} + +/// The cursor part of a [`ReadBuf`]. +/// +/// This is created by calling `ReadBuf::unfilled()`. +#[derive(Debug)] +pub struct ReadBufCursor<'a> { + buf: &'a mut ReadBuf<'a>, +} + +impl<'data> ReadBuf<'data> { + /// Create a new `ReadBuf` with a slice of initialized bytes. + #[inline] + pub fn new(raw: &'data mut [u8]) -> Self { + let len = raw.len(); + Self { + // SAFETY: We never de-init the bytes ourselves. + raw: unsafe { &mut *(raw as *mut [u8] as *mut [MaybeUninit]) }, + filled: 0, + init: len, + } + } + + /// Create a new `ReadBuf` with a slice of uninitialized bytes. 
+ #[inline] + pub fn uninit(raw: &'data mut [MaybeUninit]) -> Self { + Self { + raw, + filled: 0, + init: 0, + } + } + + /// Get a slice of the buffer that has been filled in with bytes. + #[inline] + pub fn filled(&self) -> &[u8] { + // SAFETY: We only slice the filled part of the buffer, which is always valid + unsafe { &*(&self.raw[0..self.filled] as *const [MaybeUninit] as *const [u8]) } + } + + /// Get a cursor to the unfilled portion of the buffer. + #[inline] + pub fn unfilled<'cursor>(&'cursor mut self) -> ReadBufCursor<'cursor> { + ReadBufCursor { + // SAFETY: self.buf is never re-assigned, so its safe to narrow + // the lifetime. + buf: unsafe { + std::mem::transmute::<&'cursor mut ReadBuf<'data>, &'cursor mut ReadBuf<'cursor>>( + self, + ) + }, + } + } + + #[inline] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] + pub(crate) unsafe fn set_init(&mut self, n: usize) { + self.init = self.init.max(n); + } + + #[inline] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] + pub(crate) unsafe fn set_filled(&mut self, n: usize) { + self.filled = self.filled.max(n); + } + + #[inline] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] + pub(crate) fn len(&self) -> usize { + self.filled + } + + #[inline] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http2"))] + pub(crate) fn init_len(&self) -> usize { + self.init + } + + #[inline] + fn remaining(&self) -> usize { + self.capacity() - self.filled + } + + #[inline] + fn capacity(&self) -> usize { + self.raw.len() + } +} + +impl<'data> fmt::Debug for ReadBuf<'data> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ReadBuf") + .field("filled", &self.filled) + .field("init", &self.init) + .field("capacity", &self.capacity()) + .finish() + } +} + +impl<'data> ReadBufCursor<'data> { + /// Access the unfilled part of the buffer. 
+ /// + /// # Safety + /// + /// The caller must not uninitialize any bytes that may have been + /// initialized before. + #[inline] + pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit] { + &mut self.buf.raw[self.buf.filled..] + } + + /// Advance the `filled` cursor by `n` bytes. + /// + /// # Safety + /// + /// The caller must take care that `n` more bytes have been initialized. + #[inline] + pub unsafe fn advance(&mut self, n: usize) { + self.buf.filled = self.buf.filled.checked_add(n).expect("overflow"); + self.buf.init = self.buf.filled.max(self.buf.init); + } + + #[inline] + pub(crate) fn remaining(&self) -> usize { + self.buf.remaining() + } + + #[inline] + pub(crate) fn put_slice(&mut self, buf: &[u8]) { + assert!( + self.buf.remaining() >= buf.len(), + "buf.len() must fit in remaining()" + ); + + let amt = buf.len(); + // Cannot overflow, asserted above + let end = self.buf.filled + amt; + + // Safety: the length is asserted above + unsafe { + self.buf.raw[self.buf.filled..end] + .as_mut_ptr() + .cast::() + .copy_from_nonoverlapping(buf.as_ptr(), amt); + } + + if self.buf.init < end { + self.buf.init = end; + } + self.buf.filled = end; + } +} + +macro_rules! deref_async_read { + () => { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: ReadBufCursor<'_>, + ) -> Poll> { + Pin::new(&mut **self).poll_read(cx, buf) + } + }; +} + +impl Read for Box { + deref_async_read!(); +} + +impl Read for &mut T { + deref_async_read!(); +} + +impl

Read for Pin

+where + P: DerefMut, + P::Target: Read, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: ReadBufCursor<'_>, + ) -> Poll> { + pin_as_deref_mut(self).poll_read(cx, buf) + } +} + +macro_rules! deref_async_write { + () => { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut **self).poll_write(cx, buf) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut **self).poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + (**self).is_write_vectored() + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self).poll_flush(cx) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut **self).poll_shutdown(cx) + } + }; +} + +impl Write for Box { + deref_async_write!(); +} + +impl Write for &mut T { + deref_async_write!(); +} + +impl

Write for Pin

+where + P: DerefMut, + P::Target: Write, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + pin_as_deref_mut(self).poll_write(cx, buf) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + pin_as_deref_mut(self).poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + (**self).is_write_vectored() + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + pin_as_deref_mut(self).poll_flush(cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + pin_as_deref_mut(self).poll_shutdown(cx) + } +} + +/// Polyfill for Pin::as_deref_mut() +/// TODO: use Pin::as_deref_mut() instead once stabilized +fn pin_as_deref_mut(pin: Pin<&mut Pin

>) -> Pin<&mut P::Target> { + // SAFETY: we go directly from Pin<&mut Pin

> to Pin<&mut P::Target>, without moving or + // giving out the &mut Pin

in the process. See Pin::as_deref_mut() for more detail. + unsafe { pin.get_unchecked_mut() }.as_mut() +} diff --git a/.cargo-vendor/hyper/src/rt/mod.rs b/.cargo-vendor/hyper/src/rt/mod.rs new file mode 100644 index 0000000000..de67c3fc89 --- /dev/null +++ b/.cargo-vendor/hyper/src/rt/mod.rs @@ -0,0 +1,42 @@ +//! Runtime components +//! +//! The traits and types within this module are used to allow plugging in +//! runtime types. These include: +//! +//! - Executors +//! - Timers +//! - IO transports + +pub mod bounds; +mod io; +mod timer; + +pub use self::io::{Read, ReadBuf, ReadBufCursor, Write}; +pub use self::timer::{Sleep, Timer}; + +/// An executor of futures. +/// +/// This trait allows Hyper to abstract over async runtimes. Implement this trait for your own type. +/// +/// # Example +/// +/// ``` +/// # use hyper::rt::Executor; +/// # use std::future::Future; +/// #[derive(Clone)] +/// struct TokioExecutor; +/// +/// impl Executor for TokioExecutor +/// where +/// F: Future + Send + 'static, +/// F::Output: Send + 'static, +/// { +/// fn execute(&self, future: F) { +/// tokio::spawn(future); +/// } +/// } +/// ``` +pub trait Executor { + /// Place the future into the executor to be run. + fn execute(&self, fut: Fut); +} diff --git a/.cargo-vendor/hyper/src/rt/timer.rs b/.cargo-vendor/hyper/src/rt/timer.rs new file mode 100644 index 0000000000..c486ecc14a --- /dev/null +++ b/.cargo-vendor/hyper/src/rt/timer.rs @@ -0,0 +1,127 @@ +//! Provides a timer trait with timer-like functions +//! +//! Example using tokio timer: +//! ```rust +//! use std::{ +//! future::Future, +//! pin::Pin, +//! task::{Context, Poll}, +//! time::{Duration, Instant}, +//! }; +//! +//! use pin_project_lite::pin_project; +//! use hyper::rt::{Timer, Sleep}; +//! +//! #[derive(Clone, Debug)] +//! pub struct TokioTimer; +//! +//! impl Timer for TokioTimer { +//! fn sleep(&self, duration: Duration) -> Pin> { +//! Box::pin(TokioSleep { +//! inner: tokio::time::sleep(duration), +//! }) +//! 
} +//! +//! fn sleep_until(&self, deadline: Instant) -> Pin> { +//! Box::pin(TokioSleep { +//! inner: tokio::time::sleep_until(deadline.into()), +//! }) +//! } +//! +//! fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { +//! if let Some(sleep) = sleep.as_mut().downcast_mut_pin::() { +//! sleep.reset(new_deadline.into()) +//! } +//! } +//! } +//! +//! pin_project! { +//! pub(crate) struct TokioSleep { +//! #[pin] +//! pub(crate) inner: tokio::time::Sleep, +//! } +//! } +//! +//! impl Future for TokioSleep { +//! type Output = (); +//! +//! fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { +//! self.project().inner.poll(cx) +//! } +//! } +//! +//! impl Sleep for TokioSleep {} +//! +//! impl TokioSleep { +//! pub fn reset(self: Pin<&mut Self>, deadline: Instant) { +//! self.project().inner.as_mut().reset(deadline.into()); +//! } +//! } +//! ```` + +use std::{ + any::TypeId, + future::Future, + pin::Pin, + time::{Duration, Instant}, +}; + +/// A timer which provides timer-like functions. +pub trait Timer { + /// Return a future that resolves in `duration` time. + fn sleep(&self, duration: Duration) -> Pin>; + + /// Return a future that resolves at `deadline`. + fn sleep_until(&self, deadline: Instant) -> Pin>; + + /// Reset a future to resolve at `new_deadline` instead. + fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { + *sleep = self.sleep_until(new_deadline); + } +} + +/// A future returned by a `Timer`. +pub trait Sleep: Send + Sync + Future { + #[doc(hidden)] + /// This method is private and can not be implemented by downstream crate + fn __type_id(&self, _: private::Sealed) -> TypeId + where + Self: 'static, + { + TypeId::of::() + } +} + +impl dyn Sleep { + //! 
This is a re-implementation of downcast methods from std::any::Any + + /// Check whether the type is the same as `T` + pub fn is(&self) -> bool + where + T: Sleep + 'static, + { + self.__type_id(private::Sealed {}) == TypeId::of::() + } + + /// Downcast a pinned &mut Sleep object to its original type + pub fn downcast_mut_pin(self: Pin<&mut Self>) -> Option> + where + T: Sleep + 'static, + { + if self.is::() { + unsafe { + let inner = Pin::into_inner_unchecked(self); + Some(Pin::new_unchecked( + &mut *(&mut *inner as *mut dyn Sleep as *mut T), + )) + } + } else { + None + } + } +} + +mod private { + #![allow(missing_debug_implementations)] + pub struct Sealed {} +} diff --git a/.cargo-vendor/hyper/src/server/conn/http1.rs b/.cargo-vendor/hyper/src/server/conn/http1.rs index ab833b938b..a1e3325785 100644 --- a/.cargo-vendor/hyper/src/server/conn/http1.rs +++ b/.cargo-vendor/hyper/src/server/conn/http1.rs @@ -3,17 +3,23 @@ use std::error::Error as StdError; use std::fmt; use std::future::Future; -use std::marker::Unpin; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; +use crate::rt::{Read, Write}; +use crate::upgrade::Upgraded; use bytes::Bytes; -use tokio::io::{AsyncRead, AsyncWrite}; +use futures_util::ready; -use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::body::{Body, Incoming as IncomingBody}; use crate::proto; use crate::service::HttpService; +use crate::{ + common::time::{Dur, Time}, + rt::Timer, +}; type Http1Dispatcher = proto::h1::Dispatcher< proto::h1::dispatch::Server, @@ -23,9 +29,12 @@ type Http1Dispatcher = proto::h1::Dispatcher< >; pin_project_lite::pin_project! { - /// A future binding an http1 connection with a Service. + /// A [`Future`](core::future::Future) representing an HTTP/1 connection, bound to a + /// [`Service`](crate::service::Service), returned from + /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection). 
/// - /// Polling this future will drive HTTP forward. + /// To drive HTTP on this connection this future **must be polled**, typically with + /// `.await`. If it isn't polled, no progress will be made on this connection. #[must_use = "futures do nothing unless polled"] pub struct Connection where @@ -36,16 +45,41 @@ pin_project_lite::pin_project! { } /// A configuration builder for HTTP/1 server connections. +/// +/// **Note**: The default values of options are *not considered stable*. They +/// are subject to change at any time. +/// +/// # Example +/// +/// ``` +/// # use std::time::Duration; +/// # use hyper::server::conn::http1::Builder; +/// # fn main() { +/// let mut http = Builder::new(); +/// // Set options one at a time +/// http.half_close(false); +/// +/// // Or, chain multiple options +/// http.keep_alive(false).title_case_headers(true).max_buf_size(8192); +/// +/// # } +/// ``` +/// +/// Use [`Builder::serve_connection`](struct.Builder.html#method.serve_connection) +/// to bind the built connection to a service. #[derive(Clone, Debug)] pub struct Builder { + timer: Time, h1_half_close: bool, h1_keep_alive: bool, h1_title_case_headers: bool, h1_preserve_header_case: bool, - h1_header_read_timeout: Option, + h1_max_headers: Option, + h1_header_read_timeout: Dur, h1_writev: Option, max_buf_size: Option, pipeline_flush: bool, + date_header: bool, } /// Deconstructed parts of a `Connection`. @@ -53,6 +87,7 @@ pub struct Builder { /// This allows taking apart a `Connection` at a later time, in order to /// reclaim the IO object, and additional related pieces. #[derive(Debug)] +#[non_exhaustive] pub struct Parts { /// The original IO object used in the handshake. pub io: T, @@ -67,7 +102,6 @@ pub struct Parts { pub read_buf: Bytes, /// The `Service` used to serve this connection. 
pub service: S, - _inner: (), } // ===== impl Connection ===== @@ -85,7 +119,7 @@ impl Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, { @@ -118,7 +152,6 @@ where io, read_buf, service: dispatch.into_service(), - _inner: (), } } @@ -133,7 +166,6 @@ where where S: Unpin, S::Future: Unpin, - B: Unpin, { self.conn.poll_without_shutdown(cx) } @@ -144,12 +176,7 @@ where /// # Error /// /// This errors if the underlying connection protocol is not HTTP/1. - pub fn without_shutdown(self) -> impl Future>> - where - S: Unpin, - S::Future: Unpin, - B: Unpin, - { + pub fn without_shutdown(self) -> impl Future>> { let mut zelf = Some(self); futures_util::future::poll_fn(move |cx| { ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?; @@ -160,11 +187,11 @@ where /// Enable this connection to support higher-level HTTP upgrades. /// /// See [the `upgrade` module](crate::upgrade) for more. - pub fn with_upgrades(self) -> upgrades::UpgradeableConnection + pub fn with_upgrades(self) -> UpgradeableConnection where I: Send, { - upgrades::UpgradeableConnection { inner: Some(self) } + UpgradeableConnection { inner: Some(self) } } } @@ -172,7 +199,7 @@ impl Future for Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, + I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, { @@ -191,7 +218,7 @@ where pending.manual(); } }; - return Poll::Ready(Ok(())); + Poll::Ready(Ok(())) } Err(e) => Poll::Ready(Err(e)), } @@ -204,14 +231,17 @@ impl Builder { /// Create a new connection builder. 
pub fn new() -> Self { Self { + timer: Time::Empty, h1_half_close: false, h1_keep_alive: true, h1_title_case_headers: false, h1_preserve_header_case: false, - h1_header_read_timeout: None, + h1_max_headers: None, + h1_header_read_timeout: Dur::Default(Some(Duration::from_secs(30))), h1_writev: None, max_buf_size: None, pipeline_flush: false, + date_header: true, } } /// Set whether HTTP/1 connections should support half-closures. @@ -260,12 +290,35 @@ impl Builder { self } + /// Set the maximum number of headers. + /// + /// When a request is received, the parser will reserve a buffer to store headers for optimal + /// performance. + /// + /// If server receives more headers than the buffer size, it responds to the client with + /// "431 Request Header Fields Too Large". + /// + /// Note that headers is allocated on the stack by default, which has higher performance. After + /// setting this value, headers will be allocated in heap memory, that is, heap memory + /// allocation will occur for each request, and there will be a performance drop of about 5%. + /// + /// Default is 100. + pub fn max_headers(&mut self, val: usize) -> &mut Self { + self.h1_max_headers = Some(val); + self + } + /// Set a timeout for reading client request headers. If a client does not /// transmit the entire header within this time, the connection is closed. /// - /// Default is None. - pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { - self.h1_header_read_timeout = Some(read_timeout); + /// Requires a [`Timer`] set by [`Builder::timer`] to take effect. Panics if `header_read_timeout` is configured + /// without a [`Timer`]. + /// + /// Pass `None` to disable. + /// + /// Default is 30 seconds. 
+ pub fn header_read_timeout(&mut self, read_timeout: impl Into>) -> &mut Self { + self.h1_header_read_timeout = Dur::Configured(read_timeout.into()); self } @@ -302,6 +355,16 @@ impl Builder { self } + /// Set whether the `date` header should be included in HTTP responses. + /// + /// Note that including the `date` header is recommended by RFC 7231. + /// + /// Default is true. + pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self { + self.date_header = enabled; + self + } + /// Aggregates flushes to better support pipelined responses. /// /// Experimental, may have bugs. @@ -312,30 +375,35 @@ impl Builder { self } - // /// Set the timer used in background tasks. - // pub fn timer(&mut self, timer: M) -> &mut Self - // where - // M: Timer + Send + Sync + 'static, - // { - // self.timer = Time::Timer(Arc::new(timer)); - // self - // } + /// Set the timer used in background tasks. + pub fn timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Send + Sync + 'static, + { + self.timer = Time::Timer(Arc::new(timer)); + self + } /// Bind a connection together with a [`Service`](crate::service::Service). /// /// This returns a Future that must be polled in order for HTTP to be /// driven on the connection. /// + /// # Panics + /// + /// If a timeout option has been configured, but a `timer` has not been + /// provided, calling `serve_connection` will panic. 
+ /// /// # Example /// /// ``` - /// # use hyper::{Body as Incoming, Request, Response}; + /// # use hyper::{body::Incoming, Request, Response}; /// # use hyper::service::Service; /// # use hyper::server::conn::http1::Builder; - /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use hyper::rt::{Read, Write}; /// # async fn run(some_io: I, some_service: S) /// # where - /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + /// # I: Read + Write + Unpin + Send + 'static, /// # S: Service, Response=hyper::Response> + Send + 'static, /// # S::Error: Into>, /// # S::Future: Send, @@ -355,9 +423,10 @@ impl Builder { S::Error: Into>, S::ResBody: 'static, ::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, { let mut conn = proto::Conn::new(io); + conn.set_timer(self.timer.clone()); if !self.h1_keep_alive { conn.disable_keep_alive(); } @@ -370,9 +439,15 @@ impl Builder { if self.h1_preserve_header_case { conn.set_preserve_header_case(); } - if let Some(header_read_timeout) = self.h1_header_read_timeout { - conn.set_http1_header_read_timeout(header_read_timeout); + if let Some(max_headers) = self.h1_max_headers { + conn.set_http1_max_headers(max_headers); } + if let Some(dur) = self + .timer + .check(self.h1_header_read_timeout, "header_read_timeout") + { + conn.set_http1_header_read_timeout(dur); + }; if let Some(writev) = self.h1_writev { if writev { conn.set_write_strategy_queue(); @@ -390,52 +465,50 @@ impl Builder { } } -mod upgrades { - use crate::upgrade::Upgraded; - - use super::*; - - // A future binding a connection with a Service with Upgrade support. - // - // This type is unnameable outside the crate. - #[must_use = "futures do nothing unless polled"] - #[allow(missing_debug_implementations)] - pub struct UpgradeableConnection - where - S: HttpService, - { - pub(super) inner: Option>, - } +/// A future binding a connection with a Service with Upgrade support. 
+#[must_use = "futures do nothing unless polled"] +#[allow(missing_debug_implementations)] +pub struct UpgradeableConnection +where + S: HttpService, +{ + pub(super) inner: Option>, +} - impl UpgradeableConnection - where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - B: Body + 'static, - B::Error: Into>, - { - /// Start a graceful shutdown process for this connection. - /// - /// This `Connection` should continue to be polled until shutdown - /// can finish. - pub fn graceful_shutdown(mut self: Pin<&mut Self>) { - Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown() +impl UpgradeableConnection +where + S: HttpService, + S::Error: Into>, + I: Read + Write + Unpin, + B: Body + 'static, + B::Error: Into>, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + // Connection (`inner`) is `None` if it was upgraded (and `poll` is `Ready`). + // In that case, we don't need to call `graceful_shutdown`. 
+ if let Some(conn) = self.inner.as_mut() { + Pin::new(conn).graceful_shutdown() } } +} - impl Future for UpgradeableConnection - where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + 'static, - B::Error: Into>, - { - type Output = crate::Result<()>; +impl Future for UpgradeableConnection +where + S: HttpService, + S::Error: Into>, + I: Read + Write + Unpin + Send + 'static, + B: Body + 'static, + B::Error: Into>, +{ + type Output = crate::Result<()>; - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if let Some(conn) = self.inner.as_mut() { + match ready!(Pin::new(&mut conn.conn).poll(cx)) { Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())), Ok(proto::Dispatched::Upgrade(pending)) => { let (io, buf, _) = self.inner.take().unwrap().conn.into_inner(); @@ -444,6 +517,9 @@ mod upgrades { } Err(e) => Poll::Ready(Err(e)), } + } else { + // inner is `None`, meaning the connection was upgraded, thus it's `Poll::Ready(Ok(()))` + Poll::Ready(Ok(())) } } } diff --git a/.cargo-vendor/hyper/src/server/conn/http2.rs b/.cargo-vendor/hyper/src/server/conn/http2.rs index 4f7df823ae..43e435bfd9 100644 --- a/.cargo-vendor/hyper/src/server/conn/http2.rs +++ b/.cargo-vendor/hyper/src/server/conn/http2.rs @@ -3,23 +3,28 @@ use std::error::Error as StdError; use std::fmt; use std::future::Future; -use std::marker::Unpin; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; +use crate::rt::{Read, Write}; +use futures_util::ready; use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; -use crate::body::{Body as IncomingBody, HttpBody as Body}; -use crate::common::exec::ConnStreamExec; +use crate::body::{Body, Incoming as IncomingBody}; use crate::proto; +use crate::rt::bounds::Http2ServerConnExec; use 
crate::service::HttpService; +use crate::{common::time::Time, rt::Timer}; pin_project! { - /// A future binding an HTTP/2 connection with a Service. + /// A [`Future`](core::future::Future) representing an HTTP/2 connection, bound to a + /// [`Service`](crate::service::Service), returned from + /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection). /// - /// Polling this future will drive HTTP forward. + /// To drive HTTP on this connection this future **must be polled**, typically with + /// `.await`. If it isn't polled, no progress will be made on this connection. #[must_use = "futures do nothing unless polled"] pub struct Connection where @@ -30,9 +35,13 @@ pin_project! { } /// A configuration builder for HTTP/2 server connections. +/// +/// **Note**: The default values of options are *not considered stable*. They +/// are subject to change at any time. #[derive(Clone, Debug)] pub struct Builder { exec: E, + timer: Time, h2_builder: proto::h2::server::Config, } @@ -51,10 +60,10 @@ impl Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, - E: ConnStreamExec, + E: Http2ServerConnExec, { /// Start a graceful shutdown process for this connection. /// @@ -75,10 +84,10 @@ impl Future for Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, + I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, - E: ConnStreamExec, + E: Http2ServerConnExec, { type Output = crate::Result<()>; @@ -99,14 +108,44 @@ where impl Builder { /// Create a new connection builder. /// - /// This starts with the default options, and an executor. + /// This starts with the default options, and an executor which is a type + /// that implements [`Http2ServerConnExec`] trait. 
+ /// + /// [`Http2ServerConnExec`]: crate::rt::bounds::Http2ServerConnExec pub fn new(exec: E) -> Self { Self { - exec: exec, + exec, + timer: Time::Empty, h2_builder: Default::default(), } } + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.4.0, it is 20. + /// + /// See for more information. + pub fn max_pending_accept_reset_streams(&mut self, max: impl Into>) -> &mut Self { + self.h2_builder.max_pending_accept_reset_streams = max.into(); + self + } + + /// Configures the maximum number of local reset streams allowed before a GOAWAY will be sent. + /// + /// If not set, hyper will use a default, currently of 1024. + /// + /// If `None` is supplied, hyper will not apply any limit. + /// This is not advised, as it can potentially expose servers to DOS vulnerabilities. + /// + /// See for more information. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn max_local_error_reset_streams(mut self, max: impl Into>) -> Self { + self.h2_builder.max_local_error_reset_streams = max.into(); + self + } + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// @@ -114,7 +153,7 @@ impl Builder { /// /// If not set, hyper will use a default. /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; @@ -167,9 +206,12 @@ impl Builder { /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 /// connections. /// - /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. + /// Default is 200, but not part of the stability of hyper. 
It could change + /// in a future release. You are encouraged to set your own limit. + /// + /// Passing `None` will remove any limit. /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_MAX_CONCURRENT_STREAMS pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { self.h2_builder.max_concurrent_streams = max.into(); self @@ -181,9 +223,6 @@ impl Builder { /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. - /// - /// # Cargo Feature - /// pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { self.h2_builder.keep_alive_interval = interval.into(); self @@ -195,9 +234,6 @@ impl Builder { /// be closed. Does nothing if `keep_alive_interval` is disabled. /// /// Default is 20 seconds. - /// - /// # Cargo Feature - /// pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.h2_builder.keep_alive_timeout = timeout; self @@ -211,7 +247,7 @@ impl Builder { /// /// The value must be no larger than `u32::MAX`. pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); + assert!(max <= u32::MAX as usize); self.h2_builder.max_send_buffer_size = max; self } @@ -226,20 +262,30 @@ impl Builder { /// Sets the max size of received header frames. /// - /// Default is currently ~16MB, but may change. + /// Default is currently 16KB, but can change. pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { self.h2_builder.max_header_list_size = max; self } - // /// Set the timer used in background tasks. - // pub fn timer(&mut self, timer: M) -> &mut Self - // where - // M: Timer + Send + Sync + 'static, - // { - // self.timer = Time::Timer(Arc::new(timer)); - // self - // } + /// Set the timer used in background tasks. 
+ pub fn timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Send + Sync + 'static, + { + self.timer = Time::Timer(Arc::new(timer)); + self + } + + /// Set whether the `date` header should be included in HTTP responses. + /// + /// Note that including the `date` header is recommended by RFC 7231. + /// + /// Default is true. + pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self { + self.h2_builder.date_header = enabled; + self + } /// Bind a connection together with a [`Service`](crate::service::Service). /// @@ -251,10 +297,16 @@ impl Builder { S::Error: Into>, Bd: Body + 'static, Bd::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - E: ConnStreamExec, + I: Read + Write + Unpin, + E: Http2ServerConnExec, { - let proto = proto::h2::Server::new(io, service, &self.h2_builder, self.exec.clone()); + let proto = proto::h2::Server::new( + io, + service, + &self.h2_builder, + self.exec.clone(), + self.timer.clone(), + ); Connection { conn: proto } } } diff --git a/.cargo-vendor/hyper/src/server/conn/mod.rs b/.cargo-vendor/hyper/src/server/conn/mod.rs new file mode 100644 index 0000000000..54b309e88e --- /dev/null +++ b/.cargo-vendor/hyper/src/server/conn/mod.rs @@ -0,0 +1,20 @@ +//! Server connection API. +//! +//! The types in this module are to provide a lower-level API based around a +//! single connection. Accepting a connection and binding it with a service +//! are not handled at this level. This module provides the building blocks to +//! customize those things externally. +//! +//! This module is split by HTTP version, providing a connection builder for +//! each. They work similarly, but they each have specific options. +//! +//! If your server needs to support both versions, an auto-connection builder is +//! provided in the [`hyper-util`](https://github.com/hyperium/hyper-util/tree/master) +//! crate. This builder wraps the HTTP/1 and HTTP/2 connection builders from this +//! module, allowing you to set configuration for both. 
The builder will then check +//! the version of the incoming connection and serve it accordingly. + +#[cfg(feature = "http1")] +pub mod http1; +#[cfg(feature = "http2")] +pub mod http2; diff --git a/.cargo-vendor/hyper/src/server/mod.rs b/.cargo-vendor/hyper/src/server/mod.rs index 65eb7063e5..980553e5e9 100644 --- a/.cargo-vendor/hyper/src/server/mod.rs +++ b/.cargo-vendor/hyper/src/server/mod.rs @@ -1,173 +1,9 @@ //! HTTP Server //! -//! A `Server` is created to listen on a port, parse HTTP requests, and hand -//! them off to a `Service`. +//! A "server" is usually created by listening on a port for new connections, +//! parse HTTP requests, and hand them off to a `Service`. //! -//! There are two levels of APIs provide for constructing HTTP servers: -//! -//! - The higher-level [`Server`](Server) type. -//! - The lower-level [`conn`](conn) module. -//! -//! # Server -//! -//! The [`Server`](Server) is main way to start listening for HTTP requests. -//! It wraps a listener with a [`MakeService`](crate::service), and then should -//! be executed to start serving requests. -//! -//! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default. -//! -//! ## Examples -//! -//! ```no_run -//! use std::convert::Infallible; -//! use std::net::SocketAddr; -//! use hyper::{Body, Request, Response, Server}; -//! use hyper::service::{make_service_fn, service_fn}; -//! -//! async fn handle(_req: Request) -> Result, Infallible> { -//! Ok(Response::new(Body::from("Hello World"))) -//! } -//! -//! # #[cfg(feature = "runtime")] -//! #[tokio::main] -//! async fn main() { -//! // Construct our SocketAddr to listen on... -//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); -//! -//! // And a MakeService to handle each connection... -//! let make_service = make_service_fn(|_conn| async { -//! Ok::<_, Infallible>(service_fn(handle)) -//! }); -//! -//! // Then bind and serve... -//! let server = Server::bind(&addr).serve(make_service); -//! -//! // And run forever... 
-//! if let Err(e) = server.await { -//! eprintln!("server error: {}", e); -//! } -//! } -//! # #[cfg(not(feature = "runtime"))] -//! # fn main() {} -//! ``` -//! -//! If you don't need the connection and your service implements `Clone` you can use -//! [`tower::make::Shared`] instead of `make_service_fn` which is a bit simpler: -//! -//! ```no_run -//! # use std::convert::Infallible; -//! # use std::net::SocketAddr; -//! # use hyper::{Body, Request, Response, Server}; -//! # use hyper::service::{make_service_fn, service_fn}; -//! # use tower::make::Shared; -//! # async fn handle(_req: Request) -> Result, Infallible> { -//! # Ok(Response::new(Body::from("Hello World"))) -//! # } -//! # #[cfg(feature = "runtime")] -//! #[tokio::main] -//! async fn main() { -//! // Construct our SocketAddr to listen on... -//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); -//! -//! // Shared is a MakeService that produces services by cloning an inner service... -//! let make_service = Shared::new(service_fn(handle)); -//! -//! // Then bind and serve... -//! let server = Server::bind(&addr).serve(make_service); -//! -//! // And run forever... -//! if let Err(e) = server.await { -//! eprintln!("server error: {}", e); -//! } -//! } -//! # #[cfg(not(feature = "runtime"))] -//! # fn main() {} -//! ``` -//! -//! Passing data to your request handler can be done like so: -//! -//! ```no_run -//! use std::convert::Infallible; -//! use std::net::SocketAddr; -//! use hyper::{Body, Request, Response, Server}; -//! use hyper::service::{make_service_fn, service_fn}; -//! # #[cfg(feature = "runtime")] -//! use hyper::server::conn::AddrStream; -//! -//! #[derive(Clone)] -//! struct AppContext { -//! // Whatever data your application needs can go here -//! } -//! -//! async fn handle( -//! context: AppContext, -//! addr: SocketAddr, -//! req: Request -//! ) -> Result, Infallible> { -//! Ok(Response::new(Body::from("Hello World"))) -//! } -//! -//! # #[cfg(feature = "runtime")] -//! 
#[tokio::main] -//! async fn main() { -//! let context = AppContext { -//! // ... -//! }; -//! -//! // A `MakeService` that produces a `Service` to handle each connection. -//! let make_service = make_service_fn(move |conn: &AddrStream| { -//! // We have to clone the context to share it with each invocation of -//! // `make_service`. If your data doesn't implement `Clone` consider using -//! // an `std::sync::Arc`. -//! let context = context.clone(); -//! -//! // You can grab the address of the incoming connection like so. -//! let addr = conn.remote_addr(); -//! -//! // Create a `Service` for responding to the request. -//! let service = service_fn(move |req| { -//! handle(context.clone(), addr, req) -//! }); -//! -//! // Return the service to hyper. -//! async move { Ok::<_, Infallible>(service) } -//! }); -//! -//! // Run the server like above... -//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); -//! -//! let server = Server::bind(&addr).serve(make_service); -//! -//! if let Err(e) = server.await { -//! eprintln!("server error: {}", e); -//! } -//! } -//! # #[cfg(not(feature = "runtime"))] -//! # fn main() {} -//! ``` -//! -//! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html - -pub mod accept; +//! How exactly you choose to listen for connections is not something hyper +//! concerns itself with. After you have a connection, you can handle HTTP over +//! it with the types in the [`conn`] module. pub mod conn; -#[cfg(feature = "tcp")] -mod tcp; - -pub use self::server::Server; - -cfg_feature! { - #![any(feature = "http1", feature = "http2")] - - #[cfg_attr(feature = "deprecated", allow(deprecated))] - pub(crate) mod server; - pub use self::server::Builder; - - mod shutdown; -} - -cfg_feature! 
{ - #![not(any(feature = "http1", feature = "http2"))] - - mod server_stub; - use server_stub as server; -} diff --git a/.cargo-vendor/hyper/src/service/http.rs b/.cargo-vendor/hyper/src/service/http.rs index d0586d8bd2..dd1743168c 100644 --- a/.cargo-vendor/hyper/src/service/http.rs +++ b/.cargo-vendor/hyper/src/service/http.rs @@ -1,14 +1,14 @@ use std::error::Error as StdError; use std::future::Future; -use std::task::{Context, Poll}; -use crate::body::HttpBody; +use crate::body::Body; +use crate::service::service::Service; use crate::{Request, Response}; /// An asynchronous function from `Request` to `Response`. pub trait HttpService: sealed::Sealed { - /// The `HttpBody` body of the `http::Response`. - type ResBody: HttpBody; + /// The `Body` body of the `http::Response`. + type ResBody: Body; /// The error type that can occur within this `Service`. /// @@ -20,17 +20,14 @@ pub trait HttpService: sealed::Sealed { /// The `Future` returned by this `Service`. type Future: Future, Self::Error>>; - #[doc(hidden)] - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; - #[doc(hidden)] fn call(&mut self, req: Request) -> Self::Future; } impl HttpService for T where - T: tower_service::Service, Response = Response>, - B2: HttpBody, + T: Service, Response = Response>, + B2: Body, T::Error: Into>, { type ResBody = B2; @@ -38,19 +35,15 @@ where type Error = T::Error; type Future = T::Future; - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - tower_service::Service::poll_ready(self, cx) - } - fn call(&mut self, req: Request) -> Self::Future { - tower_service::Service::call(self, req) + Service::call(self, req) } } impl sealed::Sealed for T where - T: tower_service::Service, Response = Response>, - B2: HttpBody, + T: Service, Response = Response>, + B2: Body, { } diff --git a/.cargo-vendor/hyper/src/service/mod.rs b/.cargo-vendor/hyper/src/service/mod.rs index 22f850ca47..28ffaddbab 100644 --- a/.cargo-vendor/hyper/src/service/mod.rs +++ 
b/.cargo-vendor/hyper/src/service/mod.rs @@ -1,6 +1,6 @@ //! Asynchronous Services //! -//! A [`Service`](Service) is a trait representing an asynchronous +//! A [`Service`] is a trait representing an asynchronous //! function of a request to a response. It's similar to //! `async fn(Request) -> Result`. //! @@ -10,10 +10,6 @@ //! //! - `HttpService`: This is blanketly implemented for all types that //! implement `Service, Response = http::Response>`. -//! - `MakeService`: When a `Service` returns a new `Service` as its "response", -//! we consider it a `MakeService`. Again, blanketly implemented in those cases. -//! - `MakeConnection`: A `Service` that returns a "connection", a type that -//! implements `AsyncRead` and `AsyncWrite`. //! //! # HttpService //! @@ -21,35 +17,14 @@ //! to a single connection. It defines how to respond to **all** requests that //! connection will receive. //! -//! The helper [`service_fn`](service_fn) should be sufficient for most cases, but +//! The helper [`service_fn`] should be sufficient for most cases, but //! if you need to implement `Service` for a type manually, you can follow the example //! in `service_struct_impl.rs`. -//! -//! # MakeService -//! -//! Since a `Service` is bound to a single connection, a [`Server`](crate::Server) -//! needs a way to make them as it accepts connections. This is what a -//! `MakeService` does. -//! -//! Resources that need to be shared by all `Service`s can be put into a -//! `MakeService`, and then passed to individual `Service`s when `call` -//! is called. 
- -pub use tower_service::Service; mod http; -mod make; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] -mod oneshot; +mod service; mod util; -pub(super) use self::http::HttpService; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] -pub(super) use self::make::MakeConnection; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))] -pub(super) use self::make::MakeServiceRef; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] -pub(super) use self::oneshot::{oneshot, Oneshot}; - -pub use self::make::make_service_fn; +pub use self::http::HttpService; +pub use self::service::Service; pub use self::util::service_fn; diff --git a/.cargo-vendor/hyper/src/service/service.rs b/.cargo-vendor/hyper/src/service/service.rs new file mode 100644 index 0000000000..1b9aea5162 --- /dev/null +++ b/.cargo-vendor/hyper/src/service/service.rs @@ -0,0 +1,95 @@ +use std::future::Future; + +/// An asynchronous function from a `Request` to a `Response`. +/// +/// The `Service` trait is a simplified interface making it easy to write +/// network applications in a modular and reusable way, decoupled from the +/// underlying protocol. +/// +/// # Functional +/// +/// A `Service` is a function of a `Request`. It immediately returns a +/// `Future` representing the eventual completion of processing the +/// request. The actual request processing may happen at any time in the +/// future, on any thread or executor. The processing may depend on calling +/// other services. At some point in the future, the processing will complete, +/// and the `Future` will resolve to a response or error. +/// +/// At a high level, the `Service::call` function represents an RPC request. The +/// `Service` value can be a server or a client. +pub trait Service { + /// Responses given by the service. + type Response; + + /// Errors produced by the service. + type Error; + + /// The future response value. 
+ type Future: Future>; + + /// Process the request and return the response asynchronously. + /// `call` takes `&self` instead of `mut &self` because: + /// - It prepares the way for async fn, + /// since then the future only borrows `&self`, and thus a Service can concurrently handle + /// multiple outstanding requests at once. + /// - It's clearer that Services can likely be cloned + /// - To share state across clones, you generally need `Arc>` + /// That means you're not really using the `&mut self` and could do with a `&self`. + /// The discussion on this is here: + fn call(&self, req: Request) -> Self::Future; +} + +impl + ?Sized> Service for &'_ S { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn call(&self, req: Request) -> Self::Future { + (**self).call(req) + } +} + +impl + ?Sized> Service for &'_ mut S { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn call(&self, req: Request) -> Self::Future { + (**self).call(req) + } +} + +impl + ?Sized> Service for Box { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn call(&self, req: Request) -> Self::Future { + (**self).call(req) + } +} + +impl + ?Sized> Service for std::rc::Rc { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn call(&self, req: Request) -> Self::Future { + (**self).call(req) + } +} + +impl + ?Sized> Service for std::sync::Arc { + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn call(&self, req: Request) -> Self::Future { + (**self).call(req) + } +} diff --git a/.cargo-vendor/hyper/src/service/util.rs b/.cargo-vendor/hyper/src/service/util.rs index 59760a6858..3e017a782c 100644 --- a/.cargo-vendor/hyper/src/service/util.rs +++ b/.cargo-vendor/hyper/src/service/util.rs @@ -2,9 +2,9 @@ use std::error::Error as StdError; use std::fmt; use 
std::future::Future; use std::marker::PhantomData; -use std::task::{Context, Poll}; -use crate::body::HttpBody; +use crate::body::Body; +use crate::service::service::Service; use crate::{Request, Response}; /// Create a `Service` from a function. @@ -12,12 +12,14 @@ use crate::{Request, Response}; /// # Example /// /// ``` -/// use hyper::{Body, Request, Response, Version}; +/// use bytes::Bytes; +/// use hyper::{body, Request, Response, Version}; +/// use http_body_util::Full; /// use hyper::service::service_fn; /// -/// let service = service_fn(|req: Request| async move { +/// let service = service_fn(|req: Request| async move { /// if req.version() == Version::HTTP_11 { -/// Ok(Response::new(Body::from("Hello World"))) +/// Ok(Response::new(Full::::from("Hello World"))) /// } else { /// // Note: it's usually better to return a Response /// // with an appropriate StatusCode instead of an Err. @@ -27,7 +29,7 @@ use crate::{Request, Response}; /// ``` pub fn service_fn(f: F) -> ServiceFn where - F: FnMut(Request) -> S, + F: Fn(Request) -> S, S: Future, { ServiceFn { @@ -42,24 +44,19 @@ pub struct ServiceFn { _req: PhantomData, } -impl tower_service::Service> - for ServiceFn +impl Service> for ServiceFn where - F: FnMut(Request) -> Ret, - ReqBody: HttpBody, + F: Fn(Request) -> Ret, + ReqBody: Body, Ret: Future, E>>, E: Into>, - ResBody: HttpBody, + ResBody: Body, { type Response = crate::Response; type Error = E; type Future = Ret; - fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request) -> Self::Future { + fn call(&self, req: Request) -> Self::Future { (self.f)(req) } } diff --git a/.cargo-vendor/hyper/src/trace.rs b/.cargo-vendor/hyper/src/trace.rs new file mode 100644 index 0000000000..88f9a243a0 --- /dev/null +++ b/.cargo-vendor/hyper/src/trace.rs @@ -0,0 +1,128 @@ +// For completeness, wrappers around all of tracing's public logging and span macros are provided, +// even if they are not used at 
the present time. +#![allow(unused_macros)] + +#[cfg(all(not(hyper_unstable_tracing), feature = "tracing"))] +compile_error!( + "\ + The `tracing` feature is unstable, and requires the \ + `RUSTFLAGS='--cfg hyper_unstable_tracing'` environment variable to be set.\ +" +); + +macro_rules! debug { + ($($arg:tt)+) => { + #[cfg(feature = "tracing")] + { + tracing::debug!($($arg)+); + } + } +} + +macro_rules! debug_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::debug_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! error { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::error!($($arg)+); + } + } +} + +macro_rules! error_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::error_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! info { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::info!($($arg)+); + } + } +} + +macro_rules! info_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::info_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! trace { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::trace!($($arg)+); + } + } +} + +macro_rules! trace_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::trace_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! warn { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::warn!($($arg)+); + } + } +} + +macro_rules! 
warn_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::warn_span!($($arg)+); + _span.entered() + } + } + } +} diff --git a/.cargo-vendor/hyper/src/upgrade.rs b/.cargo-vendor/hyper/src/upgrade.rs index a46a8d224d..03d7e98ddc 100644 --- a/.cargo-vendor/hyper/src/upgrade.rs +++ b/.cargo-vendor/hyper/src/upgrade.rs @@ -44,15 +44,13 @@ use std::error::Error as StdError; use std::fmt; use std::future::Future; use std::io; -use std::marker::Unpin; use std::pin::Pin; +use std::sync::{Arc, Mutex}; use std::task::{Context, Poll}; +use crate::rt::{Read, ReadBufCursor, Write}; use bytes::Bytes; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::sync::oneshot; -#[cfg(any(feature = "http1", feature = "http2"))] -use tracing::trace; use crate::common::io::Rewind; @@ -71,15 +69,17 @@ pub struct Upgraded { /// A future for a possible HTTP upgrade. /// /// If no upgrade was available, or it doesn't succeed, yields an `Error`. +#[derive(Clone)] pub struct OnUpgrade { - rx: Option>>, + rx: Option>>>>, } -/// The deconstructed parts of an [`Upgraded`](Upgraded) type. +/// The deconstructed parts of an [`Upgraded`] type. /// /// Includes the original IO type, and a read buffer of bytes that the /// HTTP state machine may have already read before completing an upgrade. #[derive(Debug)] +#[non_exhaustive] pub struct Parts { /// The original IO object used before the upgrade. pub io: T, @@ -92,7 +92,6 @@ pub struct Parts { /// You will want to check for any existing bytes if you plan to continue /// communicating on the IO object. pub read_buf: Bytes, - _inner: (), } /// Gets a pending HTTP upgrade from this message. 
@@ -107,24 +106,38 @@ pub fn on(msg: T) -> OnUpgrade { msg.on_upgrade() } -#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2"), +))] pub(super) struct Pending { tx: oneshot::Sender>, } -#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2"), +))] pub(super) fn pending() -> (Pending, OnUpgrade) { let (tx, rx) = oneshot::channel(); - (Pending { tx }, OnUpgrade { rx: Some(rx) }) + ( + Pending { tx }, + OnUpgrade { + rx: Some(Arc::new(Mutex::new(rx))), + }, + ) } // ===== impl Upgraded ===== impl Upgraded { - #[cfg(any(feature = "http1", feature = "http2", test))] + #[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") + ))] pub(super) fn new(io: T, read_buf: Bytes) -> Self where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin + Send + 'static, { Upgraded { io: Rewind::new_buffered(Box::new(io), read_buf), @@ -135,13 +148,12 @@ impl Upgraded { /// /// On success, returns the downcasted parts. On error, returns the /// `Upgraded` back. 
- pub fn downcast(self) -> Result, Self> { + pub fn downcast(self) -> Result, Self> { let (io, buf) = self.io.into_inner(); match io.__hyper_downcast() { Ok(t) => Ok(Parts { io: *t, read_buf: buf, - _inner: (), }), Err(io) => Err(Upgraded { io: Rewind::new_buffered(io, buf), @@ -150,17 +162,17 @@ impl Upgraded { } } -impl AsyncRead for Upgraded { +impl Read for Upgraded { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, + buf: ReadBufCursor<'_>, ) -> Poll> { Pin::new(&mut self.io).poll_read(cx, buf) } } -impl AsyncWrite for Upgraded { +impl Write for Upgraded { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -203,7 +215,7 @@ impl OnUpgrade { OnUpgrade { rx: None } } - #[cfg(feature = "http1")] + #[cfg(all(any(feature = "client", feature = "server"), feature = "http1"))] pub(super) fn is_none(&self) -> bool { self.rx.is_none() } @@ -212,13 +224,17 @@ impl OnUpgrade { impl Future for OnUpgrade { type Output = Result; - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.rx { - Some(ref mut rx) => Pin::new(rx).poll(cx).map(|res| match res { - Ok(Ok(upgraded)) => Ok(upgraded), - Ok(Err(err)) => Err(err), - Err(_oneshot_canceled) => Err(crate::Error::new_canceled().with(UpgradeExpected)), - }), + Some(ref rx) => Pin::new(&mut *rx.lock().unwrap()) + .poll(cx) + .map(|res| match res { + Ok(Ok(upgraded)) => Ok(upgraded), + Ok(Err(err)) => Err(err), + Err(_oneshot_canceled) => { + Err(crate::Error::new_canceled().with(UpgradeExpected)) + } + }), None => Poll::Ready(Err(crate::Error::new_user_no_upgrade())), } } @@ -232,7 +248,10 @@ impl fmt::Debug for OnUpgrade { // ===== impl Pending ===== -#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2") +))] impl Pending { pub(super) fn fulfill(self, upgraded: Upgraded) { trace!("pending 
upgrade fulfill"); @@ -243,6 +262,7 @@ impl Pending { /// Don't fulfill the pending Upgrade, but instead signal that /// upgrades are handled manually. pub(super) fn manual(self) { + #[cfg(any(feature = "http1", feature = "http2"))] trace!("pending upgrade handled manually"); let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade())); } @@ -267,13 +287,13 @@ impl StdError for UpgradeExpected {} // ===== impl Io ===== -pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { +pub(super) trait Io: Read + Write + Unpin + 'static { fn __hyper_type_id(&self) -> TypeId { TypeId::of::() } } -impl Io for T {} +impl Io for T {} impl dyn Io + Send { fn __hyper_is(&self) -> bool { @@ -334,6 +354,10 @@ mod sealed { } } +#[cfg(all( + any(feature = "client", feature = "server"), + any(feature = "http1", feature = "http2"), +))] #[cfg(test)] mod tests { use super::*; @@ -342,7 +366,9 @@ mod tests { fn upgraded_downcast() { let upgraded = Upgraded::new(Mock, Bytes::new()); - let upgraded = upgraded.downcast::>>().unwrap_err(); + let upgraded = upgraded + .downcast::>>>() + .unwrap_err(); upgraded.downcast::().unwrap(); } @@ -350,17 +376,17 @@ mod tests { // TODO: replace with tokio_test::io when it can test write_buf struct Mock; - impl AsyncRead for Mock { + impl Read for Mock { fn poll_read( self: Pin<&mut Self>, _cx: &mut Context<'_>, - _buf: &mut ReadBuf<'_>, + _buf: ReadBufCursor<'_>, ) -> Poll> { unreachable!("Mock::poll_read") } } - impl AsyncWrite for Mock { + impl Write for Mock { fn poll_write( self: Pin<&mut Self>, _: &mut Context<'_>, diff --git a/.cargo-vendor/serde_path_to_error/.cargo-checksum.json b/.cargo-vendor/serde_path_to_error/.cargo-checksum.json new file mode 100644 index 0000000000..fc0453c181 --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"a5fa48364e3a534c20f225dc4929005a7acb84e400e3a9ec40e0f3d2a924aea6","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"a85a795381a723c0cc19c2a75a62b309b0a01a12618da605d6166a7a5e207201","src/de.rs":"8cd9680fa3150d0b39bf0dfdf6b4279eb6dc955e077b12cbe7d812e3f6f95805","src/lib.rs":"df6c3da802eac6fbaf7003196c573626e32a11faf3699f800997b28ca5622551","src/path.rs":"d4e7f9f7351c894d470655e10865ced9f25bdf1ba06052d5bb005a7cab8dcd50","src/ser.rs":"3666ccfcfa93cc0d7eb8354542e08b2fd6a441e623cd11f15e6be7c0e533338c","src/wrap.rs":"9d88271729c6dc90d16328454f0432e18ce13df6c8dc4749785c5d3c7d260c09","tests/deserialize.rs":"b9f327bc0baf5e3626045e9dafe550c50e58b043661b29cc0fc1c609abf5362e","tests/serialize.rs":"dc304dcaa9b8ae89b624c3d9ecde5a195c8dac062bb00a1f791beb11f77c09b7"},"package":"af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"} \ No newline at end of file diff --git a/.cargo-vendor/serde_path_to_error/Cargo.toml b/.cargo-vendor/serde_path_to_error/Cargo.toml new file mode 100644 index 0000000000..452408a34e --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/Cargo.toml @@ -0,0 +1,46 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.56" +name = "serde_path_to_error" +version = "0.1.16" +authors = ["David Tolnay "] +description = "Path to the element that failed to deserialize" +documentation = "https://docs.rs/serde_path_to_error" +readme = "README.md" +keywords = [ + "serde", + "serialization", +] +categories = ["encoding"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/path-to-error" + +[package.metadata.docs.rs] +rustdoc-args = ["--generate-link-to-definition"] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +doc-scrape-examples = false + +[dependencies.itoa] +version = "1.0" + +[dependencies.serde] +version = "1.0.194" + +[dev-dependencies.serde_derive] +version = "1.0.194" + +[dev-dependencies.serde_json] +version = "1.0.100" diff --git a/.cargo-vendor/serde_path_to_error/LICENSE-APACHE b/.cargo-vendor/serde_path_to_error/LICENSE-APACHE new file mode 100644 index 0000000000..1b5ec8b78e --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/.cargo-vendor/serde_path_to_error/LICENSE-MIT b/.cargo-vendor/serde_path_to_error/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo-vendor/serde_path_to_error/README.md b/.cargo-vendor/serde_path_to_error/README.md new file mode 100644 index 0000000000..c90ba4da0a --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/README.md @@ -0,0 +1,72 @@ +# Serde path to error + +[github](https://github.com/dtolnay/path-to-error) +[crates.io](https://crates.io/crates/serde_path_to_error) +[docs.rs](https://docs.rs/serde_path_to_error) +[build status](https://github.com/dtolnay/path-to-error/actions?query=branch%3Amaster) + +Find out the path at which a deserialization error occurred. 
This crate provides +a wrapper that works with any existing Serde `Deserializer` and exposes the +chain of field names leading to the error. + +```toml +[dependencies] +serde = "1.0" +serde_path_to_error = "0.1" +``` + +```rust +use serde::Deserialize; +use std::collections::BTreeMap as Map; + +#[derive(Deserialize)] +struct Package { + name: String, + dependencies: Map, +} + +#[derive(Deserialize)] +struct Dependency { + version: String, +} + +fn main() { + let j = r#"{ + "name": "demo", + "dependencies": { + "serde": { + "version": 1 + } + } + }"#; + + // Some Deserializer. + let jd = &mut serde_json::Deserializer::from_str(j); + + let result: Result = serde_path_to_error::deserialize(jd); + match result { + Ok(_) => panic!("expected a type error"), + Err(err) => { + let path = err.path().to_string(); + assert_eq!(path, "dependencies.serde.version"); + } + } +} +``` + +
+ +#### License + + +Licensed under either of Apache License, Version +2.0 or MIT license at your option. + + +
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/.cargo-vendor/serde_path_to_error/src/de.rs b/.cargo-vendor/serde_path_to_error/src/de.rs new file mode 100644 index 0000000000..536c7bff3f --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/src/de.rs @@ -0,0 +1,1536 @@ +use crate::wrap::{Wrap, WrapVariant}; +use crate::{Chain, Error, Track}; +use serde::de::{self, Deserialize, DeserializeSeed, Visitor}; +use std::fmt; + +/// Entry point. See [crate documentation][crate] for an example. +pub fn deserialize<'de, D, T>(deserializer: D) -> Result> +where + D: de::Deserializer<'de>, + T: Deserialize<'de>, +{ + let mut track = Track::new(); + match T::deserialize(Deserializer::new(deserializer, &mut track)) { + Ok(t) => Ok(t), + Err(err) => Err(Error { + path: track.path(), + original: err, + }), + } +} + +/// Deserializer adapter that records path to deserialization errors. +/// +/// # Example +/// +/// ``` +/// # use serde_derive::Deserialize; +/// # +/// use serde::Deserialize; +/// use std::collections::BTreeMap as Map; +/// +/// #[derive(Deserialize)] +/// struct Package { +/// name: String, +/// dependencies: Map, +/// } +/// +/// #[derive(Deserialize)] +/// struct Dependency { +/// version: String, +/// } +/// +/// fn main() { +/// let j = r#"{ +/// "name": "demo", +/// "dependencies": { +/// "serde": { +/// "version": 1 +/// } +/// } +/// }"#; +/// +/// // Some Deserializer. 
+/// let jd = &mut serde_json::Deserializer::from_str(j); +/// +/// let mut track = serde_path_to_error::Track::new(); +/// let pd = serde_path_to_error::Deserializer::new(jd, &mut track); +/// +/// match Package::deserialize(pd) { +/// Ok(_) => panic!("expected a type error"), +/// Err(_) => { +/// let path = track.path().to_string(); +/// assert_eq!(path, "dependencies.serde.version"); +/// } +/// } +/// } +/// ``` +pub struct Deserializer<'a, 'b, D> { + de: D, + chain: Chain<'a>, + track: &'b Track, +} + +impl<'a, 'b, D> Deserializer<'a, 'b, D> { + #[allow(clippy::needless_pass_by_ref_mut)] + pub fn new(de: D, track: &'b mut Track) -> Self { + Deserializer { + de, + chain: Chain::Root, + track, + } + } +} + +// Plain old forwarding impl. +impl<'a, 'b, 'de, D> de::Deserializer<'de> for Deserializer<'a, 'b, D> +where + D: de::Deserializer<'de>, +{ + type Error = D::Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_any(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_bool(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_bool(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_u8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_u8(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_u16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_u16(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_u32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = 
self.track; + self.de + .deserialize_u32(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_u64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_u64(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_u128(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_u128(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_i8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_i8(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_i16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_i16(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_i32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_i32(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_i64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_i64(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_i128(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_i128(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_f32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de 
+ .deserialize_f32(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_f64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_f64(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_char(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_char(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_str(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_string(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_string(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_bytes(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_bytes(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_byte_buf(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_byte_buf(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_option(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + 
self.de + .deserialize_unit(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_unit_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_unit_struct(name, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_newtype_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_newtype_struct(name, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_seq(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_tuple(len, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_tuple_struct( + self, + name: &'static str, + len: usize, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_tuple_struct(name, len, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_map(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = 
self.track; + self.de + .deserialize_struct(name, fields, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_enum( + self, + name: &'static str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_enum(name, variants, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_ignored_any(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn deserialize_identifier(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.de + .deserialize_identifier(Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn is_human_readable(&self) -> bool { + self.de.is_human_readable() + } +} + +// Forwarding impl to preserve context. 
+impl<'a, 'b, 'de, X> Visitor<'de> for Wrap<'a, 'b, X> +where + X: Visitor<'de>, +{ + type Value = X::Value; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.delegate.expecting(formatter) + } + + fn visit_bool(self, v: bool) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_bool(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_i8(self, v: i8) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_i8(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_i16(self, v: i16) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_i16(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_i32(self, v: i32) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_i32(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_i64(self, v: i64) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_i64(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_i128(self, v: i128) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_i128(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_u8(self, v: u8) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_u8(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_u16(self, v: u16) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_u16(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_u32(self, v: u32) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_u32(v) + 
.map_err(|err| track.trigger(chain, err)) + } + + fn visit_u64(self, v: u64) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_u64(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_u128(self, v: u128) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_u128(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_f32(self, v: f32) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_f32(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_f64(self, v: f64) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_f64(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_char(self, v: char) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_char(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_str(self, v: &str) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_str(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_borrowed_str(self, v: &'de str) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_borrowed_str(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_string(self, v: String) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_string(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_unit(self) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_unit() + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_none(self) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = 
self.track; + self.delegate + .visit_none() + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_some(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_some(Deserializer { + de: deserializer, + chain: Chain::Some { parent: chain }, + track, + }) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_newtype_struct(Deserializer { + de: deserializer, + chain: Chain::NewtypeStruct { parent: chain }, + track, + }) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_seq(self, visitor: V) -> Result + where + V: de::SeqAccess<'de>, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_seq(SeqAccess::new(visitor, chain, track)) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_map(self, visitor: V) -> Result + where + V: de::MapAccess<'de>, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_map(MapAccess::new(visitor, chain, track)) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_enum(self, visitor: V) -> Result + where + V: de::EnumAccess<'de>, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_enum(Wrap::new(visitor, chain, track)) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_bytes(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_borrowed_bytes(self, v: &'de [u8]) -> Result + where + E: de::Error, + { + let chain = self.chain; + let track = self.track; + self.delegate + .visit_borrowed_bytes(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: de::Error, + { 
+ let chain = self.chain; + let track = self.track; + self.delegate + .visit_byte_buf(v) + .map_err(|err| track.trigger(chain, err)) + } +} + +// Forwarding impl to preserve context. +impl<'a, 'b, 'de, X> de::EnumAccess<'de> for Wrap<'a, 'b, X> +where + X: de::EnumAccess<'de> + 'a, +{ + type Error = X::Error; + type Variant = WrapVariant<'a, 'b, X::Variant>; + + fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), X::Error> + where + V: DeserializeSeed<'de>, + { + let chain = self.chain; + let track = self.track; + let mut variant = None; + self.delegate + .variant_seed(CaptureKey::new(seed, &mut variant)) + .map_err(|err| track.trigger(chain, err)) + .map(move |(v, vis)| { + let chain = match variant { + Some(variant) => Chain::Enum { + parent: chain, + variant, + }, + None => Chain::NonStringKey { parent: chain }, + }; + (v, WrapVariant::new(vis, chain, track)) + }) + } +} + +// Forwarding impl to preserve context. +impl<'a, 'b, 'de, X> de::VariantAccess<'de> for WrapVariant<'a, 'b, X> +where + X: de::VariantAccess<'de>, +{ + type Error = X::Error; + + fn unit_variant(self) -> Result<(), X::Error> { + let chain = self.chain; + let track = self.track; + self.delegate + .unit_variant() + .map_err(|err| track.trigger(&chain, err)) + } + + fn newtype_variant_seed(self, seed: T) -> Result + where + T: DeserializeSeed<'de>, + { + let chain = self.chain; + let track = self.track; + let nested = Chain::NewtypeVariant { parent: &chain }; + self.delegate + .newtype_variant_seed(TrackedSeed::new(seed, nested, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn tuple_variant(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + let chain = self.chain; + let track = self.track; + self.delegate + .tuple_variant(len, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } + + fn struct_variant( + self, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let 
chain = self.chain; + let track = self.track; + self.delegate + .struct_variant(fields, Wrap::new(visitor, &chain, track)) + .map_err(|err| track.trigger(&chain, err)) + } +} + +// Seed that saves the string into the given optional during `visit_str` and +// `visit_string`. +struct CaptureKey<'a, X> { + delegate: X, + key: &'a mut Option, +} + +impl<'a, X> CaptureKey<'a, X> { + fn new(delegate: X, key: &'a mut Option) -> Self { + CaptureKey { delegate, key } + } +} + +// Forwarding impl. +impl<'a, 'de, X> DeserializeSeed<'de> for CaptureKey<'a, X> +where + X: DeserializeSeed<'de>, +{ + type Value = X::Value; + + fn deserialize(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + self.delegate + .deserialize(CaptureKey::new(deserializer, self.key)) + } +} + +// Forwarding impl. +impl<'a, 'de, X> de::Deserializer<'de> for CaptureKey<'a, X> +where + X: de::Deserializer<'de>, +{ + type Error = X::Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_any(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_bool(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_bool(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_u8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_u8(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_u16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_u16(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_u32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_u32(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_u64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_u64(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_u128(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + 
.deserialize_u128(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_i8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_i8(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_i16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_i16(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_i32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_i32(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_i64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_i64(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_i128(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_i128(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_f32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_f32(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_f64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_f64(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_char(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_char(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_str(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_string(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_string(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_bytes(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_bytes(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_byte_buf(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_byte_buf(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_option(self, 
visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_option(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_unit(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_unit_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_unit_struct(name, CaptureKey::new(visitor, self.key)) + } + + fn deserialize_newtype_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_newtype_struct(name, CaptureKey::new(visitor, self.key)) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_seq(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_tuple(len, CaptureKey::new(visitor, self.key)) + } + + fn deserialize_tuple_struct( + self, + name: &'static str, + len: usize, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_tuple_struct(name, len, CaptureKey::new(visitor, self.key)) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_map(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_struct(name, fields, CaptureKey::new(visitor, self.key)) + } + + fn deserialize_enum( + self, + name: &'static str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_enum(name, variants, CaptureKey::new(visitor, self.key)) + } + + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: 
Visitor<'de>, + { + self.delegate + .deserialize_ignored_any(CaptureKey::new(visitor, self.key)) + } + + fn deserialize_identifier(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.delegate + .deserialize_identifier(CaptureKey::new(visitor, self.key)) + } + + fn is_human_readable(&self) -> bool { + self.delegate.is_human_readable() + } +} + +// Forwarding impl except `visit_str` and `visit_string` which save the string. +impl<'a, 'de, X> Visitor<'de> for CaptureKey<'a, X> +where + X: Visitor<'de>, +{ + type Value = X::Value; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.delegate.expecting(formatter) + } + + fn visit_bool(self, v: bool) -> Result + where + E: de::Error, + { + let string = if v { "true" } else { "false" }; + *self.key = Some(string.to_owned()); + self.delegate.visit_bool(v) + } + + fn visit_i8(self, v: i8) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_i8(v) + } + + fn visit_i16(self, v: i16) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_i16(v) + } + + fn visit_i32(self, v: i32) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_i32(v) + } + + fn visit_i64(self, v: i64) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_i64(v) + } + + fn visit_i128(self, v: i128) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_i128(v) + } + + fn visit_u8(self, v: u8) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_u8(v) + } + + fn visit_u16(self, v: u16) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_u16(v) + } 
+ + fn visit_u32(self, v: u32) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_u32(v) + } + + fn visit_u64(self, v: u64) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_u64(v) + } + + fn visit_u128(self, v: u128) -> Result + where + E: de::Error, + { + *self.key = Some(itoa::Buffer::new().format(v).to_owned()); + self.delegate.visit_u128(v) + } + + fn visit_f32(self, v: f32) -> Result + where + E: de::Error, + { + self.delegate.visit_f32(v) + } + + fn visit_f64(self, v: f64) -> Result + where + E: de::Error, + { + self.delegate.visit_f64(v) + } + + fn visit_char(self, v: char) -> Result + where + E: de::Error, + { + self.delegate.visit_char(v) + } + + fn visit_str(self, v: &str) -> Result + where + E: de::Error, + { + *self.key = Some(v.to_owned()); + self.delegate.visit_str(v) + } + + fn visit_borrowed_str(self, v: &'de str) -> Result + where + E: de::Error, + { + *self.key = Some(v.to_owned()); + self.delegate.visit_borrowed_str(v) + } + + fn visit_string(self, v: String) -> Result + where + E: de::Error, + { + *self.key = Some(v.clone()); + self.delegate.visit_string(v) + } + + fn visit_unit(self) -> Result + where + E: de::Error, + { + self.delegate.visit_unit() + } + + fn visit_none(self) -> Result + where + E: de::Error, + { + self.delegate.visit_none() + } + + fn visit_some(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + self.delegate.visit_some(deserializer) + } + + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + self.delegate.visit_newtype_struct(deserializer) + } + + fn visit_seq(self, visitor: V) -> Result + where + V: de::SeqAccess<'de>, + { + self.delegate.visit_seq(visitor) + } + + fn visit_map(self, visitor: V) -> Result + where + V: de::MapAccess<'de>, + { + self.delegate.visit_map(visitor) + } + + fn visit_enum(self, visitor: 
V) -> Result + where + V: de::EnumAccess<'de>, + { + self.delegate.visit_enum(visitor) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: de::Error, + { + self.delegate.visit_bytes(v) + } + + fn visit_borrowed_bytes(self, v: &'de [u8]) -> Result + where + E: de::Error, + { + self.delegate.visit_borrowed_bytes(v) + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: de::Error, + { + self.delegate.visit_byte_buf(v) + } +} + +// Seed used for map values, sequence elements and newtype variants to track +// their path. +struct TrackedSeed<'a, 'b, X> { + seed: X, + chain: Chain<'a>, + track: &'b Track, +} + +impl<'a, 'b, X> TrackedSeed<'a, 'b, X> { + fn new(seed: X, chain: Chain<'a>, track: &'b Track) -> Self { + TrackedSeed { seed, chain, track } + } +} + +impl<'a, 'b, 'de, X> DeserializeSeed<'de> for TrackedSeed<'a, 'b, X> +where + X: DeserializeSeed<'de>, +{ + type Value = X::Value; + + fn deserialize(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + let chain = self.chain; + let track = self.track; + self.seed + .deserialize(Deserializer { + de: deserializer, + chain: chain.clone(), + track, + }) + .map_err(|err| track.trigger(&chain, err)) + } +} + +// Seq visitor that tracks the index of its elements. +struct SeqAccess<'a, 'b, X> { + delegate: X, + chain: &'a Chain<'a>, + index: usize, + track: &'b Track, +} + +impl<'a, 'b, X> SeqAccess<'a, 'b, X> { + fn new(delegate: X, chain: &'a Chain<'a>, track: &'b Track) -> Self { + SeqAccess { + delegate, + chain, + index: 0, + track, + } + } +} + +// Forwarding impl to preserve context. 
+impl<'a, 'b, 'de, X> de::SeqAccess<'de> for SeqAccess<'a, 'b, X> +where + X: de::SeqAccess<'de>, +{ + type Error = X::Error; + + fn next_element_seed(&mut self, seed: T) -> Result, X::Error> + where + T: DeserializeSeed<'de>, + { + let parent = self.chain; + let chain = Chain::Seq { + parent, + index: self.index, + }; + let track = self.track; + self.index += 1; + self.delegate + .next_element_seed(TrackedSeed::new(seed, chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn size_hint(&self) -> Option { + self.delegate.size_hint() + } +} + +// Map visitor that captures the string value of its keys and uses that to track +// the path to its values. +struct MapAccess<'a, 'b, X> { + delegate: X, + chain: &'a Chain<'a>, + key: Option, + track: &'b Track, +} + +impl<'a, 'b, X> MapAccess<'a, 'b, X> { + fn new(delegate: X, chain: &'a Chain<'a>, track: &'b Track) -> Self { + MapAccess { + delegate, + chain, + key: None, + track, + } + } +} + +impl<'a, 'b, 'de, X> de::MapAccess<'de> for MapAccess<'a, 'b, X> +where + X: de::MapAccess<'de>, +{ + type Error = X::Error; + + fn next_key_seed(&mut self, seed: K) -> Result, X::Error> + where + K: DeserializeSeed<'de>, + { + let chain = self.chain; + let track = self.track; + let key = &mut self.key; + self.delegate + .next_key_seed(CaptureKey::new(seed, key)) + .map_err(|err| { + let chain = match key.take() { + Some(key) => Chain::Map { parent: chain, key }, + None => Chain::NonStringKey { parent: chain }, + }; + track.trigger(&chain, err) + }) + } + + fn next_value_seed(&mut self, seed: V) -> Result + where + V: DeserializeSeed<'de>, + { + let parent = self.chain; + let chain = match self.key.take() { + Some(key) => Chain::Map { parent, key }, + None => Chain::NonStringKey { parent }, + }; + let track = self.track; + self.delegate + .next_value_seed(TrackedSeed::new(seed, chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn size_hint(&self) -> Option { + self.delegate.size_hint() + } +} diff 
--git a/.cargo-vendor/serde_path_to_error/src/lib.rs b/.cargo-vendor/serde_path_to_error/src/lib.rs new file mode 100644 index 0000000000..0105a9c270 --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/src/lib.rs @@ -0,0 +1,193 @@ +//! [![github]](https://github.com/dtolnay/path-to-error) [![crates-io]](https://crates.io/crates/serde_path_to_error) [![docs-rs]](https://docs.rs/serde_path_to_error) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//!
+//! +//! Find out the path at which a deserialization error occurred. This crate +//! provides a wrapper that works with any existing Serde `Deserializer` and +//! exposes the chain of field names leading to the error. +//! +//! # Example +//! +//! ``` +//! # use serde_derive::Deserialize; +//! # +//! use serde::Deserialize; +//! use std::collections::BTreeMap as Map; +//! +//! #[derive(Deserialize)] +//! struct Package { +//! name: String, +//! dependencies: Map, +//! } +//! +//! #[derive(Deserialize)] +//! struct Dependency { +//! version: String, +//! } +//! +//! fn main() { +//! let j = r#"{ +//! "name": "demo", +//! "dependencies": { +//! "serde": { +//! "version": 1 +//! } +//! } +//! }"#; +//! +//! // Some Deserializer. +//! let jd = &mut serde_json::Deserializer::from_str(j); +//! +//! let result: Result = serde_path_to_error::deserialize(jd); +//! match result { +//! Ok(_) => panic!("expected a type error"), +//! Err(err) => { +//! let path = err.path().to_string(); +//! assert_eq!(path, "dependencies.serde.version"); +//! } +//! } +//! } +//! ``` + +#![doc(html_root_url = "https://docs.rs/serde_path_to_error/0.1.16")] +#![allow( + clippy::doc_link_with_quotes, // https://github.com/rust-lang/rust-clippy/issues/8961 + clippy::iter_not_returning_iterator, // https://github.com/rust-lang/rust-clippy/issues/8285 + clippy::missing_errors_doc, + clippy::module_name_repetitions, + clippy::must_use_candidate, + clippy::new_without_default +)] + +mod de; +mod path; +mod ser; +mod wrap; + +use std::cell::Cell; +use std::error::Error as StdError; +use std::fmt::{self, Display}; + +pub use crate::de::{deserialize, Deserializer}; +pub use crate::path::{Path, Segment, Segments}; +pub use crate::ser::{serialize, Serializer}; + +/// Original deserializer error together with the path at which it occurred. 
+#[derive(Clone, Debug)] +pub struct Error { + path: Path, + original: E, +} + +impl Error { + pub fn new(path: Path, inner: E) -> Self { + Error { + path, + original: inner, + } + } + + /// Element path at which this deserialization error occurred. + pub fn path(&self) -> &Path { + &self.path + } + + /// The Deserializer's underlying error that occurred. + pub fn into_inner(self) -> E { + self.original + } + + /// Reference to the Deserializer's underlying error that occurred. + pub fn inner(&self) -> &E { + &self.original + } +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if !self.path.is_only_unknown() { + write!(f, "{}: ", self.path)?; + } + write!(f, "{}", self.original) + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + self.original.source() + } +} + +/// State for bookkeeping across nested deserializer calls. +/// +/// You don't need this if you are using `serde_path_to_error::deserializer`. If +/// you are managing your own `Deserializer`, see the usage example on +/// [`Deserializer`]. +pub struct Track { + path: Cell>, +} + +impl Track { + /// Empty state with no error having happened yet. + pub const fn new() -> Self { + Track { + path: Cell::new(None), + } + } + + /// Gets path at which the error occurred. Only meaningful after we know + /// that an error has occurred. Returns an empty path otherwise. 
+ pub fn path(self) -> Path { + self.path.into_inner().unwrap_or_else(Path::empty) + } + + #[inline] + fn trigger(&self, chain: &Chain, err: E) -> E { + self.trigger_impl(chain); + err + } + + fn trigger_impl(&self, chain: &Chain) { + self.path.set(Some(match self.path.take() { + Some(already_set) => already_set, + None => Path::from_chain(chain), + })); + } +} + +#[derive(Clone)] +enum Chain<'a> { + Root, + Seq { + parent: &'a Chain<'a>, + index: usize, + }, + Map { + parent: &'a Chain<'a>, + key: String, + }, + Struct { + parent: &'a Chain<'a>, + key: &'static str, + }, + Enum { + parent: &'a Chain<'a>, + variant: String, + }, + Some { + parent: &'a Chain<'a>, + }, + NewtypeStruct { + parent: &'a Chain<'a>, + }, + NewtypeVariant { + parent: &'a Chain<'a>, + }, + NonStringKey { + parent: &'a Chain<'a>, + }, +} diff --git a/.cargo-vendor/serde_path_to_error/src/path.rs b/.cargo-vendor/serde_path_to_error/src/path.rs new file mode 100644 index 0000000000..5e4706f3d7 --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/src/path.rs @@ -0,0 +1,160 @@ +use std::fmt::{self, Display}; +use std::slice; + +use super::Chain; + +/// Path to the error value in the input, like `dependencies.serde.typo1`. +/// +/// Use `path.to_string()` to get a string representation of the path with +/// segments separated by periods, or use `path.iter()` to iterate over +/// individual segments of the path. +#[derive(Clone, Debug)] +pub struct Path { + segments: Vec, +} + +/// Single segment of a path. +#[derive(Clone, Debug)] +pub enum Segment { + Seq { index: usize }, + Map { key: String }, + Enum { variant: String }, + Unknown, +} + +impl Path { + /// Returns an iterator with element type [`&Segment`][Segment]. 
+ pub fn iter(&self) -> Segments { + Segments { + iter: self.segments.iter(), + } + } +} + +impl<'a> IntoIterator for &'a Path { + type Item = &'a Segment; + type IntoIter = Segments<'a>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +/// Iterator over segments of a path. +pub struct Segments<'a> { + iter: slice::Iter<'a, Segment>, +} + +impl<'a> Iterator for Segments<'a> { + type Item = &'a Segment; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a> DoubleEndedIterator for Segments<'a> { + fn next_back(&mut self) -> Option { + self.iter.next_back() + } +} + +impl<'a> ExactSizeIterator for Segments<'a> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl Display for Path { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if self.segments.is_empty() { + return formatter.write_str("."); + } + + let mut separator = ""; + for segment in self { + if !matches!(segment, Segment::Seq { .. 
}) { + formatter.write_str(separator)?; + } + write!(formatter, "{}", segment)?; + separator = "."; + } + + Ok(()) + } +} + +impl Path { + pub(crate) fn empty() -> Self { + Path { + segments: Vec::new(), + } + } + + pub(crate) fn from_chain(mut chain: &Chain) -> Self { + let mut segments = Vec::new(); + loop { + match chain { + Chain::Root => break, + Chain::Seq { parent, index } => { + segments.push(Segment::Seq { index: *index }); + chain = parent; + } + Chain::Map { parent, key } => { + segments.push(Segment::Map { key: key.clone() }); + chain = parent; + } + Chain::Struct { parent, key } => { + let key = *key; + segments.push(Segment::Map { + key: key.to_owned(), + }); + chain = parent; + } + Chain::Enum { parent, variant } => { + segments.push(Segment::Enum { + variant: variant.clone(), + }); + chain = parent; + } + Chain::Some { parent } + | Chain::NewtypeStruct { parent } + | Chain::NewtypeVariant { parent } => { + chain = parent; + } + Chain::NonStringKey { parent } => { + segments.push(Segment::Unknown); + chain = parent; + } + } + } + segments.reverse(); + Path { segments } + } + + pub(crate) fn is_only_unknown(&self) -> bool { + self.segments.iter().all(Segment::is_unknown) + } +} + +impl Display for Segment { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self { + Segment::Seq { index } => write!(formatter, "[{}]", index), + Segment::Map { key } | Segment::Enum { variant: key } => { + write!(formatter, "{}", key) + } + Segment::Unknown => formatter.write_str("?"), + } + } +} + +impl Segment { + fn is_unknown(&self) -> bool { + matches!(self, Segment::Unknown) + } +} diff --git a/.cargo-vendor/serde_path_to_error/src/ser.rs b/.cargo-vendor/serde_path_to_error/src/ser.rs new file mode 100644 index 0000000000..4e4e44b56d --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/src/ser.rs @@ -0,0 +1,990 @@ +use crate::wrap::Wrap; +use crate::{Chain, Error, Track}; +use serde::ser::{self, Serialize}; +use std::cell::Cell; +use 
std::fmt::Display; + +/// Entry point for tracking path to Serialize error. +/// +/// # Example +/// +/// ``` +/// # use serde_derive::Serialize; +/// # +/// use serde::Serialize; +/// use std::cell::RefCell; +/// +/// #[derive(Serialize)] +/// struct Outer<'a> { +/// k: Inner<'a>, +/// } +/// +/// #[derive(Serialize)] +/// struct Inner<'a> { +/// refcell: &'a RefCell, +/// } +/// +/// let refcell = RefCell::new(String::new()); +/// let value = Outer { +/// k: Inner { refcell: &refcell }, +/// }; +/// +/// // A RefCell cannot be serialized while it is still mutably borrowed. +/// let _borrowed = refcell.borrow_mut(); +/// +/// // Some Serializer. +/// let mut out = Vec::new(); +/// let jser = &mut serde_json::Serializer::new(&mut out); +/// +/// let result = serde_path_to_error::serialize(&value, jser); +/// match result { +/// Ok(_) => panic!("expected failure to serialize RefCell"), +/// Err(err) => { +/// let path = err.path().to_string(); +/// assert_eq!(path, "k.refcell"); +/// } +/// } +/// ``` +pub fn serialize(value: &T, serializer: S) -> Result> +where + T: ?Sized + Serialize, + S: ser::Serializer, +{ + let mut track = Track::new(); + match T::serialize(value, Serializer::new(serializer, &mut track)) { + Ok(ok) => Ok(ok), + Err(err) => Err(Error { + path: track.path(), + original: err, + }), + } +} + +/// Serializer adapter that records path to serialization errors. +/// +/// # Example +/// +/// ``` +/// # use serde_derive::Serialize; +/// # +/// use serde::Serialize; +/// use std::collections::BTreeMap; +/// +/// // Maps with a non-string key are not valid in JSON. +/// let mut inner_map = BTreeMap::new(); +/// inner_map.insert(vec!['w', 'a', 't'], 0); +/// +/// let mut outer_map = BTreeMap::new(); +/// outer_map.insert("k", inner_map); +/// +/// // Some Serializer. 
+/// let mut out = Vec::new(); +/// let jser = &mut serde_json::Serializer::new(&mut out); +/// +/// let mut track = serde_path_to_error::Track::new(); +/// let ps = serde_path_to_error::Serializer::new(jser, &mut track); +/// +/// match outer_map.serialize(ps) { +/// Ok(_) => panic!("expected failure to serialize non-string key"), +/// Err(_) => { +/// let path = track.path().to_string(); +/// assert_eq!(path, "k"); +/// } +/// } +/// ``` +pub struct Serializer<'a, 'b, S> { + ser: S, + chain: &'a Chain<'a>, + track: &'b Track, +} + +impl<'a, 'b, S> Serializer<'a, 'b, S> { + #[allow(clippy::needless_pass_by_ref_mut)] + pub fn new(ser: S, track: &'b mut Track) -> Self { + Serializer { + ser, + chain: &Chain::Root, + track, + } + } +} + +impl<'a, 'b, S> ser::Serializer for Serializer<'a, 'b, S> +where + S: ser::Serializer, +{ + type Ok = S::Ok; + type Error = S::Error; + type SerializeSeq = WrapSeq<'a, 'b, S::SerializeSeq>; + type SerializeTuple = WrapSeq<'a, 'b, S::SerializeTuple>; + type SerializeTupleStruct = WrapSeq<'a, 'b, S::SerializeTupleStruct>; + type SerializeTupleVariant = WrapSeq<'a, 'b, S::SerializeTupleVariant>; + type SerializeMap = WrapMap<'a, 'b, S::SerializeMap>; + type SerializeStruct = Wrap<'a, 'b, S::SerializeStruct>; + type SerializeStructVariant = Wrap<'a, 'b, S::SerializeStructVariant>; + + fn serialize_bool(self, v: bool) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_bool(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_i8(self, v: i8) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_i8(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_i16(self, v: i16) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_i16(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_i32(self, v: i32) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_i32(v) + 
.map_err(|err| track.trigger(chain, err)) + } + + fn serialize_i64(self, v: i64) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_i64(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_i128(self, v: i128) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_i128(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_u8(self, v: u8) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_u8(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_u16(self, v: u16) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_u16(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_u32(self, v: u32) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_u32(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_u64(self, v: u64) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_u64(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_u128(self, v: u128) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_u128(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_f32(self, v: f32) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_f32(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_f64(self, v: f64) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_f64(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_char(self, v: char) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_char(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_str(self, v: &str) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_str(v) + .map_err(|err| 
track.trigger(chain, err)) + } + + fn serialize_bytes(self, v: &[u8]) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_bytes(v) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_none(self) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_none() + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_some(self, value: &T) -> Result + where + T: ?Sized + Serialize, + { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_some(value) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_unit(self) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_unit() + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_unit_struct(self, name: &'static str) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_unit_struct(name) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_unit_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + ) -> Result { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_unit_variant(name, variant_index, variant) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_newtype_struct( + self, + name: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_newtype_struct(name, value) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_newtype_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + let chain = self.chain; + let track = self.track; + self.ser + .serialize_newtype_variant(name, variant_index, variant, value) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_seq(self, len: Option) -> Result { + let chain = self.chain; + let track 
= self.track; + match self.ser.serialize_seq(len) { + Ok(delegate) => Ok(WrapSeq::new(delegate, chain, track)), + Err(err) => Err(track.trigger(chain, err)), + } + } + + fn serialize_tuple(self, len: usize) -> Result { + let chain = self.chain; + let track = self.track; + match self.ser.serialize_tuple(len) { + Ok(delegate) => Ok(WrapSeq::new(delegate, chain, track)), + Err(err) => Err(track.trigger(chain, err)), + } + } + + fn serialize_tuple_struct( + self, + name: &'static str, + len: usize, + ) -> Result { + let chain = self.chain; + let track = self.track; + match self.ser.serialize_tuple_struct(name, len) { + Ok(delegate) => Ok(WrapSeq::new(delegate, chain, track)), + Err(err) => Err(track.trigger(chain, err)), + } + } + + fn serialize_tuple_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + let chain = self.chain; + let track = self.track; + match self + .ser + .serialize_tuple_variant(name, variant_index, variant, len) + { + Ok(delegate) => Ok(WrapSeq::new(delegate, chain, track)), + Err(err) => Err(track.trigger(chain, err)), + } + } + + fn serialize_map(self, len: Option) -> Result { + let chain = self.chain; + let track = self.track; + match self.ser.serialize_map(len) { + Ok(delegate) => Ok(WrapMap::new(delegate, chain, track)), + Err(err) => Err(track.trigger(chain, err)), + } + } + + fn serialize_struct( + self, + name: &'static str, + len: usize, + ) -> Result { + let chain = self.chain; + let track = self.track; + match self.ser.serialize_struct(name, len) { + Ok(delegate) => Ok(Wrap::new(delegate, chain, track)), + Err(err) => Err(track.trigger(chain, err)), + } + } + + fn serialize_struct_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + let chain = self.chain; + let track = self.track; + match self + .ser + .serialize_struct_variant(name, variant_index, variant, len) + { + Ok(delegate) => Ok(Wrap::new(delegate, 
chain, track)), + Err(err) => Err(track.trigger(chain, err)), + } + } + + fn collect_str(self, value: &T) -> Result + where + T: ?Sized + Display, + { + let chain = self.chain; + let track = self.track; + self.ser + .collect_str(value) + .map_err(|err| track.trigger(chain, err)) + } + + fn is_human_readable(&self) -> bool { + self.ser.is_human_readable() + } +} + +struct TrackedValue<'a, 'b, X> { + value: X, + chain: &'a Chain<'a>, + track: &'b Track, +} + +impl<'a, 'b, X> TrackedValue<'a, 'b, X> { + fn new(value: X, chain: &'a Chain<'a>, track: &'b Track) -> Self { + TrackedValue { + value, + chain, + track, + } + } +} + +impl<'a, 'b, X> Serialize for TrackedValue<'a, 'b, X> +where + X: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + let chain = self.chain; + let track = self.track; + self.value + .serialize(Serializer { + ser: serializer, + chain, + track, + }) + .map_err(|err| track.trigger(chain, err)) + } +} + +pub struct WrapSeq<'a, 'b, S> { + delegate: S, + chain: &'a Chain<'a>, + index: usize, + track: &'b Track, +} + +impl<'a, 'b, S> WrapSeq<'a, 'b, S> { + fn new(delegate: S, chain: &'a Chain<'a>, track: &'b Track) -> Self { + WrapSeq { + delegate, + chain, + index: 0, + track, + } + } +} + +impl<'a, 'b, S> ser::SerializeSeq for WrapSeq<'a, 'b, S> +where + S: ser::SerializeSeq, +{ + type Ok = S::Ok; + type Error = S::Error; + + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let parent = self.chain; + let chain = Chain::Seq { + parent, + index: self.index, + }; + let track = self.track; + self.index += 1; + self.delegate + .serialize_element(&TrackedValue::new(value, &chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn end(self) -> Result { + let chain = self.chain; + let track = self.track; + self.delegate.end().map_err(|err| track.trigger(chain, err)) + } +} + +impl<'a, 'b, S> ser::SerializeTuple for WrapSeq<'a, 'b, S> +where + 
S: ser::SerializeTuple, +{ + type Ok = S::Ok; + type Error = S::Error; + + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let parent = self.chain; + let chain = Chain::Seq { + parent, + index: self.index, + }; + let track = self.track; + self.index += 1; + self.delegate + .serialize_element(&TrackedValue::new(value, &chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn end(self) -> Result { + let chain = self.chain; + let track = self.track; + self.delegate.end().map_err(|err| track.trigger(chain, err)) + } +} + +impl<'a, 'b, S> ser::SerializeTupleStruct for WrapSeq<'a, 'b, S> +where + S: ser::SerializeTupleStruct, +{ + type Ok = S::Ok; + type Error = S::Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let parent = self.chain; + let chain = Chain::Seq { + parent, + index: self.index, + }; + let track = self.track; + self.index += 1; + self.delegate + .serialize_field(&TrackedValue::new(value, &chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn end(self) -> Result { + let chain = self.chain; + let track = self.track; + self.delegate.end().map_err(|err| track.trigger(chain, err)) + } +} + +impl<'a, 'b, S> ser::SerializeTupleVariant for WrapSeq<'a, 'b, S> +where + S: ser::SerializeTupleVariant, +{ + type Ok = S::Ok; + type Error = S::Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let parent = self.chain; + let chain = Chain::Seq { + parent, + index: self.index, + }; + let track = self.track; + self.index += 1; + self.delegate + .serialize_field(&TrackedValue::new(value, &chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn end(self) -> Result { + let chain = self.chain; + let track = self.track; + self.delegate.end().map_err(|err| track.trigger(chain, err)) + } +} + +pub struct WrapMap<'a, 'b, S> { + delegate: S, + 
chain: &'a Chain<'a>, + key: Cell>, + track: &'b Track, +} + +impl<'a, 'b, S> WrapMap<'a, 'b, S> { + fn new(delegate: S, chain: &'a Chain<'a>, track: &'b Track) -> Self { + WrapMap { + delegate, + chain, + key: Cell::new(None), + track, + } + } +} + +impl<'a, 'b, S> ser::SerializeMap for WrapMap<'a, 'b, S> +where + S: ser::SerializeMap, +{ + type Ok = S::Ok; + type Error = S::Error; + + fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let chain = self.chain; + let track = self.track; + self.key.set(None); + self.delegate + .serialize_key(&CaptureKey::new(&self.key, key)) + .map_err(|err| track.trigger(chain, err)) + } + + fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let parent = self.chain; + let chain = match self.key.take() { + Some(key) => Chain::Map { parent, key }, + None => Chain::NonStringKey { parent }, + }; + let track = self.track; + self.delegate + .serialize_value(&TrackedValue::new(value, &chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn end(self) -> Result { + let chain = self.chain; + let track = self.track; + self.delegate.end().map_err(|err| track.trigger(chain, err)) + } +} + +impl<'a, 'b, S> ser::SerializeStruct for Wrap<'a, 'b, S> +where + S: ser::SerializeStruct, +{ + type Ok = S::Ok; + type Error = S::Error; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let parent = self.chain; + let chain = Chain::Struct { parent, key }; + let track = self.track; + self.delegate + .serialize_field(key, &TrackedValue::new(value, &chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn end(self) -> Result { + let chain = self.chain; + let track = self.track; + self.delegate.end().map_err(|err| track.trigger(chain, err)) + } + + fn skip_field(&mut self, key: &'static str) -> Result<(), Self::Error> { + let chain = self.chain; + 
let track = self.track; + self.delegate + .skip_field(key) + .map_err(|err| track.trigger(chain, err)) + } +} + +impl<'a, 'b, S> ser::SerializeStructVariant for Wrap<'a, 'b, S> +where + S: ser::SerializeStructVariant, +{ + type Ok = S::Ok; + type Error = S::Error; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let parent = self.chain; + let chain = Chain::Struct { parent, key }; + let track = self.track; + self.delegate + .serialize_field(key, &TrackedValue::new(value, &chain, track)) + .map_err(|err| track.trigger(parent, err)) + } + + fn end(self) -> Result { + let chain = self.chain; + let track = self.track; + self.delegate.end().map_err(|err| track.trigger(chain, err)) + } + + fn skip_field(&mut self, key: &'static str) -> Result<(), Self::Error> { + let chain = self.chain; + let track = self.track; + self.delegate + .skip_field(key) + .map_err(|err| track.trigger(chain, err)) + } +} + +struct CaptureKey<'a, T> { + out: &'a Cell>, + delegate: T, +} + +impl<'a, T> CaptureKey<'a, T> { + fn new(out: &'a Cell>, delegate: T) -> Self { + CaptureKey { out, delegate } + } +} + +impl<'a, T> Serialize for CaptureKey<'a, T> +where + T: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + self.delegate + .serialize(CaptureKey::new(self.out, serializer)) + } +} + +impl<'a, S> ser::Serializer for CaptureKey<'a, S> +where + S: ser::Serializer, +{ + type Ok = S::Ok; + type Error = S::Error; + type SerializeSeq = S::SerializeSeq; + type SerializeTuple = S::SerializeTuple; + type SerializeTupleStruct = S::SerializeTupleStruct; + type SerializeTupleVariant = S::SerializeTupleVariant; + type SerializeMap = S::SerializeMap; + type SerializeStruct = S::SerializeStruct; + type SerializeStructVariant = S::SerializeStructVariant; + + fn serialize_bool(self, v: bool) -> Result { + let string = if v { "true" } else { "false" }; + 
self.out.set(Some(string.to_owned())); + self.delegate.serialize_bool(v) + } + + fn serialize_i8(self, v: i8) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_i8(v) + } + + fn serialize_i16(self, v: i16) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_i16(v) + } + + fn serialize_i32(self, v: i32) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_i32(v) + } + + fn serialize_i64(self, v: i64) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_i64(v) + } + + fn serialize_i128(self, v: i128) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_i128(v) + } + + fn serialize_u8(self, v: u8) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_u8(v) + } + + fn serialize_u16(self, v: u16) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_u16(v) + } + + fn serialize_u32(self, v: u32) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_u32(v) + } + + fn serialize_u64(self, v: u64) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_u64(v) + } + + fn serialize_u128(self, v: u128) -> Result { + self.out.set(Some(itoa::Buffer::new().format(v).to_owned())); + self.delegate.serialize_u128(v) + } + + fn serialize_f32(self, v: f32) -> Result { + self.delegate.serialize_f32(v) + } + + fn serialize_f64(self, v: f64) -> Result { + self.delegate.serialize_f64(v) + } + + fn serialize_char(self, v: char) -> Result { + self.delegate.serialize_char(v) + } + + fn serialize_str(self, v: &str) -> Result { + self.out.set(Some(v.to_owned())); + self.delegate.serialize_str(v) + } + + fn serialize_bytes(self, v: &[u8]) -> Result { + 
self.delegate.serialize_bytes(v) + } + + fn serialize_none(self) -> Result { + self.delegate.serialize_none() + } + + fn serialize_some(self, value: &T) -> Result + where + T: ?Sized + Serialize, + { + self.delegate + .serialize_some(&CaptureKey::new(self.out, value)) + } + + fn serialize_unit(self) -> Result { + self.delegate.serialize_unit() + } + + fn serialize_unit_struct(self, name: &'static str) -> Result { + self.delegate.serialize_unit_struct(name) + } + + fn serialize_unit_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + ) -> Result { + self.out.set(Some(variant.to_owned())); + self.delegate + .serialize_unit_variant(name, variant_index, variant) + } + + fn serialize_newtype_struct( + self, + name: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + self.delegate + .serialize_newtype_struct(name, &CaptureKey::new(self.out, value)) + } + + fn serialize_newtype_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + self.delegate + .serialize_newtype_variant(name, variant_index, variant, value) + } + + fn serialize_seq(self, len: Option) -> Result { + self.delegate.serialize_seq(len) + } + + fn serialize_tuple(self, len: usize) -> Result { + self.delegate.serialize_tuple(len) + } + + fn serialize_tuple_struct( + self, + name: &'static str, + len: usize, + ) -> Result { + self.delegate.serialize_tuple_struct(name, len) + } + + fn serialize_tuple_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + self.delegate + .serialize_tuple_variant(name, variant_index, variant, len) + } + + fn serialize_map(self, len: Option) -> Result { + self.delegate.serialize_map(len) + } + + fn serialize_struct( + self, + name: &'static str, + len: usize, + ) -> Result { + self.delegate.serialize_struct(name, len) + } + + fn serialize_struct_variant( + self, 
+ name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + self.delegate + .serialize_struct_variant(name, variant_index, variant, len) + } + + fn collect_seq(self, iter: I) -> Result + where + I: IntoIterator, + I::Item: Serialize, + { + self.delegate.collect_seq(iter) + } + + fn collect_map(self, iter: I) -> Result + where + K: Serialize, + V: Serialize, + I: IntoIterator, + { + self.delegate.collect_map(iter) + } + + fn collect_str(self, value: &T) -> Result + where + T: ?Sized + Display, + { + self.out.set(Some(value.to_string())); + self.delegate.collect_str(value) + } + + fn is_human_readable(&self) -> bool { + self.delegate.is_human_readable() + } +} diff --git a/.cargo-vendor/serde_path_to_error/src/wrap.rs b/.cargo-vendor/serde_path_to_error/src/wrap.rs new file mode 100644 index 0000000000..d730995a71 --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/src/wrap.rs @@ -0,0 +1,35 @@ +use crate::{Chain, Track}; + +// Wrapper that attaches context to a `Visitor`, `SeqAccess` or `EnumAccess`. +pub struct Wrap<'a, 'b, X> { + pub(crate) delegate: X, + pub(crate) chain: &'a Chain<'a>, + pub(crate) track: &'b Track, +} + +// Wrapper that attaches context to a `VariantAccess`. 
+pub struct WrapVariant<'a, 'b, X> { + pub(crate) delegate: X, + pub(crate) chain: Chain<'a>, + pub(crate) track: &'b Track, +} + +impl<'a, 'b, X> Wrap<'a, 'b, X> { + pub(crate) fn new(delegate: X, chain: &'a Chain<'a>, track: &'b Track) -> Self { + Wrap { + delegate, + chain, + track, + } + } +} + +impl<'a, 'b, X> WrapVariant<'a, 'b, X> { + pub(crate) fn new(delegate: X, chain: Chain<'a>, track: &'b Track) -> Self { + WrapVariant { + delegate, + chain, + track, + } + } +} diff --git a/.cargo-vendor/serde_path_to_error/tests/deserialize.rs b/.cargo-vendor/serde_path_to_error/tests/deserialize.rs new file mode 100644 index 0000000000..624967d6c6 --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/tests/deserialize.rs @@ -0,0 +1,228 @@ +#![allow(clippy::unreadable_literal, dead_code)] + +use serde::Deserialize; +use serde_derive::Deserialize; +use std::collections::BTreeMap as Map; +use std::fmt::Debug; + +fn test<'de, T>(json: &'de str, expected: &str) +where + T: Deserialize<'de> + Debug, +{ + let de = &mut serde_json::Deserializer::from_str(json); + let result: Result = serde_path_to_error::deserialize(de); + let path = result.unwrap_err().path().to_string(); + assert_eq!(path, expected); +} + +#[test] +fn test_struct() { + #[derive(Deserialize, Debug)] + struct Package { + name: String, + dependencies: Map, + } + + #[derive(Deserialize, Debug)] + struct Dependency { + version: String, + } + + let j = r#"{ + "name": "demo", + "dependencies": { + "serde": { + "version": 1 + } + } + }"#; + + test::(j, "dependencies.serde.version"); +} + +#[test] +fn test_vec() { + #[derive(Deserialize, Debug)] + struct Package { + dependencies: Vec, + } + + #[derive(Deserialize, Debug)] + struct Dependency { + name: String, + version: String, + } + + let j = r#"{ + "dependencies": [ + { + "name": "serde", + "version": "1.0" + }, + { + "name": "serde_json", + "version": 1 + } + } + }"#; + + test::(j, "dependencies[1].version"); +} + +#[test] +fn test_option() { + 
#[derive(Deserialize, Debug)] + struct Package { + dependency: Option, + } + + #[derive(Deserialize, Debug)] + struct Dependency { + version: String, + } + + let j = r#"{ + "dependency": { + "version": 1 + } + }"#; + + test::(j, "dependency.version"); +} + +#[test] +fn test_struct_variant() { + #[derive(Deserialize, Debug)] + struct Package { + dependency: Dependency, + } + + #[derive(Deserialize, Debug)] + enum Dependency { + Struct { version: String }, + } + + let j = r#"{ + "dependency": { + "Struct": { + "version": 1 + } + } + }"#; + + test::(j, "dependency.Struct.version"); +} + +#[test] +fn test_tuple_variant() { + #[derive(Deserialize, Debug)] + struct Package { + dependency: Dependency, + } + + #[derive(Deserialize, Debug)] + enum Dependency { + Tuple(String, String), + } + + let j = r#"{ + "dependency": { + "Tuple": ["serde", 1] + } + }"#; + + test::(j, "dependency.Tuple[1]"); +} + +#[test] +fn test_unknown_field() { + #[derive(Deserialize, Debug)] + struct Package { + dependency: Dependency, + } + + #[derive(Deserialize, Debug)] + #[serde(deny_unknown_fields)] + struct Dependency { + version: String, + } + + let j = r#"{ + "dependency": { + "version": "1.0", + "name": "serde" + } + }"#; + + test::(j, "dependency.name"); +} + +#[test] +fn test_invalid_length() { + #[derive(Deserialize, Debug)] + struct Package { + dependency: Dependency, + } + + #[derive(Deserialize, Debug)] + struct Dependency(String, String); + + let j = r#"{ + "dependency": ["serde"] + }"#; + + test::(j, "dependency"); +} + +#[test] +fn test_syntax_error() { + #[derive(Deserialize, Debug)] + struct Package { + dependency: Dependency, + } + + #[derive(Deserialize, Debug)] + struct Dependency { + version: String, + } + + let j = r#"{ + "dependency": { + "error": * + }"#; + + test::(j, "dependency.error"); +} + +#[test] +fn test_u128() { + #[derive(Deserialize, Debug)] + struct Container { + n: u128, + } + + let j = r#"{ + "n": 130033514578017493995102500318550798591 + }"#; + + let de = 
&mut serde_json::Deserializer::from_str(j); + let container: Container = serde_path_to_error::deserialize(de).expect("failed to deserialize"); + + assert_eq!(container.n, 130033514578017493995102500318550798591u128); +} + +#[test] +fn test_map_nonstring_key() { + #[derive(Deserialize, Debug)] + struct Dependency { + version: String, + } + + let j = r#"{ + "100": { + "version": false + } + }"#; + + test::>(j, "100.version"); +} diff --git a/.cargo-vendor/serde_path_to_error/tests/serialize.rs b/.cargo-vendor/serde_path_to_error/tests/serialize.rs new file mode 100644 index 0000000000..2c2871b7da --- /dev/null +++ b/.cargo-vendor/serde_path_to_error/tests/serialize.rs @@ -0,0 +1,52 @@ +use serde::Serialize; +use serde_derive::Serialize; +use std::cell::RefCell; +use std::collections::BTreeMap; +use std::fmt::Debug; + +fn test(value: &T, expected: &str) +where + T: ?Sized + Serialize + Debug, +{ + let mut out = Vec::new(); + let ser = &mut serde_json::Serializer::new(&mut out); + let result = serde_path_to_error::serialize(value, ser); + let path = result.unwrap_err().path().to_string(); + assert_eq!(path, expected); +} + +#[test] +fn test_refcell_already_borrowed() { + #[derive(Serialize, Debug)] + struct Outer<'a> { + k: Inner<'a>, + } + + #[derive(Serialize, Debug)] + struct Inner<'a> { + refcell: &'a RefCell, + } + + let refcell = RefCell::new(String::new()); + let outer = Outer { + k: Inner { refcell: &refcell }, + }; + + let _borrowed = refcell.borrow_mut(); + test(&outer, "k.refcell"); +} + +#[test] +fn test_map_nonstring_key() { + fn singleton_map(key: K, value: V) -> BTreeMap { + let mut map = BTreeMap::new(); + map.insert(key, value); + map + } + + let map = singleton_map(b"", 0); + let map = singleton_map("k", map); + let map = singleton_map(100, map); + + test(&map, "100.k"); +} diff --git a/.cargo-vendor/serde_urlencoded/.cargo-checksum.json b/.cargo-vendor/serde_urlencoded/.cargo-checksum.json new file mode 100644 index 0000000000..126414d161 --- 
/dev/null +++ b/.cargo-vendor/serde_urlencoded/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"776416273e0e2004aaf8869df552bd0ff39858184730540604dad69e4dd17873","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"b9eb266294324f672cbe945fe8f2e32f85024f0d61a1a7d14382cdde0ac44769","README.md":"0adc2e76e529922436075eddeaaf8decf04f1a642f0e6d9c513b634e72c00699","rustfmt.toml":"5dab9ecd7e76bc1f49cc5a6985196912b9ac8086dfb70833b6251721ba5bf74c","src/de.rs":"3e7ed20d227e2dab88c201f9fda80ebc39b8219fe66ceaf6bef478ca5c9bd891","src/lib.rs":"83718fe61b847408cd08da3515ce4f4ec45998615605b6e878461313986d8571","src/ser/key.rs":"4651a34088cf08b948b27e885efe5c1f6876dbd70f502892bbb7710ce76c878f","src/ser/mod.rs":"2a2eeaf30790e24fbee6bb2ba140dc29343ef797bf3668587892f2c6d2644648","src/ser/pair.rs":"be19e319092dba66aac06bae47f77d2ef39c4c308bbd67c13d031ab88c0e68e7","src/ser/part.rs":"8da25ff5a5159a05dd4b221a757aac514975243b4860aa5eee4ad4500a46d48b","src/ser/value.rs":"5eacb91e054476b982c5fa1b0b38179e844ca79842170d47665aca9e4515552a","tests/test_deserialize.rs":"4525a4d05fd86b2535d50857adab101a02bb5c61a6cb9c138513c8687eca32a7","tests/test_serialize.rs":"2f57ffda172d84573c5d9abe2d700b756fa844639ff0f1bf295441429ddd662b"},"package":"d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"} \ No newline at end of file diff --git a/.cargo-vendor/serde_urlencoded/Cargo.toml b/.cargo-vendor/serde_urlencoded/Cargo.toml new file mode 100644 index 0000000000..dfe8cc1ff2 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/Cargo.toml @@ -0,0 +1,41 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "serde_urlencoded" +version = "0.7.1" +authors = ["Anthony Ramine "] +exclude = ["/.travis.yml", "/bors.toml"] +description = "`x-www-form-urlencoded` meets Serde" +documentation = "https://docs.rs/serde_urlencoded/0.7.1/serde_urlencoded/" +keywords = ["serde", "serialization", "urlencoded"] +categories = ["encoding", "web-programming"] +license = "MIT/Apache-2.0" +repository = "https://github.com/nox/serde_urlencoded" + +[lib] +test = false +[dependencies.form_urlencoded] +version = "1" + +[dependencies.itoa] +version = "1" + +[dependencies.ryu] +version = "1" + +[dependencies.serde] +version = "1.0.69" +[dev-dependencies.serde_derive] +version = "1" +[badges.travis-ci] +repository = "nox/serde_urlencoded" diff --git a/.cargo-vendor/serde_urlencoded/LICENSE-APACHE b/.cargo-vendor/serde_urlencoded/LICENSE-APACHE new file mode 100644 index 0000000000..1b5ec8b78e --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/.cargo-vendor/serde_urlencoded/LICENSE-MIT b/.cargo-vendor/serde_urlencoded/LICENSE-MIT new file mode 100644 index 0000000000..39f6303ad3 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Anthony Ramine + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo-vendor/serde_urlencoded/README.md b/.cargo-vendor/serde_urlencoded/README.md new file mode 100644 index 0000000000..92cacb65b8 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/README.md @@ -0,0 +1,53 @@ +`x-www-form-urlencoded` meets Serde +=================================== + +This crate is a Rust library for serialising to and deserialising from +the [`application/x-www-form-urlencoded`][urlencoded] format. It is built +upon [Serde], a high performance generic serialization framework and [rust-url], +a URL parser for Rust. 
+ +[rust-url]: https://github.com/servo/rust-url +[Serde]: https://github.com/serde-rs/serde +[urlencoded]: https://url.spec.whatwg.org/#application/x-www-form-urlencoded + +Installation +============ + +This crate works with Cargo and can be found on +[crates.io] with a `Cargo.toml` like: + +```toml +[dependencies] +serde_urlencoded = "0.7" +``` + +The documentation is available on [docs.rs]. + +[crates.io]: https://crates.io/crates/serde_urlencoded +[docs.rs]: https://docs.rs/serde_urlencoded/0.7.1/serde_urlencoded/ + +## Getting help + +Serde developers live in the #serde channel on +[`irc.mozilla.org`](https://wiki.mozilla.org/IRC) and most rust-url developers +live in the #servo one. The #rust channel is also a good resource with generally +faster response time but less specific knowledge about Serde, rust-url or this +crate. If IRC is not your thing, we are happy to respond to [GitHub +issues](https://github.com/nox/serde_urlencoded/issues/new) as well. + +## License + +serde_urlencoded is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in serde_urlencoded by you, as defined in the Apache-2.0 license, +shall be dual licensed as above, without any additional terms or conditions. 
diff --git a/.cargo-vendor/serde_urlencoded/rustfmt.toml b/.cargo-vendor/serde_urlencoded/rustfmt.toml new file mode 100644 index 0000000000..f80ce4e9c8 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/rustfmt.toml @@ -0,0 +1,5 @@ +match_block_trailing_comma = false +max_width = 80 +newline_style = "Unix" +reorder_imports = true +use_try_shorthand = true diff --git a/.cargo-vendor/serde_urlencoded/src/de.rs b/.cargo-vendor/serde_urlencoded/src/de.rs new file mode 100644 index 0000000000..d906eaae2a --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/src/de.rs @@ -0,0 +1,321 @@ +//! Deserialization support for the `application/x-www-form-urlencoded` format. + +use form_urlencoded::parse; +use form_urlencoded::Parse as UrlEncodedParse; +use serde::de::value::MapDeserializer; +use serde::de::Error as de_Error; +use serde::de::{self, IntoDeserializer}; +use serde::forward_to_deserialize_any; +use std::borrow::Cow; +use std::io::Read; + +#[doc(inline)] +pub use serde::de::value::Error; + +/// Deserializes a `application/x-www-form-urlencoded` value from a `&[u8]`. +/// +/// ``` +/// let meal = vec![ +/// ("bread".to_owned(), "baguette".to_owned()), +/// ("cheese".to_owned(), "comté".to_owned()), +/// ("meat".to_owned(), "ham".to_owned()), +/// ("fat".to_owned(), "butter".to_owned()), +/// ]; +/// +/// assert_eq!( +/// serde_urlencoded::from_bytes::>( +/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"), +/// Ok(meal)); +/// ``` +pub fn from_bytes<'de, T>(input: &'de [u8]) -> Result +where + T: de::Deserialize<'de>, +{ + T::deserialize(Deserializer::new(parse(input))) +} + +/// Deserializes a `application/x-www-form-urlencoded` value from a `&str`. 
+/// +/// ``` +/// let meal = vec![ +/// ("bread".to_owned(), "baguette".to_owned()), +/// ("cheese".to_owned(), "comté".to_owned()), +/// ("meat".to_owned(), "ham".to_owned()), +/// ("fat".to_owned(), "butter".to_owned()), +/// ]; +/// +/// assert_eq!( +/// serde_urlencoded::from_str::>( +/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"), +/// Ok(meal)); +/// ``` +pub fn from_str<'de, T>(input: &'de str) -> Result +where + T: de::Deserialize<'de>, +{ + from_bytes(input.as_bytes()) +} + +/// Convenience function that reads all bytes from `reader` and deserializes +/// them with `from_bytes`. +pub fn from_reader(mut reader: R) -> Result +where + T: de::DeserializeOwned, + R: Read, +{ + let mut buf = vec![]; + reader.read_to_end(&mut buf).map_err(|e| { + de::Error::custom(format_args!("could not read input: {}", e)) + })?; + from_bytes(&buf) +} + +/// A deserializer for the `application/x-www-form-urlencoded` format. +/// +/// * Supported top-level outputs are structs, maps and sequences of pairs, +/// with or without a given length. +/// +/// * Main `deserialize` methods defers to `deserialize_map`. +/// +/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size` +/// defers to `deserialize`. +pub struct Deserializer<'de> { + inner: MapDeserializer<'de, PartIterator<'de>, Error>, +} + +impl<'de> Deserializer<'de> { + /// Returns a new `Deserializer`. 
+ pub fn new(parser: UrlEncodedParse<'de>) -> Self { + Deserializer { + inner: MapDeserializer::new(PartIterator(parser)), + } + } +} + +impl<'de> de::Deserializer<'de> for Deserializer<'de> { + type Error = Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + self.deserialize_map(visitor) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_map(self.inner) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_seq(self.inner) + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + self.inner.end()?; + visitor.visit_unit() + } + + forward_to_deserialize_any! { + bool + u8 + u16 + u32 + u64 + i8 + i16 + i32 + i64 + f32 + f64 + char + str + string + option + bytes + byte_buf + unit_struct + newtype_struct + tuple_struct + struct + identifier + tuple + enum + ignored_any + } +} + +struct PartIterator<'de>(UrlEncodedParse<'de>); + +impl<'de> Iterator for PartIterator<'de> { + type Item = (Part<'de>, Part<'de>); + + fn next(&mut self) -> Option { + self.0.next().map(|(k, v)| (Part(k), Part(v))) + } +} + +struct Part<'de>(Cow<'de, str>); + +impl<'de> IntoDeserializer<'de> for Part<'de> { + type Deserializer = Self; + + fn into_deserializer(self) -> Self::Deserializer { + self + } +} + +macro_rules! 
forward_parsed_value { + ($($ty:ident => $method:ident,)*) => { + $( + fn $method(self, visitor: V) -> Result + where V: de::Visitor<'de> + { + match self.0.parse::<$ty>() { + Ok(val) => val.into_deserializer().$method(visitor), + Err(e) => Err(de::Error::custom(e)) + } + } + )* + } +} + +impl<'de> de::Deserializer<'de> for Part<'de> { + type Error = Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + match self.0 { + Cow::Borrowed(value) => visitor.visit_borrowed_str(value), + Cow::Owned(value) => visitor.visit_string(value), + } + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_some(self) + } + + fn deserialize_enum( + self, + _name: &'static str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_enum(ValueEnumAccess(self.0)) + } + + fn deserialize_newtype_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_newtype_struct(self) + } + + forward_to_deserialize_any! { + char + str + string + unit + bytes + byte_buf + unit_struct + tuple_struct + struct + identifier + tuple + ignored_any + seq + map + } + + forward_parsed_value! 
{ + bool => deserialize_bool, + u8 => deserialize_u8, + u16 => deserialize_u16, + u32 => deserialize_u32, + u64 => deserialize_u64, + i8 => deserialize_i8, + i16 => deserialize_i16, + i32 => deserialize_i32, + i64 => deserialize_i64, + f32 => deserialize_f32, + f64 => deserialize_f64, + } +} + +struct ValueEnumAccess<'de>(Cow<'de, str>); + +impl<'de> de::EnumAccess<'de> for ValueEnumAccess<'de> { + type Error = Error; + type Variant = UnitOnlyVariantAccess; + + fn variant_seed( + self, + seed: V, + ) -> Result<(V::Value, Self::Variant), Self::Error> + where + V: de::DeserializeSeed<'de>, + { + let variant = seed.deserialize(self.0.into_deserializer())?; + Ok((variant, UnitOnlyVariantAccess)) + } +} + +struct UnitOnlyVariantAccess; + +impl<'de> de::VariantAccess<'de> for UnitOnlyVariantAccess { + type Error = Error; + + fn unit_variant(self) -> Result<(), Self::Error> { + Ok(()) + } + + fn newtype_variant_seed(self, _seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + Err(Error::custom("expected unit variant")) + } + + fn tuple_variant( + self, + _len: usize, + _visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + Err(Error::custom("expected unit variant")) + } + + fn struct_variant( + self, + _fields: &'static [&'static str], + _visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + Err(Error::custom("expected unit variant")) + } +} diff --git a/.cargo-vendor/serde_urlencoded/src/lib.rs b/.cargo-vendor/serde_urlencoded/src/lib.rs new file mode 100644 index 0000000000..b7ccc783f5 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/src/lib.rs @@ -0,0 +1,12 @@ +//! 
`x-www-form-urlencoded` meets Serde + +#![warn(unused_extern_crates)] +#![forbid(unsafe_code)] + +pub mod de; +pub mod ser; + +#[doc(inline)] +pub use crate::de::{from_bytes, from_reader, from_str, Deserializer}; +#[doc(inline)] +pub use crate::ser::{to_string, Serializer}; diff --git a/.cargo-vendor/serde_urlencoded/src/ser/key.rs b/.cargo-vendor/serde_urlencoded/src/ser/key.rs new file mode 100644 index 0000000000..8128a64ebb --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/src/ser/key.rs @@ -0,0 +1,77 @@ +use crate::ser::part::Sink; +use crate::ser::Error; +use serde::Serialize; +use std::borrow::Cow; +use std::ops::Deref; + +pub enum Key<'key> { + Static(&'static str), + Dynamic(Cow<'key, str>), +} + +impl<'key> Deref for Key<'key> { + type Target = str; + + fn deref(&self) -> &str { + match *self { + Key::Static(key) => key, + Key::Dynamic(ref key) => key, + } + } +} + +impl<'key> From> for Cow<'static, str> { + fn from(key: Key<'key>) -> Self { + match key { + Key::Static(key) => key.into(), + Key::Dynamic(key) => key.into_owned().into(), + } + } +} + +pub struct KeySink { + end: End, +} + +impl KeySink +where + End: for<'key> FnOnce(Key<'key>) -> Result, +{ + pub fn new(end: End) -> Self { + KeySink { end } + } +} + +impl Sink for KeySink +where + End: for<'key> FnOnce(Key<'key>) -> Result, +{ + type Ok = Ok; + + fn serialize_static_str(self, value: &'static str) -> Result { + (self.end)(Key::Static(value)) + } + + fn serialize_str(self, value: &str) -> Result { + (self.end)(Key::Dynamic(value.into())) + } + + fn serialize_string(self, value: String) -> Result { + (self.end)(Key::Dynamic(value.into())) + } + + fn serialize_none(self) -> Result { + Err(self.unsupported()) + } + + fn serialize_some( + self, + _value: &T, + ) -> Result { + Err(self.unsupported()) + } + + fn unsupported(self) -> Error { + Error::Custom("unsupported key".into()) + } +} diff --git a/.cargo-vendor/serde_urlencoded/src/ser/mod.rs b/.cargo-vendor/serde_urlencoded/src/ser/mod.rs new 
file mode 100644 index 0000000000..d75b9022b2 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/src/ser/mod.rs @@ -0,0 +1,555 @@ +//! Serialization support for the `application/x-www-form-urlencoded` format. + +mod key; +mod pair; +mod part; +mod value; + +use form_urlencoded::Serializer as UrlEncodedSerializer; +use form_urlencoded::Target as UrlEncodedTarget; +use serde::ser; +use std::borrow::Cow; +use std::error; +use std::fmt; +use std::str; + +/// Serializes a value into a `application/x-www-form-urlencoded` `String` buffer. +/// +/// ``` +/// let meal = &[ +/// ("bread", "baguette"), +/// ("cheese", "comté"), +/// ("meat", "ham"), +/// ("fat", "butter"), +/// ]; +/// +/// assert_eq!( +/// serde_urlencoded::to_string(meal), +/// Ok("bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter".to_owned())); +/// ``` +pub fn to_string(input: T) -> Result { + let mut urlencoder = UrlEncodedSerializer::new("".to_owned()); + input.serialize(Serializer::new(&mut urlencoder))?; + Ok(urlencoder.finish()) +} + +/// A serializer for the `application/x-www-form-urlencoded` format. +/// +/// * Supported top-level inputs are structs, maps and sequences of pairs, +/// with or without a given length. +/// +/// * Supported keys and values are integers, bytes (if convertible to strings), +/// unit structs and unit variants. +/// +/// * Newtype structs defer to their inner values. +pub struct Serializer<'input, 'output, Target: UrlEncodedTarget> { + urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, +} + +impl<'input, 'output, Target: 'output + UrlEncodedTarget> + Serializer<'input, 'output, Target> +{ + /// Returns a new `Serializer`. + pub fn new( + urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, + ) -> Self { + Serializer { urlencoder } + } +} + +/// Errors returned during serializing to `application/x-www-form-urlencoded`. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Error { + Custom(Cow<'static, str>), + Utf8(str::Utf8Error), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Error::Custom(ref msg) => msg.fmt(f), + Error::Utf8(ref err) => write!(f, "invalid UTF-8: {}", err), + } + } +} + +impl error::Error for Error { + fn description(&self) -> &str { + match *self { + Error::Custom(ref msg) => msg, + Error::Utf8(ref err) => error::Error::description(err), + } + } + + /// The lower-level cause of this error, in the case of a `Utf8` error. + fn cause(&self) -> Option<&dyn error::Error> { + match *self { + Error::Custom(_) => None, + Error::Utf8(ref err) => Some(err), + } + } + + /// The lower-level source of this error, in the case of a `Utf8` error. + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match *self { + Error::Custom(_) => None, + Error::Utf8(ref err) => Some(err), + } + } +} + +impl ser::Error for Error { + fn custom(msg: T) -> Self { + Error::Custom(format!("{}", msg).into()) + } +} + +/// Sequence serializer. +pub struct SeqSerializer<'input, 'output, Target: UrlEncodedTarget> { + urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, +} + +/// Tuple serializer. +/// +/// Mostly used for arrays. +pub struct TupleSerializer<'input, 'output, Target: UrlEncodedTarget> { + urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, +} + +/// Tuple struct serializer. +/// +/// Never instantiated, tuple structs are not supported. +pub struct TupleStructSerializer<'input, 'output, T: UrlEncodedTarget> { + inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, +} + +/// Tuple variant serializer. +/// +/// Never instantiated, tuple variants are not supported. +pub struct TupleVariantSerializer<'input, 'output, T: UrlEncodedTarget> { + inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, +} + +/// Map serializer. 
+pub struct MapSerializer<'input, 'output, Target: UrlEncodedTarget> { + urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, + key: Option>, +} + +/// Struct serializer. +pub struct StructSerializer<'input, 'output, Target: UrlEncodedTarget> { + urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, +} + +/// Struct variant serializer. +/// +/// Never instantiated, struct variants are not supported. +pub struct StructVariantSerializer<'input, 'output, T: UrlEncodedTarget> { + inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, +} + +impl<'input, 'output, Target> ser::Serializer + for Serializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + type SerializeSeq = SeqSerializer<'input, 'output, Target>; + type SerializeTuple = TupleSerializer<'input, 'output, Target>; + type SerializeTupleStruct = TupleStructSerializer<'input, 'output, Target>; + type SerializeTupleVariant = + TupleVariantSerializer<'input, 'output, Target>; + type SerializeMap = MapSerializer<'input, 'output, Target>; + type SerializeStruct = StructSerializer<'input, 'output, Target>; + type SerializeStructVariant = + StructVariantSerializer<'input, 'output, Target>; + + /// Returns an error. + fn serialize_bool(self, _v: bool) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_i8(self, _v: i8) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_i16(self, _v: i16) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_i32(self, _v: i32) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_i64(self, _v: i64) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_u8(self, _v: u8) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. 
+ fn serialize_u16(self, _v: u16) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_u32(self, _v: u32) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_u64(self, _v: u64) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_f32(self, _v: f32) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_f64(self, _v: f64) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_char(self, _v: char) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_str(self, _value: &str) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_bytes(self, _value: &[u8]) -> Result { + Err(Error::top_level()) + } + + /// Returns `Ok`. + fn serialize_unit(self) -> Result { + Ok(self.urlencoder) + } + + /// Returns `Ok`. + fn serialize_unit_struct( + self, + _name: &'static str, + ) -> Result { + Ok(self.urlencoder) + } + + /// Returns an error. + fn serialize_unit_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + ) -> Result { + Err(Error::top_level()) + } + + /// Serializes the inner value, ignoring the newtype name. + fn serialize_newtype_struct( + self, + _name: &'static str, + value: &T, + ) -> Result { + value.serialize(self) + } + + /// Returns an error. + fn serialize_newtype_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _value: &T, + ) -> Result { + Err(Error::top_level()) + } + + /// Returns `Ok`. + fn serialize_none(self) -> Result { + Ok(self.urlencoder) + } + + /// Serializes the given value. + fn serialize_some( + self, + value: &T, + ) -> Result { + value.serialize(self) + } + + /// Serialize a sequence, given length (if any) is ignored. + fn serialize_seq( + self, + _len: Option, + ) -> Result { + Ok(SeqSerializer { + urlencoder: self.urlencoder, + }) + } + + /// Returns an error. 
+ fn serialize_tuple( + self, + _len: usize, + ) -> Result { + Ok(TupleSerializer { + urlencoder: self.urlencoder, + }) + } + + /// Returns an error. + fn serialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Err(Error::top_level()) + } + + /// Returns an error. + fn serialize_tuple_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Err(Error::top_level()) + } + + /// Serializes a map, given length is ignored. + fn serialize_map( + self, + _len: Option, + ) -> Result { + Ok(MapSerializer { + urlencoder: self.urlencoder, + key: None, + }) + } + + /// Serializes a struct, given length is ignored. + fn serialize_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Ok(StructSerializer { + urlencoder: self.urlencoder, + }) + } + + /// Returns an error. + fn serialize_struct_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Err(Error::top_level()) + } +} + +impl<'input, 'output, Target> ser::SerializeSeq + for SeqSerializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + + fn serialize_element( + &mut self, + value: &T, + ) -> Result<(), Error> { + value.serialize(pair::PairSerializer::new(self.urlencoder)) + } + + fn end(self) -> Result { + Ok(self.urlencoder) + } +} + +impl<'input, 'output, Target> ser::SerializeTuple + for TupleSerializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + + fn serialize_element( + &mut self, + value: &T, + ) -> Result<(), Error> { + value.serialize(pair::PairSerializer::new(self.urlencoder)) + } + + fn end(self) -> Result { + Ok(self.urlencoder) + } +} + +impl<'input, 'output, Target> ser::SerializeTupleStruct + for 
TupleStructSerializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + + fn serialize_field( + &mut self, + value: &T, + ) -> Result<(), Error> { + self.inner.serialize_field(value) + } + + fn end(self) -> Result { + self.inner.end() + } +} + +impl<'input, 'output, Target> ser::SerializeTupleVariant + for TupleVariantSerializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + + fn serialize_field( + &mut self, + value: &T, + ) -> Result<(), Error> { + self.inner.serialize_field(value) + } + + fn end(self) -> Result { + self.inner.end() + } +} + +impl<'input, 'output, Target> ser::SerializeMap + for MapSerializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + + fn serialize_entry< + K: ?Sized + ser::Serialize, + V: ?Sized + ser::Serialize, + >( + &mut self, + key: &K, + value: &V, + ) -> Result<(), Error> { + let key_sink = key::KeySink::new(|key| { + let value_sink = value::ValueSink::new(self.urlencoder, &key); + value.serialize(part::PartSerializer::new(value_sink))?; + self.key = None; + Ok(()) + }); + let entry_serializer = part::PartSerializer::new(key_sink); + key.serialize(entry_serializer) + } + + fn serialize_key( + &mut self, + key: &T, + ) -> Result<(), Error> { + let key_sink = key::KeySink::new(|key| Ok(key.into())); + let key_serializer = part::PartSerializer::new(key_sink); + self.key = Some(key.serialize(key_serializer)?); + Ok(()) + } + + fn serialize_value( + &mut self, + value: &T, + ) -> Result<(), Error> { + { + let key = self.key.as_ref().ok_or_else(Error::no_key)?; + let value_sink = value::ValueSink::new(self.urlencoder, &key); + value.serialize(part::PartSerializer::new(value_sink))?; + } + self.key = None; + 
Ok(()) + } + + fn end(self) -> Result { + Ok(self.urlencoder) + } +} + +impl<'input, 'output, Target> ser::SerializeStruct + for StructSerializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + + fn serialize_field( + &mut self, + key: &'static str, + value: &T, + ) -> Result<(), Error> { + let value_sink = value::ValueSink::new(self.urlencoder, key); + value.serialize(part::PartSerializer::new(value_sink)) + } + + fn end(self) -> Result { + Ok(self.urlencoder) + } +} + +impl<'input, 'output, Target> ser::SerializeStructVariant + for StructVariantSerializer<'input, 'output, Target> +where + Target: 'output + UrlEncodedTarget, +{ + type Ok = &'output mut UrlEncodedSerializer<'input, Target>; + type Error = Error; + + fn serialize_field( + &mut self, + key: &'static str, + value: &T, + ) -> Result<(), Error> { + self.inner.serialize_field(key, value) + } + + fn end(self) -> Result { + self.inner.end() + } +} + +impl Error { + fn top_level() -> Self { + let msg = "top-level serializer supports only maps and structs"; + Error::Custom(msg.into()) + } + + fn no_key() -> Self { + let msg = "tried to serialize a value before serializing key"; + Error::Custom(msg.into()) + } +} diff --git a/.cargo-vendor/serde_urlencoded/src/ser/pair.rs b/.cargo-vendor/serde_urlencoded/src/ser/pair.rs new file mode 100644 index 0000000000..429ce4b2bd --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/src/ser/pair.rs @@ -0,0 +1,271 @@ +use crate::ser::key::KeySink; +use crate::ser::part::PartSerializer; +use crate::ser::value::ValueSink; +use crate::ser::Error; +use form_urlencoded::Serializer as UrlEncodedSerializer; +use form_urlencoded::Target as UrlEncodedTarget; +use serde::ser; +use std::borrow::Cow; +use std::mem; + +pub struct PairSerializer<'input, 'target, Target: UrlEncodedTarget> { + urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, + state: PairState, +} + 
+impl<'input, 'target, Target> PairSerializer<'input, 'target, Target> +where + Target: 'target + UrlEncodedTarget, +{ + pub fn new( + urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, + ) -> Self { + PairSerializer { + urlencoder, + state: PairState::WaitingForKey, + } + } +} + +impl<'input, 'target, Target> ser::Serializer + for PairSerializer<'input, 'target, Target> +where + Target: 'target + UrlEncodedTarget, +{ + type Ok = (); + type Error = Error; + type SerializeSeq = ser::Impossible<(), Error>; + type SerializeTuple = Self; + type SerializeTupleStruct = ser::Impossible<(), Error>; + type SerializeTupleVariant = ser::Impossible<(), Error>; + type SerializeMap = ser::Impossible<(), Error>; + type SerializeStruct = ser::Impossible<(), Error>; + type SerializeStructVariant = ser::Impossible<(), Error>; + + fn serialize_bool(self, _v: bool) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_i8(self, _v: i8) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_i16(self, _v: i16) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_i32(self, _v: i32) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_i64(self, _v: i64) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_u8(self, _v: u8) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_u16(self, _v: u16) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_u32(self, _v: u32) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_u64(self, _v: u64) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_f32(self, _v: f32) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_f64(self, _v: f64) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_char(self, _v: char) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn 
serialize_str(self, _value: &str) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_bytes(self, _value: &[u8]) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_unit(self) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_unit_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + ) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_newtype_struct( + self, + _name: &'static str, + value: &T, + ) -> Result<(), Error> { + value.serialize(self) + } + + fn serialize_newtype_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _value: &T, + ) -> Result<(), Error> { + Err(Error::unsupported_pair()) + } + + fn serialize_none(self) -> Result<(), Error> { + Ok(()) + } + + fn serialize_some( + self, + value: &T, + ) -> Result<(), Error> { + value.serialize(self) + } + + fn serialize_seq( + self, + _len: Option, + ) -> Result { + Err(Error::unsupported_pair()) + } + + fn serialize_tuple(self, len: usize) -> Result { + if len == 2 { + Ok(self) + } else { + Err(Error::unsupported_pair()) + } + } + + fn serialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Err(Error::unsupported_pair()) + } + + fn serialize_tuple_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Err(Error::unsupported_pair()) + } + + fn serialize_map( + self, + _len: Option, + ) -> Result { + Err(Error::unsupported_pair()) + } + + fn serialize_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Err(Error::unsupported_pair()) + } + + fn serialize_struct_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + 
Err(Error::unsupported_pair()) + } +} + +impl<'input, 'target, Target> ser::SerializeTuple + for PairSerializer<'input, 'target, Target> +where + Target: 'target + UrlEncodedTarget, +{ + type Ok = (); + type Error = Error; + + fn serialize_element( + &mut self, + value: &T, + ) -> Result<(), Error> { + match mem::replace(&mut self.state, PairState::Done) { + PairState::WaitingForKey => { + let key_sink = KeySink::new(|key| Ok(key.into())); + let key_serializer = PartSerializer::new(key_sink); + self.state = PairState::WaitingForValue { + key: value.serialize(key_serializer)?, + }; + Ok(()) + } + PairState::WaitingForValue { key } => { + let result = { + let value_sink = ValueSink::new(self.urlencoder, &key); + let value_serializer = PartSerializer::new(value_sink); + value.serialize(value_serializer) + }; + if result.is_ok() { + self.state = PairState::Done; + } else { + self.state = PairState::WaitingForValue { key }; + } + result + } + PairState::Done => Err(Error::done()), + } + } + + fn end(self) -> Result<(), Error> { + if let PairState::Done = self.state { + Ok(()) + } else { + Err(Error::not_done()) + } + } +} + +enum PairState { + WaitingForKey, + WaitingForValue { key: Cow<'static, str> }, + Done, +} + +impl Error { + fn done() -> Self { + Error::Custom("this pair has already been serialized".into()) + } + + fn not_done() -> Self { + Error::Custom("this pair has not yet been serialized".into()) + } + + fn unsupported_pair() -> Self { + Error::Custom("unsupported pair".into()) + } +} diff --git a/.cargo-vendor/serde_urlencoded/src/ser/part.rs b/.cargo-vendor/serde_urlencoded/src/ser/part.rs new file mode 100644 index 0000000000..1deffa54b5 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/src/ser/part.rs @@ -0,0 +1,236 @@ +use crate::ser::Error; +use serde::ser; +use std::str; + +pub struct PartSerializer { + sink: S, +} + +impl PartSerializer { + pub fn new(sink: S) -> Self { + PartSerializer { sink } + } +} + +pub trait Sink: Sized { + type Ok; + + fn 
serialize_static_str( + self, + value: &'static str, + ) -> Result; + + fn serialize_str(self, value: &str) -> Result; + fn serialize_string(self, value: String) -> Result; + fn serialize_none(self) -> Result; + + fn serialize_some( + self, + value: &T, + ) -> Result; + + fn unsupported(self) -> Error; +} + +impl ser::Serializer for PartSerializer { + type Ok = S::Ok; + type Error = Error; + type SerializeSeq = ser::Impossible; + type SerializeTuple = ser::Impossible; + type SerializeTupleStruct = ser::Impossible; + type SerializeTupleVariant = ser::Impossible; + type SerializeMap = ser::Impossible; + type SerializeStruct = ser::Impossible; + type SerializeStructVariant = ser::Impossible; + + fn serialize_bool(self, v: bool) -> Result { + self.sink + .serialize_static_str(if v { "true" } else { "false" }) + } + + fn serialize_i8(self, v: i8) -> Result { + self.serialize_integer(v) + } + + fn serialize_i16(self, v: i16) -> Result { + self.serialize_integer(v) + } + + fn serialize_i32(self, v: i32) -> Result { + self.serialize_integer(v) + } + + fn serialize_i64(self, v: i64) -> Result { + self.serialize_integer(v) + } + + fn serialize_u8(self, v: u8) -> Result { + self.serialize_integer(v) + } + + fn serialize_u16(self, v: u16) -> Result { + self.serialize_integer(v) + } + + fn serialize_u32(self, v: u32) -> Result { + self.serialize_integer(v) + } + + fn serialize_u64(self, v: u64) -> Result { + self.serialize_integer(v) + } + + fn serialize_u128(self, v: u128) -> Result { + self.serialize_integer(v) + } + + fn serialize_i128(self, v: i128) -> Result { + self.serialize_integer(v) + } + + fn serialize_f32(self, v: f32) -> Result { + self.serialize_floating(v) + } + + fn serialize_f64(self, v: f64) -> Result { + self.serialize_floating(v) + } + + fn serialize_char(self, v: char) -> Result { + self.sink.serialize_string(v.to_string()) + } + + fn serialize_str(self, value: &str) -> Result { + self.sink.serialize_str(value) + } + + fn serialize_bytes(self, value: &[u8]) 
-> Result { + match str::from_utf8(value) { + Ok(value) => self.sink.serialize_str(value), + Err(err) => Err(Error::Utf8(err)), + } + } + + fn serialize_unit(self) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_unit_struct(self, name: &'static str) -> Result { + self.sink.serialize_static_str(name) + } + + fn serialize_unit_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + ) -> Result { + self.sink.serialize_static_str(variant) + } + + fn serialize_newtype_struct( + self, + _name: &'static str, + value: &T, + ) -> Result { + value.serialize(self) + } + + fn serialize_newtype_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _value: &T, + ) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_none(self) -> Result { + self.sink.serialize_none() + } + + fn serialize_some( + self, + value: &T, + ) -> Result { + self.sink.serialize_some(value) + } + + fn serialize_seq( + self, + _len: Option, + ) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_tuple( + self, + _len: usize, + ) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_tuple_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_map( + self, + _len: Option, + ) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Err(self.sink.unsupported()) + } + + fn serialize_struct_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Err(self.sink.unsupported()) + } +} + +impl PartSerializer { + fn serialize_integer(self, value: I) -> Result + where + I: itoa::Integer, + { + let mut buf = 
itoa::Buffer::new(); + let part = buf.format(value); + ser::Serializer::serialize_str(self, part) + } + + fn serialize_floating(self, value: F) -> Result + where + F: ryu::Float, + { + let mut buf = ryu::Buffer::new(); + let part = buf.format(value); + ser::Serializer::serialize_str(self, part) + } +} diff --git a/.cargo-vendor/serde_urlencoded/src/ser/value.rs b/.cargo-vendor/serde_urlencoded/src/ser/value.rs new file mode 100644 index 0000000000..e8823ce703 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/src/ser/value.rs @@ -0,0 +1,62 @@ +use crate::ser::part::{PartSerializer, Sink}; +use crate::ser::Error; +use form_urlencoded::Serializer as UrlEncodedSerializer; +use form_urlencoded::Target as UrlEncodedTarget; +use serde::ser::Serialize; +use std::str; + +pub struct ValueSink<'input, 'key, 'target, Target> +where + Target: UrlEncodedTarget, +{ + urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, + key: &'key str, +} + +impl<'input, 'key, 'target, Target> ValueSink<'input, 'key, 'target, Target> +where + Target: 'target + UrlEncodedTarget, +{ + pub fn new( + urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, + key: &'key str, + ) -> Self { + ValueSink { urlencoder, key } + } +} + +impl<'input, 'key, 'target, Target> Sink + for ValueSink<'input, 'key, 'target, Target> +where + Target: 'target + UrlEncodedTarget, +{ + type Ok = (); + + fn serialize_str(self, value: &str) -> Result<(), Error> { + self.urlencoder.append_pair(self.key, value); + Ok(()) + } + + fn serialize_static_str(self, value: &'static str) -> Result<(), Error> { + self.serialize_str(value) + } + + fn serialize_string(self, value: String) -> Result<(), Error> { + self.serialize_str(&value) + } + + fn serialize_none(self) -> Result { + Ok(()) + } + + fn serialize_some( + self, + value: &T, + ) -> Result { + value.serialize(PartSerializer::new(self)) + } + + fn unsupported(self) -> Error { + Error::Custom("unsupported value".into()) + } +} diff --git 
a/.cargo-vendor/serde_urlencoded/tests/test_deserialize.rs b/.cargo-vendor/serde_urlencoded/tests/test_deserialize.rs new file mode 100644 index 0000000000..00700d3060 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/tests/test_deserialize.rs @@ -0,0 +1,88 @@ +use serde_derive::Deserialize; + +#[derive(Deserialize, Debug, PartialEq)] +struct NewType(T); + +#[test] +fn deserialize_newtype_i32() { + let result = vec![("field".to_owned(), NewType(11))]; + + assert_eq!(serde_urlencoded::from_str("field=11"), Ok(result)); +} + +#[test] +fn deserialize_bytes() { + let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)]; + + assert_eq!( + serde_urlencoded::from_bytes(b"first=23&last=42"), + Ok(result) + ); +} + +#[test] +fn deserialize_str() { + let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)]; + + assert_eq!(serde_urlencoded::from_str("first=23&last=42"), Ok(result)); +} + +#[test] +fn deserialize_borrowed_str() { + let result = vec![("first", 23), ("last", 42)]; + + assert_eq!(serde_urlencoded::from_str("first=23&last=42"), Ok(result)); +} + +#[test] +fn deserialize_reader() { + let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)]; + + assert_eq!( + serde_urlencoded::from_reader(b"first=23&last=42" as &[_]), + Ok(result) + ); +} + +#[test] +fn deserialize_option() { + let result = vec![ + ("first".to_owned(), Some(23)), + ("last".to_owned(), Some(42)), + ]; + assert_eq!(serde_urlencoded::from_str("first=23&last=42"), Ok(result)); +} + +#[test] +fn deserialize_unit() { + assert_eq!(serde_urlencoded::from_str(""), Ok(())); + assert_eq!(serde_urlencoded::from_str("&"), Ok(())); + assert_eq!(serde_urlencoded::from_str("&&"), Ok(())); + assert!(serde_urlencoded::from_str::<()>("first=23").is_err()); +} + +#[derive(Deserialize, Debug, PartialEq, Eq)] +enum X { + A, + B, + C, +} + +#[test] +fn deserialize_unit_enum() { + let result = vec![ + ("one".to_owned(), X::A), + ("two".to_owned(), X::B), + ("three".to_owned(), X::C), + 
]; + + assert_eq!( + serde_urlencoded::from_str("one=A&two=B&three=C"), + Ok(result) + ); +} + +#[test] +fn deserialize_unit_type() { + assert_eq!(serde_urlencoded::from_str(""), Ok(())); +} diff --git a/.cargo-vendor/serde_urlencoded/tests/test_serialize.rs b/.cargo-vendor/serde_urlencoded/tests/test_serialize.rs new file mode 100644 index 0000000000..abb4907a46 --- /dev/null +++ b/.cargo-vendor/serde_urlencoded/tests/test_serialize.rs @@ -0,0 +1,104 @@ +use serde_derive::Serialize; + +#[derive(Serialize)] +struct NewType(T); + +#[test] +fn serialize_newtype_i32() { + let params = &[("field", Some(NewType(11)))]; + assert_eq!( + serde_urlencoded::to_string(params), + Ok("field=11".to_owned()) + ); +} + +#[test] +fn serialize_newtype_u128() { + let params = &[("field", Some(NewType(u128::MAX)))]; + assert_eq!( + serde_urlencoded::to_string(params), + Ok(format!("field={}", u128::MAX)) + ); +} + +#[test] +fn serialize_newtype_i128() { + let params = &[("field", Some(NewType(i128::MIN)))]; + assert_eq!( + serde_urlencoded::to_string(params), + Ok(format!("field={}", i128::MIN)) + ); +} + +#[test] +fn serialize_option_map_int() { + let params = &[("first", Some(23)), ("middle", None), ("last", Some(42))]; + + assert_eq!( + serde_urlencoded::to_string(params), + Ok("first=23&last=42".to_owned()) + ); +} + +#[test] +fn serialize_option_map_string() { + let params = &[ + ("first", Some("hello")), + ("middle", None), + ("last", Some("world")), + ]; + + assert_eq!( + serde_urlencoded::to_string(params), + Ok("first=hello&last=world".to_owned()) + ); +} + +#[test] +fn serialize_option_map_bool() { + let params = &[("one", Some(true)), ("two", Some(false))]; + + assert_eq!( + serde_urlencoded::to_string(params), + Ok("one=true&two=false".to_owned()) + ); +} + +#[test] +fn serialize_map_bool() { + let params = &[("one", true), ("two", false)]; + + assert_eq!( + serde_urlencoded::to_string(params), + Ok("one=true&two=false".to_owned()) + ); +} + +#[derive(Serialize)] +enum X { 
+ A, + B, + C, +} + +#[test] +fn serialize_unit_enum() { + let params = &[("one", X::A), ("two", X::B), ("three", X::C)]; + assert_eq!( + serde_urlencoded::to_string(params), + Ok("one=A&two=B&three=C".to_owned()) + ); +} + +#[derive(Serialize)] +struct Unit; + +#[test] +fn serialize_unit_struct() { + assert_eq!(serde_urlencoded::to_string(Unit), Ok("".to_owned())); +} + +#[test] +fn serialize_unit_type() { + assert_eq!(serde_urlencoded::to_string(()), Ok("".to_owned())); +} diff --git a/.cargo-vendor/sha1/.cargo-checksum.json b/.cargo-vendor/sha1/.cargo-checksum.json new file mode 100644 index 0000000000..7fa32e2439 --- /dev/null +++ b/.cargo-vendor/sha1/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"7e9245847760fa99eaab7e4864a199148387aaa6121ac3dfbe189f0b684cf7d2","Cargo.toml":"cbfdd2dcaf34776f686caf30420c0a32a13d032fdd62495a054e65c3b0769132","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"b4eb00df6e2a4d22518fcaa6a2b4646f249b3a3c9814509b22bd2091f1392ff1","README.md":"8028a4064be59cba366c1bafa682002b52891a3cc2d2169d8f38064ba69773d1","benches/mod.rs":"2765aec429270711235d6cd4911d0c21658e2fa3dfbe24559f24e21b056b507c","src/compress.rs":"fcd99dc9c137e1fc05e47d406adfeeafccb31bed981e7c8bf480d765fb36e877","src/compress/aarch64.rs":"b1e66c2df53eaf84ccf7466aed47c0718fc3dbe1d1c565b65abd60e84b6fc5d4","src/compress/loongarch64_asm.rs":"0fd7d61b5ca7d7bce40cd9ce9012eb787da9f9df87595bfe00f8d882267d182e","src/compress/soft.rs":"bc6dda22b15acd76d97e202a8b251d9c68441d426a71600b2a61c36dd6f3a08e","src/compress/x86.rs":"9d2b4f240a311a84b351c136cccdc4dc00a7fc35703146ad51ed2d6e0aaef2d0","src/lib.rs":"c3409d489833cf896d611baf0ca0e5958629fc9e5f91a1144e789dc1858c513c","tests/data/sha1.blb":"b9c03b9e56e0a7b08a6d6867599a33cab1a55aec3f41fef910c133fc35dc2851","tests/mod.rs":"34bb42bf4679b3f23ffc2338356c8579c872026eef9989955a29ba44a432c3b5"},"package":"e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"} \ No 
newline at end of file diff --git a/.cargo-vendor/sha1/CHANGELOG.md b/.cargo-vendor/sha1/CHANGELOG.md new file mode 100644 index 0000000000..429e677276 --- /dev/null +++ b/.cargo-vendor/sha1/CHANGELOG.md @@ -0,0 +1,48 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 0.10.6 (2023-09-21) +### Added +- `asm!`-based backend for LoongArch64 targets gated behind `loongarch64_asm` feature [#504] + +[#504]: https://github.com/RustCrypto/hashes/pull/504 + +## 0.10.5 (2022-09-16) +### Added +- Feature-gated OID support ([#405]) + +[#405]: https://github.com/RustCrypto/hashes/pull/405 + +## 0.10.4 (2022-09-02) +### Fixed +- MSRV issue which was not resolved by v0.10.3 ([#401]) + +[#401]: https://github.com/RustCrypto/hashes/pull/401 + +## 0.10.3 (2022-09-02) +### Fixed +- MSRV issue caused by publishing v0.10.2 using a buggy Nightly toolchain ([#399]) + +[#399]: https://github.com/RustCrypto/hashes/pull/399 + +## 0.10.2 (2022-08-30) +### Changed +- Ignore `asm` feature on unsupported targets ([#388]) + +[#388]: https://github.com/RustCrypto/hashes/pull/388 + +## 0.10.1 (2022-02-17) +### Fixed +- Minimal versions build ([#363]) + +[#363]: https://github.com/RustCrypto/hashes/pull/363 + +## 0.10.0 (2022-01-17) +### Changed +- The crate is transferred to the RustCrypto organization. New implementation is identical to the `sha-1 v0.10.0` crate and expressed in terms of traits from the `digest` crate. 
([#350]) + +[#350]: https://github.com/RustCrypto/hashes/pull/350 diff --git a/.cargo-vendor/sha1/Cargo.toml b/.cargo-vendor/sha1/Cargo.toml new file mode 100644 index 0000000000..8ddd909738 --- /dev/null +++ b/.cargo-vendor/sha1/Cargo.toml @@ -0,0 +1,67 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "sha1" +version = "0.10.6" +authors = ["RustCrypto Developers"] +description = "SHA-1 hash function" +documentation = "https://docs.rs/sha1" +readme = "README.md" +keywords = [ + "crypto", + "sha1", + "hash", + "digest", +] +categories = [ + "cryptography", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/RustCrypto/hashes" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[dependencies.cfg-if] +version = "1.0" + +[dependencies.digest] +version = "0.10.7" + +[dev-dependencies.digest] +version = "0.10.7" +features = ["dev"] + +[dev-dependencies.hex-literal] +version = "0.2.2" + +[features] +asm = ["sha1-asm"] +compress = [] +default = ["std"] +force-soft = [] +loongarch64_asm = [] +oid = ["digest/oid"] +std = ["digest/std"] + +[target."cfg(any(target_arch = \"aarch64\", target_arch = \"x86\", target_arch = \"x86_64\"))".dependencies.cpufeatures] +version = "0.2" + +[target."cfg(any(target_arch = \"aarch64\", target_arch = \"x86\", target_arch = \"x86_64\"))".dependencies.sha1-asm] +version = "0.5" +optional = true diff --git a/.cargo-vendor/sha1/LICENSE-APACHE b/.cargo-vendor/sha1/LICENSE-APACHE new file mode 100644 index 
0000000000..78173fa2e7 --- /dev/null +++ b/.cargo-vendor/sha1/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, 
in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/.cargo-vendor/sha1/LICENSE-MIT b/.cargo-vendor/sha1/LICENSE-MIT new file mode 100644 index 0000000000..66cf75563b --- /dev/null +++ b/.cargo-vendor/sha1/LICENSE-MIT @@ -0,0 +1,27 @@ +Copyright (c) 2006-2009 Graydon Hoare +Copyright (c) 2009-2013 Mozilla Foundation +Copyright (c) 2016 Artyom Pavlov + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo-vendor/sha1/README.md b/.cargo-vendor/sha1/README.md new file mode 100644 index 0000000000..bd76f0973c --- /dev/null +++ b/.cargo-vendor/sha1/README.md @@ -0,0 +1,65 @@ +# RustCrypto: SHA-1 + +[![crate][crate-image]][crate-link] +[![Docs][docs-image]][docs-link] +![Apache2/MIT licensed][license-image] +![Rust Version][rustc-image] +[![Project Chat][chat-image]][chat-link] +[![Build Status][build-image]][build-link] + +Pure Rust implementation of the [SHA-1 hash function][1]. 
+ +[Documentation][docs-link] + +## 🚨 Warning: Cryptographically Broken 🚨 + +The SHA-1 hash function should be considered cryptographically broken and +unsuitable for further use in any security critical capacity, as it is +[practically vulnerable to chosen-prefix collisions][2]. + +We provide this crate for legacy interoperability purposes only. + +## Minimum Supported Rust Version + +Rust **1.41** or higher. + +Minimum supported Rust version can be changed in the future, but it will be +done with a minor version bump. + +## SemVer Policy + +- All on-by-default features of this library are covered by SemVer +- MSRV is considered exempt from SemVer as noted above + +## License + +Licensed under either of: + + * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + * [MIT license](http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
+ +[//]: # (badges) + +[crate-image]: https://img.shields.io/crates/v/sha1.svg +[crate-link]: https://crates.io/crates/sha1 +[docs-image]: https://docs.rs/sha1/badge.svg +[docs-link]: https://docs.rs/sha1/ +[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg +[rustc-image]: https://img.shields.io/badge/rustc-1.41+-blue.svg +[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg +[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/260041-hashes +[build-image]: https://github.com/RustCrypto/hashes/workflows/sha1/badge.svg?branch=master +[build-link]: https://github.com/RustCrypto/hashes/actions?query=workflow%3Asha1 + +[//]: # (general links) + +[1]: https://en.wikipedia.org/wiki/SHA-1 +[2]: https://sha-mbles.github.io/ diff --git a/.cargo-vendor/sha1/benches/mod.rs b/.cargo-vendor/sha1/benches/mod.rs new file mode 100644 index 0000000000..5c52e954cb --- /dev/null +++ b/.cargo-vendor/sha1/benches/mod.rs @@ -0,0 +1,14 @@ +#![feature(test)] +extern crate test; + +use digest::bench_update; +use sha1::Sha1; +use test::Bencher; + +bench_update!( + Sha1::default(); + sha1_10 10; + sha1_100 100; + sha1_1000 1000; + sha1_10000 10000; +); diff --git a/.cargo-vendor/sha1/src/compress.rs b/.cargo-vendor/sha1/src/compress.rs new file mode 100644 index 0000000000..6f7e40c413 --- /dev/null +++ b/.cargo-vendor/sha1/src/compress.rs @@ -0,0 +1,40 @@ +use crate::{Block, BlockSizeUser, Sha1Core}; +use digest::typenum::Unsigned; + +cfg_if::cfg_if! 
{ + if #[cfg(feature = "force-soft")] { + mod soft; + use soft::compress as compress_inner; + } else if #[cfg(all(feature = "asm", target_arch = "aarch64"))] { + mod soft; + mod aarch64; + use aarch64::compress as compress_inner; + } else if #[cfg(all(feature = "loongarch64_asm", target_arch = "loongarch64"))] { + mod loongarch64_asm; + use loongarch64_asm::compress as compress_inner; + } else if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { + #[cfg(not(feature = "asm"))] + mod soft; + #[cfg(feature = "asm")] + mod soft { + pub use sha1_asm::compress; + } + mod x86; + use x86::compress as compress_inner; + } else { + mod soft; + use soft::compress as compress_inner; + } +} + +const BLOCK_SIZE: usize = ::BlockSize::USIZE; + +/// SHA-1 compression function +#[cfg_attr(docsrs, doc(cfg(feature = "compress")))] +pub fn compress(state: &mut [u32; 5], blocks: &[Block]) { + // SAFETY: GenericArray and [u8; 64] have + // exactly the same memory layout + let blocks: &[[u8; BLOCK_SIZE]] = + unsafe { &*(blocks as *const _ as *const [[u8; BLOCK_SIZE]]) }; + compress_inner(state, blocks); +} diff --git a/.cargo-vendor/sha1/src/compress/aarch64.rs b/.cargo-vendor/sha1/src/compress/aarch64.rs new file mode 100644 index 0000000000..5952d1f624 --- /dev/null +++ b/.cargo-vendor/sha1/src/compress/aarch64.rs @@ -0,0 +1,18 @@ +//! SHA-1 `aarch64` backend. + +// Per rustc target feature docs for `aarch64-unknown-linux-gnu` and +// `aarch64-apple-darwin` platforms, the `sha2` target feature enables +// SHA-1 as well: +// +// > Enable SHA1 and SHA256 support. 
+cpufeatures::new!(sha1_hwcap, "sha2"); + +pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) { + // TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725 + // after stabilization + if sha1_hwcap::get() { + sha1_asm::compress(state, blocks); + } else { + super::soft::compress(state, blocks); + } +} diff --git a/.cargo-vendor/sha1/src/compress/loongarch64_asm.rs b/.cargo-vendor/sha1/src/compress/loongarch64_asm.rs new file mode 100644 index 0000000000..facef1b195 --- /dev/null +++ b/.cargo-vendor/sha1/src/compress/loongarch64_asm.rs @@ -0,0 +1,255 @@ +//! LoongArch64 assembly backend + +use core::arch::asm; + +const K: [u32; 4] = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6]; + +macro_rules! c { + ($($l:expr)*) => { + concat!($($l ,)*) + }; +} + +macro_rules! round0a { + ($a:literal, $b:literal, $c:literal, $d:literal, $e:literal, $i:literal) => { + c!( + "ld.w $t5, $a1, (" $i " * 4);" + "revb.2h $t5, $t5;" + "rotri.w $t5, $t5, 16;" + "add.w " $e ", " $e ", $t5;" + "st.w $t5, $sp, (" $i " * 4);" + "xor $t5, " $c "," $d ";" + "and $t5, $t5, " $b ";" + "xor $t5, $t5, " $d ";" + roundtail!($a, $b, $e, $i, "$a4") + ) + }; +} + +macro_rules! scheldule { + ($i:literal, $e:literal) => { + c!( + "ld.w $t5, $sp, (((" $i " - 3) & 0xF) * 4);" + "ld.w $t6, $sp, (((" $i " - 8) & 0xF) * 4);" + "ld.w $t7, $sp, (((" $i " - 14) & 0xF) * 4);" + "ld.w $t8, $sp, (((" $i " - 16) & 0xF) * 4);" + "xor $t5, $t5, $t6;" + "xor $t5, $t5, $t7;" + "xor $t5, $t5, $t8;" + "rotri.w $t5, $t5, 31;" + "add.w " $e "," $e ", $t5;" + "st.w $t5, $sp, ((" $i " & 0xF) * 4);" + ) + }; +} + +macro_rules! round0b { + ($a:literal, $b:literal, $c:literal, $d:literal, $e:literal, $i:literal) => { + c!( + scheldule!($i, $e) + "xor $t5," $c "," $d ";" + "and $t5, $t5," $b ";" + "xor $t5, $t5," $d ";" + roundtail!($a, $b, $e, $i, "$a4") + ) + }; +} + +macro_rules! 
round1 { + ($a:literal, $b:literal, $c:literal, $d:literal, $e:literal, $i:literal) => { + c!( + scheldule!($i, $e) + "xor $t5," $b "," $c ";" + "xor $t5, $t5," $d ";" + roundtail!($a, $b, $e, $i, "$a5") + ) + }; +} + +macro_rules! round2 { + ($a:literal, $b:literal, $c:literal, $d:literal, $e:literal, $i:literal) => { + c!( + scheldule!($i, $e) + "or $t5," $c "," $d ";" + "and $t5, $t5, " $b ";" + "and $t7," $c "," $d ";" + "or $t5, $t5, $t7;" + roundtail!($a, $b, $e, $i, "$a6") + ) + }; +} + +macro_rules! round3 { + ($a:literal, $b:literal, $c:literal, $d:literal, $e:literal, $i:literal) => { + c!( + scheldule!($i, $e) + "xor $t5," $b "," $c ";" + "xor $t5, $t5," $d ";" + roundtail!($a, $b, $e, $i, "$a7") + ) + }; +} + +macro_rules! roundtail { + ($a:literal, $b:literal, $e:literal, $i:literal, $k:literal) => { + c!( + "rotri.w " $b "," $b ", 2;" + "add.w " $e "," $e ", $t5;" + "add.w " $e "," $e "," $k ";" + "rotri.w $t5," $a ", 27;" + "add.w " $e "," $e ", $t5;" + ) + }; +} + +pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) { + if blocks.is_empty() { + return; + } + + unsafe { + asm!( + // Allocate scratch stack space + "addi.d $sp, $sp, -64;", + + // Load state + "ld.w $t0, $a0, 0", + "ld.w $t1, $a0, 4", + "ld.w $t2, $a0, 8", + "ld.w $t3, $a0, 12", + "ld.w $t4, $a0, 16", + + "42:", + + round0a!("$t0", "$t1", "$t2", "$t3", "$t4", 0), + round0a!("$t4", "$t0", "$t1", "$t2", "$t3", 1), + round0a!("$t3", "$t4", "$t0", "$t1", "$t2", 2), + round0a!("$t2", "$t3", "$t4", "$t0", "$t1", 3), + round0a!("$t1", "$t2", "$t3", "$t4", "$t0", 4), + round0a!("$t0", "$t1", "$t2", "$t3", "$t4", 5), + round0a!("$t4", "$t0", "$t1", "$t2", "$t3", 6), + round0a!("$t3", "$t4", "$t0", "$t1", "$t2", 7), + round0a!("$t2", "$t3", "$t4", "$t0", "$t1", 8), + round0a!("$t1", "$t2", "$t3", "$t4", "$t0", 9), + round0a!("$t0", "$t1", "$t2", "$t3", "$t4", 10), + round0a!("$t4", "$t0", "$t1", "$t2", "$t3", 11), + round0a!("$t3", "$t4", "$t0", "$t1", "$t2", 12), + round0a!("$t2", "$t3", 
"$t4", "$t0", "$t1", 13), + round0a!("$t1", "$t2", "$t3", "$t4", "$t0", 14), + round0a!("$t0", "$t1", "$t2", "$t3", "$t4", 15), + round0b!("$t4", "$t0", "$t1", "$t2", "$t3", 16), + round0b!("$t3", "$t4", "$t0", "$t1", "$t2", 17), + round0b!("$t2", "$t3", "$t4", "$t0", "$t1", 18), + round0b!("$t1", "$t2", "$t3", "$t4", "$t0", 19), + round1!("$t0", "$t1", "$t2", "$t3", "$t4", 20), + round1!("$t4", "$t0", "$t1", "$t2", "$t3", 21), + round1!("$t3", "$t4", "$t0", "$t1", "$t2", 22), + round1!("$t2", "$t3", "$t4", "$t0", "$t1", 23), + round1!("$t1", "$t2", "$t3", "$t4", "$t0", 24), + round1!("$t0", "$t1", "$t2", "$t3", "$t4", 25), + round1!("$t4", "$t0", "$t1", "$t2", "$t3", 26), + round1!("$t3", "$t4", "$t0", "$t1", "$t2", 27), + round1!("$t2", "$t3", "$t4", "$t0", "$t1", 28), + round1!("$t1", "$t2", "$t3", "$t4", "$t0", 29), + round1!("$t0", "$t1", "$t2", "$t3", "$t4", 30), + round1!("$t4", "$t0", "$t1", "$t2", "$t3", 31), + round1!("$t3", "$t4", "$t0", "$t1", "$t2", 32), + round1!("$t2", "$t3", "$t4", "$t0", "$t1", 33), + round1!("$t1", "$t2", "$t3", "$t4", "$t0", 34), + round1!("$t0", "$t1", "$t2", "$t3", "$t4", 35), + round1!("$t4", "$t0", "$t1", "$t2", "$t3", 36), + round1!("$t3", "$t4", "$t0", "$t1", "$t2", 37), + round1!("$t2", "$t3", "$t4", "$t0", "$t1", 38), + round1!("$t1", "$t2", "$t3", "$t4", "$t0", 39), + round2!("$t0", "$t1", "$t2", "$t3", "$t4", 40), + round2!("$t4", "$t0", "$t1", "$t2", "$t3", 41), + round2!("$t3", "$t4", "$t0", "$t1", "$t2", 42), + round2!("$t2", "$t3", "$t4", "$t0", "$t1", 43), + round2!("$t1", "$t2", "$t3", "$t4", "$t0", 44), + round2!("$t0", "$t1", "$t2", "$t3", "$t4", 45), + round2!("$t4", "$t0", "$t1", "$t2", "$t3", 46), + round2!("$t3", "$t4", "$t0", "$t1", "$t2", 47), + round2!("$t2", "$t3", "$t4", "$t0", "$t1", 48), + round2!("$t1", "$t2", "$t3", "$t4", "$t0", 49), + round2!("$t0", "$t1", "$t2", "$t3", "$t4", 50), + round2!("$t4", "$t0", "$t1", "$t2", "$t3", 51), + round2!("$t3", "$t4", "$t0", "$t1", "$t2", 52), + round2!("$t2", 
"$t3", "$t4", "$t0", "$t1", 53), + round2!("$t1", "$t2", "$t3", "$t4", "$t0", 54), + round2!("$t0", "$t1", "$t2", "$t3", "$t4", 55), + round2!("$t4", "$t0", "$t1", "$t2", "$t3", 56), + round2!("$t3", "$t4", "$t0", "$t1", "$t2", 57), + round2!("$t2", "$t3", "$t4", "$t0", "$t1", 58), + round2!("$t1", "$t2", "$t3", "$t4", "$t0", 59), + round3!("$t0", "$t1", "$t2", "$t3", "$t4", 60), + round3!("$t4", "$t0", "$t1", "$t2", "$t3", 61), + round3!("$t3", "$t4", "$t0", "$t1", "$t2", 62), + round3!("$t2", "$t3", "$t4", "$t0", "$t1", 63), + round3!("$t1", "$t2", "$t3", "$t4", "$t0", 64), + round3!("$t0", "$t1", "$t2", "$t3", "$t4", 65), + round3!("$t4", "$t0", "$t1", "$t2", "$t3", 66), + round3!("$t3", "$t4", "$t0", "$t1", "$t2", 67), + round3!("$t2", "$t3", "$t4", "$t0", "$t1", 68), + round3!("$t1", "$t2", "$t3", "$t4", "$t0", 69), + round3!("$t0", "$t1", "$t2", "$t3", "$t4", 70), + round3!("$t4", "$t0", "$t1", "$t2", "$t3", 71), + round3!("$t3", "$t4", "$t0", "$t1", "$t2", 72), + round3!("$t2", "$t3", "$t4", "$t0", "$t1", 73), + round3!("$t1", "$t2", "$t3", "$t4", "$t0", 74), + round3!("$t0", "$t1", "$t2", "$t3", "$t4", 75), + round3!("$t4", "$t0", "$t1", "$t2", "$t3", 76), + round3!("$t3", "$t4", "$t0", "$t1", "$t2", 77), + round3!("$t2", "$t3", "$t4", "$t0", "$t1", 78), + round3!("$t1", "$t2", "$t3", "$t4", "$t0", 79), + + // Update state registers + "ld.w $t5, $a0, 0", // a + "ld.w $t6, $a0, 4", // b + "ld.w $t7, $a0, 8", // c + "ld.w $t8, $a0, 12", // d + "add.w $t0, $t0, $t5", + "ld.w $t5, $a0, 16", // e + "add.w $t1, $t1, $t6", + "add.w $t2, $t2, $t7", + "add.w $t3, $t3, $t8", + "add.w $t4, $t4, $t5", + + // Save updated state + "st.w $t0, $a0, 0", + "st.w $t1, $a0, 4", + "st.w $t2, $a0, 8", + "st.w $t3, $a0, 12", + "st.w $t4, $a0, 16", + + // Looping over blocks + "addi.d $a1, $a1, 64", + "addi.d $a2, $a2, -1", + "bnez $a2, 42b", + + // Restore stack register + "addi.d $sp, $sp, 64", + + in("$a0") state, + inout("$a1") blocks.as_ptr() => _, + inout("$a2") blocks.len() 
=> _, + + in("$a4") K[0], + in("$a5") K[1], + in("$a6") K[2], + in("$a7") K[3], + + // Clobbers + out("$t0") _, + out("$t1") _, + out("$t2") _, + out("$t3") _, + out("$t4") _, + out("$t5") _, + out("$t6") _, + out("$t7") _, + out("$t8") _, + + options(preserves_flags), + ); + } +} diff --git a/.cargo-vendor/sha1/src/compress/soft.rs b/.cargo-vendor/sha1/src/compress/soft.rs new file mode 100644 index 0000000000..0b9fb27014 --- /dev/null +++ b/.cargo-vendor/sha1/src/compress/soft.rs @@ -0,0 +1,260 @@ +#![allow(clippy::many_single_char_names)] +use super::BLOCK_SIZE; +use core::convert::TryInto; + +const K: [u32; 4] = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6]; + +#[inline(always)] +fn add(a: [u32; 4], b: [u32; 4]) -> [u32; 4] { + [ + a[0].wrapping_add(b[0]), + a[1].wrapping_add(b[1]), + a[2].wrapping_add(b[2]), + a[3].wrapping_add(b[3]), + ] +} + +#[inline(always)] +fn xor(a: [u32; 4], b: [u32; 4]) -> [u32; 4] { + [a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]] +} + +#[inline] +pub fn sha1_first_add(e: u32, w0: [u32; 4]) -> [u32; 4] { + let [a, b, c, d] = w0; + [e.wrapping_add(a), b, c, d] +} + +fn sha1msg1(a: [u32; 4], b: [u32; 4]) -> [u32; 4] { + let [_, _, w2, w3] = a; + let [w4, w5, _, _] = b; + [a[0] ^ w2, a[1] ^ w3, a[2] ^ w4, a[3] ^ w5] +} + +fn sha1msg2(a: [u32; 4], b: [u32; 4]) -> [u32; 4] { + let [x0, x1, x2, x3] = a; + let [_, w13, w14, w15] = b; + + let w16 = (x0 ^ w13).rotate_left(1); + let w17 = (x1 ^ w14).rotate_left(1); + let w18 = (x2 ^ w15).rotate_left(1); + let w19 = (x3 ^ w16).rotate_left(1); + + [w16, w17, w18, w19] +} + +#[inline] +fn sha1_first_half(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] { + sha1_first_add(abcd[0].rotate_left(30), msg) +} + +fn sha1_digest_round_x4(abcd: [u32; 4], work: [u32; 4], i: i8) -> [u32; 4] { + match i { + 0 => sha1rnds4c(abcd, add(work, [K[0]; 4])), + 1 => sha1rnds4p(abcd, add(work, [K[1]; 4])), + 2 => sha1rnds4m(abcd, add(work, [K[2]; 4])), + 3 => sha1rnds4p(abcd, add(work, [K[3]; 4])), + _ => 
unreachable!("unknown icosaround index"), + } +} + +fn sha1rnds4c(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] { + let [mut a, mut b, mut c, mut d] = abcd; + let [t, u, v, w] = msg; + let mut e = 0u32; + + macro_rules! bool3ary_202 { + ($a:expr, $b:expr, $c:expr) => { + $c ^ ($a & ($b ^ $c)) + }; + } // Choose, MD5F, SHA1C + + e = e + .wrapping_add(a.rotate_left(5)) + .wrapping_add(bool3ary_202!(b, c, d)) + .wrapping_add(t); + b = b.rotate_left(30); + + d = d + .wrapping_add(e.rotate_left(5)) + .wrapping_add(bool3ary_202!(a, b, c)) + .wrapping_add(u); + a = a.rotate_left(30); + + c = c + .wrapping_add(d.rotate_left(5)) + .wrapping_add(bool3ary_202!(e, a, b)) + .wrapping_add(v); + e = e.rotate_left(30); + + b = b + .wrapping_add(c.rotate_left(5)) + .wrapping_add(bool3ary_202!(d, e, a)) + .wrapping_add(w); + d = d.rotate_left(30); + + [b, c, d, e] +} + +fn sha1rnds4p(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] { + let [mut a, mut b, mut c, mut d] = abcd; + let [t, u, v, w] = msg; + let mut e = 0u32; + + macro_rules! bool3ary_150 { + ($a:expr, $b:expr, $c:expr) => { + $a ^ $b ^ $c + }; + } // Parity, XOR, MD5H, SHA1P + + e = e + .wrapping_add(a.rotate_left(5)) + .wrapping_add(bool3ary_150!(b, c, d)) + .wrapping_add(t); + b = b.rotate_left(30); + + d = d + .wrapping_add(e.rotate_left(5)) + .wrapping_add(bool3ary_150!(a, b, c)) + .wrapping_add(u); + a = a.rotate_left(30); + + c = c + .wrapping_add(d.rotate_left(5)) + .wrapping_add(bool3ary_150!(e, a, b)) + .wrapping_add(v); + e = e.rotate_left(30); + + b = b + .wrapping_add(c.rotate_left(5)) + .wrapping_add(bool3ary_150!(d, e, a)) + .wrapping_add(w); + d = d.rotate_left(30); + + [b, c, d, e] +} + +fn sha1rnds4m(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] { + let [mut a, mut b, mut c, mut d] = abcd; + let [t, u, v, w] = msg; + let mut e = 0u32; + + macro_rules! 
bool3ary_232 { + ($a:expr, $b:expr, $c:expr) => { + ($a & $b) ^ ($a & $c) ^ ($b & $c) + }; + } // Majority, SHA1M + + e = e + .wrapping_add(a.rotate_left(5)) + .wrapping_add(bool3ary_232!(b, c, d)) + .wrapping_add(t); + b = b.rotate_left(30); + + d = d + .wrapping_add(e.rotate_left(5)) + .wrapping_add(bool3ary_232!(a, b, c)) + .wrapping_add(u); + a = a.rotate_left(30); + + c = c + .wrapping_add(d.rotate_left(5)) + .wrapping_add(bool3ary_232!(e, a, b)) + .wrapping_add(v); + e = e.rotate_left(30); + + b = b + .wrapping_add(c.rotate_left(5)) + .wrapping_add(bool3ary_232!(d, e, a)) + .wrapping_add(w); + d = d.rotate_left(30); + + [b, c, d, e] +} + +macro_rules! rounds4 { + ($h0:ident, $h1:ident, $wk:expr, $i:expr) => { + sha1_digest_round_x4($h0, sha1_first_half($h1, $wk), $i) + }; +} + +macro_rules! schedule { + ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => { + sha1msg2(xor(sha1msg1($v0, $v1), $v2), $v3) + }; +} + +macro_rules! schedule_rounds4 { + ( + $h0:ident, $h1:ident, + $w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr, + $i:expr + ) => { + $w4 = schedule!($w0, $w1, $w2, $w3); + $h1 = rounds4!($h0, $h1, $w4, $i); + }; +} + +#[inline(always)] +fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) { + let mut w0 = [block[0], block[1], block[2], block[3]]; + let mut w1 = [block[4], block[5], block[6], block[7]]; + let mut w2 = [block[8], block[9], block[10], block[11]]; + let mut w3 = [block[12], block[13], block[14], block[15]]; + #[allow(clippy::needless_late_init)] + let mut w4; + + let mut h0 = [state[0], state[1], state[2], state[3]]; + let mut h1 = sha1_first_add(state[4], w0); + + // Rounds 0..20 + h1 = sha1_digest_round_x4(h0, h1, 0); + h0 = rounds4!(h1, h0, w1, 0); + h1 = rounds4!(h0, h1, w2, 0); + h0 = rounds4!(h1, h0, w3, 0); + schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 0); + + // Rounds 20..40 + schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 1); + schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 1); + schedule_rounds4!(h1, h0, w3, w4, w0, w1, 
w2, 1); + schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 1); + schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 1); + + // Rounds 40..60 + schedule_rounds4!(h0, h1, w1, w2, w3, w4, w0, 2); + schedule_rounds4!(h1, h0, w2, w3, w4, w0, w1, 2); + schedule_rounds4!(h0, h1, w3, w4, w0, w1, w2, 2); + schedule_rounds4!(h1, h0, w4, w0, w1, w2, w3, 2); + schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 2); + + // Rounds 60..80 + schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 3); + schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 3); + schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 3); + schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 3); + schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 3); + + let e = h1[0].rotate_left(30); + let [a, b, c, d] = h0; + + state[0] = state[0].wrapping_add(a); + state[1] = state[1].wrapping_add(b); + state[2] = state[2].wrapping_add(c); + state[3] = state[3].wrapping_add(d); + state[4] = state[4].wrapping_add(e); +} + +pub fn compress(state: &mut [u32; 5], blocks: &[[u8; BLOCK_SIZE]]) { + let mut block_u32 = [0u32; BLOCK_SIZE / 4]; + // since LLVM can't properly use aliasing yet it will make + // unnecessary state stores without this copy + let mut state_cpy = *state; + for block in blocks.iter() { + for (o, chunk) in block_u32.iter_mut().zip(block.chunks_exact(4)) { + *o = u32::from_be_bytes(chunk.try_into().unwrap()); + } + sha1_digest_block_u32(&mut state_cpy, &block_u32); + } + *state = state_cpy; +} diff --git a/.cargo-vendor/sha1/src/compress/x86.rs b/.cargo-vendor/sha1/src/compress/x86.rs new file mode 100644 index 0000000000..4dcd56b8a7 --- /dev/null +++ b/.cargo-vendor/sha1/src/compress/x86.rs @@ -0,0 +1,112 @@ +//! SHA-1 `x86`/`x86_64` backend + +#![cfg(any(target_arch = "x86", target_arch = "x86_64"))] + +#[cfg(target_arch = "x86")] +use core::arch::x86::*; +#[cfg(target_arch = "x86_64")] +use core::arch::x86_64::*; + +macro_rules! 
rounds4 { + ($h0:ident, $h1:ident, $wk:expr, $i:expr) => { + _mm_sha1rnds4_epu32($h0, _mm_sha1nexte_epu32($h1, $wk), $i) + }; +} + +macro_rules! schedule { + ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => { + _mm_sha1msg2_epu32(_mm_xor_si128(_mm_sha1msg1_epu32($v0, $v1), $v2), $v3) + }; +} + +macro_rules! schedule_rounds4 { + ( + $h0:ident, $h1:ident, + $w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr, + $i:expr + ) => { + $w4 = schedule!($w0, $w1, $w2, $w3); + $h1 = rounds4!($h0, $h1, $w4, $i); + }; +} + +#[target_feature(enable = "sha,sse2,ssse3,sse4.1")] +unsafe fn digest_blocks(state: &mut [u32; 5], blocks: &[[u8; 64]]) { + #[allow(non_snake_case)] + let MASK: __m128i = _mm_set_epi64x(0x0001_0203_0405_0607, 0x0809_0A0B_0C0D_0E0F); + + let mut state_abcd = _mm_set_epi32( + state[0] as i32, + state[1] as i32, + state[2] as i32, + state[3] as i32, + ); + let mut state_e = _mm_set_epi32(state[4] as i32, 0, 0, 0); + + for block in blocks { + // SAFETY: we use only unaligned loads with this pointer + #[allow(clippy::cast_ptr_alignment)] + let block_ptr = block.as_ptr() as *const __m128i; + + let mut w0 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(0)), MASK); + let mut w1 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(1)), MASK); + let mut w2 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(2)), MASK); + let mut w3 = _mm_shuffle_epi8(_mm_loadu_si128(block_ptr.offset(3)), MASK); + #[allow(clippy::needless_late_init)] + let mut w4; + + let mut h0 = state_abcd; + let mut h1 = _mm_add_epi32(state_e, w0); + + // Rounds 0..20 + h1 = _mm_sha1rnds4_epu32(h0, h1, 0); + h0 = rounds4!(h1, h0, w1, 0); + h1 = rounds4!(h0, h1, w2, 0); + h0 = rounds4!(h1, h0, w3, 0); + schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 0); + + // Rounds 20..40 + schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 1); + schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 1); + schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 1); + schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 1); + schedule_rounds4!(h1, h0, 
w0, w1, w2, w3, w4, 1); + + // Rounds 40..60 + schedule_rounds4!(h0, h1, w1, w2, w3, w4, w0, 2); + schedule_rounds4!(h1, h0, w2, w3, w4, w0, w1, 2); + schedule_rounds4!(h0, h1, w3, w4, w0, w1, w2, 2); + schedule_rounds4!(h1, h0, w4, w0, w1, w2, w3, 2); + schedule_rounds4!(h0, h1, w0, w1, w2, w3, w4, 2); + + // Rounds 60..80 + schedule_rounds4!(h1, h0, w1, w2, w3, w4, w0, 3); + schedule_rounds4!(h0, h1, w2, w3, w4, w0, w1, 3); + schedule_rounds4!(h1, h0, w3, w4, w0, w1, w2, 3); + schedule_rounds4!(h0, h1, w4, w0, w1, w2, w3, 3); + schedule_rounds4!(h1, h0, w0, w1, w2, w3, w4, 3); + + state_abcd = _mm_add_epi32(state_abcd, h0); + state_e = _mm_sha1nexte_epu32(h1, state_e); + } + + state[0] = _mm_extract_epi32(state_abcd, 3) as u32; + state[1] = _mm_extract_epi32(state_abcd, 2) as u32; + state[2] = _mm_extract_epi32(state_abcd, 1) as u32; + state[3] = _mm_extract_epi32(state_abcd, 0) as u32; + state[4] = _mm_extract_epi32(state_e, 3) as u32; +} + +cpufeatures::new!(shani_cpuid, "sha", "sse2", "ssse3", "sse4.1"); + +pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) { + // TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725 + // after stabilization + if shani_cpuid::get() { + unsafe { + digest_blocks(state, blocks); + } + } else { + super::soft::compress(state, blocks); + } +} diff --git a/.cargo-vendor/sha1/src/lib.rs b/.cargo-vendor/sha1/src/lib.rs new file mode 100644 index 0000000000..38ddc4b51e --- /dev/null +++ b/.cargo-vendor/sha1/src/lib.rs @@ -0,0 +1,154 @@ +//! Pure Rust implementation of the [SHA-1][1] cryptographic hash algorithm +//! with optional hardware-specific optimizations. +//! +//! # 🚨 Warning: Cryptographically Broken! 🚨 +//! +//! The SHA-1 hash function should be considered cryptographically broken and +//! unsuitable for further use in any security critical capacity, as it is +//! [practically vulnerable to chosen-prefix collisions][2]. +//! +//! We provide this crate for legacy interoperability purposes only. +//! +//! 
# Usage +//! +//! ```rust +//! use hex_literal::hex; +//! use sha1::{Sha1, Digest}; +//! +//! // create a Sha1 object +//! let mut hasher = Sha1::new(); +//! +//! // process input message +//! hasher.update(b"hello world"); +//! +//! // acquire hash digest in the form of GenericArray, +//! // which in this case is equivalent to [u8; 20] +//! let result = hasher.finalize(); +//! assert_eq!(result[..], hex!("2aae6c35c94fcfb415dbe95f408b9ce91ee846ed")); +//! ``` +//! +//! Also see [RustCrypto/hashes][3] readme. +//! +//! # Note for users of `sha1 v0.6` +//! +//! This crate has been transferred to the RustCrypto organization and uses +//! implementation previously published as the `sha-1` crate. The previous +//! zero dependencies version is now published as the [`sha1_smol`] crate. +//! +//! [1]: https://en.wikipedia.org/wiki/SHA-1 +//! [2]: https://sha-mbles.github.io/ +//! [3]: https://github.com/RustCrypto/hashes +//! [`sha1_smol`]: https://github.com/mitsuhiko/sha1-smol/ + +#![no_std] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg", + html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg" +)] +#![warn(missing_docs, rust_2018_idioms)] + +pub use digest::{self, Digest}; + +use core::{fmt, slice::from_ref}; +#[cfg(feature = "oid")] +use digest::const_oid::{AssociatedOid, ObjectIdentifier}; +use digest::{ + block_buffer::Eager, + core_api::{ + AlgorithmName, Block, BlockSizeUser, Buffer, BufferKindUser, CoreWrapper, FixedOutputCore, + OutputSizeUser, Reset, UpdateCore, + }, + typenum::{Unsigned, U20, U64}, + HashMarker, Output, +}; + +mod compress; + +#[cfg(feature = "compress")] +pub use compress::compress; +#[cfg(not(feature = "compress"))] +use compress::compress; + +const STATE_LEN: usize = 5; + +/// Core SHA-1 hasher state. 
+#[derive(Clone)] +pub struct Sha1Core { + h: [u32; STATE_LEN], + block_len: u64, +} + +impl HashMarker for Sha1Core {} + +impl BlockSizeUser for Sha1Core { + type BlockSize = U64; +} + +impl BufferKindUser for Sha1Core { + type BufferKind = Eager; +} + +impl OutputSizeUser for Sha1Core { + type OutputSize = U20; +} + +impl UpdateCore for Sha1Core { + #[inline] + fn update_blocks(&mut self, blocks: &[Block]) { + self.block_len += blocks.len() as u64; + compress(&mut self.h, blocks); + } +} + +impl FixedOutputCore for Sha1Core { + #[inline] + fn finalize_fixed_core(&mut self, buffer: &mut Buffer, out: &mut Output) { + let bs = Self::BlockSize::U64; + let bit_len = 8 * (buffer.get_pos() as u64 + bs * self.block_len); + + let mut h = self.h; + buffer.len64_padding_be(bit_len, |b| compress(&mut h, from_ref(b))); + for (chunk, v) in out.chunks_exact_mut(4).zip(h.iter()) { + chunk.copy_from_slice(&v.to_be_bytes()); + } + } +} + +impl Default for Sha1Core { + #[inline] + fn default() -> Self { + Self { + h: [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0], + block_len: 0, + } + } +} + +impl Reset for Sha1Core { + #[inline] + fn reset(&mut self) { + *self = Default::default(); + } +} + +impl AlgorithmName for Sha1Core { + fn write_alg_name(f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("Sha1") + } +} + +impl fmt::Debug for Sha1Core { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("Sha1Core { ... }") + } +} + +#[cfg(feature = "oid")] +#[cfg_attr(docsrs, doc(cfg(feature = "oid")))] +impl AssociatedOid for Sha1Core { + const OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.14.3.2.26"); +} + +/// SHA-1 hasher state. 
+pub type Sha1 = CoreWrapper; diff --git a/.cargo-vendor/sha1/tests/data/sha1.blb b/.cargo-vendor/sha1/tests/data/sha1.blb new file mode 100644 index 0000000000..e8dc49163c Binary files /dev/null and b/.cargo-vendor/sha1/tests/data/sha1.blb differ diff --git a/.cargo-vendor/sha1/tests/mod.rs b/.cargo-vendor/sha1/tests/mod.rs new file mode 100644 index 0000000000..04aa69f1c3 --- /dev/null +++ b/.cargo-vendor/sha1/tests/mod.rs @@ -0,0 +1,15 @@ +use digest::dev::{feed_rand_16mib, fixed_reset_test}; +use hex_literal::hex; +use sha1::{Digest, Sha1}; + +digest::new_test!(sha1_main, "sha1", Sha1, fixed_reset_test); + +#[test] +fn sha1_rand() { + let mut h = Sha1::new(); + feed_rand_16mib(&mut h); + assert_eq!( + h.finalize()[..], + hex!("7e565a25a8b123e9881addbcedcd927b23377a78")[..] + ); +} diff --git a/.cargo-vendor/sync_wrapper-0.1.2/.cargo-checksum.json b/.cargo-vendor/sync_wrapper-0.1.2/.cargo-checksum.json new file mode 100644 index 0000000000..aab63c70fc --- /dev/null +++ b/.cargo-vendor/sync_wrapper-0.1.2/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"00f943849c012302f371fcccfb7541b9daf15f94bf3c8d6412f739300e4ee0e9","LICENSE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","README.md":"61e995daa67a37597f76b78ca3c61916a42a66034f01e9473ee7b7753029ca3a","src/lib.rs":"824fa08776b004a2315172fe5ed23dcf14315bfb01bb2115a663ed70a4aeac30"},"package":"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"} \ No newline at end of file diff --git a/.cargo-vendor/sync_wrapper-0.1.2/Cargo.toml b/.cargo-vendor/sync_wrapper-0.1.2/Cargo.toml new file mode 100644 index 0000000000..c742187755 --- /dev/null +++ b/.cargo-vendor/sync_wrapper-0.1.2/Cargo.toml @@ -0,0 +1,38 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., 
crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "sync_wrapper" +version = "0.1.2" +authors = ["Actyx AG "] +description = "A tool for enlisting the compiler’s help in proving the absence of concurrency" +homepage = "https://docs.rs/sync_wrapper" +documentation = "https://docs.rs/sync_wrapper" +readme = "README.md" +keywords = ["concurrency"] +categories = ["concurrency"] +license = "Apache-2.0" +repository = "https://github.com/Actyx/sync_wrapper" + +[dependencies.futures-core] +version = "0.3" +optional = true +default-features = false + +[dev-dependencies.futures] +version = "0.3" + +[dev-dependencies.pin-project-lite] +version = "0.2.7" + +[features] +futures = ["futures-core"] diff --git a/.cargo-vendor/sync_wrapper-0.1.2/LICENSE b/.cargo-vendor/sync_wrapper-0.1.2/LICENSE new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/.cargo-vendor/sync_wrapper-0.1.2/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/.cargo-vendor/sync_wrapper-0.1.2/README.md b/.cargo-vendor/sync_wrapper-0.1.2/README.md new file mode 100644 index 0000000000..20261c9372 --- /dev/null +++ b/.cargo-vendor/sync_wrapper-0.1.2/README.md @@ -0,0 +1,8 @@ +[![Latest Version](https://img.shields.io/crates/v/sync_wrapper.svg)](https://crates.io/crates/sync_wrapper) +[![Rust Documentation](https://docs.rs/sync_wrapper/badge.svg)](https://docs.rs/sync_wrapper) + +# SyncWrapper + +A mutual exclusion primitive that relies on static type information only. 
+ +This library is inspired by [this discussion](https://internals.rust-lang.org/t/what-shall-sync-mean-across-an-await/12020/2). diff --git a/.cargo-vendor/sync_wrapper-0.1.2/src/lib.rs b/.cargo-vendor/sync_wrapper-0.1.2/src/lib.rs new file mode 100644 index 0000000000..2800d9a732 --- /dev/null +++ b/.cargo-vendor/sync_wrapper-0.1.2/src/lib.rs @@ -0,0 +1,248 @@ +/* + * Copyright 2020 Actyx AG + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +//! A mutual exclusion primitive that relies on static type information only +//! +//! This library is inspired by [this discussion](https://internals.rust-lang.org/t/what-shall-sync-mean-across-an-await/12020/2). +#![doc(html_logo_url = "https://developer.actyx.com/img/logo.svg")] +#![doc(html_favicon_url = "https://developer.actyx.com/img/favicon.ico")] +#![no_std] + +use core::{ + fmt::{self, Debug, Formatter}, + pin::Pin, + future::Future, + task::{Context, Poll}, +}; + +/// A mutual exclusion primitive that relies on static type information only +/// +/// In some cases synchronization can be proven statically: whenever you hold an exclusive `&mut` +/// reference, the Rust type system ensures that no other part of the program can hold another +/// reference to the data. Therefore it is safe to access it even if the current thread obtained +/// this reference via a channel. Whenever this is the case, the overhead of allocating and locking +/// a [`Mutex`] can be avoided by using this static version. 
+/// +/// One example where this is often applicable is [`Future`], which requires an exclusive reference +/// for its [`poll`] method: While a given `Future` implementation may not be safe to access by +/// multiple threads concurrently, the executor can only run the `Future` on one thread at any +/// given time, making it [`Sync`] in practice as long as the implementation is `Send`. You can +/// therefore use the static mutex to prove that your data structure is `Sync` even though it +/// contains such a `Future`. +/// +/// # Example +/// +/// ``` +/// use sync_wrapper::SyncWrapper; +/// use std::future::Future; +/// +/// struct MyThing { +/// future: SyncWrapper + Send>>, +/// } +/// +/// impl MyThing { +/// // all accesses to `self.future` now require an exclusive reference or ownership +/// } +/// +/// fn assert_sync() {} +/// +/// assert_sync::(); +/// ``` +/// +/// [`Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html +/// [`Future`]: https://doc.rust-lang.org/std/future/trait.Future.html +/// [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#method.poll +/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html +#[repr(transparent)] +pub struct SyncWrapper(T); + +impl SyncWrapper { + /// Creates a new static mutex containing the given value. + /// + /// # Examples + /// + /// ``` + /// use sync_wrapper::SyncWrapper; + /// + /// let mutex = SyncWrapper::new(42); + /// ``` + pub const fn new(value: T) -> Self { + Self(value) + } + + /// Acquires a reference to the protected value. + /// + /// This is safe because it requires an exclusive reference to the mutex. Therefore this method + /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which + /// returns an error if another thread panicked while holding the lock. It is not recommended + /// to send an exclusive reference to a potentially damaged value to another thread for further + /// processing. 
+ /// + /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut + /// + /// # Examples + /// + /// ``` + /// use sync_wrapper::SyncWrapper; + /// + /// let mut mutex = SyncWrapper::new(42); + /// let value = mutex.get_mut(); + /// *value = 0; + /// assert_eq!(*mutex.get_mut(), 0); + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.0 + } + + /// Acquires a pinned reference to the protected value. + /// + /// See [`Self::get_mut`] for why this method is safe. + /// + /// # Examples + /// + /// ``` + /// use std::future::Future; + /// use std::pin::Pin; + /// use std::task::{Context, Poll}; + /// + /// use pin_project_lite::pin_project; + /// use sync_wrapper::SyncWrapper; + /// + /// pin_project! { + /// struct FutureWrapper { + /// #[pin] + /// inner: SyncWrapper, + /// } + /// } + /// + /// impl Future for FutureWrapper { + /// type Output = F::Output; + /// + /// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + /// self.project().inner.get_pin_mut().poll(cx) + /// } + /// } + /// ``` + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { + unsafe { Pin::map_unchecked_mut(self, |this| &mut this.0) } + } + + /// Consumes this mutex, returning the underlying data. + /// + /// This is safe because it requires ownership of the mutex, therefore this method will neither + /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which + /// returns an error if another thread panicked while holding the lock. It is not recommended + /// to send an exclusive reference to a potentially damaged value to another thread for further + /// processing. 
+ /// + /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner + /// + /// # Examples + /// + /// ``` + /// use sync_wrapper::SyncWrapper; + /// + /// let mut mutex = SyncWrapper::new(42); + /// assert_eq!(mutex.into_inner(), 42); + /// ``` + pub fn into_inner(self) -> T { + self.0 + } +} + +// this is safe because the only operations permitted on this data structure require exclusive +// access or ownership +unsafe impl Sync for SyncWrapper {} + +impl Debug for SyncWrapper { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.pad("SyncWrapper") + } +} + +impl Default for SyncWrapper { + fn default() -> Self { + Self::new(T::default()) + } +} + +impl From for SyncWrapper { + fn from(value: T) -> Self { + Self::new(value) + } +} + +/// `Future` which is `Sync`. +/// +/// # Examples +/// +/// ``` +/// use sync_wrapper::{SyncWrapper, SyncFuture}; +/// +/// let fut = async { 1 }; +/// let fut = SyncFuture::new(fut); +/// ``` +pub struct SyncFuture { + inner: SyncWrapper +} +impl SyncFuture { + pub fn new(inner: F) -> Self { + Self { inner: SyncWrapper::new(inner) } + } + pub fn into_inner(self) -> F { + self.inner.into_inner() + } +} +impl Future for SyncFuture { + type Output = F::Output; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let inner = unsafe { self.map_unchecked_mut(|x| x.inner.get_mut()) }; + inner.poll(cx) + } +} + +/// `Stream` which is `Sync`. 
+/// +/// # Examples +/// +/// ``` +/// use sync_wrapper::SyncStream; +/// use futures::stream; +/// +/// let st = stream::iter(vec![1]); +/// let st = SyncStream::new(st); +/// ``` +#[cfg(feature = "futures")] +pub struct SyncStream { + inner: SyncWrapper +} +#[cfg(feature = "futures")] +impl SyncStream { + pub fn new(inner: S) -> Self { + Self { inner: SyncWrapper::new(inner) } + } + pub fn into_inner(self) -> S { + self.inner.into_inner() + } +} +#[cfg(feature = "futures")] +impl futures_core::Stream for SyncStream { + type Item = S::Item; + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let inner = unsafe { self.map_unchecked_mut(|x| x.inner.get_mut()) }; + inner.poll_next(cx) + } +} + diff --git a/.cargo-vendor/sync_wrapper/.cargo-checksum.json b/.cargo-vendor/sync_wrapper/.cargo-checksum.json index aab63c70fc..dd44981e82 100644 --- a/.cargo-vendor/sync_wrapper/.cargo-checksum.json +++ b/.cargo-vendor/sync_wrapper/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"00f943849c012302f371fcccfb7541b9daf15f94bf3c8d6412f739300e4ee0e9","LICENSE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","README.md":"61e995daa67a37597f76b78ca3c61916a42a66034f01e9473ee7b7753029ca3a","src/lib.rs":"824fa08776b004a2315172fe5ed23dcf14315bfb01bb2115a663ed70a4aeac30"},"package":"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"} \ No newline at end of file +{"files":{"Cargo.toml":"7fa965b7d5c0bff8117f796e8855ee207006e5c2641ce26a7e570a751d88a88b","LICENSE":"0d542e0c8804e39aa7f37eb00da5a762149dc682d7829451287e11b938e94594","README.md":"61e995daa67a37597f76b78ca3c61916a42a66034f01e9473ee7b7753029ca3a","src/lib.rs":"824fa08776b004a2315172fe5ed23dcf14315bfb01bb2115a663ed70a4aeac30"},"package":"a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394"} \ No newline at end of file diff --git a/.cargo-vendor/sync_wrapper/Cargo.toml b/.cargo-vendor/sync_wrapper/Cargo.toml index c742187755..327ec67b67 100644 --- 
a/.cargo-vendor/sync_wrapper/Cargo.toml +++ b/.cargo-vendor/sync_wrapper/Cargo.toml @@ -12,9 +12,9 @@ [package] edition = "2018" name = "sync_wrapper" -version = "0.1.2" +version = "1.0.1" authors = ["Actyx AG "] -description = "A tool for enlisting the compiler’s help in proving the absence of concurrency" +description = "A tool for enlisting the compiler's help in proving the absence of concurrency" homepage = "https://docs.rs/sync_wrapper" documentation = "https://docs.rs/sync_wrapper" readme = "README.md" @@ -23,6 +23,9 @@ categories = ["concurrency"] license = "Apache-2.0" repository = "https://github.com/Actyx/sync_wrapper" +[package.metadata.docs.rs] +features = ["futures"] + [dependencies.futures-core] version = "0.3" optional = true diff --git a/.cargo-vendor/tokio-tungstenite/.cargo-checksum.json b/.cargo-vendor/tokio-tungstenite/.cargo-checksum.json new file mode 100644 index 0000000000..3b9225da7c --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"0608da97f3f6cc560e5d0c81ed5d085c2084658a36c7f542105d6c5f15f6f294","Cargo.lock":"9879842f6b198cfee609681f9134153c076b050094571c827c49695e586eeaaf","Cargo.toml":"4d15d1019756c843641ada4be19494c1b6406f0436ef7299b66bf4f51d44d4cc","LICENSE":"fdd55e2b2da854b0fbdc1e607df7c2ba1e1ebf91ecb77c515511ebeef972bc8f","README.md":"10544c21c0f73c2c7203c4e2532d4f1cf710fda32c16952cdfb43fa8f44e3256","examples/README.md":"067ceaf7b70949f087fd582d71bb114e88ef0313560dbd33b345158ffa33eff3","examples/autobahn-client.rs":"eea5b9fbe6c5aceaba1a5702a4c628300b3067168cf6380ffbe741b9e6c59982","examples/autobahn-server.rs":"bb5286c2dbb2d2a5e758c4f7c345afe0d4d9b43efc365be512ec4a31728df452","examples/client.rs":"3077c2a4587b788963eb183d960b58080ab4e970d765fafa48ed48798f2b809d","examples/echo-server.rs":"5bb6b5d84a10086edd28d3d5b6d402e851b7558b90e2a9327bba8d1034a49b52","examples/interval-server.rs":"d99c345ae58c6a450fb1b53d48e002dba9c4c71038351a1c826305a6899030bc","examples/server-custom-accept.rs":"a055d40f821b7f59e7a0f3474c0f4a9018f31e7efb9b22ce3dc4ff838f778a42","examples/server-headers.rs":"3b06c34718d82dbaa21c9b40fccd94411e7f80fad72f8a02b48132cdb8c83738","examples/server.rs":"b3f8369b474af3e73ae544dae200e5634c45f84a62788c7d6ce50d2eefc8dd1e","src/compat.rs":"691667016a1f818d8370f98db177896ce7f03f04a18a929ea521348da1814df7","src/connect.rs":"bb8130f2150addda8ffc1cd9a834f3f28f089e6b827108f7fe50e0ae6266a3b7","src/handshake.rs":"2030d6e704a97606cd88c3cf16404e9fded9beef9b9f661c9ddb988528e1873d","src/lib.rs":"9dae674dbfa530abf7a93e36a2148e546d49ccefc9fa80b8e69ce11fe9f6890e","src/stream.rs":"735aed6eabec038a9789c2f94723c10dcbc26e052b0126642ac77f8cd8afbf25","src/tls.rs":"c42474f07f5e7f145f22b95bc98f8441744649e09b59865f8f68a3e810c24cb5"},"package":"c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38"} \ No newline at end of file diff --git a/.cargo-vendor/tokio-tungstenite/CHANGELOG.md b/.cargo-vendor/tokio-tungstenite/CHANGELOG.md new file mode 100644 index 
0000000000..d1cd46d8bb --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/CHANGELOG.md @@ -0,0 +1,66 @@ +# 0.20.1 + +- Fix RUSTSEC-2023-0053. +- Fix transitive CVE-2023-43669 from `tungstenite`. + +# 0.20.0 + +- Change the buffering behavior for `Sink::send()` and `Sink::feed()`, [see `tungstenite`'s changelog for more details](https://github.com/snapview/tungstenite-rs/blob/master/CHANGELOG.md#0200). + +# 0.19.0 + +- Allow users to enable/disable Nagle algorithm when using `connect()` helpers. +- Improve the behavior of the `Sink` for the `WebSocketStream`, so it does not return an error when it’s not necessary (when `poll_flush()` is called on a connection that has just been closed). +- Workaround an issue where `rustls` TLS backend expected domain in a certain format and reject IPv6 addresses if they contained square brackets in them. +- Update dependencies and remove unused errors. + +# 0.18.0 + +- Update dependencies (underlying `tungstenite` core). + +# 0.17.2 + +- Make `Origin` header case-sensitive (to keep compatibility with poorely-written servers that don't accept lowercase `Origin` header). +- Make semantics of the reading form the `WebSocketStream` more reasonable (return `None` instead of an error when the stream is normally closed). +- Imrpove the way `poll_close()` works by properly driving the close of the stream till completion. + +# 0.17.1 + +- Update the `tungstenite` dependency (fixes a panic in `tungstenite` and MSRV), see [`tungstenite`'s changelog for more details](https://github.com/snapview/tungstenite-rs/blob/master/CHANGELOG.md#0172). + +# 0.17.0 + +- Update the dependencies, please refer to the [`tungstenite` changelog](https://github.com/snapview/tungstenite-rs/blob/master/CHANGELOG.md#0170) for the actual changes. + +# 0.16.1 + +- Fix feature selection problem when using TLS. + +# 0.16.0 + +- Add a function to allow to specify the TLS connector when using `connect()` like logic. 
+- Add support for choosing the right root certificates for the TLS. +- Change the behavior of the `connect()` so that it fails when using TLS without TLS feature. +- Do not project with Unpin. +- Update the dependencies with important [implications / improvements](https://github.com/snapview/tungstenite-rs/blob/master/CHANGELOG.md#0160). + +# 0.15.0 + +- Update the `tungstenite-rs` version to `0.14.0`, + [check `tungstenite-rs` release for more details](https://github.com/snapview/tungstenite-rs/blob/master/CHANGELOG.md#0140). + +# 0.14.0 + +- Support for `rustls` as TLS backend. + - The `tls` feature was renamed to `native-tls` and uses a OS-native TLS implementation. + - A new `native-tls-vendored` feature that uses `native-tls` but forces to build a vendored + version (mostly for `openssl`) instead of linking against the system installation. + - New `rustls-tls` feature flag to enable TLS with `rustls` as backend. + - `stream::Stream` was renamed to `MaybeTlsStream` and wraps a `rustls` TLS stream as well now. + - If both `native-tls` and `rustls-tls` are enabled `native-tls` is used by default. + - A new `Connector` was introduced that is similar to the previous `TlsConnector` but now allows + to control the used TLS backend explicitly (or disable it) in `client_async_tls_with_config`. + +# 0.13.0 + +- Upgrade from Tokio 0.3 to Tokio 1.0.0. diff --git a/.cargo-vendor/tokio-tungstenite/Cargo.lock b/.cargo-vendor/tokio-tungstenite/Cargo.lock new file mode 100644 index 0000000000..a918542693 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/Cargo.lock @@ -0,0 +1,1267 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "cpufeatures" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "data-encoding" +version = "2.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "env_logger" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" + +[[package]] +name = "futures-sink" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" + +[[package]] +name = "futures-task" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" + +[[package]] +name = "futures-util" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +dependencies = [ + "futures-core", + "futures-sink", + "futures-task", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "hermit-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" + +[[package]] +name = "http" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http 0.2.11", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 0.2.11", + "http-body", + "httparse", + "httpdate", + 
"itoa", + "pin-project-lite", + "socket2 0.4.10", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "is-terminal" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi", + "rustix", + "windows-sys 0.48.0", +] + +[[package]] +name = "itoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" + +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "openssl" +version = "0.10.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies 
= [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-src" +version = "300.1.6+3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" +dependencies = [ + "cc", +] + +[[package]] +name = "openssl-sys" +version = "0.9.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +dependencies = [ + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "regex" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "ring" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustix" +version = "0.38.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" +dependencies = [ + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bc238b76c51bbc449c55ffbc39d03772a057cc8cf783c49d4af4c2537b74a8b" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +dependencies = [ + "base64", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e7673e0aa20ee4937c6aacfc12bb8341cfbf054cdd21df6bec5fd0629fe9339b" + +[[package]] +name = "rustls-webpki" +version = "0.102.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "socket2" +version = 
"0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "2.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +dependencies = [ + "cfg-if", + "fastrand", + "redox_syscall", + "rustix", + "windows-sys 0.48.0", +] + +[[package]] +name = "termcolor" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "pin-project-lite", + "socket2 0.5.5", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.21.0" +dependencies = [ + "env_logger", + "futures-channel", + "futures-util", + "hyper", + "log", + "native-tls", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tungstenite", + "url", + "webpki-roots", +] + +[[package]] +name = "tower-service" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" + +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.0.0", + "httparse", + "log", + "native-tls", + "rand", + "rustls", + "rustls-pki-types", + "sha1", + "thiserror", + "url", + "utf-8", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-bidi" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "webpki-roots" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi" +version 
= "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" diff --git a/.cargo-vendor/tokio-tungstenite/Cargo.toml b/.cargo-vendor/tokio-tungstenite/Cargo.toml new file mode 100644 index 0000000000..11b9a95a11 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/Cargo.toml @@ -0,0 +1,201 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" 
Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.63" +name = "tokio-tungstenite" +version = "0.21.0" +authors = [ + "Daniel Abramov ", + "Alexey Galakhov ", +] +include = [ + "examples/**/*", + "src/**/*", + "LICENSE", + "README.md", + "CHANGELOG.md", +] +description = "Tokio binding for Tungstenite, the Lightweight stream-based WebSocket implementation" +homepage = "https://github.com/snapview/tokio-tungstenite" +documentation = "https://docs.rs/tokio-tungstenite/0.21.0" +readme = "README.md" +keywords = [ + "websocket", + "io", + "web", +] +categories = [ + "web-programming::websocket", + "network-programming", + "asynchronous", + "concurrency", +] +license = "MIT" +repository = "https://github.com/snapview/tokio-tungstenite" + +[package.metadata.docs.rs] +features = [ + "native-tls", + "__rustls-tls", +] + +[[example]] +name = "autobahn-client" +required-features = ["connect"] + +[[example]] +name = "autobahn-server" +required-features = ["handshake"] + +[[example]] +name = "client" +required-features = ["connect"] + +[[example]] +name = "echo-server" +required-features = ["handshake"] + +[[example]] +name = "server-custom-accept" +required-features = ["handshake"] + +[[example]] +name = "server" +required-features = ["handshake"] + +[[example]] +name = "server-headers" +required-features = ["handshake"] + +[[example]] +name = "interval-server" +required-features = ["handshake"] + +[dependencies.futures-util] +version = "0.3.28" +features = [ + "sink", + "std", +] +default-features = false + +[dependencies.log] +version = "0.4.17" + +[dependencies.native-tls-crate] +version = "0.2.11" +optional = true +package = 
"native-tls" + +[dependencies.rustls] +version = "0.22.0" +optional = true + +[dependencies.rustls-native-certs] +version = "0.7.0" +optional = true + +[dependencies.rustls-pki-types] +version = "1.0" +optional = true + +[dependencies.tokio] +version = "1.0.0" +features = ["io-util"] +default-features = false + +[dependencies.tokio-native-tls] +version = "0.3.1" +optional = true + +[dependencies.tokio-rustls] +version = "0.25.0" +optional = true + +[dependencies.tungstenite] +version = "0.21.0" +default-features = false + +[dependencies.webpki-roots] +version = "0.26.0" +optional = true + +[dev-dependencies.env_logger] +version = "0.10.0" + +[dev-dependencies.futures-channel] +version = "0.3.28" + +[dev-dependencies.hyper] +version = "0.14.25" +features = [ + "http1", + "server", + "tcp", +] +default-features = false + +[dev-dependencies.tokio] +version = "1.27.0" +features = [ + "io-std", + "macros", + "net", + "rt-multi-thread", + "time", +] +default-features = false + +[dev-dependencies.url] +version = "2.3.1" + +[features] +__rustls-tls = [ + "rustls", + "rustls-pki-types", + "tokio-rustls", + "stream", + "tungstenite/__rustls-tls", + "handshake", +] +connect = [ + "stream", + "tokio/net", + "handshake", +] +default = [ + "connect", + "handshake", +] +handshake = ["tungstenite/handshake"] +native-tls = [ + "native-tls-crate", + "tokio-native-tls", + "stream", + "tungstenite/native-tls", + "handshake", +] +native-tls-vendored = [ + "native-tls", + "native-tls-crate/vendored", + "tungstenite/native-tls-vendored", +] +rustls-tls-native-roots = [ + "__rustls-tls", + "rustls-native-certs", +] +rustls-tls-webpki-roots = [ + "__rustls-tls", + "webpki-roots", +] +stream = [] diff --git a/.cargo-vendor/tokio-tungstenite/LICENSE b/.cargo-vendor/tokio-tungstenite/LICENSE new file mode 100644 index 0000000000..4bf5242e2d --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017 Daniel Abramov +Copyright (c) 2017 Alexey Galakhov + 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.cargo-vendor/tokio-tungstenite/README.md b/.cargo-vendor/tokio-tungstenite/README.md new file mode 100644 index 0000000000..2ce0a6a57c --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/README.md @@ -0,0 +1,45 @@ +# tokio-tungstenite + +Asynchronous WebSockets for Tokio stack. + +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) +[![Crates.io](https://img.shields.io/crates/v/tokio-tungstenite.svg?maxAge=2592000)](https://crates.io/crates/tokio-tungstenite) +[![Build Status](https://travis-ci.org/snapview/tokio-tungstenite.svg?branch=master)](https://travis-ci.org/snapview/tokio-tungstenite) + +[Documentation](https://docs.rs/tokio-tungstenite) + +## Usage + +Add this in your `Cargo.toml`: + +```toml +[dependencies] +tokio-tungstenite = "*" +``` + +Take a look at the `examples/` directory for client and server examples. 
You may also want to get familiar with +[Tokio](https://github.com/tokio-rs/tokio) if you don't have any experience with it. + +## What is tokio-tungstenite? + +This crate is based on [`tungstenite-rs`](https://github.com/snapview/tungstenite-rs) Rust WebSocket library and provides `Tokio` bindings and wrappers for it, so you +can use it with non-blocking/asynchronous `TcpStream`s from and couple it together with other crates from `Tokio` stack. + +## Features + +As with [`tungstenite-rs`](https://github.com/snapview/tungstenite-rs) TLS is supported on all platforms using [`native-tls`](https://github.com/sfackler/rust-native-tls) or [`rustls`](https://github.com/ctz/rustls) through feature flags: `native-tls`, `rustls-tls-native-roots` or `rustls-tls-webpki-roots` feature flags. Neither is enabled by default. See the `Cargo.toml` for more information. If you require support for secure WebSockets (`wss://`) enable one of them. + +## Is it performant? + +In essence, `tokio-tungstenite` is a wrapper for `tungstenite`, so the performance is capped by the performance of `tungstenite`. `tungstenite` +has a decent performance (it has been used in production for real-time communication software, video conferencing, etc), but it's definitely +not the fastest WebSocket library in the world at the moment of writing this note. + +If performance is of a paramount importance for you (especially if you send **large messages**), then you might want to check other libraries +that have been designed to be performant or you could file a PR against `tungstenite` to improve the performance! + +We are aware of changes that both `tungstenite` and `tokio-tungstenite` need in order to fill the gap of ~30% performance difference between `tungstenite` +and more performant libraries like `fastwebsockets`, but we have not worked on that yet as it was not required for the use case that original authors designed +the library for. 
In the course of past years we have merged several performance improvements submitted by the awesome community of Rust users who helped to improve +the library! For a quick summary of the pending performance problems/improvements, see [the comment](https://github.com/snapview/tungstenite-rs/issues/352#issuecomment-1537488614). + diff --git a/.cargo-vendor/tokio-tungstenite/examples/README.md b/.cargo-vendor/tokio-tungstenite/examples/README.md new file mode 100644 index 0000000000..b0bb00a12d --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/README.md @@ -0,0 +1,9 @@ +Examples + +- [autobahn-client.rs](https://github.com/snapview/tokio-tungstenite/blob/master/examples/autobahn-client.rs) +- [autobahn-server.rs](https://github.com/snapview/tokio-tungstenite/blob/master/examples/autobahn-server.rs) +- [client.rs](https://github.com/snapview/tokio-tungstenite/blob/master/examples/client.rs) +- [echo-server.rs](https://github.com/snapview/tokio-tungstenite/blob/master/examples/echo-server.rs) +- [server.rs](https://github.com/snapview/tokio-tungstenite/blob/master/examples/server.rs) +- [server-headers.rs](https://github.com/snapview/tokio-tungstenite/blob/master/examples/server-headers.rs) +- [interval-server.rs](https://github.com/snapview/tokio-tungstenite/blob/master/examples/interval-server.rs) diff --git a/.cargo-vendor/tokio-tungstenite/examples/autobahn-client.rs b/.cargo-vendor/tokio-tungstenite/examples/autobahn-client.rs new file mode 100644 index 0000000000..86b765b011 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/autobahn-client.rs @@ -0,0 +1,64 @@ +use futures_util::{SinkExt, StreamExt}; +use log::*; +use tokio_tungstenite::{ + connect_async, + tungstenite::{Error, Result}, +}; +use url::Url; + +const AGENT: &str = "Tungstenite"; + +async fn get_case_count() -> Result { + let (mut socket, _) = connect_async( + Url::parse("ws://localhost:9001/getCaseCount").expect("Can't connect to case count URL"), 
+ ) + .await?; + let msg = socket.next().await.expect("Can't fetch case count")?; + socket.close(None).await?; + Ok(msg.into_text()?.parse::().expect("Can't parse case count")) +} + +async fn update_reports() -> Result<()> { + let (mut socket, _) = connect_async( + Url::parse(&format!("ws://localhost:9001/updateReports?agent={}", AGENT)) + .expect("Can't update reports"), + ) + .await?; + socket.close(None).await?; + Ok(()) +} + +async fn run_test(case: u32) -> Result<()> { + info!("Running test case {}", case); + let case_url = + Url::parse(&format!("ws://localhost:9001/runCase?case={}&agent={}", case, AGENT)) + .expect("Bad testcase URL"); + + let (mut ws_stream, _) = connect_async(case_url).await?; + while let Some(msg) = ws_stream.next().await { + let msg = msg?; + if msg.is_text() || msg.is_binary() { + ws_stream.send(msg).await?; + } + } + + Ok(()) +} + +#[tokio::main] +async fn main() { + env_logger::init(); + + let total = get_case_count().await.expect("Error getting case count"); + + for case in 1..=total { + if let Err(e) = run_test(case).await { + match e { + Error::ConnectionClosed | Error::Protocol(_) | Error::Utf8 => (), + err => error!("Testcase failed: {}", err), + } + } + } + + update_reports().await.expect("Error updating reports"); +} diff --git a/.cargo-vendor/tokio-tungstenite/examples/autobahn-server.rs b/.cargo-vendor/tokio-tungstenite/examples/autobahn-server.rs new file mode 100644 index 0000000000..ad5b16e88e --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/autobahn-server.rs @@ -0,0 +1,48 @@ +use futures_util::{SinkExt, StreamExt}; +use log::*; +use std::net::SocketAddr; +use tokio::net::{TcpListener, TcpStream}; +use tokio_tungstenite::{ + accept_async, + tungstenite::{Error, Result}, +}; + +async fn accept_connection(peer: SocketAddr, stream: TcpStream) { + if let Err(e) = handle_connection(peer, stream).await { + match e { + Error::ConnectionClosed | Error::Protocol(_) | Error::Utf8 => (), + err => error!("Error processing 
connection: {}", err), + } + } +} + +async fn handle_connection(peer: SocketAddr, stream: TcpStream) -> Result<()> { + let mut ws_stream = accept_async(stream).await.expect("Failed to accept"); + + info!("New WebSocket connection: {}", peer); + + while let Some(msg) = ws_stream.next().await { + let msg = msg?; + if msg.is_text() || msg.is_binary() { + ws_stream.send(msg).await?; + } + } + + Ok(()) +} + +#[tokio::main] +async fn main() { + env_logger::init(); + + let addr = "127.0.0.1:9002"; + let listener = TcpListener::bind(&addr).await.expect("Can't listen"); + info!("Listening on: {}", addr); + + while let Ok((stream, _)) = listener.accept().await { + let peer = stream.peer_addr().expect("connected streams should have a peer address"); + info!("Peer address: {}", peer); + + tokio::spawn(accept_connection(peer, stream)); + } +} diff --git a/.cargo-vendor/tokio-tungstenite/examples/client.rs b/.cargo-vendor/tokio-tungstenite/examples/client.rs new file mode 100644 index 0000000000..58fa960e28 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/client.rs @@ -0,0 +1,59 @@ +//! A simple example of hooking up stdin/stdout to a WebSocket stream. +//! +//! This example will connect to a server specified in the argument list and +//! then forward all data read on stdin to the server, printing out all data +//! received on stdout. +//! +//! Note that this is not currently optimized for performance, especially around +//! buffer management. Rather it's intended to show an example of working with a +//! client. +//! +//! You can use this example together with the `server` example. 
+ +use std::env; + +use futures_util::{future, pin_mut, StreamExt}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio_tungstenite::{connect_async, tungstenite::protocol::Message}; + +#[tokio::main] +async fn main() { + let connect_addr = + env::args().nth(1).unwrap_or_else(|| panic!("this program requires at least one argument")); + + let url = url::Url::parse(&connect_addr).unwrap(); + + let (stdin_tx, stdin_rx) = futures_channel::mpsc::unbounded(); + tokio::spawn(read_stdin(stdin_tx)); + + let (ws_stream, _) = connect_async(url).await.expect("Failed to connect"); + println!("WebSocket handshake has been successfully completed"); + + let (write, read) = ws_stream.split(); + + let stdin_to_ws = stdin_rx.map(Ok).forward(write); + let ws_to_stdout = { + read.for_each(|message| async { + let data = message.unwrap().into_data(); + tokio::io::stdout().write_all(&data).await.unwrap(); + }) + }; + + pin_mut!(stdin_to_ws, ws_to_stdout); + future::select(stdin_to_ws, ws_to_stdout).await; +} + +// Our helper method which will read data from stdin and send it along the +// sender provided. +async fn read_stdin(tx: futures_channel::mpsc::UnboundedSender) { + let mut stdin = tokio::io::stdin(); + loop { + let mut buf = vec![0; 1024]; + let n = match stdin.read(&mut buf).await { + Err(_) | Ok(0) => break, + Ok(n) => n, + }; + buf.truncate(n); + tx.unbounded_send(Message::binary(buf)).unwrap(); + } +} diff --git a/.cargo-vendor/tokio-tungstenite/examples/echo-server.rs b/.cargo-vendor/tokio-tungstenite/examples/echo-server.rs new file mode 100644 index 0000000000..603b0f10cd --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/echo-server.rs @@ -0,0 +1,53 @@ +//! A simple echo server. +//! +//! You can test this out by running: +//! +//! cargo run --example echo-server 127.0.0.1:12345 +//! +//! And then in another window run: +//! +//! cargo run --example client ws://127.0.0.1:12345/ +//! +//! Type a message into the client window, press enter to send it and +//! 
see it echoed back. + +use std::{env, io::Error}; + +use futures_util::{future, StreamExt, TryStreamExt}; +use log::info; +use tokio::net::{TcpListener, TcpStream}; + +#[tokio::main] +async fn main() -> Result<(), Error> { + let _ = env_logger::try_init(); + let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:8080".to_string()); + + // Create the event loop and TCP listener we'll accept connections on. + let try_socket = TcpListener::bind(&addr).await; + let listener = try_socket.expect("Failed to bind"); + info!("Listening on: {}", addr); + + while let Ok((stream, _)) = listener.accept().await { + tokio::spawn(accept_connection(stream)); + } + + Ok(()) +} + +async fn accept_connection(stream: TcpStream) { + let addr = stream.peer_addr().expect("connected streams should have a peer address"); + info!("Peer address: {}", addr); + + let ws_stream = tokio_tungstenite::accept_async(stream) + .await + .expect("Error during the websocket handshake occurred"); + + info!("New WebSocket connection: {}", addr); + + let (write, read) = ws_stream.split(); + // We should not forward messages other than text or binary. 
+ read.try_filter(|msg| future::ready(msg.is_text() || msg.is_binary())) + .forward(write) + .await + .expect("Failed to forward messages") +} diff --git a/.cargo-vendor/tokio-tungstenite/examples/interval-server.rs b/.cargo-vendor/tokio-tungstenite/examples/interval-server.rs new file mode 100644 index 0000000000..d175f2e7ea --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/interval-server.rs @@ -0,0 +1,65 @@ +use futures_util::{SinkExt, StreamExt}; +use log::*; +use std::{net::SocketAddr, time::Duration}; +use tokio::net::{TcpListener, TcpStream}; +use tokio_tungstenite::{ + accept_async, + tungstenite::{Error, Message, Result}, +}; + +async fn accept_connection(peer: SocketAddr, stream: TcpStream) { + if let Err(e) = handle_connection(peer, stream).await { + match e { + Error::ConnectionClosed | Error::Protocol(_) | Error::Utf8 => (), + err => error!("Error processing connection: {}", err), + } + } +} + +async fn handle_connection(peer: SocketAddr, stream: TcpStream) -> Result<()> { + let ws_stream = accept_async(stream).await.expect("Failed to accept"); + info!("New WebSocket connection: {}", peer); + let (mut ws_sender, mut ws_receiver) = ws_stream.split(); + let mut interval = tokio::time::interval(Duration::from_millis(1000)); + + // Echo incoming WebSocket messages and send a message periodically every second. + + loop { + tokio::select! 
{ + msg = ws_receiver.next() => { + match msg { + Some(msg) => { + let msg = msg?; + if msg.is_text() ||msg.is_binary() { + ws_sender.send(msg).await?; + } else if msg.is_close() { + break; + } + } + None => break, + } + } + _ = interval.tick() => { + ws_sender.send(Message::Text("tick".to_owned())).await?; + } + } + } + + Ok(()) +} + +#[tokio::main] +async fn main() { + env_logger::init(); + + let addr = "127.0.0.1:9002"; + let listener = TcpListener::bind(&addr).await.expect("Can't listen"); + info!("Listening on: {}", addr); + + while let Ok((stream, _)) = listener.accept().await { + let peer = stream.peer_addr().expect("connected streams should have a peer address"); + info!("Peer address: {}", peer); + + tokio::spawn(accept_connection(peer, stream)); + } +} diff --git a/.cargo-vendor/tokio-tungstenite/examples/server-custom-accept.rs b/.cargo-vendor/tokio-tungstenite/examples/server-custom-accept.rs new file mode 100644 index 0000000000..67ea04ff20 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/server-custom-accept.rs @@ -0,0 +1,171 @@ +//! A chat server that broadcasts a message to all connections. +//! +//! This is a simple line-based server which accepts WebSocket connections, +//! reads lines from those connections, and broadcasts the lines to all other +//! connected clients. +//! +//! You can test this out by running: +//! +//! cargo run --example server 127.0.0.1:12345 +//! +//! And then in another window run: +//! +//! cargo run --example client ws://127.0.0.1:12345/socket +//! +//! You can run the second command in multiple windows and then chat between the +//! two, seeing the messages from the other client as they're received. For all +//! connected clients they'll all join the same room and see everyone else's +//! messages. 
+ +use std::{ + collections::HashMap, + convert::Infallible, + env, + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use hyper::{ + header::{ + HeaderValue, CONNECTION, SEC_WEBSOCKET_ACCEPT, SEC_WEBSOCKET_KEY, SEC_WEBSOCKET_VERSION, + UPGRADE, + }, + server::conn::AddrStream, + service::{make_service_fn, service_fn}, + upgrade::Upgraded, + Body, Method, Request, Response, Server, StatusCode, Version, +}; + +use futures_channel::mpsc::{unbounded, UnboundedSender}; +use futures_util::{future, pin_mut, stream::TryStreamExt, StreamExt}; + +use tokio_tungstenite::{ + tungstenite::{ + handshake::derive_accept_key, + protocol::{Message, Role}, + }, + WebSocketStream, +}; + +type Tx = UnboundedSender; +type PeerMap = Arc>>; + +async fn handle_connection( + peer_map: PeerMap, + ws_stream: WebSocketStream, + addr: SocketAddr, +) { + println!("WebSocket connection established: {}", addr); + + // Insert the write part of this peer to the peer map. + let (tx, rx) = unbounded(); + peer_map.lock().unwrap().insert(addr, tx); + + let (outgoing, incoming) = ws_stream.split(); + + let broadcast_incoming = incoming.try_for_each(|msg| { + println!("Received a message from {}: {}", addr, msg.to_text().unwrap()); + let peers = peer_map.lock().unwrap(); + + // We want to broadcast the message to everyone except ourselves. 
+ let broadcast_recipients = + peers.iter().filter(|(peer_addr, _)| peer_addr != &&addr).map(|(_, ws_sink)| ws_sink); + + for recp in broadcast_recipients { + recp.unbounded_send(msg.clone()).unwrap(); + } + + future::ok(()) + }); + + let receive_from_others = rx.map(Ok).forward(outgoing); + + pin_mut!(broadcast_incoming, receive_from_others); + future::select(broadcast_incoming, receive_from_others).await; + + println!("{} disconnected", &addr); + peer_map.lock().unwrap().remove(&addr); +} + +async fn handle_request( + peer_map: PeerMap, + mut req: Request, + addr: SocketAddr, +) -> Result, Infallible> { + println!("Received a new, potentially ws handshake"); + println!("The request's path is: {}", req.uri().path()); + println!("The request's headers are:"); + for (ref header, _value) in req.headers() { + println!("* {}", header); + } + let upgrade = HeaderValue::from_static("Upgrade"); + let websocket = HeaderValue::from_static("websocket"); + let headers = req.headers(); + let key = headers.get(SEC_WEBSOCKET_KEY); + let derived = key.map(|k| derive_accept_key(k.as_bytes())); + if req.method() != Method::GET + || req.version() < Version::HTTP_11 + || !headers + .get(CONNECTION) + .and_then(|h| h.to_str().ok()) + .map(|h| { + h.split(|c| c == ' ' || c == ',') + .any(|p| p.eq_ignore_ascii_case(upgrade.to_str().unwrap())) + }) + .unwrap_or(false) + || !headers + .get(UPGRADE) + .and_then(|h| h.to_str().ok()) + .map(|h| h.eq_ignore_ascii_case("websocket")) + .unwrap_or(false) + || !headers.get(SEC_WEBSOCKET_VERSION).map(|h| h == "13").unwrap_or(false) + || key.is_none() + || req.uri() != "/socket" + { + return Ok(Response::new(Body::from("Hello World!"))); + } + let ver = req.version(); + tokio::task::spawn(async move { + match hyper::upgrade::on(&mut req).await { + Ok(upgraded) => { + handle_connection( + peer_map, + WebSocketStream::from_raw_socket(upgraded, Role::Server, None).await, + addr, + ) + .await; + } + Err(e) => println!("upgrade error: {}", e), + } + }); 
+ let mut res = Response::new(Body::empty()); + *res.status_mut() = StatusCode::SWITCHING_PROTOCOLS; + *res.version_mut() = ver; + res.headers_mut().append(CONNECTION, upgrade); + res.headers_mut().append(UPGRADE, websocket); + res.headers_mut().append(SEC_WEBSOCKET_ACCEPT, derived.unwrap().parse().unwrap()); + // Let's add an additional header to our response to the client. + res.headers_mut().append("MyCustomHeader", ":)".parse().unwrap()); + res.headers_mut().append("SOME_TUNGSTENITE_HEADER", "header_value".parse().unwrap()); + Ok(res) +} + +#[tokio::main] +async fn main() -> Result<(), hyper::Error> { + let state = PeerMap::new(Mutex::new(HashMap::new())); + + let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:8080".to_string()).parse().unwrap(); + + let make_svc = make_service_fn(move |conn: &AddrStream| { + let remote_addr = conn.remote_addr(); + let state = state.clone(); + let service = service_fn(move |req| handle_request(state.clone(), req, remote_addr)); + async { Ok::<_, Infallible>(service) } + }); + + let server = Server::bind(&addr).serve(make_svc); + + server.await?; + + Ok::<_, hyper::Error>(()) +} diff --git a/.cargo-vendor/tokio-tungstenite/examples/server-headers.rs b/.cargo-vendor/tokio-tungstenite/examples/server-headers.rs new file mode 100644 index 0000000000..014c454a11 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/server-headers.rs @@ -0,0 +1,86 @@ +//! Read/Write headers on server example +//! +//! Run with logs: +//! Linux: +//! ```sh +//! RUST_LOG=debug cargo run --example server-headers +//! ``` +//! Windows +//! ```sh +//! cmd /c "set RUST_LOG=debug && cargo run --example server-headers" +//! 
``` +use tokio::net::{TcpListener, TcpStream}; +use tokio_tungstenite::{ + accept_hdr_async, + tungstenite::{ + connect, + handshake::server::{Request, Response}, + Message, + }, +}; +use url::Url; +#[macro_use] +extern crate log; +use futures_util::{SinkExt, StreamExt}; + +#[tokio::main] +async fn main() { + env_logger::builder().format_timestamp(None).init(); + + tokio::spawn(async move { + server().await; + }); + client(); +} + +async fn server() { + let server = TcpListener::bind("127.0.0.1:8080").await.unwrap(); + + while let Ok((stream, _)) = server.accept().await { + tokio::spawn(accept_connection(stream)); + } +} + +async fn accept_connection(stream: TcpStream) { + let callback = |req: &Request, mut response: Response| { + debug!("Received a new ws handshake"); + debug!("The request's path is: {}", req.uri().path()); + debug!("The request's headers are:"); + for (ref header, _value) in req.headers() { + debug!("* {}: {:?}", header, _value); + } + + let headers = response.headers_mut(); + headers.append("MyCustomHeader", ":)".parse().unwrap()); + + Ok(response) + }; + let mut ws_stream = accept_hdr_async(stream, callback) + .await + .expect("Error during the websocket handshake occurred"); + + while let Some(msg) = ws_stream.next().await { + let msg = msg.unwrap(); + if msg.is_text() || msg.is_binary() { + debug!("Server on message: {:?}", &msg); + ws_stream.send(msg).await.unwrap(); + } + } +} + +fn client() { + let (mut socket, response) = + connect(Url::parse("ws://localhost:8080/socket").unwrap()).expect("Can't connect"); + debug!("Connected to the server"); + debug!("Response HTTP code: {}", response.status()); + debug!("Response contains the following headers:"); + for (ref header, _value) in response.headers() { + debug!("* {}: {:?}", header, _value); + } + + socket.send(Message::Text("Hello WebSocket".into())).unwrap(); + loop { + let msg = socket.read().expect("Error reading message"); + debug!("Received: {}", msg); + } +} diff --git 
a/.cargo-vendor/tokio-tungstenite/examples/server.rs b/.cargo-vendor/tokio-tungstenite/examples/server.rs new file mode 100644 index 0000000000..288aeaa509 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/examples/server.rs @@ -0,0 +1,92 @@ +//! A chat server that broadcasts a message to all connections. +//! +//! This is a simple line-based server which accepts WebSocket connections, +//! reads lines from those connections, and broadcasts the lines to all other +//! connected clients. +//! +//! You can test this out by running: +//! +//! cargo run --example server 127.0.0.1:12345 +//! +//! And then in another window run: +//! +//! cargo run --example client ws://127.0.0.1:12345/ +//! +//! You can run the second command in multiple windows and then chat between the +//! two, seeing the messages from the other client as they're received. For all +//! connected clients they'll all join the same room and see everyone else's +//! messages. + +use std::{ + collections::HashMap, + env, + io::Error as IoError, + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use futures_channel::mpsc::{unbounded, UnboundedSender}; +use futures_util::{future, pin_mut, stream::TryStreamExt, StreamExt}; + +use tokio::net::{TcpListener, TcpStream}; +use tokio_tungstenite::tungstenite::protocol::Message; + +type Tx = UnboundedSender; +type PeerMap = Arc>>; + +async fn handle_connection(peer_map: PeerMap, raw_stream: TcpStream, addr: SocketAddr) { + println!("Incoming TCP connection from: {}", addr); + + let ws_stream = tokio_tungstenite::accept_async(raw_stream) + .await + .expect("Error during the websocket handshake occurred"); + println!("WebSocket connection established: {}", addr); + + // Insert the write part of this peer to the peer map. 
+ let (tx, rx) = unbounded(); + peer_map.lock().unwrap().insert(addr, tx); + + let (outgoing, incoming) = ws_stream.split(); + + let broadcast_incoming = incoming.try_for_each(|msg| { + println!("Received a message from {}: {}", addr, msg.to_text().unwrap()); + let peers = peer_map.lock().unwrap(); + + // We want to broadcast the message to everyone except ourselves. + let broadcast_recipients = + peers.iter().filter(|(peer_addr, _)| peer_addr != &&addr).map(|(_, ws_sink)| ws_sink); + + for recp in broadcast_recipients { + recp.unbounded_send(msg.clone()).unwrap(); + } + + future::ok(()) + }); + + let receive_from_others = rx.map(Ok).forward(outgoing); + + pin_mut!(broadcast_incoming, receive_from_others); + future::select(broadcast_incoming, receive_from_others).await; + + println!("{} disconnected", &addr); + peer_map.lock().unwrap().remove(&addr); +} + +#[tokio::main] +async fn main() -> Result<(), IoError> { + let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:8080".to_string()); + + let state = PeerMap::new(Mutex::new(HashMap::new())); + + // Create the event loop and TCP listener we'll accept connections on. + let try_socket = TcpListener::bind(&addr).await; + let listener = try_socket.expect("Failed to bind"); + println!("Listening on: {}", addr); + + // Let's spawn the handling of each connection in a separate task. 
+ while let Ok((stream, addr)) = listener.accept().await { + tokio::spawn(handle_connection(state.clone(), stream, addr)); + } + + Ok(()) +} diff --git a/.cargo-vendor/tokio-tungstenite/src/compat.rs b/.cargo-vendor/tokio-tungstenite/src/compat.rs new file mode 100644 index 0000000000..6bbd0e68fe --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/src/compat.rs @@ -0,0 +1,198 @@ +use log::*; +use std::{ + io::{Read, Write}, + pin::Pin, + task::{Context, Poll}, +}; + +use futures_util::task; +use std::sync::Arc; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tungstenite::Error as WsError; + +pub(crate) enum ContextWaker { + Read, + Write, +} + +#[derive(Debug)] +pub(crate) struct AllowStd { + inner: S, + // We have the problem that external read operations (i.e. the Stream impl) + // can trigger both read (AsyncRead) and write (AsyncWrite) operations on + // the underyling stream. At the same time write operations (i.e. the Sink + // impl) can trigger write operations (AsyncWrite) too. + // Both the Stream and the Sink can be used on two different tasks, but it + // is required that AsyncRead and AsyncWrite are only ever used by a single + // task (or better: with a single waker) at a time. + // + // Doing otherwise would cause only the latest waker to be remembered, so + // in our case either the Stream or the Sink impl would potentially wait + // forever to be woken up because only the other one would've been woken + // up. + // + // To solve this we implement a waker proxy that has two slots (one for + // read, one for write) to store wakers. One waker proxy is always passed + // to the AsyncRead, the other to AsyncWrite so that they will only ever + // have to store a single waker, but internally we dispatch any wakeups to + // up to two actual wakers (one from the Sink impl and one from the Stream + // impl). + // + // write_waker_proxy is always used for AsyncWrite, read_waker_proxy for + // AsyncRead. 
The read_waker slots of both are used for the Stream impl + // (and handshaking), the write_waker slots for the Sink impl. + write_waker_proxy: Arc, + read_waker_proxy: Arc, +} + +// Internal trait used only in the Handshake module for registering +// the waker for the context used during handshaking. We're using the +// read waker slot for this, but any would do. +// +// Don't ever use this from multiple tasks at the same time! +pub(crate) trait SetWaker { + fn set_waker(&self, waker: &task::Waker); +} + +impl SetWaker for AllowStd { + fn set_waker(&self, waker: &task::Waker) { + self.set_waker(ContextWaker::Read, waker); + } +} + +impl AllowStd { + pub(crate) fn new(inner: S, waker: &task::Waker) -> Self { + let res = Self { + inner, + write_waker_proxy: Default::default(), + read_waker_proxy: Default::default(), + }; + + // Register the handshake waker as read waker for both proxies, + // see also the SetWaker trait. + res.write_waker_proxy.read_waker.register(waker); + res.read_waker_proxy.read_waker.register(waker); + + res + } + + // Set the read or write waker for our proxies. + // + // Read: this is only supposed to be called by read (or handshake) operations, i.e. the Stream + // impl on the WebSocketStream. + // Reading can also cause writes to happen, e.g. in case of Message::Ping handling. + // + // Write: this is only supposde to be called by write operations, i.e. the Sink impl on the + // WebSocketStream. + pub(crate) fn set_waker(&self, kind: ContextWaker, waker: &task::Waker) { + match kind { + ContextWaker::Read => { + self.write_waker_proxy.read_waker.register(waker); + self.read_waker_proxy.read_waker.register(waker); + } + ContextWaker::Write => { + self.write_waker_proxy.write_waker.register(waker); + self.read_waker_proxy.write_waker.register(waker); + } + } + } +} + +// Proxy Waker that we pass to the internal AsyncRead/Write of the +// stream underlying the websocket. 
We have two slots here for the +// actual wakers to allow external read operations to trigger both +// reads and writes, and the same for writes. +#[derive(Debug, Default)] +struct WakerProxy { + read_waker: task::AtomicWaker, + write_waker: task::AtomicWaker, +} + +impl task::ArcWake for WakerProxy { + fn wake_by_ref(arc_self: &Arc) { + arc_self.read_waker.wake(); + arc_self.write_waker.wake(); + } +} + +impl AllowStd +where + S: Unpin, +{ + fn with_context(&mut self, kind: ContextWaker, f: F) -> Poll> + where + F: FnOnce(&mut Context<'_>, Pin<&mut S>) -> Poll>, + { + trace!("{}:{} AllowStd.with_context", file!(), line!()); + let waker = match kind { + ContextWaker::Read => task::waker_ref(&self.read_waker_proxy), + ContextWaker::Write => task::waker_ref(&self.write_waker_proxy), + }; + let mut context = task::Context::from_waker(&waker); + f(&mut context, Pin::new(&mut self.inner)) + } + + pub(crate) fn get_mut(&mut self) -> &mut S { + &mut self.inner + } + + pub(crate) fn get_ref(&self) -> &S { + &self.inner + } +} + +impl Read for AllowStd +where + S: AsyncRead + Unpin, +{ + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + trace!("{}:{} Read.read", file!(), line!()); + let mut buf = ReadBuf::new(buf); + match self.with_context(ContextWaker::Read, |ctx, stream| { + trace!("{}:{} Read.with_context read -> poll_read", file!(), line!()); + stream.poll_read(ctx, &mut buf) + }) { + Poll::Ready(Ok(_)) => Ok(buf.filled().len()), + Poll::Ready(Err(err)) => Err(err), + Poll::Pending => Err(std::io::Error::from(std::io::ErrorKind::WouldBlock)), + } + } +} + +impl Write for AllowStd +where + S: AsyncWrite + Unpin, +{ + fn write(&mut self, buf: &[u8]) -> std::io::Result { + trace!("{}:{} Write.write", file!(), line!()); + match self.with_context(ContextWaker::Write, |ctx, stream| { + trace!("{}:{} Write.with_context write -> poll_write", file!(), line!()); + stream.poll_write(ctx, buf) + }) { + Poll::Ready(r) => r, + Poll::Pending => 
Err(std::io::Error::from(std::io::ErrorKind::WouldBlock)), + } + } + + fn flush(&mut self) -> std::io::Result<()> { + trace!("{}:{} Write.flush", file!(), line!()); + match self.with_context(ContextWaker::Write, |ctx, stream| { + trace!("{}:{} Write.with_context flush -> poll_flush", file!(), line!()); + stream.poll_flush(ctx) + }) { + Poll::Ready(r) => r, + Poll::Pending => Err(std::io::Error::from(std::io::ErrorKind::WouldBlock)), + } + } +} + +pub(crate) fn cvt(r: Result) -> Poll> { + match r { + Ok(v) => Poll::Ready(Ok(v)), + Err(WsError::Io(ref e)) if e.kind() == std::io::ErrorKind::WouldBlock => { + trace!("WouldBlock"); + Poll::Pending + } + Err(e) => Poll::Ready(Err(e)), + } +} diff --git a/.cargo-vendor/tokio-tungstenite/src/connect.rs b/.cargo-vendor/tokio-tungstenite/src/connect.rs new file mode 100644 index 0000000000..5787af1531 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/src/connect.rs @@ -0,0 +1,80 @@ +//! Connection helper. +use tokio::net::TcpStream; + +use tungstenite::{ + error::{Error, UrlError}, + handshake::client::{Request, Response}, + protocol::WebSocketConfig, +}; + +use crate::{domain, stream::MaybeTlsStream, Connector, IntoClientRequest, WebSocketStream}; + +/// Connect to a given URL. +pub async fn connect_async( + request: R, +) -> Result<(WebSocketStream>, Response), Error> +where + R: IntoClientRequest + Unpin, +{ + connect_async_with_config(request, None, false).await +} + +/// The same as `connect_async()` but the one can specify a websocket configuration. +/// Please refer to `connect_async()` for more details. `disable_nagle` specifies if +/// the Nagle's algorithm must be disabled, i.e. `set_nodelay(true)`. If you don't know +/// what the Nagle's algorithm is, better leave it set to `false`. 
+pub async fn connect_async_with_config( + request: R, + config: Option, + disable_nagle: bool, +) -> Result<(WebSocketStream>, Response), Error> +where + R: IntoClientRequest + Unpin, +{ + connect(request.into_client_request()?, config, disable_nagle, None).await +} + +/// The same as `connect_async()` but the one can specify a websocket configuration, +/// and a TLS connector to use. Please refer to `connect_async()` for more details. +/// `disable_nagle` specifies if the Nagle's algorithm must be disabled, i.e. +/// `set_nodelay(true)`. If you don't know what the Nagle's algorithm is, better +/// leave it to `false`. +#[cfg(any(feature = "native-tls", feature = "__rustls-tls"))] +pub async fn connect_async_tls_with_config( + request: R, + config: Option, + disable_nagle: bool, + connector: Option, +) -> Result<(WebSocketStream>, Response), Error> +where + R: IntoClientRequest + Unpin, +{ + connect(request.into_client_request()?, config, disable_nagle, connector).await +} + +async fn connect( + request: Request, + config: Option, + disable_nagle: bool, + connector: Option, +) -> Result<(WebSocketStream>, Response), Error> { + let domain = domain(&request)?; + let port = request + .uri() + .port_u16() + .or_else(|| match request.uri().scheme_str() { + Some("wss") => Some(443), + Some("ws") => Some(80), + _ => None, + }) + .ok_or(Error::Url(UrlError::UnsupportedUrlScheme))?; + + let addr = format!("{domain}:{port}"); + let socket = TcpStream::connect(addr).await.map_err(Error::Io)?; + + if disable_nagle { + socket.set_nodelay(true)?; + } + + crate::tls::client_async_tls_with_config(request, socket, config, connector).await +} diff --git a/.cargo-vendor/tokio-tungstenite/src/handshake.rs b/.cargo-vendor/tokio-tungstenite/src/handshake.rs new file mode 100644 index 0000000000..aa51276960 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/src/handshake.rs @@ -0,0 +1,179 @@ +#[cfg(feature = "handshake")] +use crate::compat::SetWaker; +use crate::{compat::AllowStd, 
WebSocketStream}; +use log::*; +use std::{ + future::Future, + io::{Read, Write}, + pin::Pin, + task::{Context, Poll}, +}; +use tokio::io::{AsyncRead, AsyncWrite}; +use tungstenite::WebSocket; +#[cfg(feature = "handshake")] +use tungstenite::{ + handshake::{ + client::Response, server::Callback, HandshakeError as Error, HandshakeRole, + MidHandshake as WsHandshake, + }, + ClientHandshake, ServerHandshake, +}; + +pub(crate) async fn without_handshake(stream: S, f: F) -> WebSocketStream +where + F: FnOnce(AllowStd) -> WebSocket> + Unpin, + S: AsyncRead + AsyncWrite + Unpin, +{ + let start = SkippedHandshakeFuture(Some(SkippedHandshakeFutureInner { f, stream })); + + let ws = start.await; + + WebSocketStream::new(ws) +} + +struct SkippedHandshakeFuture(Option>); +struct SkippedHandshakeFutureInner { + f: F, + stream: S, +} + +impl Future for SkippedHandshakeFuture +where + F: FnOnce(AllowStd) -> WebSocket> + Unpin, + S: Unpin, + AllowStd: Read + Write, +{ + type Output = WebSocket>; + + fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { + let inner = self.get_mut().0.take().expect("future polled after completion"); + trace!("Setting context when skipping handshake"); + let stream = AllowStd::new(inner.stream, ctx.waker()); + + Poll::Ready((inner.f)(stream)) + } +} + +#[cfg(feature = "handshake")] +struct MidHandshake(Option>); + +#[cfg(feature = "handshake")] +enum StartedHandshake { + Done(Role::FinalResult), + Mid(WsHandshake), +} + +#[cfg(feature = "handshake")] +struct StartedHandshakeFuture(Option>); +#[cfg(feature = "handshake")] +struct StartedHandshakeFutureInner { + f: F, + stream: S, +} + +#[cfg(feature = "handshake")] +async fn handshake(stream: S, f: F) -> Result> +where + Role: HandshakeRole + Unpin, + Role::InternalStream: SetWaker + Unpin, + F: FnOnce(AllowStd) -> Result> + Unpin, + S: AsyncRead + AsyncWrite + Unpin, +{ + let start = StartedHandshakeFuture(Some(StartedHandshakeFutureInner { f, stream })); + + match start.await? 
{ + StartedHandshake::Done(r) => Ok(r), + StartedHandshake::Mid(s) => { + let res: Result> = MidHandshake::(Some(s)).await; + res + } + } +} + +#[cfg(feature = "handshake")] +pub(crate) async fn client_handshake( + stream: S, + f: F, +) -> Result<(WebSocketStream, Response), Error>>> +where + F: FnOnce( + AllowStd, + ) -> Result< + > as HandshakeRole>::FinalResult, + Error>>, + > + Unpin, + S: AsyncRead + AsyncWrite + Unpin, +{ + let result = handshake(stream, f).await?; + let (s, r) = result; + Ok((WebSocketStream::new(s), r)) +} + +#[cfg(feature = "handshake")] +pub(crate) async fn server_handshake( + stream: S, + f: F, +) -> Result, Error, C>>> +where + C: Callback + Unpin, + F: FnOnce( + AllowStd, + ) -> Result< + , C> as HandshakeRole>::FinalResult, + Error, C>>, + > + Unpin, + S: AsyncRead + AsyncWrite + Unpin, +{ + let s: WebSocket> = handshake(stream, f).await?; + Ok(WebSocketStream::new(s)) +} + +#[cfg(feature = "handshake")] +impl Future for StartedHandshakeFuture +where + Role: HandshakeRole, + Role::InternalStream: SetWaker + Unpin, + F: FnOnce(AllowStd) -> Result> + Unpin, + S: Unpin, + AllowStd: Read + Write, +{ + type Output = Result, Error>; + + fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { + let inner = self.0.take().expect("future polled after completion"); + trace!("Setting ctx when starting handshake"); + let stream = AllowStd::new(inner.stream, ctx.waker()); + + match (inner.f)(stream) { + Ok(r) => Poll::Ready(Ok(StartedHandshake::Done(r))), + Err(Error::Interrupted(mid)) => Poll::Ready(Ok(StartedHandshake::Mid(mid))), + Err(Error::Failure(e)) => Poll::Ready(Err(Error::Failure(e))), + } + } +} + +#[cfg(feature = "handshake")] +impl Future for MidHandshake +where + Role: HandshakeRole + Unpin, + Role::InternalStream: SetWaker + Unpin, +{ + type Output = Result>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut s = self.as_mut().0.take().expect("future polled after completion"); + + let machine 
= s.get_mut(); + trace!("Setting context in handshake"); + machine.get_mut().set_waker(cx.waker()); + + match s.handshake() { + Ok(stream) => Poll::Ready(Ok(stream)), + Err(Error::Failure(e)) => Poll::Ready(Err(Error::Failure(e))), + Err(Error::Interrupted(mid)) => { + self.0 = Some(mid); + Poll::Pending + } + } + } +} diff --git a/.cargo-vendor/tokio-tungstenite/src/lib.rs b/.cargo-vendor/tokio-tungstenite/src/lib.rs new file mode 100644 index 0000000000..734fc64885 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/src/lib.rs @@ -0,0 +1,441 @@ +//! Async WebSocket usage. +//! +//! This library is an implementation of WebSocket handshakes and streams. It +//! is based on the crate which implements all required WebSocket protocol +//! logic. So this crate basically just brings tokio support / tokio integration +//! to it. +//! +//! Each WebSocket stream implements the required `Stream` and `Sink` traits, +//! so the socket is just a stream of messages coming in and going out. + +#![deny(missing_docs, unused_must_use, unused_mut, unused_imports, unused_import_braces)] + +pub use tungstenite; + +mod compat; +#[cfg(feature = "connect")] +mod connect; +mod handshake; +#[cfg(feature = "stream")] +mod stream; +#[cfg(any(feature = "native-tls", feature = "__rustls-tls", feature = "connect"))] +mod tls; + +use std::io::{Read, Write}; + +use compat::{cvt, AllowStd, ContextWaker}; +use futures_util::{ + sink::{Sink, SinkExt}, + stream::{FusedStream, Stream}, +}; +use log::*; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; +use tokio::io::{AsyncRead, AsyncWrite}; + +#[cfg(feature = "handshake")] +use tungstenite::{ + client::IntoClientRequest, + handshake::{ + client::{ClientHandshake, Response}, + server::{Callback, NoCallback}, + HandshakeError, + }, +}; +use tungstenite::{ + error::Error as WsError, + protocol::{Message, Role, WebSocket, WebSocketConfig}, +}; + +#[cfg(any(feature = "native-tls", feature = "__rustls-tls", feature = "connect"))] +pub use 
tls::Connector; +#[cfg(any(feature = "native-tls", feature = "__rustls-tls"))] +pub use tls::{client_async_tls, client_async_tls_with_config}; + +#[cfg(feature = "connect")] +pub use connect::{connect_async, connect_async_with_config}; + +#[cfg(all(any(feature = "native-tls", feature = "__rustls-tls"), feature = "connect"))] +pub use connect::connect_async_tls_with_config; + +#[cfg(feature = "stream")] +pub use stream::MaybeTlsStream; + +use tungstenite::protocol::CloseFrame; + +/// Creates a WebSocket handshake from a request and a stream. +/// For convenience, the user may call this with a url string, a URL, +/// or a `Request`. Calling with `Request` allows the user to add +/// a WebSocket protocol or other custom headers. +/// +/// Internally, this custom creates a handshake representation and returns +/// a future representing the resolution of the WebSocket handshake. The +/// returned future will resolve to either `WebSocketStream` or `Error` +/// depending on whether the handshake is successful. +/// +/// This is typically used for clients who have already established, for +/// example, a TCP connection to the remote server. +#[cfg(feature = "handshake")] +pub async fn client_async<'a, R, S>( + request: R, + stream: S, +) -> Result<(WebSocketStream, Response), WsError> +where + R: IntoClientRequest + Unpin, + S: AsyncRead + AsyncWrite + Unpin, +{ + client_async_with_config(request, stream, None).await +} + +/// The same as `client_async()` but the one can specify a websocket configuration. +/// Please refer to `client_async()` for more details. 
+#[cfg(feature = "handshake")] +pub async fn client_async_with_config<'a, R, S>( + request: R, + stream: S, + config: Option, +) -> Result<(WebSocketStream, Response), WsError> +where + R: IntoClientRequest + Unpin, + S: AsyncRead + AsyncWrite + Unpin, +{ + let f = handshake::client_handshake(stream, move |allow_std| { + let request = request.into_client_request()?; + let cli_handshake = ClientHandshake::start(allow_std, request, config)?; + cli_handshake.handshake() + }); + f.await.map_err(|e| match e { + HandshakeError::Failure(e) => e, + e => WsError::Io(std::io::Error::new(std::io::ErrorKind::Other, e.to_string())), + }) +} + +/// Accepts a new WebSocket connection with the provided stream. +/// +/// This function will internally call `server::accept` to create a +/// handshake representation and returns a future representing the +/// resolution of the WebSocket handshake. The returned future will resolve +/// to either `WebSocketStream` or `Error` depending if it's successful +/// or not. +/// +/// This is typically used after a socket has been accepted from a +/// `TcpListener`. That socket is then passed to this function to perform +/// the server half of the accepting a client's websocket connection. +#[cfg(feature = "handshake")] +pub async fn accept_async(stream: S) -> Result, WsError> +where + S: AsyncRead + AsyncWrite + Unpin, +{ + accept_hdr_async(stream, NoCallback).await +} + +/// The same as `accept_async()` but the one can specify a websocket configuration. +/// Please refer to `accept_async()` for more details. +#[cfg(feature = "handshake")] +pub async fn accept_async_with_config( + stream: S, + config: Option, +) -> Result, WsError> +where + S: AsyncRead + AsyncWrite + Unpin, +{ + accept_hdr_async_with_config(stream, NoCallback, config).await +} + +/// Accepts a new WebSocket connection with the provided stream. +/// +/// This function does the same as `accept_async()` but accepts an extra callback +/// for header processing. 
The callback receives headers of the incoming +/// requests and is able to add extra headers to the reply. +#[cfg(feature = "handshake")] +pub async fn accept_hdr_async(stream: S, callback: C) -> Result, WsError> +where + S: AsyncRead + AsyncWrite + Unpin, + C: Callback + Unpin, +{ + accept_hdr_async_with_config(stream, callback, None).await +} + +/// The same as `accept_hdr_async()` but the one can specify a websocket configuration. +/// Please refer to `accept_hdr_async()` for more details. +#[cfg(feature = "handshake")] +pub async fn accept_hdr_async_with_config( + stream: S, + callback: C, + config: Option, +) -> Result, WsError> +where + S: AsyncRead + AsyncWrite + Unpin, + C: Callback + Unpin, +{ + let f = handshake::server_handshake(stream, move |allow_std| { + tungstenite::accept_hdr_with_config(allow_std, callback, config) + }); + f.await.map_err(|e| match e { + HandshakeError::Failure(e) => e, + e => WsError::Io(std::io::Error::new(std::io::ErrorKind::Other, e.to_string())), + }) +} + +/// A wrapper around an underlying raw stream which implements the WebSocket +/// protocol. +/// +/// A `WebSocketStream` represents a handshake that has been completed +/// successfully and both the server and the client are ready for receiving +/// and sending data. Message from a `WebSocketStream` are accessible +/// through the respective `Stream` and `Sink`. Check more information about +/// them in `futures-rs` crate documentation or have a look on the examples +/// and unit tests for this crate. +#[derive(Debug)] +pub struct WebSocketStream { + inner: WebSocket>, + closing: bool, + ended: bool, + /// Tungstenite is probably ready to receive more data. + /// + /// `false` once start_send hits `WouldBlock` errors. + /// `true` initially and after `flush`ing. + ready: bool, +} + +impl WebSocketStream { + /// Convert a raw socket into a WebSocketStream without performing a + /// handshake. 
+ pub async fn from_raw_socket(stream: S, role: Role, config: Option) -> Self + where + S: AsyncRead + AsyncWrite + Unpin, + { + handshake::without_handshake(stream, move |allow_std| { + WebSocket::from_raw_socket(allow_std, role, config) + }) + .await + } + + /// Convert a raw socket into a WebSocketStream without performing a + /// handshake. + pub async fn from_partially_read( + stream: S, + part: Vec, + role: Role, + config: Option, + ) -> Self + where + S: AsyncRead + AsyncWrite + Unpin, + { + handshake::without_handshake(stream, move |allow_std| { + WebSocket::from_partially_read(allow_std, part, role, config) + }) + .await + } + + pub(crate) fn new(ws: WebSocket>) -> Self { + Self { inner: ws, closing: false, ended: false, ready: true } + } + + fn with_context(&mut self, ctx: Option<(ContextWaker, &mut Context<'_>)>, f: F) -> R + where + S: Unpin, + F: FnOnce(&mut WebSocket>) -> R, + AllowStd: Read + Write, + { + trace!("{}:{} WebSocketStream.with_context", file!(), line!()); + if let Some((kind, ctx)) = ctx { + self.inner.get_mut().set_waker(kind, ctx.waker()); + } + f(&mut self.inner) + } + + /// Returns a shared reference to the inner stream. + pub fn get_ref(&self) -> &S + where + S: AsyncRead + AsyncWrite + Unpin, + { + self.inner.get_ref().get_ref() + } + + /// Returns a mutable reference to the inner stream. + pub fn get_mut(&mut self) -> &mut S + where + S: AsyncRead + AsyncWrite + Unpin, + { + self.inner.get_mut().get_mut() + } + + /// Returns a reference to the configuration of the tungstenite stream. 
+ pub fn get_config(&self) -> &WebSocketConfig { + self.inner.get_config() + } + + /// Close the underlying web socket + pub async fn close(&mut self, msg: Option>) -> Result<(), WsError> + where + S: AsyncRead + AsyncWrite + Unpin, + { + let msg = msg.map(|msg| msg.into_owned()); + self.send(Message::Close(msg)).await + } +} + +impl Stream for WebSocketStream +where + T: AsyncRead + AsyncWrite + Unpin, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + trace!("{}:{} Stream.poll_next", file!(), line!()); + + // The connection has been closed or a critical error has occurred. + // We have already returned the error to the user, the `Stream` is unusable, + // so we assume that the stream has been "fused". + if self.ended { + return Poll::Ready(None); + } + + match futures_util::ready!(self.with_context(Some((ContextWaker::Read, cx)), |s| { + trace!("{}:{} Stream.with_context poll_next -> read()", file!(), line!()); + cvt(s.read()) + })) { + Ok(v) => Poll::Ready(Some(Ok(v))), + Err(e) => { + self.ended = true; + if matches!(e, WsError::AlreadyClosed | WsError::ConnectionClosed) { + Poll::Ready(None) + } else { + Poll::Ready(Some(Err(e))) + } + } + } + } +} + +impl FusedStream for WebSocketStream +where + T: AsyncRead + AsyncWrite + Unpin, +{ + fn is_terminated(&self) -> bool { + self.ended + } +} + +impl Sink for WebSocketStream +where + T: AsyncRead + AsyncWrite + Unpin, +{ + type Error = WsError; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.ready { + Poll::Ready(Ok(())) + } else { + // Currently blocked so try to flush the blockage away + (*self).with_context(Some((ContextWaker::Write, cx)), |s| cvt(s.flush())).map(|r| { + self.ready = true; + r + }) + } + } + + fn start_send(mut self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> { + match (*self).with_context(None, |s| s.write(item)) { + Ok(()) => { + self.ready = true; + Ok(()) + } + Err(WsError::Io(err)) if 
err.kind() == std::io::ErrorKind::WouldBlock => { + // the message was accepted and queued so not an error + // but `poll_ready` will now start trying to flush the block + self.ready = false; + Ok(()) + } + Err(e) => { + self.ready = true; + debug!("websocket start_send error: {}", e); + Err(e) + } + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + (*self).with_context(Some((ContextWaker::Write, cx)), |s| cvt(s.flush())).map(|r| { + self.ready = true; + match r { + // WebSocket connection has just been closed. Flushing completed, not an error. + Err(WsError::ConnectionClosed) => Ok(()), + other => other, + } + }) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.ready = true; + let res = if self.closing { + // After queueing it, we call `flush` to drive the close handshake to completion. + (*self).with_context(Some((ContextWaker::Write, cx)), |s| s.flush()) + } else { + (*self).with_context(Some((ContextWaker::Write, cx)), |s| s.close(None)) + }; + + match res { + Ok(()) => Poll::Ready(Ok(())), + Err(WsError::ConnectionClosed) => Poll::Ready(Ok(())), + Err(WsError::Io(err)) if err.kind() == std::io::ErrorKind::WouldBlock => { + trace!("WouldBlock"); + self.closing = true; + Poll::Pending + } + Err(err) => { + debug!("websocket close error: {}", err); + Poll::Ready(Err(err)) + } + } + } +} + +/// Get a domain from an URL. 
+#[cfg(any(feature = "connect", feature = "native-tls", feature = "__rustls-tls"))] +#[inline] +fn domain(request: &tungstenite::handshake::client::Request) -> Result { + match request.uri().host() { + // rustls expects IPv6 addresses without the surrounding [] brackets + #[cfg(feature = "__rustls-tls")] + Some(d) if d.starts_with('[') && d.ends_with(']') => Ok(d[1..d.len() - 1].to_string()), + Some(d) => Ok(d.to_string()), + None => Err(WsError::Url(tungstenite::error::UrlError::NoHostName)), + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "connect")] + use crate::stream::MaybeTlsStream; + use crate::{compat::AllowStd, WebSocketStream}; + use std::io::{Read, Write}; + #[cfg(feature = "connect")] + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + + fn is_read() {} + fn is_write() {} + #[cfg(feature = "connect")] + fn is_async_read() {} + #[cfg(feature = "connect")] + fn is_async_write() {} + fn is_unpin() {} + + #[test] + fn web_socket_stream_has_traits() { + is_read::>(); + is_write::>(); + + #[cfg(feature = "connect")] + is_async_read::>(); + #[cfg(feature = "connect")] + is_async_write::>(); + + is_unpin::>(); + #[cfg(feature = "connect")] + is_unpin::>>(); + } +} diff --git a/.cargo-vendor/tokio-tungstenite/src/stream.rs b/.cargo-vendor/tokio-tungstenite/src/stream.rs new file mode 100644 index 0000000000..18affc3dbf --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/src/stream.rs @@ -0,0 +1,80 @@ +//! Convenience wrapper for streams to switch between plain TCP and TLS at runtime. +//! +//! There is no dependency on actual TLS implementations. Everything like +//! `native_tls` or `openssl` will work as long as there is a TLS stream supporting standard +//! `Read + Write` traits. +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; + +/// A stream that might be protected with TLS. +#[non_exhaustive] +#[derive(Debug)] +pub enum MaybeTlsStream { + /// Unencrypted socket stream. 
+ Plain(S), + /// Encrypted socket stream using `native-tls`. + #[cfg(feature = "native-tls")] + NativeTls(tokio_native_tls::TlsStream), + /// Encrypted socket stream using `rustls`. + #[cfg(feature = "__rustls-tls")] + Rustls(tokio_rustls::client::TlsStream), +} + +impl AsyncRead for MaybeTlsStream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + match self.get_mut() { + MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_read(cx, buf), + #[cfg(feature = "native-tls")] + MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_read(cx, buf), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(s) => Pin::new(s).poll_read(cx, buf), + } + } +} + +impl AsyncWrite for MaybeTlsStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + match self.get_mut() { + MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_write(cx, buf), + #[cfg(feature = "native-tls")] + MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_write(cx, buf), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(s) => Pin::new(s).poll_write(cx, buf), + } + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.get_mut() { + MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_flush(cx), + #[cfg(feature = "native-tls")] + MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_flush(cx), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(s) => Pin::new(s).poll_flush(cx), + } + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.get_mut() { + MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_shutdown(cx), + #[cfg(feature = "native-tls")] + MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_shutdown(cx), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(s) => Pin::new(s).poll_shutdown(cx), + } + } +} diff --git a/.cargo-vendor/tokio-tungstenite/src/tls.rs b/.cargo-vendor/tokio-tungstenite/src/tls.rs new 
file mode 100644 index 0000000000..7fe7329bd8 --- /dev/null +++ b/.cargo-vendor/tokio-tungstenite/src/tls.rs @@ -0,0 +1,221 @@ +//! Connection helper. +use tokio::io::{AsyncRead, AsyncWrite}; + +use tungstenite::{ + client::uri_mode, error::Error, handshake::client::Response, protocol::WebSocketConfig, +}; + +use crate::{client_async_with_config, IntoClientRequest, WebSocketStream}; + +pub use crate::stream::MaybeTlsStream; + +/// A connector that can be used when establishing connections, allowing to control whether +/// `native-tls` or `rustls` is used to create a TLS connection. Or TLS can be disabled with the +/// `Plain` variant. +#[non_exhaustive] +#[derive(Clone)] +pub enum Connector { + /// Plain (non-TLS) connector. + Plain, + /// `native-tls` TLS connector. + #[cfg(feature = "native-tls")] + NativeTls(native_tls_crate::TlsConnector), + /// `rustls` TLS connector. + #[cfg(feature = "__rustls-tls")] + Rustls(std::sync::Arc), +} + +mod encryption { + #[cfg(feature = "native-tls")] + pub mod native_tls { + use native_tls_crate::TlsConnector; + use tokio_native_tls::TlsConnector as TokioTlsConnector; + + use tokio::io::{AsyncRead, AsyncWrite}; + + use tungstenite::{error::TlsError, stream::Mode, Error}; + + use crate::stream::MaybeTlsStream; + + pub async fn wrap_stream( + socket: S, + domain: String, + mode: Mode, + tls_connector: Option, + ) -> Result, Error> + where + S: 'static + AsyncRead + AsyncWrite + Send + Unpin, + { + match mode { + Mode::Plain => Ok(MaybeTlsStream::Plain(socket)), + Mode::Tls => { + let try_connector = tls_connector.map_or_else(TlsConnector::new, Ok); + let connector = try_connector.map_err(TlsError::Native)?; + let stream = TokioTlsConnector::from(connector); + let connected = stream.connect(&domain, socket).await; + match connected { + Err(e) => Err(Error::Tls(e.into())), + Ok(s) => Ok(MaybeTlsStream::NativeTls(s)), + } + } + } + } + } + + #[cfg(feature = "__rustls-tls")] + pub mod rustls { + pub use rustls::ClientConfig; + use 
rustls::RootCertStore; + use rustls_pki_types::ServerName; + use tokio_rustls::TlsConnector as TokioTlsConnector; + + use std::{convert::TryFrom, sync::Arc}; + use tokio::io::{AsyncRead, AsyncWrite}; + + use tungstenite::{error::TlsError, stream::Mode, Error}; + + use crate::stream::MaybeTlsStream; + + pub async fn wrap_stream( + socket: S, + domain: String, + mode: Mode, + tls_connector: Option>, + ) -> Result, Error> + where + S: 'static + AsyncRead + AsyncWrite + Send + Unpin, + { + match mode { + Mode::Plain => Ok(MaybeTlsStream::Plain(socket)), + Mode::Tls => { + let config = match tls_connector { + Some(config) => config, + None => { + #[allow(unused_mut)] + let mut root_store = RootCertStore::empty(); + #[cfg(feature = "rustls-tls-native-roots")] + { + let native_certs = rustls_native_certs::load_native_certs()?; + let total_number = native_certs.len(); + let (number_added, number_ignored) = + root_store.add_parsable_certificates(native_certs); + log::debug!("Added {number_added}/{total_number} native root certificates (ignored {number_ignored})"); + } + #[cfg(feature = "rustls-tls-webpki-roots")] + { + root_store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned()); + } + + Arc::new( + ClientConfig::builder() + .with_root_certificates(root_store) + .with_no_client_auth(), + ) + } + }; + let domain = ServerName::try_from(domain.as_str()) + .map_err(|_| TlsError::InvalidDnsName)? 
+ .to_owned(); + let stream = TokioTlsConnector::from(config); + let connected = stream.connect(domain, socket).await; + + match connected { + Err(e) => Err(Error::Io(e)), + Ok(s) => Ok(MaybeTlsStream::Rustls(s)), + } + } + } + } + } + + pub mod plain { + use tokio::io::{AsyncRead, AsyncWrite}; + + use tungstenite::{ + error::{Error, UrlError}, + stream::Mode, + }; + + use crate::stream::MaybeTlsStream; + + pub async fn wrap_stream(socket: S, mode: Mode) -> Result, Error> + where + S: 'static + AsyncRead + AsyncWrite + Send + Unpin, + { + match mode { + Mode::Plain => Ok(MaybeTlsStream::Plain(socket)), + Mode::Tls => Err(Error::Url(UrlError::TlsFeatureNotEnabled)), + } + } + } +} + +/// Creates a WebSocket handshake from a request and a stream, +/// upgrading the stream to TLS if required. +#[cfg(any(feature = "native-tls", feature = "__rustls-tls"))] +pub async fn client_async_tls( + request: R, + stream: S, +) -> Result<(WebSocketStream>, Response), Error> +where + R: IntoClientRequest + Unpin, + S: 'static + AsyncRead + AsyncWrite + Send + Unpin, + MaybeTlsStream: Unpin, +{ + client_async_tls_with_config(request, stream, None, None).await +} + +/// The same as `client_async_tls()` but the one can specify a websocket configuration, +/// and an optional connector. If no connector is specified, a default one will +/// be created. +/// +/// Please refer to `client_async_tls()` for more details. +pub async fn client_async_tls_with_config( + request: R, + stream: S, + config: Option, + connector: Option, +) -> Result<(WebSocketStream>, Response), Error> +where + R: IntoClientRequest + Unpin, + S: 'static + AsyncRead + AsyncWrite + Send + Unpin, + MaybeTlsStream: Unpin, +{ + let request = request.into_client_request()?; + + #[cfg(any(feature = "native-tls", feature = "__rustls-tls"))] + let domain = crate::domain(&request)?; + + // Make sure we check domain and mode first. URL must be valid. 
+ let mode = uri_mode(request.uri())?; + + let stream = match connector { + Some(conn) => match conn { + #[cfg(feature = "native-tls")] + Connector::NativeTls(conn) => { + self::encryption::native_tls::wrap_stream(stream, domain, mode, Some(conn)).await + } + #[cfg(feature = "__rustls-tls")] + Connector::Rustls(conn) => { + self::encryption::rustls::wrap_stream(stream, domain, mode, Some(conn)).await + } + Connector::Plain => self::encryption::plain::wrap_stream(stream, mode).await, + }, + None => { + #[cfg(feature = "native-tls")] + { + self::encryption::native_tls::wrap_stream(stream, domain, mode, None).await + } + #[cfg(all(feature = "__rustls-tls", not(feature = "native-tls")))] + { + self::encryption::rustls::wrap_stream(stream, domain, mode, None).await + } + #[cfg(not(any(feature = "native-tls", feature = "__rustls-tls")))] + { + self::encryption::plain::wrap_stream(stream, mode).await + } + } + }?; + + client_async_with_config(request, stream, config).await +} diff --git a/.cargo-vendor/tower-http/.cargo-checksum.json b/.cargo-vendor/tower-http/.cargo-checksum.json new file mode 100644 index 0000000000..28c2ded334 --- /dev/null +++ b/.cargo-vendor/tower-http/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"a6f97152e710665e5e43e56f6895cd597f0164afa987a20a5689b0038d340c00","Cargo.toml":"b8b6b4e4181bcf3d693a5bcca6f589c3ec9a5a8e990d551ad3557ef694ab8c72","LICENSE":"5049cf464977eff4b4fcfa7988d84e74116956a3eb9d5f1d451b3f828f945233","README.md":"b222111103d0522442def1ccb0277520bf01c15a8902738dfd16d472e0375063","src/add_extension.rs":"689e8d8c0319911391a533616b1f642cebc7e3dc0838594b1582d7bb0dc0175c","src/auth/add_authorization.rs":"a5d866ead65ff7ca5bc89f7ec479c385dc90715d48911286bbd10d8a1ed644e8","src/auth/async_require_authorization.rs":"33018529261aaf346a034eb2ba9e81c7fdf19d933a4372fdd8d3929db0b9c549","src/auth/mod.rs":"4a16268a7bfa5ca1f110c6eff9b8733ebfe96c0d280607c6b1f1154a92ccf128","src/auth/require_authorization.rs":"4e6886d3bf439c62c0ffc69556eb6b6847c243853283fba9f4cb2c20d711d18e","src/body.rs":"b6cb0269c26cd23838595288c495893b5eae5aa71b580dcae060ac157cb38af1","src/builder.rs":"f9e2f48a097195540869df1a0d3fb9fd00d8217b0baf6b2e5b87bf3c170c916e","src/catch_panic.rs":"4f933eeeb4b7e0ca0ea3ae5e72814efced9bbf629c2cf772bf00c9b7c6369ffc","src/classify/grpc_errors_as_failures.rs":"df08114cf6f3d57e45516ebc2521e03de2b31085852112023eace3f51ad02f24","src/classify/map_failure_class.rs":"a9f7d36f3aede2990205a88c06542d68126bb58f0f530bb1f8716fa47587f1f6","src/classify/mod.rs":"680e44d010b96a5360d98876c22cae7be3f86508f1518f3a0532fa392d341fda","src/classify/status_in_range_is_error.rs":"9f104bc9eeb7bcb4bbdf31367a85dc402bb106fa5e0a7a85a21f05ebcfa2767d","src/compression/body.rs":"bcbdf3fdf5399ce095d0315e683dc11a279282c9d47081caa81a4640ddb3e664","src/compression/future.rs":"2211923f96a93843ba8b4eb82bdcceb0b550db7ed81d026f32d62b7b00d95dbf","src/compression/layer.rs":"dffbf9f25ad28273dd78049945d161266916ff1f9f2c915592c7e29d0c90755c","src/compression/mod.rs":"84b28607a29b44fa5eae19ebbf255dc96d3181034ceadc11b30302e6546332e7","src/compression/pin_project_cfg.rs":"a98033f4b8b12f8372ba51522f22a8610d005e82e29b3e24e28b4a8cb902b2ef","src/compression/predicate.rs":"70753e77ed7
70ebb8c2fa40fffa79463badd27a42a0a51bcd7c65f21dc36962f","src/compression/service.rs":"61e5aa8d19cb032875d7024002926d132c9b7e8120ef6e3d2e68f68261e8e327","src/compression_utils.rs":"c9e61ab625054c23a69311fb58b322b7588b1fac5ec935697e345699ad9e9001","src/content_encoding.rs":"df9fc6bd00020dee8a238fd32e2ed6de76727f5faddfc4e6cdbbceea0f669456","src/cors/allow_credentials.rs":"9b4e114f78f08e9fe583bcca642df069c4af9b97af82a1d611fd3223a6f254ea","src/cors/allow_headers.rs":"a30892e380530864a17e4fe432d3d8b5f3f3da0afb37bd19ac309833ee34734a","src/cors/allow_methods.rs":"ea35bf01415873be4a013d4851e495750d409854384c3bc32e24aed18b9079fd","src/cors/allow_origin.rs":"67e8abe17198de2c0b9f9461d21657c415601d7ff8ef2141655b579e05094781","src/cors/allow_private_network.rs":"2ba150f838456be32588c2b95cee668e7f96ab191a769694c880a86d8a3c1814","src/cors/expose_headers.rs":"9ab7d0dbfa921013c9be7679d89cb811649b2606c51becc72a16f627eeea465b","src/cors/max_age.rs":"0d2b67f1c092b9d36b20e05a04bfdf7eb57215c23a04cf2ca5fae07fca066697","src/cors/mod.rs":"d951c68b96b0d39eb71a22fcb38691020c7d6150bc7e923d3a63ba13c7b63e62","src/cors/tests.rs":"a3dd33294ded9ceb606de03ffdb626a8e395167f8c1db889efb363079d7ee32f","src/cors/vary.rs":"1f60668bb835da2c71d58711aa7f08f9707de65844055b9a4fed5ba608f1127e","src/decompression/body.rs":"65941e08a5e954a88d1b573ad914d813ffdbd2fb6fc65053ab0b929ae3dda4db","src/decompression/future.rs":"a9cfc2f175854bb85b76901e8dbcbfdf743db92c822ccf589647ba18ef82c730","src/decompression/layer.rs":"98d13d3a107ad4809b5bfbc6e509cde0c0876ae4596f6ae5d985d007594cbbdf","src/decompression/mod.rs":"c19af4bbd4d8a2915f17cd3592e3729218753b5de6b7b23959e266f730a1bfe0","src/decompression/request/future.rs":"d7da33415760ef36cd42d519ff44f0157333f5f64e5372deb7b68fde058ed95c","src/decompression/request/layer.rs":"f17a14ab9d8408067767b058a6fb848a2891b9f68fbbf6e192086e8f00bc7d89","src/decompression/request/mod.rs":"57b9e4844d6b9320547b05e00a2256737afd38e86fc17fefb1b62974ac6d8e9e","src/decompression/request/service.rs":"
af905c7eee15d72840ec4685bc2e68854ff1103a760504a6db91d00de47b7f93","src/decompression/service.rs":"94accf60490c9e6184b1da72f0a9dc9f4a2428481955f23f920a381348e19860","src/follow_redirect/mod.rs":"e1cfc14e44789984108a3a3bddbe56dbbf47db11f46704a8f6ff1559eba21b8a","src/follow_redirect/policy/and.rs":"a62623042f4d13029ca0d35a21cab20f26bf98fff0d321dd19ee6eadef96ee02","src/follow_redirect/policy/clone_body_fn.rs":"3a78bf37d4bd000d9c2d60d84a2d02d2d0ae584a0790da2dcdb34fab43fcd557","src/follow_redirect/policy/filter_credentials.rs":"918ce212685ce6501f78e6346c929fec8e01e81b26d681f6d3c86d88fe2eaa97","src/follow_redirect/policy/limited.rs":"b958035fc38736e12ef2a5421366de51c806c8ca849e8e9310b9d14e8b0b1e07","src/follow_redirect/policy/mod.rs":"e4185953e23944928f49eb9debe59da78ffb1fd87e5ac03cbab0079ccef3e316","src/follow_redirect/policy/or.rs":"02de001232c92a9e7e19cdef70b1321df181c6323973e6297642cc234dbf3119","src/follow_redirect/policy/redirect_fn.rs":"f4f7bf9219df8da1021ef9f44b07b50814dfa0728c8dbf52090e0dfab0b8edcc","src/follow_redirect/policy/same_origin.rs":"9c47be5b615c3dd31db8056e324a4bc87b0510c19df09b6a9e5f7ea8de2829fe","src/lib.rs":"c4b3bcaeef4cc09cb9766e13b517626d0fcf246c86cbf639687dc73f686ecf0c","src/limit/body.rs":"aa59aba00aae4ca98097d746efeabff2f650e1ac60bbea30179644e76b5ea1f9","src/limit/future.rs":"6c6feba8766a38e8fd68df7f73677a13047d71911acc0578f80b7d70ab0142d0","src/limit/layer.rs":"a9ebe5e09b32d7ca71442f855af622b3657dca140d95acf7021c4d57c7b50576","src/limit/mod.rs":"22ecc0e5cf5e2d526da2b04e8ec8af715ae58338fc4031e05050d4a64ce79c8a","src/limit/service.rs":"b73f2016d1feb0b61fc4924597cbb06a43106ce213ce8104feabf953c7eefe2d","src/macros.rs":"d9e425b15811809ef9a002c7f86376737ca401a435630f59d4451c1863aed823","src/map_request_body.rs":"35eb77cc8d2257b849348a68aae050d9dee7a0869b433748b4a038cc8c70ee2f","src/map_response_body.rs":"691263b8c85bd595aed6c55a3d3e2fd6d8e19dca77dd2e5df283fba581b7df56","src/metrics/in_flight_requests.rs":"615652b49586bb809b8f841f15ee3ba70c8e62620e31c
81d062e1100e88619e2","src/metrics/mod.rs":"71d79df8dcb242f4925dc9b0d380d3c4fa7ae1f3d6a125f9db4f8a4ee3be9b3d","src/normalize_path.rs":"ea127de90e9eb699f22c4a702948d18bc7bda815476aa5fde0fd1c2cc234ffd3","src/propagate_header.rs":"d123d13557a28d9a797486285233cb4bade6fc318d803d07f8e93bca831a7750","src/request_id.rs":"beb9c283692a975cad0ff657ddb9a4d0304ae03ace6a91ffdbc86f4e33d718a0","src/sensitive_headers.rs":"ab78f92d1482a3818f743b412316d6073dae6bf94ee06b22b60fe488d645cbbc","src/services/fs/mod.rs":"69984ffd0f88cca24e0d35d76b265492d87de7094914e07a7c13a6ad899ee7d9","src/services/fs/serve_dir/future.rs":"39c383ca162ff8720bfa47b2201c74b5c1cfd67124188b9248ad883f42a2d98e","src/services/fs/serve_dir/headers.rs":"d48fb514ca575e5e380f80eb84521a5fcab0560e57a995f1fef5ca35590400e8","src/services/fs/serve_dir/mod.rs":"3efa5af990c5d92021563aebf33a99ae27782bc553a3fee36906610e109cbfd6","src/services/fs/serve_dir/open_file.rs":"60f73b3693e0e8a3bb050aded5607dae5a7a71fd78e040b35b8c7c34d0a38a66","src/services/fs/serve_dir/tests.rs":"6c6aaff66c6af9bca1850ffc7290b8dfce2eedd29f3cf7c90d52013f9782e229","src/services/fs/serve_file.rs":"a2f6feee5b6261d6643c91a2b6c5547f3dea2b48e823ccea4d95ec0f0f4bb561","src/services/mod.rs":"177bf1406c13c0386c82b247e6d1556c55c7a2f6704de7e50dbc987400713b96","src/services/redirect.rs":"480cb9d2fefdcbe1f70c428a78faa3aa283a4f44eb26dff5c2d36cdd543d011a","src/set_header/mod.rs":"642db9ef0fed43886e12311e26ae2522d25275ff9263cb5f0ef500ed6ce5f6bd","src/set_header/request.rs":"6261a0e89413bf8d5bcecb25acbf0c7eee6bbfd57590676ac22d4dfe46c12ad1","src/set_header/response.rs":"b6c659771a61cfba3ab814df32353582c05f695bf6bf21a2265d7d6ccb709440","src/set_status.rs":"9dfc8c6d598a45483b8a489d6362b7bb8debd1feb0c8304a69c003a6ae4882d3","src/test_helpers.rs":"2461f2eddbc947ab5cf41cfc404e173700eb5dc3a84d1621c0cb75ac776baa1f","src/timeout/body.rs":"c96b374dcb30a62eb7e06572c4dacb9a9f5d6a752576a402a40205a8ae94fa53","src/timeout/mod.rs":"8032cbcc0863d22d9bd3f89dda5e7dd85574e53811ab5c98c99aaa12d2
1bd646","src/timeout/service.rs":"77260f6a9b2583ff33dc63c365e3170f147072231ca85bc948688bf5fbfc97a7","src/trace/body.rs":"c4aabdc0c6799e8425ca750730f6a6c108727f0c48cef57f2160a5cc22e96ecb","src/trace/future.rs":"1b0334a22f07017b589e51f6d7bda472161ac58435202be031a5aab0e741e266","src/trace/layer.rs":"9f9a52c51f356fa0e2f4e83942dac1e04424d52589723ff74927754d68b38a77","src/trace/make_span.rs":"1066f20074c3da019901f4c614b18e8bd574170fe6cdcbc090ab9bf42361f876","src/trace/mod.rs":"22af338562d2742be1b563d0a9d6a532114af337dd7a6771adab73c5ae04c289","src/trace/on_body_chunk.rs":"824f83e4b44e5656fd48922addf02c010764cd73ec4d290213be2b990751f3ca","src/trace/on_eos.rs":"321f2afd63eef9a1be0bbe5e5bb450555bea984bc28381f92b31a17b6e466237","src/trace/on_failure.rs":"2aa316893e4c2df0ac0bfe8b597a9eaee8db79a243c42480be16fe2ebdf58f41","src/trace/on_request.rs":"9a88d6061c2f638d04dabf79317d014f7d47abb3c6e30730b687294ff135d646","src/trace/on_response.rs":"9b22781e2c2f1003ad5c4d0525ab26c037905660d769dd0717f4dcf359e7319a","src/trace/service.rs":"2b96171af5c11ad7d7e372afd8d86aed824de84ca64ae1cfdf832b8506646a66","src/validate_request.rs":"d773d6d2be85fea154e55b1743bd13537f400b6fc39a4075322d9b4b5acd3054"},"package":"1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"} \ No newline at end of file diff --git a/.cargo-vendor/tower-http/CHANGELOG.md b/.cargo-vendor/tower-http/CHANGELOG.md new file mode 100644 index 0000000000..2d7ee4bec9 --- /dev/null +++ b/.cargo-vendor/tower-http/CHANGELOG.md @@ -0,0 +1,413 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +# 0.5.2 + +## Added: + +- **compression:** Will now send a `vary: accept-encoding` header on compressed responses ([#399]) +- **compression:** Support `x-gzip` as equivalent to `gzip` in `accept-encoding` request header ([#467]) + +## Fixed + +- **compression:** Skip compression for range requests ([#446]) +- **compression:** Skip compression for SSE responses by default ([#465]) +- **cors:** *Actually* keep Vary headers set by the inner service when setting response headers ([#473]) + - Version 0.5.1 intended to ship this, but the implementation was buggy and didn't actually do anything + +[#399]: https://github.com/tower-rs/tower-http/pull/399 +[#446]: https://github.com/tower-rs/tower-http/pull/446 +[#465]: https://github.com/tower-rs/tower-http/pull/465 +[#467]: https://github.com/tower-rs/tower-http/pull/467 +[#473]: https://github.com/tower-rs/tower-http/pull/473 + +# 0.5.1 (January 14, 2024) + +## Added + +- **fs:** Support files precompressed with `zstd` in `ServeFile` +- **trace:** Add default generic parameters for `ResponseBody` and `ResponseFuture` ([#455]) +- **trace:** Add type aliases `HttpMakeClassifier` and `GrpcMakeClassifier` ([#455]) + +## Fixed + +- **cors:** Keep Vary headers set by the inner service when setting response headers ([#398]) +- **fs:** `ServeDir` now no longer redirects from `/directory` to `/directory/` + if `append_index_html_on_directories` is disabled ([#421]) + +[#398]: https://github.com/tower-rs/tower-http/pull/398 +[#421]: https://github.com/tower-rs/tower-http/pull/421 +[#455]: https://github.com/tower-rs/tower-http/pull/455 + +# 0.5.0 (November 21, 2023) + +## Changed + +- Bump Minimum Supported Rust Version to 1.66 ([#433]) +- Update to http-body 1.0 ([#348]) +- Update to http 1.0 ([#348]) +- Preserve service error type in RequestDecompression ([#368]) + +## Fixed + +- Accepts range headers with ranges where the end of range goes past the end of the document by bumping 
+http-range-header to `0.4` + +[#418]: https://github.com/tower-rs/tower-http/pull/418 +[#433]: https://github.com/tower-rs/tower-http/pull/433 +[#348]: https://github.com/tower-rs/tower-http/pull/348 +[#368]: https://github.com/tower-rs/tower-http/pull/368 + +# 0.4.2 (July 19, 2023) + +## Added + +- **cors:** Add support for private network preflights ([#373]) +- **compression:** Implement `Default` for `DecompressionBody` ([#370]) + +## Changed + +- **compression:** Update to async-compression 0.4 ([#371]) + +## Fixed + +- **compression:** Override default brotli compression level 11 -> 4 ([#356]) +- **trace:** Simplify dynamic tracing level application ([#380]) +- **normalize_path:** Fix path normalization for preceding slashes ([#359]) + +[#356]: https://github.com/tower-rs/tower-http/pull/356 +[#359]: https://github.com/tower-rs/tower-http/pull/359 +[#370]: https://github.com/tower-rs/tower-http/pull/370 +[#371]: https://github.com/tower-rs/tower-http/pull/371 +[#373]: https://github.com/tower-rs/tower-http/pull/373 +[#380]: https://github.com/tower-rs/tower-http/pull/380 + +# 0.4.1 (June 20, 2023) + +## Added + +- **request_id:** Derive `Default` for `MakeRequestUuid` ([#335]) +- **fs:** Derive `Default` for `ServeFileSystemResponseBody` ([#336]) +- **compression:** Expose compression quality on the CompressionLayer ([#333]) + +## Fixed + +- **compression:** Improve parsing of `Accept-Encoding` request header ([#220]) +- **normalize_path:** Fix path normalization of index route ([#347]) +- **decompression:** Enable `multiple_members` for `GzipDecoder` ([#354]) + +[#347]: https://github.com/tower-rs/tower-http/pull/347 +[#333]: https://github.com/tower-rs/tower-http/pull/333 +[#220]: https://github.com/tower-rs/tower-http/pull/220 +[#335]: https://github.com/tower-rs/tower-http/pull/335 +[#336]: https://github.com/tower-rs/tower-http/pull/336 +[#354]: 
https://github.com/tower-rs/tower-http/pull/354 + +# 0.4.0 (February 24, 2023) + +## Added + +- **decompression:** Add `RequestDecompression` middleware ([#282]) +- **compression:** Implement `Default` for `CompressionBody` ([#323]) +- **compression, decompression:** Support zstd (de)compression ([#322]) + +## Changed + +- **serve_dir:** `ServeDir` and `ServeFile`'s error types are now `Infallible` and any IO errors + will be converted into responses. Use `try_call` to generate error responses manually (BREAKING) ([#283]) +- **serve_dir:** `ServeDir::fallback` and `ServeDir::not_found_service` now requires + the fallback service to use `Infallible` as its error type (BREAKING) ([#283]) +- **compression, decompression:** Tweak prefered compression encodings ([#325]) + +## Removed + +- Removed `RequireAuthorization` in favor of `ValidateRequest` (BREAKING) ([#290]) + +## Fixed + +- **serve_dir:** Don't include identity in Content-Encoding header ([#317]) +- **compression:** Do compress SVGs ([#321]) +- **serve_dir:** In `ServeDir`, convert `io::ErrorKind::NotADirectory` to `404 Not Found` ([#331]) + +[#282]: https://github.com/tower-rs/tower-http/pull/282 +[#283]: https://github.com/tower-rs/tower-http/pull/283 +[#290]: https://github.com/tower-rs/tower-http/pull/290 +[#317]: https://github.com/tower-rs/tower-http/pull/317 +[#321]: https://github.com/tower-rs/tower-http/pull/321 +[#322]: https://github.com/tower-rs/tower-http/pull/322 +[#323]: https://github.com/tower-rs/tower-http/pull/323 +[#325]: https://github.com/tower-rs/tower-http/pull/325 +[#331]: https://github.com/tower-rs/tower-http/pull/331 + +# 0.3.5 (December 02, 2022) + +## Added + +- Add `NormalizePath` middleware ([#275]) +- Add `ValidateRequest` middleware ([#289]) +- Add `RequestBodyTimeout` middleware ([#303]) + +## Changed + +- Bump Minimum Supported Rust Version to 1.60 ([#299]) + +## Fixed + +- **trace:** Correctly identify gRPC requests in default 
`on_response` callback ([#278]) +- **cors:** Panic if a wildcard (`*`) is passed to `AllowOrigin::list`. Use + `AllowOrigin::any()` instead ([#285]) +- **serve_dir:** Call the fallback on non-uft8 request paths ([#310]) + +[#275]: https://github.com/tower-rs/tower-http/pull/275 +[#278]: https://github.com/tower-rs/tower-http/pull/278 +[#285]: https://github.com/tower-rs/tower-http/pull/285 +[#289]: https://github.com/tower-rs/tower-http/pull/289 +[#299]: https://github.com/tower-rs/tower-http/pull/299 +[#303]: https://github.com/tower-rs/tower-http/pull/303 +[#310]: https://github.com/tower-rs/tower-http/pull/310 + +# 0.3.4 (June 06, 2022) + +## Added + +- Add `Timeout` middleware ([#270]) +- Add `RequestBodyLimit` middleware ([#271]) + +[#270]: https://github.com/tower-rs/tower-http/pull/270 +[#271]: https://github.com/tower-rs/tower-http/pull/271 + +# 0.3.3 (May 08, 2022) + +## Added + +- **serve_dir:** Add `ServeDir::call_fallback_on_method_not_allowed` to allow calling the fallback + for requests that aren't `GET` or `HEAD` ([#264]) +- **request_id:** Add `MakeRequestUuid` for generating request ids using UUIDs ([#266]) + +[#264]: https://github.com/tower-rs/tower-http/pull/264 +[#266]: https://github.com/tower-rs/tower-http/pull/266 + +## Fixed + +- **serve_dir:** Include `Allow` header for `405 Method Not Allowed` responses ([#263]) + +[#263]: https://github.com/tower-rs/tower-http/pull/263 + +# 0.3.2 (April 29, 2022) + +## Fixed + +- **serve_dir**: Fix empty request parts being passed to `ServeDir`'s fallback instead of the actual ones ([#258]) + +[#258]: https://github.com/tower-rs/tower-http/pull/258 + +# 0.3.1 (April 28, 2022) + +## Fixed + +- **cors**: Only send a single origin in `Access-Control-Allow-Origin` header when a list of + allowed origins is configured (the previous behavior of sending a comma-separated list like for + allowed methods and allowed headers is not allowed by any standard) + +# 
0.3.0 (April 25, 2022) + +## Added + +- **fs**: Add `ServeDir::{fallback, not_found_service}` for calling another service if + the file cannot be found ([#243]) +- **fs**: Add `SetStatus` to override status codes ([#248]) +- `ServeDir` and `ServeFile` now respond with `405 Method Not Allowed` to requests where the + method isn't `GET` or `HEAD` ([#249]) +- **cors**: Added `CorsLayer::very_permissive` which is like + `CorsLayer::permissive` except it (truly) allows credentials. This is made + possible by mirroring the request's origin as well as method and headers + back as CORS-whitelisted ones ([#237]) +- **cors**: Allow customizing the value(s) for the `Vary` header ([#237]) + +## Changed + +- **cors**: Removed `allow-credentials: true` from `CorsLayer::permissive`. + It never actually took effect in compliant browsers because it is mutually + exclusive with the `*` wildcard (`Any`) on origins, methods and headers ([#237]) +- **cors**: Rewrote the CORS middleware. Almost all existing usage patterns + will continue to work. (BREAKING) ([#237]) +- **cors**: The CORS middleware will now panic if you try to use `Any` in + combination with `.allow_credentials(true)`. 
This configuration worked + before, but resulted in browsers ignoring the `allow-credentials` header, + which defeats the purpose of setting it and can be very annoying to debug + ([#237]) + +## Fixed + +- **fs**: Fix content-length calculation on range requests ([#228]) + +[#228]: https://github.com/tower-rs/tower-http/pull/228 +[#237]: https://github.com/tower-rs/tower-http/pull/237 +[#243]: https://github.com/tower-rs/tower-http/pull/243 +[#248]: https://github.com/tower-rs/tower-http/pull/248 +[#249]: https://github.com/tower-rs/tower-http/pull/249 + +# 0.2.4 (March 5, 2022) + +## Added + +- Added `CatchPanic` middleware which catches panics and converts them + into `500 Internal Server` responses ([#214]) + +## Fixed + +- Make parsing of `Accept-Encoding` more robust ([#220]) + +[#214]: https://github.com/tower-rs/tower-http/pull/214 +[#220]: https://github.com/tower-rs/tower-http/pull/220 + +# 0.2.3 (February 18, 2022) + +## Changed + +- Update to tokio-util 0.7 ([#221]) + +## Fixed + +- The CORS layer / service methods `allow_headers`, `allow_methods`, `allow_origin` + and `expose_headers` now do nothing if given an empty `Vec`, instead of sending + the respective header with an empty value ([#218]) + +[#218]: https://github.com/tower-rs/tower-http/pull/218 +[#221]: https://github.com/tower-rs/tower-http/pull/221 + +# 0.2.2 (February 8, 2022) + +## Fixed + +- Add `Vary` headers for CORS preflight responses ([#216]) + +[#216]: https://github.com/tower-rs/tower-http/pull/216 + +# 0.2.1 (January 21, 2022) + +## Added + +- Support `Last-Modified` (and friends) headers in `ServeDir` and `ServeFile` ([#145]) +- Add `AsyncRequireAuthorization::layer` ([#195]) + +## Fixed + +- Fix build error for certain feature sets ([#209]) +- `Cors`: Set `Vary` header ([#199]) +- `ServeDir` and `ServeFile`: Fix potential directory traversal attack due to + improper path validation on Windows ([#204]) + +[#145]: 
https://github.com/tower-rs/tower-http/pull/145 +[#195]: https://github.com/tower-rs/tower-http/pull/195 +[#199]: https://github.com/tower-rs/tower-http/pull/199 +[#204]: https://github.com/tower-rs/tower-http/pull/204 +[#209]: https://github.com/tower-rs/tower-http/pull/209 + +# 0.2.0 (December 1, 2021) + +## Added + +- **builder**: Add `ServiceBuilderExt` which adds methods to `tower::ServiceBuilder` for + adding middleware from tower-http ([#106]) +- **request_id**: Add `SetRequestId` and `PropagateRequestId` middleware ([#150]) +- **trace**: Add `DefaultMakeSpan::level` to make log level of tracing spans easily configurable ([#124]) +- **trace**: Add `LatencyUnit::Seconds` for formatting latencies as seconds ([#179]) +- **trace**: Support customizing which status codes are considered failures by `GrpcErrorsAsFailures` ([#189]) +- **compression**: Support specifying predicates to choose when responses should + be compressed. This can be used to disable compression of small responses, + responses with a certain `content-type`, or something user defined ([#172]) +- **fs**: Ability to serve precompressed files ([#156]) +- **fs**: Support `Range` requests ([#173]) +- **fs**: Properly support HEAD requests which return no body and have the `Content-Length` header set ([#169]) + +## Changed + +- `AddAuthorization`, `InFlightRequests`, `SetRequestHeader`, + `SetResponseHeader`, `AddExtension`, `MapRequestBody` and `MapResponseBody` + now requires underlying service to use `http::Request` and + `http::Response` as request and responses ([#182]) (BREAKING) +- **set_header**: Remove unnecessary generic parameter from `SetRequestHeaderLayer` + and `SetResponseHeaderLayer`. This removes the need (and possibility) to specify a + body type for these layers ([#148]) (BREAKING) +- **compression, decompression**: Change the response body error type to + `Box`. 
This makes them usable if + the body they're wrapping uses `Box` as + its error type which they previously weren't ([#166]) (BREAKING) +- **fs**: Change response body type of `ServeDir` and `ServeFile` to + `ServeFileSystemResponseBody` and `ServeFileSystemResponseFuture` ([#187]) (BREAKING) +- **auth**: Change `AuthorizeRequest` and `AsyncAuthorizeRequest` traits to be simpler ([#192]) (BREAKING) + +## Removed + +- **compression, decompression**: Remove `BodyOrIoError`. Its been replaced with `Box` ([#166]) (BREAKING) +- **compression, decompression**: Remove the `compression` and `decompression` feature. They were unnecessary + and `compression-full`/`decompression-full` can be used to get full + compression/decompression support. For more granular control, `[compression|decompression]-gzip`, + `[compression|decompression]-br` and `[compression|decompression]-deflate` may + be used instead ([#170]) (BREAKING) + +[#106]: https://github.com/tower-rs/tower-http/pull/106 +[#124]: https://github.com/tower-rs/tower-http/pull/124 +[#148]: https://github.com/tower-rs/tower-http/pull/148 +[#150]: https://github.com/tower-rs/tower-http/pull/150 +[#156]: https://github.com/tower-rs/tower-http/pull/156 +[#166]: https://github.com/tower-rs/tower-http/pull/166 +[#169]: https://github.com/tower-rs/tower-http/pull/169 +[#170]: https://github.com/tower-rs/tower-http/pull/170 +[#172]: https://github.com/tower-rs/tower-http/pull/172 +[#173]: https://github.com/tower-rs/tower-http/pull/173 +[#179]: https://github.com/tower-rs/tower-http/pull/179 +[#182]: https://github.com/tower-rs/tower-http/pull/182 +[#187]: https://github.com/tower-rs/tower-http/pull/187 +[#189]: https://github.com/tower-rs/tower-http/pull/189 +[#192]: https://github.com/tower-rs/tower-http/pull/192 + +# 0.1.2 (November 13, 2021) + +- New middleware: Add `Cors` for setting [CORS] headers ([#112]) +- New middleware: Add `AsyncRequireAuthorization` 
([#118]) +- `Compression`: Don't recompress HTTP responses ([#140]) +- `Compression` and `Decompression`: Pass configuration from layer into middleware ([#132]) +- `ServeDir` and `ServeFile`: Improve performance ([#137]) +- `Compression`: Remove needless `ResBody::Error: Into` bounds ([#117]) +- `ServeDir`: Percent decode path segments ([#129]) +- `ServeDir`: Use correct redirection status ([#130]) +- `ServeDir`: Return `404 Not Found` on requests to directories if + `append_index_html_on_directories` is set to `false` ([#122]) + +[#112]: https://github.com/tower-rs/tower-http/pull/112 +[#118]: https://github.com/tower-rs/tower-http/pull/118 +[#140]: https://github.com/tower-rs/tower-http/pull/140 +[#132]: https://github.com/tower-rs/tower-http/pull/132 +[#137]: https://github.com/tower-rs/tower-http/pull/137 +[#117]: https://github.com/tower-rs/tower-http/pull/117 +[#129]: https://github.com/tower-rs/tower-http/pull/129 +[#130]: https://github.com/tower-rs/tower-http/pull/130 +[#122]: https://github.com/tower-rs/tower-http/pull/122 + +# 0.1.1 (July 2, 2021) + +- Add example of using `SharedClassifier`. +- Add `StatusInRangeAsFailures` which is a response classifier that considers + responses with status code in a certain range as failures. Useful for HTTP + clients where both server errors (5xx) and client errors (4xx) are considered + failures. +- Implement `Debug` for `NeverClassifyEos`. +- Update iri-string to 0.4. +- Add `ClassifyResponse::map_failure_class` and `ClassifyEos::map_failure_class` + for transforming the failure classification using a function. +- Clarify exactly when each `Trace` callback is called. +- Add `AddAuthorizationLayer` for setting the `Authorization` header on + requests. + +# 0.1.0 (May 27, 2021) + +- Initial release. 
+ +[CORS]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS diff --git a/.cargo-vendor/tower-http/Cargo.toml b/.cargo-vendor/tower-http/Cargo.toml new file mode 100644 index 0000000000..dc4d812848 --- /dev/null +++ b/.cargo-vendor/tower-http/Cargo.toml @@ -0,0 +1,341 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.66" +name = "tower-http" +version = "0.5.2" +authors = ["Tower Maintainers "] +description = "Tower middleware and utilities for HTTP clients and servers" +homepage = "https://github.com/tower-rs/tower-http" +readme = "README.md" +keywords = [ + "io", + "async", + "futures", + "service", + "http", +] +categories = [ + "asynchronous", + "network-programming", + "web-programming", +] +license = "MIT" +repository = "https://github.com/tower-rs/tower-http" +resolver = "2" + +[package.metadata.cargo-public-api-crates] +allowed = [ + "bytes", + "http", + "http_body", + "mime", + "tokio", + "tower", + "tower_layer", + "tower_service", + "tracing", + "tracing_core", +] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[package.metadata.playground] +features = ["full"] + +[dependencies.async-compression] +version = "0.4" +features = ["tokio"] +optional = true + +[dependencies.base64] +version = "0.21" +optional = true + +[dependencies.bitflags] +version = "2.0.2" + +[dependencies.bytes] +version = "1" + +[dependencies.futures-core] +version = "0.3" +optional = true +default_features = false + +[dependencies.futures-util] +version = 
"0.3.14" +optional = true +default_features = false + +[dependencies.http] +version = "1.0" + +[dependencies.http-body] +version = "1.0.0" + +[dependencies.http-body-util] +version = "0.1.0" + +[dependencies.http-range-header] +version = "0.4.0" +optional = true + +[dependencies.httpdate] +version = "1.0" +optional = true + +[dependencies.iri-string] +version = "0.7.0" +optional = true + +[dependencies.mime] +version = "0.3.17" +optional = true +default_features = false + +[dependencies.mime_guess] +version = "2" +optional = true +default_features = false + +[dependencies.percent-encoding] +version = "2.1.0" +optional = true + +[dependencies.pin-project-lite] +version = "0.2.7" + +[dependencies.tokio] +version = "1.6" +optional = true +default_features = false + +[dependencies.tokio-util] +version = "0.7" +features = ["io"] +optional = true +default_features = false + +[dependencies.tower] +version = "0.4.1" +optional = true + +[dependencies.tower-layer] +version = "0.3" + +[dependencies.tower-service] +version = "0.3" + +[dependencies.tracing] +version = "0.1" +optional = true +default_features = false + +[dependencies.uuid] +version = "1.0" +features = ["v4"] +optional = true + +[dev-dependencies.async-trait] +version = "0.1" + +[dev-dependencies.brotli] +version = "3" + +[dev-dependencies.bytes] +version = "1" + +[dev-dependencies.flate2] +version = "1.0" + +[dev-dependencies.futures-util] +version = "0.3.14" + +[dev-dependencies.hyper-util] +version = "0.1" +features = [ + "client-legacy", + "http1", + "tokio", +] + +[dev-dependencies.once_cell] +version = "1" + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.sync_wrapper] +version = "0.1.1" + +[dev-dependencies.tokio] +version = "1" +features = ["full"] + +[dev-dependencies.tower] +version = "0.4.10" +features = [ + "buffer", + "util", + "retry", + "make", + "timeout", +] + +[dev-dependencies.tracing-subscriber] +version = "0.3" + +[dev-dependencies.uuid] +version = "1.0" +features = 
["v4"] + +[dev-dependencies.zstd] +version = "0.12" + +[features] +add-extension = [] +auth = [ + "base64", + "validate-request", +] +catch-panic = [ + "tracing", + "futures-util/std", +] +compression-br = [ + "async-compression/brotli", + "futures-core", + "tokio-util", + "tokio", +] +compression-deflate = [ + "async-compression/zlib", + "futures-core", + "tokio-util", + "tokio", +] +compression-full = [ + "compression-br", + "compression-deflate", + "compression-gzip", + "compression-zstd", +] +compression-gzip = [ + "async-compression/gzip", + "futures-core", + "tokio-util", + "tokio", +] +compression-zstd = [ + "async-compression/zstd", + "futures-core", + "tokio-util", + "tokio", +] +cors = [] +decompression-br = [ + "async-compression/brotli", + "futures-core", + "tokio-util", + "tokio", +] +decompression-deflate = [ + "async-compression/zlib", + "futures-core", + "tokio-util", + "tokio", +] +decompression-full = [ + "decompression-br", + "decompression-deflate", + "decompression-gzip", + "decompression-zstd", +] +decompression-gzip = [ + "async-compression/gzip", + "futures-core", + "tokio-util", + "tokio", +] +decompression-zstd = [ + "async-compression/zstd", + "futures-core", + "tokio-util", + "tokio", +] +default = [] +follow-redirect = [ + "futures-util", + "iri-string", + "tower/util", +] +fs = [ + "futures-util", + "tokio/fs", + "tokio-util/io", + "tokio/io-util", + "dep:http-range-header", + "mime_guess", + "mime", + "percent-encoding", + "httpdate", + "set-status", + "futures-util/alloc", + "tracing", +] +full = [ + "add-extension", + "auth", + "catch-panic", + "compression-full", + "cors", + "decompression-full", + "follow-redirect", + "fs", + "limit", + "map-request-body", + "map-response-body", + "metrics", + "normalize-path", + "propagate-header", + "redirect", + "request-id", + "sensitive-headers", + "set-header", + "set-status", + "timeout", + "trace", + "util", + "validate-request", +] +limit = [] +map-request-body = [] +map-response-body = 
[] +metrics = ["tokio/time"] +normalize-path = [] +propagate-header = [] +redirect = [] +request-id = ["uuid"] +sensitive-headers = [] +set-header = [] +set-status = [] +timeout = ["tokio/time"] +trace = ["tracing"] +util = ["tower"] +validate-request = ["mime"] diff --git a/.cargo-vendor/tower-http/LICENSE b/.cargo-vendor/tower-http/LICENSE new file mode 100644 index 0000000000..352c2cfa8e --- /dev/null +++ b/.cargo-vendor/tower-http/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019-2021 Tower Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo-vendor/tower-http/README.md b/.cargo-vendor/tower-http/README.md new file mode 100644 index 0000000000..4df241869b --- /dev/null +++ b/.cargo-vendor/tower-http/README.md @@ -0,0 +1,79 @@ +# Tower HTTP + +Tower middleware and utilities for HTTP clients and servers. 
+ +[![Build status](https://github.com/tower-rs/tower-http/workflows/CI/badge.svg)](https://github.com/tower-rs/tower-http/actions) +[![Crates.io](https://img.shields.io/crates/v/tower-http)](https://crates.io/crates/tower-http) +[![Documentation](https://docs.rs/tower-http/badge.svg)](https://docs.rs/tower-http) +[![Crates.io](https://img.shields.io/crates/l/tower-http)](tower-http/LICENSE) + +More information about this crate can be found in the [crate documentation][docs]. + +## Middleware + +Tower HTTP contains lots of middleware that are generally useful when building +HTTP servers and clients. Some of the highlights are: + +- `Trace` adds high level logging of requests and responses. Supports both + regular HTTP requests as well as gRPC. +- `Compression` and `Decompression` to compress/decompress response bodies. +- `FollowRedirect` to automatically follow redirection responses. + +See the [docs] for the complete list of middleware. + +Middleware uses the [http] crate as the HTTP interface so they're compatible +with any library or framework that also uses [http]. For example [hyper]. + +The middleware were originally extracted from one of [@EmbarkStudios] internal +projects. + +## Examples + +The [examples] folder contains various examples of how to use Tower HTTP: + +- [warp-key-value-store]: A key/value store with an HTTP API built with warp. +- [tonic-key-value-store]: A key/value store with a gRPC API and client built with tonic. +- [axum-key-value-store]: A key/value store with an HTTP API built with axum. + +## Minimum supported Rust version + +tower-http's MSRV is 1.66. + +## Getting Help + +If you're new to tower its [guides] might help. In the tower-http repo we also +have a [number of examples][examples] showing how to put everything together. +You're also welcome to ask in the [`#tower` Discord channel][chat] or open an +[issue] with your question. + +## Contributing + +:balloon: Thanks for your help improving the project! 
We are so happy to have +you! We have a [contributing guide][guide] to help you get involved in the Tower +HTTP project. + +[guide]: CONTRIBUTING.md + +## License + +This project is licensed under the [MIT license](tower-http/LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tower HTTP by you, shall be licensed as MIT, without any +additional terms or conditions. + +[@EmbarkStudios]: https://github.com/EmbarkStudios +[examples]: https://github.com/tower-rs/tower-http/tree/master/examples +[http]: https://crates.io/crates/http +[tonic-key-value-store]: https://github.com/tower-rs/tower-http/tree/master/examples/tonic-key-value-store +[warp-key-value-store]: https://github.com/tower-rs/tower-http/tree/master/examples/warp-key-value-store +[axum-key-value-store]: https://github.com/tower-rs/tower-http/tree/master/examples/axum-key-value-store +[chat]: https://discord.gg/tokio +[docs]: https://docs.rs/tower-http +[hyper]: https://github.com/hyperium/hyper +[issue]: https://github.com/tower-rs/tower-http/issues/new +[milestone]: https://github.com/tower-rs/tower-http/milestones +[examples]: https://github.com/tower-rs/tower-http/tree/master/examples +[guides]: https://github.com/tower-rs/tower/tree/master/guides diff --git a/.cargo-vendor/tower-http/src/add_extension.rs b/.cargo-vendor/tower-http/src/add_extension.rs new file mode 100644 index 0000000000..095646df37 --- /dev/null +++ b/.cargo-vendor/tower-http/src/add_extension.rs @@ -0,0 +1,167 @@ +//! Middleware that clones a value into each request's [extensions]. +//! +//! [extensions]: https://docs.rs/http/latest/http/struct.Extensions.html +//! +//! # Example +//! +//! ``` +//! use tower_http::add_extension::AddExtensionLayer; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use http::{Request, Response}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! 
use std::{sync::Arc, convert::Infallible}; +//! +//! # struct DatabaseConnectionPool; +//! # impl DatabaseConnectionPool { +//! # fn new() -> DatabaseConnectionPool { DatabaseConnectionPool } +//! # } +//! # +//! // Shared state across all request handlers --- in this case, a pool of database connections. +//! struct State { +//! pool: DatabaseConnectionPool, +//! } +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // Grab the state from the request extensions. +//! let state = req.extensions().get::>().unwrap(); +//! +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Construct the shared state. +//! let state = State { +//! pool: DatabaseConnectionPool::new(), +//! }; +//! +//! let mut service = ServiceBuilder::new() +//! // Share an `Arc` with all requests. +//! .layer(AddExtensionLayer::new(Arc::new(state))) +//! .service_fn(handle); +//! +//! // Call the service. +//! let response = service +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use std::task::{Context, Poll}; +use tower_layer::Layer; +use tower_service::Service; + +/// [`Layer`] for adding some shareable value to [request extensions]. +/// +/// See the [module docs](crate::add_extension) for more details. +/// +/// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html +#[derive(Clone, Copy, Debug)] +pub struct AddExtensionLayer { + value: T, +} + +impl AddExtensionLayer { + /// Create a new [`AddExtensionLayer`]. + pub fn new(value: T) -> Self { + AddExtensionLayer { value } + } +} + +impl Layer for AddExtensionLayer +where + T: Clone, +{ + type Service = AddExtension; + + fn layer(&self, inner: S) -> Self::Service { + AddExtension { + inner, + value: self.value.clone(), + } + } +} + +/// Middleware for adding some shareable value to [request extensions]. 
+/// +/// See the [module docs](crate::add_extension) for more details. +/// +/// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html +#[derive(Clone, Copy, Debug)] +pub struct AddExtension { + inner: S, + value: T, +} + +impl AddExtension { + /// Create a new [`AddExtension`]. + pub fn new(inner: S, value: T) -> Self { + Self { inner, value } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `AddExtension` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(value: T) -> AddExtensionLayer { + AddExtensionLayer::new(value) + } +} + +impl Service> for AddExtension +where + S: Service, Response = Response>, + T: Clone + Send + Sync + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + req.extensions_mut().insert(self.value.clone()); + self.inner.call(req) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::Response; + use std::{convert::Infallible, sync::Arc}; + use tower::{service_fn, ServiceBuilder, ServiceExt}; + + struct State(i32); + + #[tokio::test] + async fn basic() { + let state = Arc::new(State(1)); + + let svc = ServiceBuilder::new() + .layer(AddExtensionLayer::new(state)) + .service(service_fn(|req: Request| async move { + let state = req.extensions().get::>().unwrap(); + Ok::<_, Infallible>(Response::new(state.0)) + })); + + let res = svc + .oneshot(Request::new(Body::empty())) + .await + .unwrap() + .into_body(); + + assert_eq!(1, res); + } +} diff --git a/.cargo-vendor/tower-http/src/auth/add_authorization.rs b/.cargo-vendor/tower-http/src/auth/add_authorization.rs new file mode 100644 index 0000000000..246c13b6c3 --- /dev/null +++ 
b/.cargo-vendor/tower-http/src/auth/add_authorization.rs @@ -0,0 +1,267 @@ +//! Add authorization to requests using the [`Authorization`] header. +//! +//! [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +//! +//! # Example +//! +//! ``` +//! use tower_http::validate_request::{ValidateRequestHeader, ValidateRequestHeaderLayer}; +//! use tower_http::auth::AddAuthorizationLayer; +//! use http::{Request, Response, StatusCode, header::AUTHORIZATION}; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! # async fn handle(request: Request>) -> Result>, BoxError> { +//! # Ok(Response::new(Full::default())) +//! # } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! # let service_that_requires_auth = ValidateRequestHeader::basic( +//! # tower::service_fn(handle), +//! # "username", +//! # "password", +//! # ); +//! let mut client = ServiceBuilder::new() +//! // Use basic auth with the given username and password +//! .layer(AddAuthorizationLayer::basic("username", "password")) +//! .service(service_that_requires_auth); +//! +//! // Make a request, we don't have to add the `Authorization` header manually +//! let response = client +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! +//! assert_eq!(StatusCode::OK, response.status()); +//! # Ok(()) +//! # } +//! ``` + +use base64::Engine as _; +use http::{HeaderValue, Request, Response}; +use std::{ + convert::TryFrom, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +const BASE64: base64::engine::GeneralPurpose = base64::engine::general_purpose::STANDARD; + +/// Layer that applies [`AddAuthorization`] which adds authorization to all requests using the +/// [`Authorization`] header. +/// +/// See the [module docs](crate::auth::add_authorization) for an example. 
+/// +/// You can also use [`SetRequestHeader`] if you have a use case that isn't supported by this +/// middleware. +/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +/// [`SetRequestHeader`]: crate::set_header::SetRequestHeader +#[derive(Debug, Clone)] +pub struct AddAuthorizationLayer { + value: HeaderValue, +} + +impl AddAuthorizationLayer { + /// Authorize requests using a username and password pair. + /// + /// The `Authorization` header will be set to `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. + pub fn basic(username: &str, password: &str) -> Self { + let encoded = BASE64.encode(format!("{}:{}", username, password)); + let value = HeaderValue::try_from(format!("Basic {}", encoded)).unwrap(); + Self { value } + } + + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header will be set to `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(token: &str) -> Self { + let value = + HeaderValue::try_from(format!("Bearer {}", token)).expect("token is not valid header"); + Self { value } + } + + /// Mark the header as [sensitive]. + /// + /// This can for example be used to hide the header value from logs. 
+ /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + #[allow(clippy::wrong_self_convention)] + pub fn as_sensitive(mut self, sensitive: bool) -> Self { + self.value.set_sensitive(sensitive); + self + } +} + +impl Layer for AddAuthorizationLayer { + type Service = AddAuthorization; + + fn layer(&self, inner: S) -> Self::Service { + AddAuthorization { + inner, + value: self.value.clone(), + } + } +} + +/// Middleware that adds authorization all requests using the [`Authorization`] header. +/// +/// See the [module docs](crate::auth::add_authorization) for an example. +/// +/// You can also use [`SetRequestHeader`] if you have a use case that isn't supported by this +/// middleware. +/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +/// [`SetRequestHeader`]: crate::set_header::SetRequestHeader +#[derive(Debug, Clone)] +pub struct AddAuthorization { + inner: S, + value: HeaderValue, +} + +impl AddAuthorization { + /// Authorize requests using a username and password pair. + /// + /// The `Authorization` header will be set to `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. + pub fn basic(inner: S, username: &str, password: &str) -> Self { + AddAuthorizationLayer::basic(username, password).layer(inner) + } + + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header will be set to `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(inner: S, token: &str) -> Self { + AddAuthorizationLayer::bearer(token).layer(inner) + } + + define_inner_service_accessors!(); + + /// Mark the header as [sensitive]. 
+ /// + /// This can for example be used to hide the header value from logs. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + #[allow(clippy::wrong_self_convention)] + pub fn as_sensitive(mut self, sensitive: bool) -> Self { + self.value.set_sensitive(sensitive); + self + } +} + +impl Service> for AddAuthorization +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + req.headers_mut() + .insert(http::header::AUTHORIZATION, self.value.clone()); + self.inner.call(req) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::Body; + use crate::validate_request::ValidateRequestHeaderLayer; + use http::{Response, StatusCode}; + use std::convert::Infallible; + use tower::{BoxError, Service, ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn basic() { + // service that requires auth for all requests + let svc = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + // make a client that adds auth + let mut client = AddAuthorization::basic(svc, "foo", "bar"); + + let res = client + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn token() { + // service that requires auth for all requests + let svc = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foo")) + .service_fn(echo); + + // make a client that adds auth + let mut client = AddAuthorization::bearer(svc, "foo"); + + let res = client + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn 
making_header_sensitive() { + let svc = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foo")) + .service_fn(|request: Request| async move { + let auth = request.headers().get(http::header::AUTHORIZATION).unwrap(); + assert!(auth.is_sensitive()); + + Ok::<_, Infallible>(Response::new(Body::empty())) + }); + + let mut client = AddAuthorization::bearer(svc, "foo").as_sensitive(true); + + let res = client + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo-vendor/tower-http/src/auth/async_require_authorization.rs b/.cargo-vendor/tower-http/src/auth/async_require_authorization.rs new file mode 100644 index 0000000000..f086add2d0 --- /dev/null +++ b/.cargo-vendor/tower-http/src/auth/async_require_authorization.rs @@ -0,0 +1,389 @@ +//! Authorize requests using the [`Authorization`] header asynchronously. +//! +//! [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +//! +//! # Example +//! +//! ``` +//! use tower_http::auth::{AsyncRequireAuthorizationLayer, AsyncAuthorizeRequest}; +//! use http::{Request, Response, StatusCode, header::AUTHORIZATION}; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use futures_util::future::BoxFuture; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! #[derive(Clone, Copy)] +//! struct MyAuth; +//! +//! impl AsyncAuthorizeRequest for MyAuth +//! where +//! B: Send + Sync + 'static, +//! { +//! type RequestBody = B; +//! type ResponseBody = Full; +//! type Future = BoxFuture<'static, Result, Response>>; +//! +//! fn authorize(&mut self, mut request: Request) -> Self::Future { +//! Box::pin(async { +//! if let Some(user_id) = check_auth(&request).await { +//! 
// Set `user_id` as a request extension so it can be accessed by other +//! // services down the stack. +//! request.extensions_mut().insert(user_id); +//! +//! Ok(request) +//! } else { +//! let unauthorized_response = Response::builder() +//! .status(StatusCode::UNAUTHORIZED) +//! .body(Full::::default()) +//! .unwrap(); +//! +//! Err(unauthorized_response) +//! } +//! }) +//! } +//! } +//! +//! async fn check_auth(request: &Request) -> Option { +//! // ... +//! # None +//! } +//! +//! #[derive(Debug, Clone)] +//! struct UserId(String); +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! // Access the `UserId` that was set in `on_authorized`. If `handle` gets called the +//! // request was authorized and `UserId` will be present. +//! let user_id = request +//! .extensions() +//! .get::() +//! .expect("UserId will be there if request was authorized"); +//! +//! println!("request from {:?}", user_id); +//! +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let service = ServiceBuilder::new() +//! // Authorize requests using `MyAuth` +//! .layer(AsyncRequireAuthorizationLayer::new(MyAuth)) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! Or using a closure: +//! +//! ``` +//! use tower_http::auth::{AsyncRequireAuthorizationLayer, AsyncAuthorizeRequest}; +//! use http::{Request, Response, StatusCode}; +//! use tower::{Service, ServiceExt, ServiceBuilder, BoxError}; +//! use futures_util::future::BoxFuture; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! async fn check_auth(request: &Request) -> Option { +//! // ... +//! # None +//! } +//! +//! #[derive(Debug)] +//! struct UserId(String); +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! # todo!(); +//! // ... +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let service = ServiceBuilder::new() +//! 
.layer(AsyncRequireAuthorizationLayer::new(|request: Request>| async move { +//! if let Some(user_id) = check_auth(&request).await { +//! Ok(request) +//! } else { +//! let unauthorized_response = Response::builder() +//! .status(StatusCode::UNAUTHORIZED) +//! .body(Full::::default()) +//! .unwrap(); +//! +//! Err(unauthorized_response) +//! } +//! })) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`AsyncRequireAuthorization`] which authorizes all requests using the +/// [`Authorization`] header. +/// +/// See the [module docs](crate::auth::async_require_authorization) for an example. +/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +#[derive(Debug, Clone)] +pub struct AsyncRequireAuthorizationLayer { + auth: T, +} + +impl AsyncRequireAuthorizationLayer { + /// Authorize requests using a custom scheme. + pub fn new(auth: T) -> AsyncRequireAuthorizationLayer { + Self { auth } + } +} + +impl Layer for AsyncRequireAuthorizationLayer +where + T: Clone, +{ + type Service = AsyncRequireAuthorization; + + fn layer(&self, inner: S) -> Self::Service { + AsyncRequireAuthorization::new(inner, self.auth.clone()) + } +} + +/// Middleware that authorizes all requests using the [`Authorization`] header. +/// +/// See the [module docs](crate::auth::async_require_authorization) for an example. +/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +#[derive(Clone, Debug)] +pub struct AsyncRequireAuthorization { + inner: S, + auth: T, +} + +impl AsyncRequireAuthorization { + define_inner_service_accessors!(); +} + +impl AsyncRequireAuthorization { + /// Authorize requests using a custom scheme. 
+ /// + /// The `Authorization` header is required to have the value provided. + pub fn new(inner: S, auth: T) -> AsyncRequireAuthorization { + Self { inner, auth } + } + + /// Returns a new [`Layer`] that wraps services with an [`AsyncRequireAuthorizationLayer`] + /// middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(auth: T) -> AsyncRequireAuthorizationLayer { + AsyncRequireAuthorizationLayer::new(auth) + } +} + +impl Service> for AsyncRequireAuthorization +where + Auth: AsyncAuthorizeRequest, + S: Service, Response = Response> + Clone, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let inner = self.inner.clone(); + let authorize = self.auth.authorize(req); + + ResponseFuture { + state: State::Authorize { authorize }, + service: inner, + } + } +} + +pin_project! { + /// Response future for [`AsyncRequireAuthorization`]. + pub struct ResponseFuture + where + Auth: AsyncAuthorizeRequest, + S: Service>, + { + #[pin] + state: State, + service: S, + } +} + +pin_project! { + #[project = StateProj] + enum State { + Authorize { + #[pin] + authorize: A, + }, + Authorized { + #[pin] + fut: SFut, + }, + } +} + +impl Future for ResponseFuture +where + Auth: AsyncAuthorizeRequest, + S: Service, Response = Response>, +{ + type Output = Result, S::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + loop { + match this.state.as_mut().project() { + StateProj::Authorize { authorize } => { + let auth = ready!(authorize.poll(cx)); + match auth { + Ok(req) => { + let fut = this.service.call(req); + this.state.set(State::Authorized { fut }) + } + Err(res) => { + return Poll::Ready(Ok(res)); + } + }; + } + StateProj::Authorized { fut } => { + return fut.poll(cx); + } + } + } + } +} + +/// Trait for authorizing requests. 
+pub trait AsyncAuthorizeRequest { + /// The type of request body returned by `authorize`. + /// + /// Set this to `B` unless you need to change the request body type. + type RequestBody; + + /// The body type used for responses to unauthorized requests. + type ResponseBody; + + /// The Future type returned by `authorize` + type Future: Future, Response>>; + + /// Authorize the request. + /// + /// If the future resolves to `Ok(request)` then the request is allowed through, otherwise not. + fn authorize(&mut self, request: Request) -> Self::Future; +} + +impl AsyncAuthorizeRequest for F +where + F: FnMut(Request) -> Fut, + Fut: Future, Response>>, +{ + type RequestBody = ReqBody; + type ResponseBody = ResBody; + type Future = Fut; + + fn authorize(&mut self, request: Request) -> Self::Future { + self(request) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use futures_util::future::BoxFuture; + use http::{header, StatusCode}; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + + #[derive(Clone, Copy)] + struct MyAuth; + + impl AsyncAuthorizeRequest for MyAuth + where + B: Send + 'static, + { + type RequestBody = B; + type ResponseBody = Body; + type Future = BoxFuture<'static, Result, Response>>; + + fn authorize(&mut self, mut request: Request) -> Self::Future { + Box::pin(async move { + let authorized = request + .headers() + .get(header::AUTHORIZATION) + .and_then(|it| it.to_str().ok()) + .and_then(|it| it.strip_prefix("Bearer ")) + .map(|it| it == "69420") + .unwrap_or(false); + + if authorized { + let user_id = UserId("6969".to_owned()); + request.extensions_mut().insert(user_id); + Ok(request) + } else { + Err(Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body(Body::empty()) + .unwrap()) + } + }) + } + } + + #[derive(Clone, Debug)] + struct UserId(String); + + #[tokio::test] + async fn require_async_auth_works() { + let mut service = ServiceBuilder::new() + 
.layer(AsyncRequireAuthorizationLayer::new(MyAuth)) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer 69420") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn require_async_auth_401() { + let mut service = ServiceBuilder::new() + .layer(AsyncRequireAuthorizationLayer::new(MyAuth)) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer deez") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo-vendor/tower-http/src/auth/mod.rs b/.cargo-vendor/tower-http/src/auth/mod.rs new file mode 100644 index 0000000000..fc8c2308f1 --- /dev/null +++ b/.cargo-vendor/tower-http/src/auth/mod.rs @@ -0,0 +1,13 @@ +//! Authorization related middleware. + +pub mod add_authorization; +pub mod async_require_authorization; +pub mod require_authorization; + +#[doc(inline)] +pub use self::{ + add_authorization::{AddAuthorization, AddAuthorizationLayer}, + async_require_authorization::{ + AsyncAuthorizeRequest, AsyncRequireAuthorization, AsyncRequireAuthorizationLayer, + }, +}; diff --git a/.cargo-vendor/tower-http/src/auth/require_authorization.rs b/.cargo-vendor/tower-http/src/auth/require_authorization.rs new file mode 100644 index 0000000000..d5c9508fba --- /dev/null +++ b/.cargo-vendor/tower-http/src/auth/require_authorization.rs @@ -0,0 +1,405 @@ +//! Authorize requests using [`ValidateRequest`]. +//! +//! [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +//! +//! # Example +//! +//! ``` +//! 
use tower_http::validate_request::{ValidateRequest, ValidateRequestHeader, ValidateRequestHeaderLayer}; +//! use http::{Request, Response, StatusCode, header::AUTHORIZATION}; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let mut service = ServiceBuilder::new() +//! // Require the `Authorization` header to be `Bearer passwordlol` +//! .layer(ValidateRequestHeaderLayer::bearer("passwordlol")) +//! .service_fn(handle); +//! +//! // Requests with the correct token are allowed through +//! let request = Request::builder() +//! .header(AUTHORIZATION, "Bearer passwordlol") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::OK, response.status()); +//! +//! // Requests with an invalid token get a `401 Unauthorized` response +//! let request = Request::builder() +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::UNAUTHORIZED, response.status()); +//! # Ok(()) +//! # } +//! ``` +//! +//! Custom validation can be made by implementing [`ValidateRequest`]. + +use crate::validate_request::{ValidateRequest, ValidateRequestHeader, ValidateRequestHeaderLayer}; +use base64::Engine as _; +use http::{ + header::{self, HeaderValue}, + Request, Response, StatusCode, +}; +use http_body::Body; +use std::{fmt, marker::PhantomData}; + +const BASE64: base64::engine::GeneralPurpose = base64::engine::general_purpose::STANDARD; + +impl ValidateRequestHeader> { + /// Authorize requests using a username and password pair. 
+ /// + /// The `Authorization` header is required to be `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. + pub fn basic(inner: S, username: &str, value: &str) -> Self + where + ResBody: Body + Default, + { + Self::custom(inner, Basic::new(username, value)) + } +} + +impl ValidateRequestHeaderLayer> { + /// Authorize requests using a username and password pair. + /// + /// The `Authorization` header is required to be `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. + pub fn basic(username: &str, password: &str) -> Self + where + ResBody: Body + Default, + { + Self::custom(Basic::new(username, password)) + } +} + +impl ValidateRequestHeader> { + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header is required to be `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(inner: S, token: &str) -> Self + where + ResBody: Body + Default, + { + Self::custom(inner, Bearer::new(token)) + } +} + +impl ValidateRequestHeaderLayer> { + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header is required to be `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(token: &str) -> Self + where + ResBody: Body + Default, + { + Self::custom(Bearer::new(token)) + } +} + +/// Type that performs "bearer token" authorization. +/// +/// See [`ValidateRequestHeader::bearer`] for more details. 
+pub struct Bearer { + header_value: HeaderValue, + _ty: PhantomData ResBody>, +} + +impl Bearer { + fn new(token: &str) -> Self + where + ResBody: Body + Default, + { + Self { + header_value: format!("Bearer {}", token) + .parse() + .expect("token is not a valid header value"), + _ty: PhantomData, + } + } +} + +impl Clone for Bearer { + fn clone(&self) -> Self { + Self { + header_value: self.header_value.clone(), + _ty: PhantomData, + } + } +} + +impl fmt::Debug for Bearer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Bearer") + .field("header_value", &self.header_value) + .finish() + } +} + +impl ValidateRequest for Bearer +where + ResBody: Body + Default, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, request: &mut Request) -> Result<(), Response> { + match request.headers().get(header::AUTHORIZATION) { + Some(actual) if actual == self.header_value => Ok(()), + _ => { + let mut res = Response::new(ResBody::default()); + *res.status_mut() = StatusCode::UNAUTHORIZED; + Err(res) + } + } + } +} + +/// Type that performs basic authorization. +/// +/// See [`ValidateRequestHeader::basic`] for more details. 
+pub struct Basic { + header_value: HeaderValue, + _ty: PhantomData ResBody>, +} + +impl Basic { + fn new(username: &str, password: &str) -> Self + where + ResBody: Body + Default, + { + let encoded = BASE64.encode(format!("{}:{}", username, password)); + let header_value = format!("Basic {}", encoded).parse().unwrap(); + Self { + header_value, + _ty: PhantomData, + } + } +} + +impl Clone for Basic { + fn clone(&self) -> Self { + Self { + header_value: self.header_value.clone(), + _ty: PhantomData, + } + } +} + +impl fmt::Debug for Basic { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Basic") + .field("header_value", &self.header_value) + .finish() + } +} + +impl ValidateRequest for Basic +where + ResBody: Body + Default, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, request: &mut Request) -> Result<(), Response> { + match request.headers().get(header::AUTHORIZATION) { + Some(actual) if actual == self.header_value => Ok(()), + _ => { + let mut res = Response::new(ResBody::default()); + *res.status_mut() = StatusCode::UNAUTHORIZED; + res.headers_mut() + .insert(header::WWW_AUTHENTICATE, "Basic".parse().unwrap()); + Err(res) + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::validate_request::ValidateRequestHeaderLayer; + + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::header; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + use tower_service::Service; + + #[tokio::test] + async fn valid_basic_token() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("Basic {}", BASE64.encode("foo:bar")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn invalid_basic_token() { + let mut 
service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("Basic {}", BASE64.encode("wrong:credentials")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + + let www_authenticate = res.headers().get(header::WWW_AUTHENTICATE).unwrap(); + assert_eq!(www_authenticate, "Basic"); + } + + #[tokio::test] + async fn valid_bearer_token() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer foobar") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn basic_auth_is_case_sensitive_in_prefix() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("basic {}", BASE64.encode("foo:bar")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn basic_auth_is_case_sensitive_in_value() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("Basic {}", BASE64.encode("Foo:bar")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn invalid_bearer_token() { + let 
mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer wat") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn bearer_token_is_case_sensitive_in_prefix() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "bearer foobar") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn bearer_token_is_case_sensitive_in_token() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer Foobar") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo-vendor/tower-http/src/body.rs b/.cargo-vendor/tower-http/src/body.rs new file mode 100644 index 0000000000..815a0d109a --- /dev/null +++ b/.cargo-vendor/tower-http/src/body.rs @@ -0,0 +1,121 @@ +//! Body types. +//! +//! All these are wrappers around other body types. You shouldn't have to use them in your code. +//! Use `http-body-util` instead. +//! +//! They exist because we don't want to expose types from `http-body-util` in `tower-http`s public +//! API. 
+ +#![allow(missing_docs)] + +use std::convert::Infallible; + +use bytes::{Buf, Bytes}; +use http_body::Body; +use pin_project_lite::pin_project; + +use crate::BoxError; + +macro_rules! body_methods { + () => { + #[inline] + fn poll_frame( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll, Self::Error>>> { + self.project().inner.poll_frame(cx) + } + + #[inline] + fn is_end_stream(&self) -> bool { + Body::is_end_stream(&self.inner) + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + Body::size_hint(&self.inner) + } + }; +} + +pin_project! { + #[derive(Default)] + pub struct Full { + #[pin] + pub(crate) inner: http_body_util::Full + } +} + +impl Full { + #[allow(dead_code)] + pub(crate) fn new(inner: http_body_util::Full) -> Self { + Self { inner } + } +} + +impl Body for Full { + type Data = Bytes; + type Error = Infallible; + + body_methods!(); +} + +pin_project! { + pub struct Limited { + #[pin] + pub(crate) inner: http_body_util::Limited + } +} + +impl Limited { + #[allow(dead_code)] + pub(crate) fn new(inner: http_body_util::Limited) -> Self { + Self { inner } + } +} + +impl Body for Limited +where + B: Body, + B::Error: Into, +{ + type Data = B::Data; + type Error = BoxError; + + body_methods!(); +} + +pin_project! 
{ + pub struct UnsyncBoxBody { + #[pin] + pub(crate) inner: http_body_util::combinators::UnsyncBoxBody + } +} + +impl Default for UnsyncBoxBody +where + D: Buf + 'static, +{ + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} + +impl UnsyncBoxBody { + #[allow(dead_code)] + pub(crate) fn new(inner: http_body_util::combinators::UnsyncBoxBody) -> Self { + Self { inner } + } +} + +impl Body for UnsyncBoxBody +where + D: Buf, +{ + type Data = D; + type Error = E; + + body_methods!(); +} diff --git a/.cargo-vendor/tower-http/src/builder.rs b/.cargo-vendor/tower-http/src/builder.rs new file mode 100644 index 0000000000..4c27a95cce --- /dev/null +++ b/.cargo-vendor/tower-http/src/builder.rs @@ -0,0 +1,597 @@ +use tower::ServiceBuilder; + +#[allow(unused_imports)] +use http::header::HeaderName; +#[allow(unused_imports)] +use tower_layer::Stack; + +/// Extension trait that adds methods to [`tower::ServiceBuilder`] for adding middleware from +/// tower-http. +/// +/// [`Service`]: tower::Service +/// +/// # Example +/// +/// ```rust +/// use http::{Request, Response, header::HeaderName}; +/// use bytes::Bytes; +/// use http_body_util::Full; +/// use std::{time::Duration, convert::Infallible}; +/// use tower::{ServiceBuilder, ServiceExt, Service}; +/// use tower_http::ServiceBuilderExt; +/// +/// async fn handle(request: Request>) -> Result>, Infallible> { +/// Ok(Response::new(Full::default())) +/// } +/// +/// # #[tokio::main] +/// # async fn main() { +/// let service = ServiceBuilder::new() +/// // Methods from tower +/// .timeout(Duration::from_secs(30)) +/// // Methods from tower-http +/// .trace_for_http() +/// .propagate_header(HeaderName::from_static("x-request-id")) +/// .service_fn(handle); +/// # let mut service = service; +/// # service.ready().await.unwrap().call(Request::new(Full::default())).await.unwrap(); +/// # } +/// ``` +#[cfg(feature = "util")] +// ^ work around rustdoc not inferring doc(cfg)s for cfg's from surrounding scopes +pub 
trait ServiceBuilderExt: crate::sealed::Sealed + Sized { + /// Propagate a header from the request to the response. + /// + /// See [`tower_http::propagate_header`] for more details. + /// + /// [`tower_http::propagate_header`]: crate::propagate_header + #[cfg(feature = "propagate-header")] + fn propagate_header( + self, + header: HeaderName, + ) -> ServiceBuilder>; + + /// Add some shareable value to [request extensions]. + /// + /// See [`tower_http::add_extension`] for more details. + /// + /// [`tower_http::add_extension`]: crate::add_extension + /// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html + #[cfg(feature = "add-extension")] + fn add_extension( + self, + value: T, + ) -> ServiceBuilder, L>>; + + /// Apply a transformation to the request body. + /// + /// See [`tower_http::map_request_body`] for more details. + /// + /// [`tower_http::map_request_body`]: crate::map_request_body + #[cfg(feature = "map-request-body")] + fn map_request_body( + self, + f: F, + ) -> ServiceBuilder, L>>; + + /// Apply a transformation to the response body. + /// + /// See [`tower_http::map_response_body`] for more details. + /// + /// [`tower_http::map_response_body`]: crate::map_response_body + #[cfg(feature = "map-response-body")] + fn map_response_body( + self, + f: F, + ) -> ServiceBuilder, L>>; + + /// Compresses response bodies. + /// + /// See [`tower_http::compression`] for more details. + /// + /// [`tower_http::compression`]: crate::compression + #[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + ))] + fn compression(self) -> ServiceBuilder>; + + /// Decompress response bodies. + /// + /// See [`tower_http::decompression`] for more details. 
+ /// + /// [`tower_http::decompression`]: crate::decompression + #[cfg(any( + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + ))] + fn decompression(self) -> ServiceBuilder>; + + /// High level tracing that classifies responses using HTTP status codes. + /// + /// This method does not support customizing the output, to do that use [`TraceLayer`] + /// instead. + /// + /// See [`tower_http::trace`] for more details. + /// + /// [`tower_http::trace`]: crate::trace + /// [`TraceLayer`]: crate::trace::TraceLayer + #[cfg(feature = "trace")] + fn trace_for_http( + self, + ) -> ServiceBuilder, L>>; + + /// High level tracing that classifies responses using gRPC headers. + /// + /// This method does not support customizing the output, to do that use [`TraceLayer`] + /// instead. + /// + /// See [`tower_http::trace`] for more details. + /// + /// [`tower_http::trace`]: crate::trace + /// [`TraceLayer`]: crate::trace::TraceLayer + #[cfg(feature = "trace")] + fn trace_for_grpc( + self, + ) -> ServiceBuilder, L>>; + + /// Follow redirect resposes using the [`Standard`] policy. + /// + /// See [`tower_http::follow_redirect`] for more details. + /// + /// [`tower_http::follow_redirect`]: crate::follow_redirect + /// [`Standard`]: crate::follow_redirect::policy::Standard + #[cfg(feature = "follow-redirect")] + fn follow_redirects( + self, + ) -> ServiceBuilder< + Stack< + crate::follow_redirect::FollowRedirectLayer, + L, + >, + >; + + /// Mark headers as [sensitive] on both requests and responses. + /// + /// See [`tower_http::sensitive_headers`] for more details. 
+ /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_headers( + self, + headers: I, + ) -> ServiceBuilder> + where + I: IntoIterator; + + /// Mark headers as [sensitive] on both requests. + /// + /// See [`tower_http::sensitive_headers`] for more details. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_request_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder>; + + /// Mark headers as [sensitive] on both responses. + /// + /// See [`tower_http::sensitive_headers`] for more details. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_response_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder>; + + /// Insert a header into the request. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn override_request_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Append a header into the request. + /// + /// If previous values exist, the header will have multiple values. + /// + /// See [`tower_http::set_header`] for more details. 
+ /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn append_request_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Insert a header into the request, if the header is not already present. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn insert_request_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Insert a header into the response. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn override_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Append a header into the response. + /// + /// If previous values exist, the header will have multiple values. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn append_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Insert a header into the response, if the header is not already present. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn insert_response_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Add request id header and extension. + /// + /// See [`tower_http::request_id`] for more details. 
+ /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn set_request_id( + self, + header_name: HeaderName, + make_request_id: M, + ) -> ServiceBuilder, L>> + where + M: crate::request_id::MakeRequestId; + + /// Add request id header and extension, using `x-request-id` as the header name. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn set_x_request_id( + self, + make_request_id: M, + ) -> ServiceBuilder, L>> + where + M: crate::request_id::MakeRequestId, + { + self.set_request_id( + HeaderName::from_static(crate::request_id::X_REQUEST_ID), + make_request_id, + ) + } + + /// Propgate request ids from requests to responses. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn propagate_request_id( + self, + header_name: HeaderName, + ) -> ServiceBuilder>; + + /// Propgate request ids from requests to responses, using `x-request-id` as the header name. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn propagate_x_request_id( + self, + ) -> ServiceBuilder> { + self.propagate_request_id(HeaderName::from_static(crate::request_id::X_REQUEST_ID)) + } + + /// Catch panics and convert them into `500 Internal Server` responses. + /// + /// See [`tower_http::catch_panic`] for more details. + /// + /// [`tower_http::catch_panic`]: crate::catch_panic + #[cfg(feature = "catch-panic")] + fn catch_panic( + self, + ) -> ServiceBuilder< + Stack, L>, + >; + + /// Intercept requests with over-sized payloads and convert them into + /// `413 Payload Too Large` responses. + /// + /// See [`tower_http::limit`] for more details. 
+ /// + /// [`tower_http::limit`]: crate::limit + #[cfg(feature = "limit")] + fn request_body_limit( + self, + limit: usize, + ) -> ServiceBuilder>; + + /// Remove trailing slashes from paths. + /// + /// See [`tower_http::normalize_path`] for more details. + /// + /// [`tower_http::normalize_path`]: crate::normalize_path + #[cfg(feature = "normalize-path")] + fn trim_trailing_slash( + self, + ) -> ServiceBuilder>; +} + +impl crate::sealed::Sealed for ServiceBuilder {} + +impl ServiceBuilderExt for ServiceBuilder { + #[cfg(feature = "propagate-header")] + fn propagate_header( + self, + header: HeaderName, + ) -> ServiceBuilder> { + self.layer(crate::propagate_header::PropagateHeaderLayer::new(header)) + } + + #[cfg(feature = "add-extension")] + fn add_extension( + self, + value: T, + ) -> ServiceBuilder, L>> { + self.layer(crate::add_extension::AddExtensionLayer::new(value)) + } + + #[cfg(feature = "map-request-body")] + fn map_request_body( + self, + f: F, + ) -> ServiceBuilder, L>> { + self.layer(crate::map_request_body::MapRequestBodyLayer::new(f)) + } + + #[cfg(feature = "map-response-body")] + fn map_response_body( + self, + f: F, + ) -> ServiceBuilder, L>> { + self.layer(crate::map_response_body::MapResponseBodyLayer::new(f)) + } + + #[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + ))] + fn compression(self) -> ServiceBuilder> { + self.layer(crate::compression::CompressionLayer::new()) + } + + #[cfg(any( + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + ))] + fn decompression(self) -> ServiceBuilder> { + self.layer(crate::decompression::DecompressionLayer::new()) + } + + #[cfg(feature = "trace")] + fn trace_for_http( + self, + ) -> ServiceBuilder, L>> { + self.layer(crate::trace::TraceLayer::new_for_http()) + } + + #[cfg(feature = "trace")] + fn trace_for_grpc( + self, + ) -> 
ServiceBuilder, L>> { + self.layer(crate::trace::TraceLayer::new_for_grpc()) + } + + #[cfg(feature = "follow-redirect")] + fn follow_redirects( + self, + ) -> ServiceBuilder< + Stack< + crate::follow_redirect::FollowRedirectLayer, + L, + >, + > { + self.layer(crate::follow_redirect::FollowRedirectLayer::new()) + } + + #[cfg(feature = "sensitive-headers")] + fn sensitive_headers( + self, + headers: I, + ) -> ServiceBuilder> + where + I: IntoIterator, + { + self.layer(crate::sensitive_headers::SetSensitiveHeadersLayer::new( + headers, + )) + } + + #[cfg(feature = "sensitive-headers")] + fn sensitive_request_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder> { + self.layer(crate::sensitive_headers::SetSensitiveRequestHeadersLayer::from_shared(headers)) + } + + #[cfg(feature = "sensitive-headers")] + fn sensitive_response_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder> { + self.layer(crate::sensitive_headers::SetSensitiveResponseHeadersLayer::from_shared(headers)) + } + + #[cfg(feature = "set-header")] + fn override_request_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetRequestHeaderLayer::overriding( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn append_request_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetRequestHeaderLayer::appending( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn insert_request_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetRequestHeaderLayer::if_not_present( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn override_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetResponseHeaderLayer::overriding( + header_name, + 
make, + )) + } + + #[cfg(feature = "set-header")] + fn append_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetResponseHeaderLayer::appending( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn insert_response_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetResponseHeaderLayer::if_not_present( + header_name, + make, + )) + } + + #[cfg(feature = "request-id")] + fn set_request_id( + self, + header_name: HeaderName, + make_request_id: M, + ) -> ServiceBuilder, L>> + where + M: crate::request_id::MakeRequestId, + { + self.layer(crate::request_id::SetRequestIdLayer::new( + header_name, + make_request_id, + )) + } + + #[cfg(feature = "request-id")] + fn propagate_request_id( + self, + header_name: HeaderName, + ) -> ServiceBuilder> { + self.layer(crate::request_id::PropagateRequestIdLayer::new(header_name)) + } + + #[cfg(feature = "catch-panic")] + fn catch_panic( + self, + ) -> ServiceBuilder< + Stack, L>, + > { + self.layer(crate::catch_panic::CatchPanicLayer::new()) + } + + #[cfg(feature = "limit")] + fn request_body_limit( + self, + limit: usize, + ) -> ServiceBuilder> { + self.layer(crate::limit::RequestBodyLimitLayer::new(limit)) + } + + #[cfg(feature = "normalize-path")] + fn trim_trailing_slash( + self, + ) -> ServiceBuilder> { + self.layer(crate::normalize_path::NormalizePathLayer::trim_trailing_slash()) + } +} diff --git a/.cargo-vendor/tower-http/src/catch_panic.rs b/.cargo-vendor/tower-http/src/catch_panic.rs new file mode 100644 index 0000000000..3f1c227921 --- /dev/null +++ b/.cargo-vendor/tower-http/src/catch_panic.rs @@ -0,0 +1,409 @@ +//! Convert panics into responses. +//! +//! Note that using panics for error handling is _not_ recommended. Prefer instead to use `Result` +//! whenever possible. +//! +//! # Example +//! +//! ```rust +//! 
use http::{Request, Response, header::HeaderName}; +//! use std::convert::Infallible; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::catch_panic::CatchPanicLayer; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! panic!("something went wrong...") +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // Catch panics and convert them into responses. +//! .layer(CatchPanicLayer::new()) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), 500); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Using a custom panic handler: +//! +//! ```rust +//! use http::{Request, StatusCode, Response, header::{self, HeaderName}}; +//! use std::{any::Any, convert::Infallible}; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::catch_panic::CatchPanicLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! panic!("something went wrong...") +//! } +//! +//! fn handle_panic(err: Box) -> Response> { +//! let details = if let Some(s) = err.downcast_ref::() { +//! s.clone() +//! } else if let Some(s) = err.downcast_ref::<&str>() { +//! s.to_string() +//! } else { +//! "Unknown panic message".to_string() +//! }; +//! +//! let body = serde_json::json!({ +//! "error": { +//! "kind": "panic", +//! "details": details, +//! } +//! }); +//! let body = serde_json::to_string(&body).unwrap(); +//! +//! Response::builder() +//! .status(StatusCode::INTERNAL_SERVER_ERROR) +//! .header(header::CONTENT_TYPE, "application/json") +//! .body(Full::from(body)) +//! .unwrap() +//! } +//! +//! 
let svc = ServiceBuilder::new() +//! // Use `handle_panic` to create the response. +//! .layer(CatchPanicLayer::custom(handle_panic)) +//! .service_fn(handle); +//! # +//! # Ok(()) +//! # } +//! ``` + +use bytes::Bytes; +use futures_util::future::{CatchUnwind, FutureExt}; +use http::{HeaderValue, Request, Response, StatusCode}; +use http_body::Body; +use http_body_util::BodyExt; +use pin_project_lite::pin_project; +use std::{ + any::Any, + future::Future, + panic::AssertUnwindSafe, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +use crate::{ + body::{Full, UnsyncBoxBody}, + BoxError, +}; + +/// Layer that applies the [`CatchPanic`] middleware that catches panics and converts them into +/// `500 Internal Server` responses. +/// +/// See the [module docs](self) for an example. +#[derive(Debug, Clone, Copy, Default)] +pub struct CatchPanicLayer { + panic_handler: T, +} + +impl CatchPanicLayer { + /// Create a new `CatchPanicLayer` with the default panic handler. + pub fn new() -> Self { + CatchPanicLayer { + panic_handler: DefaultResponseForPanic, + } + } +} + +impl CatchPanicLayer { + /// Create a new `CatchPanicLayer` with a custom panic handler. + pub fn custom(panic_handler: T) -> Self + where + T: ResponseForPanic, + { + Self { panic_handler } + } +} + +impl Layer for CatchPanicLayer +where + T: Clone, +{ + type Service = CatchPanic; + + fn layer(&self, inner: S) -> Self::Service { + CatchPanic { + inner, + panic_handler: self.panic_handler.clone(), + } + } +} + +/// Middleware that catches panics and converts them into `500 Internal Server` responses. +/// +/// See the [module docs](self) for an example. +#[derive(Debug, Clone, Copy)] +pub struct CatchPanic { + inner: S, + panic_handler: T, +} + +impl CatchPanic { + /// Create a new `CatchPanic` with the default panic handler. 
+ pub fn new(inner: S) -> Self { + Self { + inner, + panic_handler: DefaultResponseForPanic, + } + } +} + +impl CatchPanic { + define_inner_service_accessors!(); + + /// Create a new `CatchPanic` with a custom panic handler. + pub fn custom(inner: S, panic_handler: T) -> Self + where + T: ResponseForPanic, + { + Self { + inner, + panic_handler, + } + } +} + +impl Service> for CatchPanic +where + S: Service, Response = Response>, + ResBody: Body + Send + 'static, + ResBody::Error: Into, + T: ResponseForPanic + Clone, + T::ResponseBody: Body + Send + 'static, + ::Error: Into, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + match std::panic::catch_unwind(AssertUnwindSafe(|| self.inner.call(req))) { + Ok(future) => ResponseFuture { + kind: Kind::Future { + future: AssertUnwindSafe(future).catch_unwind(), + panic_handler: Some(self.panic_handler.clone()), + }, + }, + Err(panic_err) => ResponseFuture { + kind: Kind::Panicked { + panic_err: Some(panic_err), + panic_handler: Some(self.panic_handler.clone()), + }, + }, + } + } +} + +pin_project! { + /// Response future for [`CatchPanic`]. + pub struct ResponseFuture { + #[pin] + kind: Kind, + } +} + +pin_project! 
{ + #[project = KindProj] + enum Kind { + Panicked { + panic_err: Option>, + panic_handler: Option, + }, + Future { + #[pin] + future: CatchUnwind>, + panic_handler: Option, + } + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + ResBody: Body + Send + 'static, + ResBody::Error: Into, + T: ResponseForPanic, + T::ResponseBody: Body + Send + 'static, + ::Error: Into, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().kind.project() { + KindProj::Panicked { + panic_err, + panic_handler, + } => { + let panic_handler = panic_handler + .take() + .expect("future polled after completion"); + let panic_err = panic_err.take().expect("future polled after completion"); + Poll::Ready(Ok(response_for_panic(panic_handler, panic_err))) + } + KindProj::Future { + future, + panic_handler, + } => match ready!(future.poll(cx)) { + Ok(Ok(res)) => { + Poll::Ready(Ok(res.map(|body| { + UnsyncBoxBody::new(body.map_err(Into::into).boxed_unsync()) + }))) + } + Ok(Err(svc_err)) => Poll::Ready(Err(svc_err)), + Err(panic_err) => Poll::Ready(Ok(response_for_panic( + panic_handler + .take() + .expect("future polled after completion"), + panic_err, + ))), + }, + } + } +} + +fn response_for_panic( + mut panic_handler: T, + err: Box, +) -> Response> +where + T: ResponseForPanic, + T::ResponseBody: Body + Send + 'static, + ::Error: Into, +{ + panic_handler + .response_for_panic(err) + .map(|body| UnsyncBoxBody::new(body.map_err(Into::into).boxed_unsync())) +} + +/// Trait for creating responses from panics. +pub trait ResponseForPanic: Clone { + /// The body type used for responses to panics. + type ResponseBody; + + /// Create a response from the panic error. 
+ fn response_for_panic( + &mut self, + err: Box, + ) -> Response; +} + +impl ResponseForPanic for F +where + F: FnMut(Box) -> Response + Clone, +{ + type ResponseBody = B; + + fn response_for_panic( + &mut self, + err: Box, + ) -> Response { + self(err) + } +} + +/// The default `ResponseForPanic` used by `CatchPanic`. +/// +/// It will log the panic message and return a `500 Internal Server` error response with an empty +/// body. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct DefaultResponseForPanic; + +impl ResponseForPanic for DefaultResponseForPanic { + type ResponseBody = Full; + + fn response_for_panic( + &mut self, + err: Box, + ) -> Response { + if let Some(s) = err.downcast_ref::() { + tracing::error!("Service panicked: {}", s); + } else if let Some(s) = err.downcast_ref::<&str>() { + tracing::error!("Service panicked: {}", s); + } else { + tracing::error!( + "Service panicked but `CatchPanic` was unable to downcast the panic info" + ); + }; + + let mut res = Response::new(Full::new(http_body_util::Full::from("Service panicked"))); + *res.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + + #[allow(clippy::declare_interior_mutable_const)] + const TEXT_PLAIN: HeaderValue = HeaderValue::from_static("text/plain; charset=utf-8"); + res.headers_mut() + .insert(http::header::CONTENT_TYPE, TEXT_PLAIN); + + res + } +} + +#[cfg(test)] +mod tests { + #![allow(unreachable_code)] + + use super::*; + use crate::test_helpers::Body; + use http::Response; + use std::convert::Infallible; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn panic_before_returning_future() { + let svc = ServiceBuilder::new() + .layer(CatchPanicLayer::new()) + .service_fn(|_: Request| { + panic!("service panic"); + async { Ok::<_, Infallible>(Response::new(Body::empty())) } + }); + + let req = Request::new(Body::empty()); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); + let body = 
crate::test_helpers::to_bytes(res).await.unwrap(); + assert_eq!(&body[..], b"Service panicked"); + } + + #[tokio::test] + async fn panic_in_future() { + let svc = ServiceBuilder::new() + .layer(CatchPanicLayer::new()) + .service_fn(|_: Request| async { + panic!("future panic"); + Ok::<_, Infallible>(Response::new(Body::empty())) + }); + + let req = Request::new(Body::empty()); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); + let body = crate::test_helpers::to_bytes(res).await.unwrap(); + assert_eq!(&body[..], b"Service panicked"); + } +} diff --git a/.cargo-vendor/tower-http/src/classify/grpc_errors_as_failures.rs b/.cargo-vendor/tower-http/src/classify/grpc_errors_as_failures.rs new file mode 100644 index 0000000000..b88606b5b0 --- /dev/null +++ b/.cargo-vendor/tower-http/src/classify/grpc_errors_as_failures.rs @@ -0,0 +1,358 @@ +use super::{ClassifiedResponse, ClassifyEos, ClassifyResponse, SharedClassifier}; +use bitflags::bitflags; +use http::{HeaderMap, Response}; +use std::{fmt, num::NonZeroI32}; + +/// gRPC status codes. +/// +/// These variants match the [gRPC status codes]. +/// +/// [gRPC status codes]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc +#[derive(Clone, Copy, Debug)] +pub enum GrpcCode { + /// The operation completed successfully. + Ok, + /// The operation was cancelled. + Cancelled, + /// Unknown error. + Unknown, + /// Client specified an invalid argument. + InvalidArgument, + /// Deadline expired before operation could complete. + DeadlineExceeded, + /// Some requested entity was not found. + NotFound, + /// Some entity that we attempted to create already exists. + AlreadyExists, + /// The caller does not have permission to execute the specified operation. + PermissionDenied, + /// Some resource has been exhausted. + ResourceExhausted, + /// The system is not in a state required for the operation's execution. 
+ FailedPrecondition, + /// The operation was aborted. + Aborted, + /// Operation was attempted past the valid range. + OutOfRange, + /// Operation is not implemented or not supported. + Unimplemented, + /// Internal error. + Internal, + /// The service is currently unavailable. + Unavailable, + /// Unrecoverable data loss or corruption. + DataLoss, + /// The request does not have valid authentication credentials + Unauthenticated, +} + +impl GrpcCode { + pub(crate) fn into_bitmask(self) -> GrpcCodeBitmask { + match self { + Self::Ok => GrpcCodeBitmask::OK, + Self::Cancelled => GrpcCodeBitmask::CANCELLED, + Self::Unknown => GrpcCodeBitmask::UNKNOWN, + Self::InvalidArgument => GrpcCodeBitmask::INVALID_ARGUMENT, + Self::DeadlineExceeded => GrpcCodeBitmask::DEADLINE_EXCEEDED, + Self::NotFound => GrpcCodeBitmask::NOT_FOUND, + Self::AlreadyExists => GrpcCodeBitmask::ALREADY_EXISTS, + Self::PermissionDenied => GrpcCodeBitmask::PERMISSION_DENIED, + Self::ResourceExhausted => GrpcCodeBitmask::RESOURCE_EXHAUSTED, + Self::FailedPrecondition => GrpcCodeBitmask::FAILED_PRECONDITION, + Self::Aborted => GrpcCodeBitmask::ABORTED, + Self::OutOfRange => GrpcCodeBitmask::OUT_OF_RANGE, + Self::Unimplemented => GrpcCodeBitmask::UNIMPLEMENTED, + Self::Internal => GrpcCodeBitmask::INTERNAL, + Self::Unavailable => GrpcCodeBitmask::UNAVAILABLE, + Self::DataLoss => GrpcCodeBitmask::DATA_LOSS, + Self::Unauthenticated => GrpcCodeBitmask::UNAUTHENTICATED, + } + } +} + +bitflags! 
{ + #[derive(Debug, Clone, Copy)] + pub(crate) struct GrpcCodeBitmask: u32 { + const OK = 0b00000000000000001; + const CANCELLED = 0b00000000000000010; + const UNKNOWN = 0b00000000000000100; + const INVALID_ARGUMENT = 0b00000000000001000; + const DEADLINE_EXCEEDED = 0b00000000000010000; + const NOT_FOUND = 0b00000000000100000; + const ALREADY_EXISTS = 0b00000000001000000; + const PERMISSION_DENIED = 0b00000000010000000; + const RESOURCE_EXHAUSTED = 0b00000000100000000; + const FAILED_PRECONDITION = 0b00000001000000000; + const ABORTED = 0b00000010000000000; + const OUT_OF_RANGE = 0b00000100000000000; + const UNIMPLEMENTED = 0b00001000000000000; + const INTERNAL = 0b00010000000000000; + const UNAVAILABLE = 0b00100000000000000; + const DATA_LOSS = 0b01000000000000000; + const UNAUTHENTICATED = 0b10000000000000000; + } +} + +impl GrpcCodeBitmask { + fn try_from_u32(code: u32) -> Option { + match code { + 0 => Some(Self::OK), + 1 => Some(Self::CANCELLED), + 2 => Some(Self::UNKNOWN), + 3 => Some(Self::INVALID_ARGUMENT), + 4 => Some(Self::DEADLINE_EXCEEDED), + 5 => Some(Self::NOT_FOUND), + 6 => Some(Self::ALREADY_EXISTS), + 7 => Some(Self::PERMISSION_DENIED), + 8 => Some(Self::RESOURCE_EXHAUSTED), + 9 => Some(Self::FAILED_PRECONDITION), + 10 => Some(Self::ABORTED), + 11 => Some(Self::OUT_OF_RANGE), + 12 => Some(Self::UNIMPLEMENTED), + 13 => Some(Self::INTERNAL), + 14 => Some(Self::UNAVAILABLE), + 15 => Some(Self::DATA_LOSS), + 16 => Some(Self::UNAUTHENTICATED), + _ => None, + } + } +} + +/// Response classifier for gRPC responses. +/// +/// gRPC doesn't use normal HTTP statuses for indicating success or failure but instead a special +/// header that might appear in a trailer. +/// +/// Responses are considered successful if +/// +/// - `grpc-status` header value contains a success value. +/// default). +/// - `grpc-status` header is missing. +/// - `grpc-status` header value isn't a valid `String`. +/// - `grpc-status` header value can't parsed into an `i32`. 
+/// +/// All others are considered failures. +#[derive(Debug, Clone)] +pub struct GrpcErrorsAsFailures { + success_codes: GrpcCodeBitmask, +} + +impl Default for GrpcErrorsAsFailures { + fn default() -> Self { + Self::new() + } +} + +impl GrpcErrorsAsFailures { + /// Create a new [`GrpcErrorsAsFailures`]. + pub fn new() -> Self { + Self { + success_codes: GrpcCodeBitmask::OK, + } + } + + /// Change which gRPC codes are considered success. + /// + /// Defaults to only considering `Ok` as success. + /// + /// `Ok` will always be considered a success. + /// + /// # Example + /// + /// Servers might not want to consider `Invalid Argument` or `Not Found` as failures since + /// thats likely the clients fault: + /// + /// ```rust + /// use tower_http::classify::{GrpcErrorsAsFailures, GrpcCode}; + /// + /// let classifier = GrpcErrorsAsFailures::new() + /// .with_success(GrpcCode::InvalidArgument) + /// .with_success(GrpcCode::NotFound); + /// ``` + pub fn with_success(mut self, code: GrpcCode) -> Self { + self.success_codes |= code.into_bitmask(); + self + } + + /// Returns a [`MakeClassifier`](super::MakeClassifier) that produces `GrpcErrorsAsFailures`. + /// + /// This is a convenience function that simply calls `SharedClassifier::new`. 
+ pub fn make_classifier() -> SharedClassifier { + SharedClassifier::new(Self::new()) + } +} + +impl ClassifyResponse for GrpcErrorsAsFailures { + type FailureClass = GrpcFailureClass; + type ClassifyEos = GrpcEosErrorsAsFailures; + + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse { + match classify_grpc_metadata(res.headers(), self.success_codes) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => ClassifiedResponse::Ready(Ok(())), + ParsedGrpcStatus::NonSuccess(status) => { + ClassifiedResponse::Ready(Err(GrpcFailureClass::Code(status))) + } + ParsedGrpcStatus::GrpcStatusHeaderMissing => { + ClassifiedResponse::RequiresEos(GrpcEosErrorsAsFailures { + success_codes: self.success_codes, + }) + } + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + GrpcFailureClass::Error(error.to_string()) + } +} + +/// The [`ClassifyEos`] for [`GrpcErrorsAsFailures`]. +#[derive(Debug, Clone)] +pub struct GrpcEosErrorsAsFailures { + success_codes: GrpcCodeBitmask, +} + +impl ClassifyEos for GrpcEosErrorsAsFailures { + type FailureClass = GrpcFailureClass; + + fn classify_eos(self, trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass> { + if let Some(trailers) = trailers { + match classify_grpc_metadata(trailers, self.success_codes) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::GrpcStatusHeaderMissing + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => Ok(()), + ParsedGrpcStatus::NonSuccess(status) => Err(GrpcFailureClass::Code(status)), + } + } else { + Ok(()) + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + GrpcFailureClass::Error(error.to_string()) + } +} + +/// The failure class for [`GrpcErrorsAsFailures`]. +#[derive(Debug)] +pub enum GrpcFailureClass { + /// A gRPC response was classified as a failure with the corresponding status. 
+ Code(std::num::NonZeroI32), + /// A gRPC response was classified as an error with the corresponding error description. + Error(String), +} + +impl fmt::Display for GrpcFailureClass { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Code(code) => write!(f, "Code: {}", code), + Self::Error(error) => write!(f, "Error: {}", error), + } + } +} + +pub(crate) fn classify_grpc_metadata( + headers: &HeaderMap, + success_codes: GrpcCodeBitmask, +) -> ParsedGrpcStatus { + macro_rules! or_else { + ($expr:expr, $other:ident) => { + if let Some(value) = $expr { + value + } else { + return ParsedGrpcStatus::$other; + } + }; + } + + let status = or_else!(headers.get("grpc-status"), GrpcStatusHeaderMissing); + let status = or_else!(status.to_str().ok(), HeaderNotString); + let status = or_else!(status.parse::().ok(), HeaderNotInt); + + if GrpcCodeBitmask::try_from_u32(status as _) + .filter(|code| success_codes.contains(*code)) + .is_some() + { + ParsedGrpcStatus::Success + } else { + ParsedGrpcStatus::NonSuccess(NonZeroI32::new(status).unwrap()) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum ParsedGrpcStatus { + Success, + NonSuccess(NonZeroI32), + GrpcStatusHeaderMissing, + // these two are treated as `Success` but kept separate for clarity + HeaderNotString, + HeaderNotInt, +} + +#[cfg(test)] +mod tests { + use super::*; + + macro_rules! classify_grpc_metadata_test { + ( + name: $name:ident, + status: $status:expr, + success_flags: $success_flags:expr, + expected: $expected:expr, + ) => { + #[test] + fn $name() { + let mut headers = HeaderMap::new(); + headers.insert("grpc-status", $status.parse().unwrap()); + let status = classify_grpc_metadata(&headers, $success_flags); + assert_eq!(status, $expected); + } + }; + } + + classify_grpc_metadata_test! { + name: basic_ok, + status: "0", + success_flags: GrpcCodeBitmask::OK, + expected: ParsedGrpcStatus::Success, + } + + classify_grpc_metadata_test! 
{ + name: basic_error, + status: "1", + success_flags: GrpcCodeBitmask::OK, + expected: ParsedGrpcStatus::NonSuccess(NonZeroI32::new(1).unwrap()), + } + + classify_grpc_metadata_test! { + name: two_success_codes_first_matches, + status: "0", + success_flags: GrpcCodeBitmask::OK | GrpcCodeBitmask::INVALID_ARGUMENT, + expected: ParsedGrpcStatus::Success, + } + + classify_grpc_metadata_test! { + name: two_success_codes_second_matches, + status: "3", + success_flags: GrpcCodeBitmask::OK | GrpcCodeBitmask::INVALID_ARGUMENT, + expected: ParsedGrpcStatus::Success, + } + + classify_grpc_metadata_test! { + name: two_success_codes_none_matches, + status: "16", + success_flags: GrpcCodeBitmask::OK | GrpcCodeBitmask::INVALID_ARGUMENT, + expected: ParsedGrpcStatus::NonSuccess(NonZeroI32::new(16).unwrap()), + } +} diff --git a/.cargo-vendor/tower-http/src/classify/map_failure_class.rs b/.cargo-vendor/tower-http/src/classify/map_failure_class.rs new file mode 100644 index 0000000000..680593b56e --- /dev/null +++ b/.cargo-vendor/tower-http/src/classify/map_failure_class.rs @@ -0,0 +1,80 @@ +use super::{ClassifiedResponse, ClassifyEos, ClassifyResponse}; +use http::{HeaderMap, Response}; +use std::fmt; + +/// Response classifier that transforms the failure class of some other +/// classifier. +/// +/// Created with [`ClassifyResponse::map_failure_class`] or +/// [`ClassifyEos::map_failure_class`]. 
+#[derive(Clone, Copy)] +pub struct MapFailureClass { + inner: C, + f: F, +} + +impl MapFailureClass { + pub(super) fn new(classify: C, f: F) -> Self { + Self { inner: classify, f } + } +} + +impl fmt::Debug for MapFailureClass +where + C: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapFailureClass") + .field("inner", &self.inner) + .field("f", &format_args!("{}", std::any::type_name::())) + .finish() + } +} + +impl ClassifyResponse for MapFailureClass +where + C: ClassifyResponse, + F: FnOnce(C::FailureClass) -> NewClass, +{ + type FailureClass = NewClass; + type ClassifyEos = MapFailureClass; + + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse { + match self.inner.classify_response(res) { + ClassifiedResponse::Ready(result) => ClassifiedResponse::Ready(result.map_err(self.f)), + ClassifiedResponse::RequiresEos(classify_eos) => { + let mapped_classify_eos = MapFailureClass::new(classify_eos, self.f); + ClassifiedResponse::RequiresEos(mapped_classify_eos) + } + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: std::fmt::Display + 'static, + { + (self.f)(self.inner.classify_error(error)) + } +} + +impl ClassifyEos for MapFailureClass +where + C: ClassifyEos, + F: FnOnce(C::FailureClass) -> NewClass, +{ + type FailureClass = NewClass; + + fn classify_eos(self, trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass> { + self.inner.classify_eos(trailers).map_err(self.f) + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: std::fmt::Display + 'static, + { + (self.f)(self.inner.classify_error(error)) + } +} diff --git a/.cargo-vendor/tower-http/src/classify/mod.rs b/.cargo-vendor/tower-http/src/classify/mod.rs new file mode 100644 index 0000000000..6ea3255975 --- /dev/null +++ b/.cargo-vendor/tower-http/src/classify/mod.rs @@ -0,0 +1,432 @@ +//! Tools for classifying responses as either success or failure. 
+ +use http::{HeaderMap, Request, Response, StatusCode}; +use std::{convert::Infallible, fmt, marker::PhantomData}; + +pub(crate) mod grpc_errors_as_failures; +mod map_failure_class; +mod status_in_range_is_error; + +pub use self::{ + grpc_errors_as_failures::{ + GrpcCode, GrpcEosErrorsAsFailures, GrpcErrorsAsFailures, GrpcFailureClass, + }, + map_failure_class::MapFailureClass, + status_in_range_is_error::{StatusInRangeAsFailures, StatusInRangeFailureClass}, +}; + +/// Trait for producing response classifiers from a request. +/// +/// This is useful when a classifier depends on data from the request. For example, this could +/// include the URI or HTTP method. +/// +/// This trait is generic over the [`Error` type] of the `Service`s used with the classifier. +/// This is necessary for [`ClassifyResponse::classify_error`]. +/// +/// [`Error` type]: https://docs.rs/tower/latest/tower/trait.Service.html#associatedtype.Error +pub trait MakeClassifier { + /// The response classifier produced. + type Classifier: ClassifyResponse< + FailureClass = Self::FailureClass, + ClassifyEos = Self::ClassifyEos, + >; + + /// The type of failure classifications. + /// + /// This might include additional information about the error, such as + /// whether it was a client or server error, or whether or not it should + /// be considered retryable. + type FailureClass; + + /// The type used to classify the response end of stream (EOS). + type ClassifyEos: ClassifyEos; + + /// Returns a response classifier for this request + fn make_classifier(&self, req: &Request) -> Self::Classifier; +} + +/// A [`MakeClassifier`] that produces new classifiers by cloning an inner classifier. +/// +/// When a type implementing [`ClassifyResponse`] doesn't depend on information +/// from the request, [`SharedClassifier`] can be used to turn an instance of that type +/// into a [`MakeClassifier`]. 
+/// +/// # Example +/// +/// ``` +/// use std::fmt; +/// use tower_http::classify::{ +/// ClassifyResponse, ClassifiedResponse, NeverClassifyEos, +/// SharedClassifier, MakeClassifier, +/// }; +/// use http::Response; +/// +/// // A response classifier that only considers errors to be failures. +/// #[derive(Clone, Copy)] +/// struct MyClassifier; +/// +/// impl ClassifyResponse for MyClassifier { +/// type FailureClass = String; +/// type ClassifyEos = NeverClassifyEos; +/// +/// fn classify_response( +/// self, +/// _res: &Response, +/// ) -> ClassifiedResponse { +/// ClassifiedResponse::Ready(Ok(())) +/// } +/// +/// fn classify_error(self, error: &E) -> Self::FailureClass +/// where +/// E: fmt::Display + 'static, +/// { +/// error.to_string() +/// } +/// } +/// +/// // Some function that requires a `MakeClassifier` +/// fn use_make_classifier(make: M) { +/// // ... +/// } +/// +/// // `MyClassifier` doesn't implement `MakeClassifier` but since it doesn't +/// // care about the incoming request we can make `MyClassifier`s by cloning. +/// // That is what `SharedClassifier` does. +/// let make_classifier = SharedClassifier::new(MyClassifier); +/// +/// // We now have a `MakeClassifier`! +/// use_make_classifier(make_classifier); +/// ``` +#[derive(Debug, Clone)] +pub struct SharedClassifier { + classifier: C, +} + +impl SharedClassifier { + /// Create a new `SharedClassifier` from the given classifier. + pub fn new(classifier: C) -> Self + where + C: ClassifyResponse + Clone, + { + Self { classifier } + } +} + +impl MakeClassifier for SharedClassifier +where + C: ClassifyResponse + Clone, +{ + type FailureClass = C::FailureClass; + type ClassifyEos = C::ClassifyEos; + type Classifier = C; + + fn make_classifier(&self, _req: &Request) -> Self::Classifier { + self.classifier.clone() + } +} + +/// Trait for classifying responses as either success or failure. 
Designed to support both unary +/// requests (single request for a single response) as well as streaming responses. +/// +/// Response classifiers are used in cases where middleware needs to determine +/// whether a response completed successfully or failed. For example, they may +/// be used by logging or metrics middleware to record failures differently +/// from successes. +/// +/// Furthermore, when a response fails, a response classifier may provide +/// additional information about the failure. This can, for example, be used to +/// build [retry policies] by indicating whether or not a particular failure is +/// retryable. +/// +/// [retry policies]: https://docs.rs/tower/latest/tower/retry/trait.Policy.html +pub trait ClassifyResponse { + /// The type returned when a response is classified as a failure. + /// + /// Depending on the classifier, this may simply indicate that the + /// request failed, or it may contain additional information about + /// the failure, such as whether or not it is retryable. + type FailureClass; + + /// The type used to classify the response end of stream (EOS). + type ClassifyEos: ClassifyEos; + + /// Attempt to classify the beginning of a response. + /// + /// In some cases, the response can be classified immediately, without + /// waiting for a body to complete. This may include: + /// + /// - When the response has an error status code. + /// - When a successful response does not have a streaming body. + /// - When the classifier does not care about streaming bodies. + /// + /// When the response can be classified immediately, `classify_response` + /// returns a [`ClassifiedResponse::Ready`] which indicates whether the + /// response succeeded or failed. + /// + /// In other cases, however, the classifier may need to wait until the + /// response body stream completes before it can classify the response. + /// For example, gRPC indicates RPC failures using the `grpc-status` + /// trailer. 
In this case, `classify_response` returns a + /// [`ClassifiedResponse::RequiresEos`] containing a type which will + /// be used to classify the response when the body stream ends. + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse; + + /// Classify an error. + /// + /// Errors are always errors (doh) but sometimes it might be useful to have multiple classes of + /// errors. A retry policy might allow retrying some errors and not others. + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static; + + /// Transform the failure classification using a function. + /// + /// # Example + /// + /// ``` + /// use tower_http::classify::{ + /// ServerErrorsAsFailures, ServerErrorsFailureClass, + /// ClassifyResponse, ClassifiedResponse + /// }; + /// use http::{Response, StatusCode}; + /// use http_body_util::Empty; + /// use bytes::Bytes; + /// + /// fn transform_failure_class(class: ServerErrorsFailureClass) -> NewFailureClass { + /// match class { + /// // Convert status codes into u16 + /// ServerErrorsFailureClass::StatusCode(status) => { + /// NewFailureClass::Status(status.as_u16()) + /// } + /// // Don't change errors. 
+ /// ServerErrorsFailureClass::Error(error) => { + /// NewFailureClass::Error(error) + /// } + /// } + /// } + /// + /// enum NewFailureClass { + /// Status(u16), + /// Error(String), + /// } + /// + /// // Create a classifier who's failure class will be transformed by `transform_failure_class` + /// let classifier = ServerErrorsAsFailures::new().map_failure_class(transform_failure_class); + /// + /// let response = Response::builder() + /// .status(StatusCode::INTERNAL_SERVER_ERROR) + /// .body(Empty::::new()) + /// .unwrap(); + /// + /// let classification = classifier.classify_response(&response); + /// + /// assert!(matches!( + /// classification, + /// ClassifiedResponse::Ready(Err(NewFailureClass::Status(500))) + /// )); + /// ``` + fn map_failure_class(self, f: F) -> MapFailureClass + where + Self: Sized, + F: FnOnce(Self::FailureClass) -> NewClass, + { + MapFailureClass::new(self, f) + } +} + +/// Trait for classifying end of streams (EOS) as either success or failure. +pub trait ClassifyEos { + /// The type of failure classifications. + type FailureClass; + + /// Perform the classification from response trailers. + fn classify_eos(self, trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass>; + + /// Classify an error. + /// + /// Errors are always errors (doh) but sometimes it might be useful to have multiple classes of + /// errors. A retry policy might allow retrying some errors and not others. + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static; + + /// Transform the failure classification using a function. + /// + /// See [`ClassifyResponse::map_failure_class`] for more details. + fn map_failure_class(self, f: F) -> MapFailureClass + where + Self: Sized, + F: FnOnce(Self::FailureClass) -> NewClass, + { + MapFailureClass::new(self, f) + } +} + +/// Result of doing a classification. +#[derive(Debug)] +pub enum ClassifiedResponse { + /// The response was able to be classified immediately. 
+ Ready(Result<(), FailureClass>), + /// We have to wait until the end of a streaming response to classify it. + RequiresEos(ClassifyEos), +} + +/// A [`ClassifyEos`] type that can be used in [`ClassifyResponse`] implementations that never have +/// to classify streaming responses. +/// +/// `NeverClassifyEos` exists only as type. `NeverClassifyEos` values cannot be constructed. +pub struct NeverClassifyEos { + _output_ty: PhantomData T>, + _never: Infallible, +} + +impl ClassifyEos for NeverClassifyEos { + type FailureClass = T; + + fn classify_eos(self, _trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass> { + // `NeverClassifyEos` contains an `Infallible` so it can never be constructed + unreachable!() + } + + fn classify_error(self, _error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + // `NeverClassifyEos` contains an `Infallible` so it can never be constructed + unreachable!() + } +} + +impl fmt::Debug for NeverClassifyEos { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NeverClassifyEos").finish() + } +} + +/// The default classifier used for normal HTTP responses. +/// +/// Responses with a `5xx` status code are considered failures, all others are considered +/// successes. +#[derive(Clone, Debug, Default)] +pub struct ServerErrorsAsFailures { + _priv: (), +} + +impl ServerErrorsAsFailures { + /// Create a new [`ServerErrorsAsFailures`]. + pub fn new() -> Self { + Self::default() + } + + /// Returns a [`MakeClassifier`] that produces `ServerErrorsAsFailures`. + /// + /// This is a convenience function that simply calls `SharedClassifier::new`. 
+ pub fn make_classifier() -> SharedClassifier { + SharedClassifier::new(Self::new()) + } +} + +impl ClassifyResponse for ServerErrorsAsFailures { + type FailureClass = ServerErrorsFailureClass; + type ClassifyEos = NeverClassifyEos; + + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse { + if res.status().is_server_error() { + ClassifiedResponse::Ready(Err(ServerErrorsFailureClass::StatusCode(res.status()))) + } else { + ClassifiedResponse::Ready(Ok(())) + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + ServerErrorsFailureClass::Error(error.to_string()) + } +} + +/// The failure class for [`ServerErrorsAsFailures`]. +#[derive(Debug)] +pub enum ServerErrorsFailureClass { + /// A response was classified as a failure with the corresponding status. + StatusCode(StatusCode), + /// A response was classified as an error with the corresponding error description. + Error(String), +} + +impl fmt::Display for ServerErrorsFailureClass { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::StatusCode(code) => write!(f, "Status code: {}", code), + Self::Error(error) => write!(f, "Error: {}", error), + } + } +} + +// Just verify that we can actually use this response classifier to determine retries as well +#[cfg(test)] +mod usable_for_retries { + #[allow(unused_imports)] + use super::*; + use http::{Request, Response}; + use tower::retry::Policy; + + trait IsRetryable { + fn is_retryable(&self) -> bool; + } + + #[derive(Clone)] + struct RetryBasedOnClassification { + classifier: C, + // ... 
+ } + + impl Policy, Response, E> for RetryBasedOnClassification + where + C: ClassifyResponse + Clone, + E: fmt::Display + 'static, + C::FailureClass: IsRetryable, + ResB: http_body::Body, + Request: Clone, + E: std::error::Error + 'static, + { + type Future = std::future::Ready>; + + fn retry( + &self, + _req: &Request, + res: Result<&Response, &E>, + ) -> Option { + match res { + Ok(res) => { + if let ClassifiedResponse::Ready(class) = + self.classifier.clone().classify_response(res) + { + if class.err()?.is_retryable() { + return Some(std::future::ready(self.clone())); + } + } + + None + } + Err(err) => self + .classifier + .clone() + .classify_error(err) + .is_retryable() + .then(|| std::future::ready(self.clone())), + } + } + + fn clone_request(&self, req: &Request) -> Option> { + Some(req.clone()) + } + } +} diff --git a/.cargo-vendor/tower-http/src/classify/status_in_range_is_error.rs b/.cargo-vendor/tower-http/src/classify/status_in_range_is_error.rs new file mode 100644 index 0000000000..934d08c542 --- /dev/null +++ b/.cargo-vendor/tower-http/src/classify/status_in_range_is_error.rs @@ -0,0 +1,160 @@ +use super::{ClassifiedResponse, ClassifyResponse, NeverClassifyEos, SharedClassifier}; +use http::StatusCode; +use std::{fmt, ops::RangeInclusive}; + +/// Response classifier that considers responses with a status code within some range to be +/// failures. +/// +/// # Example +/// +/// A client with tracing where server errors _and_ client errors are considered failures. 
+/// +/// ```no_run +/// use tower_http::{trace::TraceLayer, classify::StatusInRangeAsFailures}; +/// use tower::{ServiceBuilder, Service, ServiceExt}; +/// use http::{Request, Method}; +/// use http_body_util::Full; +/// use bytes::Bytes; +/// use hyper_util::{rt::TokioExecutor, client::legacy::Client}; +/// +/// # async fn foo() -> Result<(), tower::BoxError> { +/// let classifier = StatusInRangeAsFailures::new(400..=599); +/// +/// let client = Client::builder(TokioExecutor::new()).build_http(); +/// let mut client = ServiceBuilder::new() +/// .layer(TraceLayer::new(classifier.into_make_classifier())) +/// .service(client); +/// +/// let request = Request::builder() +/// .method(Method::GET) +/// .uri("https://example.com") +/// .body(Full::::default()) +/// .unwrap(); +/// +/// let response = client.ready().await?.call(request).await?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug, Clone)] +pub struct StatusInRangeAsFailures { + range: RangeInclusive, +} + +impl StatusInRangeAsFailures { + /// Creates a new `StatusInRangeAsFailures`. + /// + /// # Panics + /// + /// Panics if the start or end of `range` aren't valid status codes as determined by + /// [`StatusCode::from_u16`]. + /// + /// [`StatusCode::from_u16`]: https://docs.rs/http/latest/http/status/struct.StatusCode.html#method.from_u16 + pub fn new(range: RangeInclusive) -> Self { + assert!( + StatusCode::from_u16(*range.start()).is_ok(), + "range start isn't a valid status code" + ); + assert!( + StatusCode::from_u16(*range.end()).is_ok(), + "range end isn't a valid status code" + ); + + Self { range } + } + + /// Creates a new `StatusInRangeAsFailures` that classifies client and server responses as + /// failures. + /// + /// This is a convenience for `StatusInRangeAsFailures::new(400..=599)`. + pub fn new_for_client_and_server_errors() -> Self { + Self::new(400..=599) + } + + /// Convert this `StatusInRangeAsFailures` into a [`MakeClassifier`]. 
+ /// + /// [`MakeClassifier`]: super::MakeClassifier + pub fn into_make_classifier(self) -> SharedClassifier { + SharedClassifier::new(self) + } +} + +impl ClassifyResponse for StatusInRangeAsFailures { + type FailureClass = StatusInRangeFailureClass; + type ClassifyEos = NeverClassifyEos; + + fn classify_response( + self, + res: &http::Response, + ) -> ClassifiedResponse { + if self.range.contains(&res.status().as_u16()) { + let class = StatusInRangeFailureClass::StatusCode(res.status()); + ClassifiedResponse::Ready(Err(class)) + } else { + ClassifiedResponse::Ready(Ok(())) + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: std::fmt::Display + 'static, + { + StatusInRangeFailureClass::Error(error.to_string()) + } +} + +/// The failure class for [`StatusInRangeAsFailures`]. +#[derive(Debug)] +pub enum StatusInRangeFailureClass { + /// A response was classified as a failure with the corresponding status. + StatusCode(StatusCode), + /// A response was classified as an error with the corresponding error description. 
+ Error(String), +} + +impl fmt::Display for StatusInRangeFailureClass { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::StatusCode(code) => write!(f, "Status code: {}", code), + Self::Error(error) => write!(f, "Error: {}", error), + } + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use http::Response; + + #[test] + fn basic() { + let classifier = StatusInRangeAsFailures::new(400..=599); + + assert!(matches!( + dbg!(classifier + .clone() + .classify_response(&response_with_status(200))), + ClassifiedResponse::Ready(Ok(())), + )); + + assert!(matches!( + dbg!(classifier + .clone() + .classify_response(&response_with_status(400))), + ClassifiedResponse::Ready(Err(StatusInRangeFailureClass::StatusCode( + StatusCode::BAD_REQUEST + ))), + )); + + assert!(matches!( + dbg!(classifier.classify_response(&response_with_status(500))), + ClassifiedResponse::Ready(Err(StatusInRangeFailureClass::StatusCode( + StatusCode::INTERNAL_SERVER_ERROR + ))), + )); + } + + fn response_with_status(status: u16) -> Response<()> { + Response::builder().status(status).body(()).unwrap() + } +} diff --git a/.cargo-vendor/tower-http/src/compression/body.rs b/.cargo-vendor/tower-http/src/compression/body.rs new file mode 100644 index 0000000000..013d605bc1 --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression/body.rs @@ -0,0 +1,349 @@ +#![allow(unused_imports)] + +use crate::compression::CompressionLevel; +use crate::{ + compression_utils::{AsyncReadBody, BodyIntoStream, DecorateAsyncRead, WrapBody}, + BoxError, +}; +#[cfg(feature = "compression-br")] +use async_compression::tokio::bufread::BrotliEncoder; +#[cfg(feature = "compression-gzip")] +use async_compression::tokio::bufread::GzipEncoder; +#[cfg(feature = "compression-deflate")] +use async_compression::tokio::bufread::ZlibEncoder; +#[cfg(feature = "compression-zstd")] +use async_compression::tokio::bufread::ZstdEncoder; + +use bytes::{Buf, Bytes}; +use http::HeaderMap; 
+use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + io, + marker::PhantomData, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tokio_util::io::StreamReader; + +use super::pin_project_cfg::pin_project_cfg; + +pin_project! { + /// Response body of [`Compression`]. + /// + /// [`Compression`]: super::Compression + pub struct CompressionBody + where + B: Body, + { + #[pin] + pub(crate) inner: BodyInner, + } +} + +impl Default for CompressionBody +where + B: Body + Default, +{ + fn default() -> Self { + Self { + inner: BodyInner::Identity { + inner: B::default(), + }, + } + } +} + +impl CompressionBody +where + B: Body, +{ + pub(crate) fn new(inner: BodyInner) -> Self { + Self { inner } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + match &self.inner { + #[cfg(feature = "compression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "compression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "compression-br")] + BodyInner::Brotli { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "compression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + BodyInner::Identity { inner } => inner, + } + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + match &mut self.inner { + #[cfg(feature = "compression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "compression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "compression-br")] + BodyInner::Brotli { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "compression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + BodyInner::Identity { inner 
} => inner, + } + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + match self.project().inner.project() { + #[cfg(feature = "compression-gzip")] + BodyInnerProj::Gzip { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "compression-deflate")] + BodyInnerProj::Deflate { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "compression-br")] + BodyInnerProj::Brotli { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "compression-zstd")] + BodyInnerProj::Zstd { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + BodyInnerProj::Identity { inner } => inner, + } + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + match self.inner { + #[cfg(feature = "compression-gzip")] + BodyInner::Gzip { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "compression-deflate")] + BodyInner::Deflate { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "compression-br")] + BodyInner::Brotli { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "compression-zstd")] + BodyInner::Zstd { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + BodyInner::Identity { inner } => inner, + } + } +} + +#[cfg(feature = "compression-gzip")] +type GzipBody = WrapBody>; + +#[cfg(feature = "compression-deflate")] +type DeflateBody = WrapBody>; + +#[cfg(feature = "compression-br")] +type BrotliBody = WrapBody>; + +#[cfg(feature = "compression-zstd")] +type ZstdBody = WrapBody>; + +pin_project_cfg! 
{ + #[project = BodyInnerProj] + pub(crate) enum BodyInner + where + B: Body, + { + #[cfg(feature = "compression-gzip")] + Gzip { + #[pin] + inner: GzipBody, + }, + #[cfg(feature = "compression-deflate")] + Deflate { + #[pin] + inner: DeflateBody, + }, + #[cfg(feature = "compression-br")] + Brotli { + #[pin] + inner: BrotliBody, + }, + #[cfg(feature = "compression-zstd")] + Zstd { + #[pin] + inner: ZstdBody, + }, + Identity { + #[pin] + inner: B, + }, + } +} + +impl BodyInner { + #[cfg(feature = "compression-gzip")] + pub(crate) fn gzip(inner: WrapBody>) -> Self { + Self::Gzip { inner } + } + + #[cfg(feature = "compression-deflate")] + pub(crate) fn deflate(inner: WrapBody>) -> Self { + Self::Deflate { inner } + } + + #[cfg(feature = "compression-br")] + pub(crate) fn brotli(inner: WrapBody>) -> Self { + Self::Brotli { inner } + } + + #[cfg(feature = "compression-zstd")] + pub(crate) fn zstd(inner: WrapBody>) -> Self { + Self::Zstd { inner } + } + + pub(crate) fn identity(inner: B) -> Self { + Self::Identity { inner } + } +} + +impl Body for CompressionBody +where + B: Body, + B::Error: Into, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project().inner.project() { + #[cfg(feature = "compression-gzip")] + BodyInnerProj::Gzip { inner } => inner.poll_frame(cx), + #[cfg(feature = "compression-deflate")] + BodyInnerProj::Deflate { inner } => inner.poll_frame(cx), + #[cfg(feature = "compression-br")] + BodyInnerProj::Brotli { inner } => inner.poll_frame(cx), + #[cfg(feature = "compression-zstd")] + BodyInnerProj::Zstd { inner } => inner.poll_frame(cx), + BodyInnerProj::Identity { inner } => match ready!(inner.poll_frame(cx)) { + Some(Ok(frame)) => { + let frame = frame.map_data(|mut buf| buf.copy_to_bytes(buf.remaining())); + Poll::Ready(Some(Ok(frame))) + } + Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), + None => Poll::Ready(None), + }, + } + } +} + 
+#[cfg(feature = "compression-gzip")] +impl DecorateAsyncRead for GzipEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = GzipEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + GzipEncoder::with_quality(input, quality.into_async_compression()) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "compression-deflate")] +impl DecorateAsyncRead for ZlibEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = ZlibEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + ZlibEncoder::with_quality(input, quality.into_async_compression()) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "compression-br")] +impl DecorateAsyncRead for BrotliEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = BrotliEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + // The brotli crate used under the hood here has a default compression level of 11, + // which is the max for brotli. This causes extremely slow compression times, so we + // manually set a default of 4 here. + // + // This is the same default used by NGINX for on-the-fly brotli compression. 
+ let level = match quality { + CompressionLevel::Default => async_compression::Level::Precise(4), + other => other.into_async_compression(), + }; + BrotliEncoder::with_quality(input, level) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "compression-zstd")] +impl DecorateAsyncRead for ZstdEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = ZstdEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + ZstdEncoder::with_quality(input, quality.into_async_compression()) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} diff --git a/.cargo-vendor/tower-http/src/compression/future.rs b/.cargo-vendor/tower-http/src/compression/future.rs new file mode 100644 index 0000000000..c38ecb035d --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression/future.rs @@ -0,0 +1,115 @@ +#![allow(unused_imports)] + +use super::{body::BodyInner, CompressionBody}; +use crate::compression::predicate::Predicate; +use crate::compression::CompressionLevel; +use crate::compression_utils::WrapBody; +use crate::content_encoding::Encoding; +use http::{header, HeaderMap, HeaderValue, Response}; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + +pin_project! { + /// Response future of [`Compression`]. 
+ /// + /// [`Compression`]: super::Compression + #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + pub(crate) inner: F, + pub(crate) encoding: Encoding, + pub(crate) predicate: P, + pub(crate) quality: CompressionLevel, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Body, + P: Predicate, +{ + type Output = Result>, E>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let res = ready!(self.as_mut().project().inner.poll(cx)?); + + // never recompress responses that are already compressed + let should_compress = !res.headers().contains_key(header::CONTENT_ENCODING) + // never compress responses that are ranges + && !res.headers().contains_key(header::CONTENT_RANGE) + && self.predicate.should_compress(&res); + + let (mut parts, body) = res.into_parts(); + + if should_compress { + parts + .headers + .append(header::VARY, header::ACCEPT_ENCODING.into()); + } + + let body = match (should_compress, self.encoding) { + // if compression is _not_ supported or the client doesn't accept it + (false, _) | (_, Encoding::Identity) => { + return Poll::Ready(Ok(Response::from_parts( + parts, + CompressionBody::new(BodyInner::identity(body)), + ))) + } + + #[cfg(feature = "compression-gzip")] + (_, Encoding::Gzip) => { + CompressionBody::new(BodyInner::gzip(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "compression-deflate")] + (_, Encoding::Deflate) => { + CompressionBody::new(BodyInner::deflate(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "compression-br")] + (_, Encoding::Brotli) => { + CompressionBody::new(BodyInner::brotli(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "compression-zstd")] + (_, Encoding::Zstd) => { + CompressionBody::new(BodyInner::zstd(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "fs")] + #[allow(unreachable_patterns)] + (true, _) => { + // This should never happen because the `AcceptEncoding` struct which is used to determine + // `self.encoding` will only 
enable the different compression algorithms if the + // corresponding crate feature has been enabled. This means + // Encoding::[Gzip|Brotli|Deflate] should be impossible at this point without the + // features enabled. + // + // The match arm is still required though because the `fs` feature uses the + // Encoding struct independently and requires no compression logic to be enabled. + // This means a combination of an individual compression feature and `fs` will fail + // to compile without this branch even though it will never be reached. + // + // To safeguard against refactors that changes this relationship or other bugs the + // server will return an uncompressed response instead of panicking since that could + // become a ddos attack vector. + return Poll::Ready(Ok(Response::from_parts( + parts, + CompressionBody::new(BodyInner::identity(body)), + ))); + } + }; + + parts.headers.remove(header::ACCEPT_RANGES); + parts.headers.remove(header::CONTENT_LENGTH); + + parts + .headers + .insert(header::CONTENT_ENCODING, self.encoding.into_header_value()); + + let res = Response::from_parts(parts, body); + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo-vendor/tower-http/src/compression/layer.rs b/.cargo-vendor/tower-http/src/compression/layer.rs new file mode 100644 index 0000000000..9d7fa801b9 --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression/layer.rs @@ -0,0 +1,203 @@ +use super::{Compression, Predicate}; +use crate::compression::predicate::DefaultPredicate; +use crate::compression::CompressionLevel; +use crate::compression_utils::AcceptEncoding; +use tower_layer::Layer; + +/// Compress response bodies of the underlying service. +/// +/// This uses the `Accept-Encoding` header to pick an appropriate encoding and adds the +/// `Content-Encoding` header to responses. +/// +/// See the [module docs](crate::compression) for more details. +#[derive(Clone, Debug, Default)] +pub struct CompressionLayer

{ + accept: AcceptEncoding, + predicate: P, + quality: CompressionLevel, +} + +impl Layer for CompressionLayer

+where + P: Predicate, +{ + type Service = Compression; + + fn layer(&self, inner: S) -> Self::Service { + Compression { + inner, + accept: self.accept, + predicate: self.predicate.clone(), + quality: self.quality, + } + } +} + +impl CompressionLayer { + /// Creates a new [`CompressionLayer`]. + pub fn new() -> Self { + Self::default() + } + + /// Sets whether to enable the gzip encoding. + #[cfg(feature = "compression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to enable the Deflate encoding. + #[cfg(feature = "compression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to enable the Brotli encoding. + #[cfg(feature = "compression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to enable the Zstd encoding. + #[cfg(feature = "compression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Sets the compression quality. + pub fn quality(mut self, quality: CompressionLevel) -> Self { + self.quality = quality; + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. 
+ pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } + + /// Replace the current compression predicate. + /// + /// See [`Compression::compress_when`] for more details. + pub fn compress_when(self, predicate: C) -> CompressionLayer + where + C: Predicate, + { + CompressionLayer { + accept: self.accept, + predicate, + quality: self.quality, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::Body; + use http::{header::ACCEPT_ENCODING, Request, Response}; + use http_body_util::BodyExt; + use std::convert::Infallible; + use tokio::fs::File; + use tokio_util::io::ReaderStream; + use tower::{Service, ServiceBuilder, ServiceExt}; + + async fn handle(_req: Request) -> Result, Infallible> { + // Open the file. + let file = File::open("Cargo.toml").await.expect("file missing"); + // Convert the file into a `Stream`. + let stream = ReaderStream::new(file); + // Convert the `Stream` into a `Body`. + let body = Body::from_stream(stream); + // Create response. + Ok(Response::new(body)) + } + + #[tokio::test] + async fn accept_encoding_configuration_works() -> Result<(), crate::BoxError> { + let deflate_only_layer = CompressionLayer::new() + .quality(CompressionLevel::Best) + .no_br() + .no_gzip(); + + let mut service = ServiceBuilder::new() + // Compress responses based on the `Accept-Encoding` header. 
+ .layer(deflate_only_layer) + .service_fn(handle); + + // Call the service with the deflate only layer + let request = Request::builder() + .header(ACCEPT_ENCODING, "gzip, deflate, br") + .body(Body::empty())?; + + let response = service.ready().await?.call(request).await?; + + assert_eq!(response.headers()["content-encoding"], "deflate"); + + // Read the body + let body = response.into_body(); + let bytes = body.collect().await.unwrap().to_bytes(); + + let deflate_bytes_len = bytes.len(); + + let br_only_layer = CompressionLayer::new() + .quality(CompressionLevel::Best) + .no_gzip() + .no_deflate(); + + let mut service = ServiceBuilder::new() + // Compress responses based on the `Accept-Encoding` header. + .layer(br_only_layer) + .service_fn(handle); + + // Call the service with the br only layer + let request = Request::builder() + .header(ACCEPT_ENCODING, "gzip, deflate, br") + .body(Body::empty())?; + + let response = service.ready().await?.call(request).await?; + + assert_eq!(response.headers()["content-encoding"], "br"); + + // Read the body + let body = response.into_body(); + let bytes = body.collect().await.unwrap().to_bytes(); + + let br_byte_length = bytes.len(); + + // check the corresponding algorithms are actually used + // br should compresses better than deflate + assert!(br_byte_length < deflate_bytes_len * 9 / 10); + + Ok(()) + } +} diff --git a/.cargo-vendor/tower-http/src/compression/mod.rs b/.cargo-vendor/tower-http/src/compression/mod.rs new file mode 100644 index 0000000000..5107fac3f7 --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression/mod.rs @@ -0,0 +1,498 @@ +//! Middleware that compresses response bodies. +//! +//! # Example +//! +//! Example showing how to respond with the compressed contents of a file. +//! +//! ```rust +//! use bytes::{Bytes, BytesMut}; +//! use http::{Request, Response, header::ACCEPT_ENCODING}; +//! use http_body_util::{Full, BodyExt, StreamBody, combinators::UnsyncBoxBody}; +//! use http_body::Frame; +//! 
use std::convert::Infallible; +//! use tokio::fs::{self, File}; +//! use tokio_util::io::ReaderStream; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::{compression::CompressionLayer, BoxError}; +//! use futures_util::TryStreamExt; +//! +//! type BoxBody = UnsyncBoxBody; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! async fn handle(req: Request>) -> Result, Infallible> { +//! // Open the file. +//! let file = File::open("Cargo.toml").await.expect("file missing"); +//! // Convert the file into a `Stream` of `Bytes`. +//! let stream = ReaderStream::new(file); +//! // Convert the stream into a stream of data `Frame`s. +//! let stream = stream.map_ok(Frame::data); +//! // Convert the `Stream` into a `Body`. +//! let body = StreamBody::new(stream); +//! // Erase the type because its very hard to name in the function signature. +//! let body = body.boxed_unsync(); +//! // Create response. +//! Ok(Response::new(body)) +//! } +//! +//! let mut service = ServiceBuilder::new() +//! // Compress responses based on the `Accept-Encoding` header. +//! .layer(CompressionLayer::new()) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::builder() +//! .header(ACCEPT_ENCODING, "gzip") +//! .body(Full::::default())?; +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(response.headers()["content-encoding"], "gzip"); +//! +//! // Read the body +//! let bytes = response +//! .into_body() +//! .collect() +//! .await? +//! .to_bytes(); +//! +//! // The compressed body should be smaller 🤞 +//! let uncompressed_len = fs::read_to_string("Cargo.toml").await?.len(); +//! assert!(bytes.len() < uncompressed_len); +//! # +//! # Ok(()) +//! # } +//! ``` +//! 
+ +pub mod predicate; + +mod body; +mod future; +mod layer; +mod pin_project_cfg; +mod service; + +#[doc(inline)] +pub use self::{ + body::CompressionBody, + future::ResponseFuture, + layer::CompressionLayer, + predicate::{DefaultPredicate, Predicate}, + service::Compression, +}; +pub use crate::compression_utils::CompressionLevel; + +#[cfg(test)] +mod tests { + use crate::compression::predicate::SizeAbove; + + use super::*; + use crate::test_helpers::{Body, WithTrailers}; + use async_compression::tokio::write::{BrotliDecoder, BrotliEncoder}; + use flate2::read::GzDecoder; + use http::header::{ + ACCEPT_ENCODING, ACCEPT_RANGES, CONTENT_ENCODING, CONTENT_RANGE, CONTENT_TYPE, RANGE, + }; + use http::{HeaderMap, HeaderName, HeaderValue, Request, Response}; + use http_body_util::BodyExt; + use std::convert::Infallible; + use std::io::Read; + use std::sync::{Arc, RwLock}; + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + use tokio_util::io::StreamReader; + use tower::{service_fn, Service, ServiceExt}; + + // Compression filter allows every other request to be compressed + #[derive(Clone)] + struct Always; + + impl Predicate for Always { + fn should_compress(&self, _: &http::Response) -> bool + where + B: http_body::Body, + { + true + } + } + + #[tokio::test] + async fn gzip_works() { + let svc = service_fn(handle); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let collected = res.into_body().collect().await.unwrap(); + let trailers = collected.trailers().cloned().unwrap(); + let compressed_data = collected.to_bytes(); + + // decompress the body + // doing this with flate2 as that is much easier than async-compression and blocking during + // tests is fine + let mut decoder = GzDecoder::new(&compressed_data[..]); + let mut 
decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + + assert_eq!(decompressed, "Hello, World!"); + + // trailers are maintained + assert_eq!(trailers["foo"], "bar"); + } + + #[tokio::test] + async fn x_gzip_works() { + let svc = service_fn(handle); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header("accept-encoding", "x-gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // we treat x-gzip as equivalent to gzip and don't have to return x-gzip + // taking extra caution by checking all headers with this name + assert_eq!( + res.headers() + .get_all("content-encoding") + .iter() + .collect::>(), + vec!(HeaderValue::from_static("gzip")) + ); + + // read the compressed body + let collected = res.into_body().collect().await.unwrap(); + let trailers = collected.trailers().cloned().unwrap(); + let compressed_data = collected.to_bytes(); + + // decompress the body + // doing this with flate2 as that is much easier than async-compression and blocking during + // tests is fine + let mut decoder = GzDecoder::new(&compressed_data[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + + assert_eq!(decompressed, "Hello, World!"); + + // trailers are maintained + assert_eq!(trailers["foo"], "bar"); + } + + #[tokio::test] + async fn zstd_works() { + let svc = service_fn(handle); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header("accept-encoding", "zstd") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let body = res.into_body(); + let compressed_data = body.collect().await.unwrap().to_bytes(); + + // decompress the body + let decompressed = 
zstd::stream::decode_all(std::io::Cursor::new(compressed_data)).unwrap(); + let decompressed = String::from_utf8(decompressed).unwrap(); + + assert_eq!(decompressed, "Hello, World!"); + } + + #[tokio::test] + async fn no_recompress() { + const DATA: &str = "Hello, World! I'm already compressed with br!"; + + let svc = service_fn(|_| async { + let buf = { + let mut buf = Vec::new(); + + let mut enc = BrotliEncoder::new(&mut buf); + enc.write_all(DATA.as_bytes()).await?; + enc.flush().await?; + buf + }; + + let resp = Response::builder() + .header("content-encoding", "br") + .body(Body::from(buf)) + .unwrap(); + Ok::<_, std::io::Error>(resp) + }); + let mut svc = Compression::new(svc); + + // call the service + // + // note: the accept-encoding doesn't match the content-encoding above, so that + // we're able to see if the compression layer triggered or not + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // check we didn't recompress + assert_eq!( + res.headers() + .get("content-encoding") + .and_then(|h| h.to_str().ok()) + .unwrap_or_default(), + "br", + ); + + // read the compressed body + let body = res.into_body(); + let data = body.collect().await.unwrap().to_bytes(); + + // decompress the body + let data = { + let mut output_buf = Vec::new(); + let mut decoder = BrotliDecoder::new(&mut output_buf); + decoder + .write_all(&data) + .await + .expect("couldn't brotli-decode"); + decoder.flush().await.expect("couldn't flush"); + output_buf + }; + + assert_eq!(data, DATA.as_bytes()); + } + + async fn handle(_req: Request) -> Result>, Infallible> { + let mut trailers = HeaderMap::new(); + trailers.insert(HeaderName::from_static("foo"), "bar".parse().unwrap()); + let body = Body::from("Hello, World!").with_trailers(trailers); + Ok(Response::builder().body(body).unwrap()) + } + + #[tokio::test] + async fn will_not_compress_if_filtered_out() { + 
use predicate::Predicate; + + const DATA: &str = "Hello world uncompressed"; + + let svc_fn = service_fn(|_| async { + let resp = Response::builder() + // .header("content-encoding", "br") + .body(Body::from(DATA.as_bytes())) + .unwrap(); + Ok::<_, std::io::Error>(resp) + }); + + // Compression filter allows every other request to be compressed + #[derive(Default, Clone)] + struct EveryOtherResponse(Arc>); + + #[allow(clippy::dbg_macro)] + impl Predicate for EveryOtherResponse { + fn should_compress(&self, _: &http::Response) -> bool + where + B: http_body::Body, + { + let mut guard = self.0.write().unwrap(); + let should_compress = *guard % 2 != 0; + *guard += 1; + dbg!(should_compress) + } + } + + let mut svc = Compression::new(svc_fn).compress_when(EveryOtherResponse::default()); + let req = Request::builder() + .header("accept-encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the uncompressed body + let body = res.into_body(); + let data = body.collect().await.unwrap().to_bytes(); + let still_uncompressed = String::from_utf8(data.to_vec()).unwrap(); + assert_eq!(DATA, &still_uncompressed); + + // Compression filter will compress the next body + let req = Request::builder() + .header("accept-encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let body = res.into_body(); + let data = body.collect().await.unwrap().to_bytes(); + assert!(String::from_utf8(data.to_vec()).is_err()); + } + + #[tokio::test] + async fn doesnt_compress_images() { + async fn handle(_req: Request) -> Result, Infallible> { + let mut res = Response::new(Body::from( + "a".repeat((SizeAbove::DEFAULT_MIN_SIZE * 2) as usize), + )); + res.headers_mut() + .insert(CONTENT_TYPE, "image/png".parse().unwrap()); + Ok(res) + } + + let svc = Compression::new(service_fn(handle)); + + let res = svc + .oneshot( + Request::builder() 
+ .header(ACCEPT_ENCODING, "gzip") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert!(res.headers().get(CONTENT_ENCODING).is_none()); + } + + #[tokio::test] + async fn does_compress_svg() { + async fn handle(_req: Request) -> Result, Infallible> { + let mut res = Response::new(Body::from( + "a".repeat((SizeAbove::DEFAULT_MIN_SIZE * 2) as usize), + )); + res.headers_mut() + .insert(CONTENT_TYPE, "image/svg+xml".parse().unwrap()); + Ok(res) + } + + let svc = Compression::new(service_fn(handle)); + + let res = svc + .oneshot( + Request::builder() + .header(ACCEPT_ENCODING, "gzip") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(res.headers()[CONTENT_ENCODING], "gzip"); + } + + #[tokio::test] + async fn compress_with_quality() { + const DATA: &str = "Check compression quality level! Check compression quality level! Check compression quality level!"; + let level = CompressionLevel::Best; + + let svc = service_fn(|_| async { + let resp = Response::builder() + .body(Body::from(DATA.as_bytes())) + .unwrap(); + Ok::<_, std::io::Error>(resp) + }); + + let mut svc = Compression::new(svc).quality(level); + + // call the service + let req = Request::builder() + .header("accept-encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let body = res.into_body(); + let compressed_data = body.collect().await.unwrap().to_bytes(); + + // build the compressed body with the same quality level + let compressed_with_level = { + use async_compression::tokio::bufread::BrotliEncoder; + + let stream = Box::pin(futures_util::stream::once(async move { + Ok::<_, std::io::Error>(DATA.as_bytes()) + })); + let reader = StreamReader::new(stream); + let mut enc = BrotliEncoder::with_quality(reader, level.into_async_compression()); + + let mut buf = Vec::new(); + enc.read_to_end(&mut buf).await.unwrap(); + buf + }; + + assert_eq!( + compressed_data, + 
compressed_with_level.as_slice(), + "Compression level is not respected" + ); + } + + #[tokio::test] + async fn should_not_compress_ranges() { + let svc = service_fn(|_| async { + let mut res = Response::new(Body::from("Hello")); + let headers = res.headers_mut(); + headers.insert(ACCEPT_RANGES, "bytes".parse().unwrap()); + headers.insert(CONTENT_RANGE, "bytes 0-4/*".parse().unwrap()); + Ok::<_, std::io::Error>(res) + }); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header(ACCEPT_ENCODING, "gzip") + .header(RANGE, "bytes=0-4") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + let headers = res.headers().clone(); + + // read the uncompressed body + let collected = res.into_body().collect().await.unwrap().to_bytes(); + + assert_eq!(headers[ACCEPT_RANGES], "bytes"); + assert!(!headers.contains_key(CONTENT_ENCODING)); + assert_eq!(collected, "Hello"); + } + + #[tokio::test] + async fn should_strip_accept_ranges_header_when_compressing() { + let svc = service_fn(|_| async { + let mut res = Response::new(Body::from("Hello, World!")); + res.headers_mut() + .insert(ACCEPT_RANGES, "bytes".parse().unwrap()); + Ok::<_, std::io::Error>(res) + }); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header(ACCEPT_ENCODING, "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + let headers = res.headers().clone(); + + // read the compressed body + let collected = res.into_body().collect().await.unwrap(); + let compressed_data = collected.to_bytes(); + + // decompress the body + // doing this with flate2 as that is much easier than async-compression and blocking during + // tests is fine + let mut decoder = GzDecoder::new(&compressed_data[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut 
decompressed).unwrap(); + + assert!(!headers.contains_key(ACCEPT_RANGES)); + assert_eq!(headers[CONTENT_ENCODING], "gzip"); + assert_eq!(decompressed, "Hello, World!"); + } +} diff --git a/.cargo-vendor/tower-http/src/compression/pin_project_cfg.rs b/.cargo-vendor/tower-http/src/compression/pin_project_cfg.rs new file mode 100644 index 0000000000..655b8d94e9 --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression/pin_project_cfg.rs @@ -0,0 +1,144 @@ +// Full credit to @tesaguri who posted this gist under CC0 1.0 Universal licence +// https://gist.github.com/tesaguri/2a1c0790a48bbda3dd7f71c26d02a793 + +macro_rules! pin_project_cfg { + ($(#[$($attr:tt)*])* $vis:vis enum $($rest:tt)+) => { + pin_project_cfg! { + @outer [$(#[$($attr)*])* $vis enum] $($rest)+ + } + }; + // Accumulate type parameters and `where` clause. + (@outer [$($accum:tt)*] $tt:tt $($rest:tt)+) => { + pin_project_cfg! { + @outer [$($accum)* $tt] $($rest)+ + } + }; + (@outer [$($accum:tt)*] { $($body:tt)* }) => { + pin_project_cfg! { + @body #[cfg(all())] [$($accum)*] {} $($body)* + } + }; + // Process a variant with `cfg`. + ( + @body + #[cfg(all($($pred_accum:tt)*))] + $outer:tt + { $($accum:tt)* } + + #[cfg($($pred:tt)*)] + $(#[$($attr:tt)*])* $variant:ident { $($body:tt)* }, + $($rest:tt)* + ) => { + // Create two versions of the enum with `cfg($pred)` and `cfg(not($pred))`. + pin_project_cfg! { + @variant_body + { $($body)* } + {} + #[cfg(all($($pred_accum)* $($pred)*,))] + $outer + { $($accum)* $(#[$($attr)*])* $variant } + $($rest)* + } + pin_project_cfg! { + @body + #[cfg(all($($pred_accum)* not($($pred)*),))] + $outer + { $($accum)* } + $($rest)* + } + }; + // Process a variant without `cfg`. + ( + @body + #[cfg(all($($pred_accum:tt)*))] + $outer:tt + { $($accum:tt)* } + + $(#[$($attr:tt)*])* $variant:ident { $($body:tt)* }, + $($rest:tt)* + ) => { + pin_project_cfg! 
{ + @variant_body + { $($body)* } + {} + #[cfg(all($($pred_accum)*))] + $outer + { $($accum)* $(#[$($attr)*])* $variant } + $($rest)* + } + }; + // Process a variant field with `cfg`. + ( + @variant_body + { + #[cfg($($pred:tt)*)] + $(#[$($attr:tt)*])* $field:ident: $ty:ty, + $($rest:tt)* + } + { $($accum:tt)* } + #[cfg(all($($pred_accum:tt)*))] + $($outer:tt)* + ) => { + pin_project_cfg! { + @variant_body + {$($rest)*} + { $($accum)* $(#[$($attr)*])* $field: $ty, } + #[cfg(all($($pred_accum)* $($pred)*,))] + $($outer)* + } + pin_project_cfg! { + @variant_body + { $($rest)* } + { $($accum)* } + #[cfg(all($($pred_accum)* not($($pred)*),))] + $($outer)* + } + }; + // Process a variant field without `cfg`. + ( + @variant_body + { + $(#[$($attr:tt)*])* $field:ident: $ty:ty, + $($rest:tt)* + } + { $($accum:tt)* } + $($outer:tt)* + ) => { + pin_project_cfg! { + @variant_body + {$($rest)*} + { $($accum)* $(#[$($attr)*])* $field: $ty, } + $($outer)* + } + }; + ( + @variant_body + {} + $body:tt + #[cfg(all($($pred_accum:tt)*))] + $outer:tt + { $($accum:tt)* } + $($rest:tt)* + ) => { + pin_project_cfg! { + @body + #[cfg(all($($pred_accum)*))] + $outer + { $($accum)* $body, } + $($rest)* + } + }; + ( + @body + #[$cfg:meta] + [$($outer:tt)*] + $body:tt + ) => { + #[$cfg] + pin_project_lite::pin_project! { + $($outer)* $body + } + }; +} + +pub(crate) use pin_project_cfg; diff --git a/.cargo-vendor/tower-http/src/compression/predicate.rs b/.cargo-vendor/tower-http/src/compression/predicate.rs new file mode 100644 index 0000000000..88c3101c11 --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression/predicate.rs @@ -0,0 +1,272 @@ +//! Predicates for disabling compression of responses. +//! +//! Predicates are applied with [`Compression::compress_when`] or +//! [`CompressionLayer::compress_when`]. +//! +//! [`Compression::compress_when`]: super::Compression::compress_when +//! 
[`CompressionLayer::compress_when`]: super::CompressionLayer::compress_when + +use http::{header, Extensions, HeaderMap, StatusCode, Version}; +use http_body::Body; +use std::{fmt, sync::Arc}; + +/// Predicate used to determine if a response should be compressed or not. +pub trait Predicate: Clone { + /// Should this response be compressed or not? + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body; + + /// Combine two predicates into one. + /// + /// The resulting predicate enables compression if both inner predicates do. + fn and(self, other: Other) -> And + where + Self: Sized, + Other: Predicate, + { + And { + lhs: self, + rhs: other, + } + } +} + +impl Predicate for F +where + F: Fn(StatusCode, Version, &HeaderMap, &Extensions) -> bool + Clone, +{ + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + let status = response.status(); + let version = response.version(); + let headers = response.headers(); + let extensions = response.extensions(); + self(status, version, headers, extensions) + } +} + +impl Predicate for Option +where + T: Predicate, +{ + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + self.as_ref() + .map(|inner| inner.should_compress(response)) + .unwrap_or(true) + } +} + +/// Two predicates combined into one. +/// +/// Created with [`Predicate::and`] +#[derive(Debug, Clone, Default, Copy)] +pub struct And { + lhs: Lhs, + rhs: Rhs, +} + +impl Predicate for And +where + Lhs: Predicate, + Rhs: Predicate, +{ + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + self.lhs.should_compress(response) && self.rhs.should_compress(response) + } +} + +/// The default predicate used by [`Compression`] and [`CompressionLayer`]. +/// +/// This will compress responses unless: +/// +/// - They're gRPC, which has its own protocol specific compression scheme. 
+/// - It's an image as determined by the `content-type` starting with `image/`. +/// - They're Server-Sent Events (SSE) as determined by the `content-type` being `text/event-stream`. +/// - The response is less than 32 bytes. +/// +/// # Configuring the defaults +/// +/// `DefaultPredicate` doesn't support any configuration. Instead you can build your own predicate +/// by combining types in this module: +/// +/// ```rust +/// use tower_http::compression::predicate::{SizeAbove, NotForContentType, Predicate}; +/// +/// // slightly large min size than the default 32 +/// let predicate = SizeAbove::new(256) +/// // still don't compress gRPC +/// .and(NotForContentType::GRPC) +/// // still don't compress images +/// .and(NotForContentType::IMAGES) +/// // also don't compress JSON +/// .and(NotForContentType::const_new("application/json")); +/// ``` +/// +/// [`Compression`]: super::Compression +/// [`CompressionLayer`]: super::CompressionLayer +#[derive(Clone)] +pub struct DefaultPredicate( + And, NotForContentType>, NotForContentType>, +); + +impl DefaultPredicate { + /// Create a new `DefaultPredicate`. + pub fn new() -> Self { + let inner = SizeAbove::new(SizeAbove::DEFAULT_MIN_SIZE) + .and(NotForContentType::GRPC) + .and(NotForContentType::IMAGES) + .and(NotForContentType::SSE); + Self(inner) + } +} + +impl Default for DefaultPredicate { + fn default() -> Self { + Self::new() + } +} + +impl Predicate for DefaultPredicate { + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + self.0.should_compress(response) + } +} + +/// [`Predicate`] that will only allow compression of responses above a certain size. +#[derive(Clone, Copy, Debug)] +pub struct SizeAbove(u16); + +impl SizeAbove { + pub(crate) const DEFAULT_MIN_SIZE: u16 = 32; + + /// Create a new `SizeAbove` predicate that will only compress responses larger than + /// `min_size_bytes`. 
+ /// + /// The response will be compressed if the exact size cannot be determined through either the + /// `content-length` header or [`Body::size_hint`]. + pub const fn new(min_size_bytes: u16) -> Self { + Self(min_size_bytes) + } +} + +impl Default for SizeAbove { + fn default() -> Self { + Self(Self::DEFAULT_MIN_SIZE) + } +} + +impl Predicate for SizeAbove { + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + let content_size = response.body().size_hint().exact().or_else(|| { + response + .headers() + .get(header::CONTENT_LENGTH) + .and_then(|h| h.to_str().ok()) + .and_then(|val| val.parse().ok()) + }); + + match content_size { + Some(size) => size >= (self.0 as u64), + _ => true, + } + } +} + +/// Predicate that wont allow responses with a specific `content-type` to be compressed. +#[derive(Clone, Debug)] +pub struct NotForContentType { + content_type: Str, + exception: Option, +} + +impl NotForContentType { + /// Predicate that wont compress gRPC responses. + pub const GRPC: Self = Self::const_new("application/grpc"); + + /// Predicate that wont compress images. + pub const IMAGES: Self = Self { + content_type: Str::Static("image/"), + exception: Some(Str::Static("image/svg+xml")), + }; + + /// Predicate that wont compress Server-Sent Events (SSE) responses. + pub const SSE: Self = Self::const_new("text/event-stream"); + + /// Create a new `NotForContentType`. + pub fn new(content_type: &str) -> Self { + Self { + content_type: Str::Shared(content_type.into()), + exception: None, + } + } + + /// Create a new `NotForContentType` from a static string. 
+ pub const fn const_new(content_type: &'static str) -> Self { + Self { + content_type: Str::Static(content_type), + exception: None, + } + } +} + +impl Predicate for NotForContentType { + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + if let Some(except) = &self.exception { + if content_type(response) == except.as_str() { + return true; + } + } + + !content_type(response).starts_with(self.content_type.as_str()) + } +} + +#[derive(Clone)] +enum Str { + Static(&'static str), + Shared(Arc), +} + +impl Str { + fn as_str(&self) -> &str { + match self { + Str::Static(s) => s, + Str::Shared(s) => s, + } + } +} + +impl fmt::Debug for Str { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Static(inner) => inner.fmt(f), + Self::Shared(inner) => inner.fmt(f), + } + } +} + +fn content_type(response: &http::Response) -> &str { + response + .headers() + .get(header::CONTENT_TYPE) + .and_then(|h| h.to_str().ok()) + .unwrap_or_default() +} diff --git a/.cargo-vendor/tower-http/src/compression/service.rs b/.cargo-vendor/tower-http/src/compression/service.rs new file mode 100644 index 0000000000..22dcf73ae5 --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression/service.rs @@ -0,0 +1,185 @@ +use super::{CompressionBody, CompressionLayer, ResponseFuture}; +use crate::compression::predicate::{DefaultPredicate, Predicate}; +use crate::compression::CompressionLevel; +use crate::{compression_utils::AcceptEncoding, content_encoding::Encoding}; +use http::{Request, Response}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +/// Compress response bodies of the underlying service. +/// +/// This uses the `Accept-Encoding` header to pick an appropriate encoding and adds the +/// `Content-Encoding` header to responses. +/// +/// See the [module docs](crate::compression) for more details. 
+#[derive(Clone, Copy)] +pub struct Compression { + pub(crate) inner: S, + pub(crate) accept: AcceptEncoding, + pub(crate) predicate: P, + pub(crate) quality: CompressionLevel, +} + +impl Compression { + /// Creates a new `Compression` wrapping the `service`. + pub fn new(service: S) -> Compression { + Self { + inner: service, + accept: AcceptEncoding::default(), + predicate: DefaultPredicate::default(), + quality: CompressionLevel::default(), + } + } +} + +impl Compression { + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `Compression` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> CompressionLayer { + CompressionLayer::new() + } + + /// Sets whether to enable the gzip encoding. + #[cfg(feature = "compression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to enable the Deflate encoding. + #[cfg(feature = "compression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to enable the Brotli encoding. + #[cfg(feature = "compression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to enable the Zstd encoding. + #[cfg(feature = "compression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Sets the compression quality. + pub fn quality(mut self, quality: CompressionLevel) -> Self { + self.quality = quality; + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. 
+ pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } + + /// Replace the current compression predicate. + /// + /// Predicates are used to determine whether a response should be compressed or not. + /// + /// The default predicate is [`DefaultPredicate`]. See its documentation for more + /// details on which responses it wont compress. + /// + /// # Changing the compression predicate + /// + /// ``` + /// use tower_http::compression::{ + /// Compression, + /// predicate::{Predicate, NotForContentType, DefaultPredicate}, + /// }; + /// use tower::util::service_fn; + /// + /// // Placeholder service_fn + /// let service = service_fn(|_: ()| async { + /// Ok::<_, std::io::Error>(http::Response::new(())) + /// }); + /// + /// // build our custom compression predicate + /// // its recommended to still include `DefaultPredicate` as part of + /// // custom predicates + /// let predicate = DefaultPredicate::new() + /// // don't compress responses who's `content-type` starts with `application/json` + /// .and(NotForContentType::new("application/json")); + /// + /// let service = Compression::new(service).compress_when(predicate); + /// ``` + /// + /// See [`predicate`](super::predicate) for more utilities for building compression predicates. + /// + /// Responses that are already compressed (ie have a `content-encoding` header) will _never_ be + /// recompressed, regardless what they predicate says. 
+ pub fn compress_when(self, predicate: C) -> Compression + where + C: Predicate, + { + Compression { + inner: self.inner, + accept: self.accept, + predicate, + quality: self.quality, + } + } +} + +impl Service> for Compression +where + S: Service, Response = Response>, + ResBody: Body, + P: Predicate, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let encoding = Encoding::from_headers(req.headers(), self.accept); + + ResponseFuture { + inner: self.inner.call(req), + encoding, + predicate: self.predicate.clone(), + quality: self.quality, + } + } +} diff --git a/.cargo-vendor/tower-http/src/compression_utils.rs b/.cargo-vendor/tower-http/src/compression_utils.rs new file mode 100644 index 0000000000..153ae3244a --- /dev/null +++ b/.cargo-vendor/tower-http/src/compression_utils.rs @@ -0,0 +1,444 @@ +//! Types used by compression and decompression middleware. 
+ +use crate::{content_encoding::SupportedEncodings, BoxError}; +use bytes::{Buf, Bytes, BytesMut}; +use futures_core::Stream; +use http::HeaderValue; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + io, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tokio::io::AsyncRead; +use tokio_util::io::StreamReader; + +#[derive(Debug, Clone, Copy)] +pub(crate) struct AcceptEncoding { + pub(crate) gzip: bool, + pub(crate) deflate: bool, + pub(crate) br: bool, + pub(crate) zstd: bool, +} + +impl AcceptEncoding { + #[allow(dead_code)] + pub(crate) fn to_header_value(self) -> Option { + let accept = match (self.gzip(), self.deflate(), self.br(), self.zstd()) { + (true, true, true, false) => "gzip,deflate,br", + (true, true, false, false) => "gzip,deflate", + (true, false, true, false) => "gzip,br", + (true, false, false, false) => "gzip", + (false, true, true, false) => "deflate,br", + (false, true, false, false) => "deflate", + (false, false, true, false) => "br", + (true, true, true, true) => "zstd,gzip,deflate,br", + (true, true, false, true) => "zstd,gzip,deflate", + (true, false, true, true) => "zstd,gzip,br", + (true, false, false, true) => "zstd,gzip", + (false, true, true, true) => "zstd,deflate,br", + (false, true, false, true) => "zstd,deflate", + (false, false, true, true) => "zstd,br", + (false, false, false, true) => "zstd", + (false, false, false, false) => return None, + }; + Some(HeaderValue::from_static(accept)) + } + + #[allow(dead_code)] + pub(crate) fn set_gzip(&mut self, enable: bool) { + self.gzip = enable; + } + + #[allow(dead_code)] + pub(crate) fn set_deflate(&mut self, enable: bool) { + self.deflate = enable; + } + + #[allow(dead_code)] + pub(crate) fn set_br(&mut self, enable: bool) { + self.br = enable; + } + + #[allow(dead_code)] + pub(crate) fn set_zstd(&mut self, enable: bool) { + self.zstd = enable; + } +} + +impl SupportedEncodings for AcceptEncoding { + #[allow(dead_code)] + fn gzip(&self) -> bool { + 
#[cfg(any(feature = "decompression-gzip", feature = "compression-gzip"))] + return self.gzip; + + #[cfg(not(any(feature = "decompression-gzip", feature = "compression-gzip")))] + return false; + } + + #[allow(dead_code)] + fn deflate(&self) -> bool { + #[cfg(any(feature = "decompression-deflate", feature = "compression-deflate"))] + return self.deflate; + + #[cfg(not(any(feature = "decompression-deflate", feature = "compression-deflate")))] + return false; + } + + #[allow(dead_code)] + fn br(&self) -> bool { + #[cfg(any(feature = "decompression-br", feature = "compression-br"))] + return self.br; + + #[cfg(not(any(feature = "decompression-br", feature = "compression-br")))] + return false; + } + + #[allow(dead_code)] + fn zstd(&self) -> bool { + #[cfg(any(feature = "decompression-zstd", feature = "compression-zstd"))] + return self.zstd; + + #[cfg(not(any(feature = "decompression-zstd", feature = "compression-zstd")))] + return false; + } +} + +impl Default for AcceptEncoding { + fn default() -> Self { + AcceptEncoding { + gzip: true, + deflate: true, + br: true, + zstd: true, + } + } +} + +/// A `Body` that has been converted into an `AsyncRead`. +pub(crate) type AsyncReadBody = + StreamReader, ::Error>, ::Data>; + +/// Trait for applying some decorator to an `AsyncRead` +pub(crate) trait DecorateAsyncRead { + type Input: AsyncRead; + type Output: AsyncRead; + + /// Apply the decorator + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output; + + /// Get a pinned mutable reference to the original input. + /// + /// This is necessary to implement `Body::poll_trailers`. + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input>; +} + +pin_project! 
{ + /// `Body` that has been decorated by an `AsyncRead` + pub(crate) struct WrapBody { + #[pin] + // rust-analyer thinks this field is private if its `pub(crate)` but works fine when its + // `pub` + pub read: M::Output, + read_all_data: bool, + } +} + +impl WrapBody { + #[allow(dead_code)] + pub(crate) fn new(body: B, quality: CompressionLevel) -> Self + where + B: Body, + M: DecorateAsyncRead>, + { + // convert `Body` into a `Stream` + let stream = BodyIntoStream::new(body); + + // an adapter that converts the error type into `io::Error` while storing the actual error + // `StreamReader` requires the error type is `io::Error` + let stream = StreamErrorIntoIoError::<_, B::Error>::new(stream); + + // convert `Stream` into an `AsyncRead` + let read = StreamReader::new(stream); + + // apply decorator to `AsyncRead` yielding another `AsyncRead` + let read = M::apply(read, quality); + + Self { + read, + read_all_data: false, + } + } +} + +impl Body for WrapBody +where + B: Body, + B::Error: Into, + M: DecorateAsyncRead>, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + let mut buf = BytesMut::new(); + if !*this.read_all_data { + let result = tokio_util::io::poll_read_buf(this.read.as_mut(), cx, &mut buf); + + match ready!(result) { + Ok(0) => { + *this.read_all_data = true; + } + Ok(_) => { + return Poll::Ready(Some(Ok(Frame::data(buf.freeze())))); + } + Err(err) => { + let body_error: Option = M::get_pin_mut(this.read) + .get_pin_mut() + .project() + .error + .take(); + + if let Some(body_error) = body_error { + return Poll::Ready(Some(Err(body_error.into()))); + } else if err.raw_os_error() == Some(SENTINEL_ERROR_CODE) { + // SENTINEL_ERROR_CODE only gets used when storing + // an underlying body error + unreachable!() + } else { + return Poll::Ready(Some(Err(err.into()))); + } + } + } + } + + // poll any remaining frames, such as 
trailers + let body = M::get_pin_mut(this.read).get_pin_mut().get_pin_mut(); + body.poll_frame(cx).map(|option| { + option.map(|result| { + result + .map(|frame| frame.map_data(|mut data| data.copy_to_bytes(data.remaining()))) + .map_err(|err| err.into()) + }) + }) + } +} + +pin_project! { + pub(crate) struct BodyIntoStream + where + B: Body, + { + #[pin] + body: B, + yielded_all_data: bool, + non_data_frame: Option>, + } +} + +#[allow(dead_code)] +impl BodyIntoStream +where + B: Body, +{ + pub(crate) fn new(body: B) -> Self { + Self { + body, + yielded_all_data: false, + non_data_frame: None, + } + } + + /// Get a reference to the inner body + pub(crate) fn get_ref(&self) -> &B { + &self.body + } + + /// Get a mutable reference to the inner body + pub(crate) fn get_mut(&mut self) -> &mut B { + &mut self.body + } + + /// Get a pinned mutable reference to the inner body + pub(crate) fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + self.project().body + } + + /// Consume `self`, returning the inner body + pub(crate) fn into_inner(self) -> B { + self.body + } +} + +impl Stream for BodyIntoStream +where + B: Body, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + let this = self.as_mut().project(); + + if *this.yielded_all_data { + return Poll::Ready(None); + } + + match std::task::ready!(this.body.poll_frame(cx)) { + Some(Ok(frame)) => match frame.into_data() { + Ok(data) => return Poll::Ready(Some(Ok(data))), + Err(frame) => { + *this.yielded_all_data = true; + *this.non_data_frame = Some(frame); + } + }, + Some(Err(err)) => return Poll::Ready(Some(Err(err))), + None => { + *this.yielded_all_data = true; + } + } + } + } +} + +impl Body for BodyIntoStream +where + B: Body, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + // First drive the stream impl. 
This consumes all data frames and buffer at most one + // trailers frame. + if let Some(frame) = std::task::ready!(self.as_mut().poll_next(cx)) { + return Poll::Ready(Some(frame.map(Frame::data))); + } + + let this = self.project(); + + // Yield the trailers frame `poll_next` hit. + if let Some(frame) = this.non_data_frame.take() { + return Poll::Ready(Some(Ok(frame))); + } + + // Yield any remaining frames in the body. There shouldn't be any after the trailers but + // you never know. + this.body.poll_frame(cx) + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + self.body.size_hint() + } +} + +pin_project! { + pub(crate) struct StreamErrorIntoIoError { + #[pin] + inner: S, + error: Option, + } +} + +impl StreamErrorIntoIoError { + pub(crate) fn new(inner: S) -> Self { + Self { inner, error: None } + } + + /// Get a reference to the inner body + pub(crate) fn get_ref(&self) -> &S { + &self.inner + } + + /// Get a mutable reference to the inner inner + pub(crate) fn get_mut(&mut self) -> &mut S { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner inner + pub(crate) fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut S> { + self.project().inner + } + + /// Consume `self`, returning the inner inner + pub(crate) fn into_inner(self) -> S { + self.inner + } +} + +impl Stream for StreamErrorIntoIoError +where + S: Stream>, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + match ready!(this.inner.poll_next(cx)) { + None => Poll::Ready(None), + Some(Ok(value)) => Poll::Ready(Some(Ok(value))), + Some(Err(err)) => { + *this.error = Some(err); + Poll::Ready(Some(Err(io::Error::from_raw_os_error(SENTINEL_ERROR_CODE)))) + } + } + } +} + +pub(crate) const SENTINEL_ERROR_CODE: i32 = -837459418; + +/// Level of compression data should be compressed with. 
+#[non_exhaustive] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)] +pub enum CompressionLevel { + /// Fastest quality of compression, usually produces bigger size. + Fastest, + /// Best quality of compression, usually produces the smallest size. + Best, + /// Default quality of compression defined by the selected compression + /// algorithm. + #[default] + Default, + /// Precise quality based on the underlying compression algorithms' + /// qualities. + /// + /// The interpretation of this depends on the algorithm chosen and the + /// specific implementation backing it. + /// + /// Qualities are implicitly clamped to the algorithm's maximum. + Precise(i32), +} + +#[cfg(any( + feature = "compression-br", + feature = "compression-gzip", + feature = "compression-deflate", + feature = "compression-zstd" +))] +use async_compression::Level as AsyncCompressionLevel; + +#[cfg(any( + feature = "compression-br", + feature = "compression-gzip", + feature = "compression-deflate", + feature = "compression-zstd" +))] +impl CompressionLevel { + pub(crate) fn into_async_compression(self) -> AsyncCompressionLevel { + match self { + CompressionLevel::Fastest => AsyncCompressionLevel::Fastest, + CompressionLevel::Best => AsyncCompressionLevel::Best, + CompressionLevel::Default => AsyncCompressionLevel::Default, + CompressionLevel::Precise(quality) => AsyncCompressionLevel::Precise(quality), + } + } +} diff --git a/.cargo-vendor/tower-http/src/content_encoding.rs b/.cargo-vendor/tower-http/src/content_encoding.rs new file mode 100644 index 0000000000..a0e60a3cbf --- /dev/null +++ b/.cargo-vendor/tower-http/src/content_encoding.rs @@ -0,0 +1,605 @@ +pub(crate) trait SupportedEncodings: Copy { + fn gzip(&self) -> bool; + fn deflate(&self) -> bool; + fn br(&self) -> bool; + fn zstd(&self) -> bool; +} + +// This enum's variants are ordered from least to most preferred. 
+#[derive(Copy, Clone, Debug, Ord, PartialOrd, PartialEq, Eq)] +pub(crate) enum Encoding { + #[allow(dead_code)] + Identity, + #[cfg(any(feature = "fs", feature = "compression-deflate"))] + Deflate, + #[cfg(any(feature = "fs", feature = "compression-gzip"))] + Gzip, + #[cfg(any(feature = "fs", feature = "compression-br"))] + Brotli, + #[cfg(any(feature = "fs", feature = "compression-zstd"))] + Zstd, +} + +impl Encoding { + #[allow(dead_code)] + fn to_str(self) -> &'static str { + match self { + #[cfg(any(feature = "fs", feature = "compression-gzip"))] + Encoding::Gzip => "gzip", + #[cfg(any(feature = "fs", feature = "compression-deflate"))] + Encoding::Deflate => "deflate", + #[cfg(any(feature = "fs", feature = "compression-br"))] + Encoding::Brotli => "br", + #[cfg(any(feature = "fs", feature = "compression-zstd"))] + Encoding::Zstd => "zstd", + Encoding::Identity => "identity", + } + } + + #[cfg(feature = "fs")] + pub(crate) fn to_file_extension(self) -> Option<&'static std::ffi::OsStr> { + match self { + Encoding::Gzip => Some(std::ffi::OsStr::new(".gz")), + Encoding::Deflate => Some(std::ffi::OsStr::new(".zz")), + Encoding::Brotli => Some(std::ffi::OsStr::new(".br")), + Encoding::Zstd => Some(std::ffi::OsStr::new(".zst")), + Encoding::Identity => None, + } + } + + #[allow(dead_code)] + pub(crate) fn into_header_value(self) -> http::HeaderValue { + http::HeaderValue::from_static(self.to_str()) + } + + #[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-zstd", + feature = "fs", + ))] + fn parse(s: &str, _supported_encoding: impl SupportedEncodings) -> Option { + #[cfg(any(feature = "fs", feature = "compression-gzip"))] + if (s.eq_ignore_ascii_case("gzip") || s.eq_ignore_ascii_case("x-gzip")) + && _supported_encoding.gzip() + { + return Some(Encoding::Gzip); + } + + #[cfg(any(feature = "fs", feature = "compression-deflate"))] + if s.eq_ignore_ascii_case("deflate") && 
_supported_encoding.deflate() { + return Some(Encoding::Deflate); + } + + #[cfg(any(feature = "fs", feature = "compression-br"))] + if s.eq_ignore_ascii_case("br") && _supported_encoding.br() { + return Some(Encoding::Brotli); + } + + #[cfg(any(feature = "fs", feature = "compression-zstd"))] + if s.eq_ignore_ascii_case("zstd") && _supported_encoding.zstd() { + return Some(Encoding::Zstd); + } + + if s.eq_ignore_ascii_case("identity") { + return Some(Encoding::Identity); + } + + None + } + + #[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + ))] + // based on https://github.com/http-rs/accept-encoding + pub(crate) fn from_headers( + headers: &http::HeaderMap, + supported_encoding: impl SupportedEncodings, + ) -> Self { + Encoding::preferred_encoding(encodings(headers, supported_encoding)) + .unwrap_or(Encoding::Identity) + } + + #[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", + ))] + pub(crate) fn preferred_encoding( + accepted_encodings: impl Iterator, + ) -> Option { + accepted_encodings + .filter(|(_, qvalue)| qvalue.0 > 0) + .max_by_key(|&(encoding, qvalue)| (qvalue, encoding)) + .map(|(encoding, _)| encoding) + } +} + +// Allowed q-values are numbers between 0 and 1 with at most 3 digits in the fractional part. They +// are presented here as an unsigned integer between 0 and 1000. 
+#[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", +))] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub(crate) struct QValue(u16); + +#[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", +))] +impl QValue { + #[inline] + fn one() -> Self { + Self(1000) + } + + // Parse a q-value as specified in RFC 7231 section 5.3.1. + fn parse(s: &str) -> Option { + let mut c = s.chars(); + // Parse "q=" (case-insensitively). + match c.next() { + Some('q' | 'Q') => (), + _ => return None, + }; + match c.next() { + Some('=') => (), + _ => return None, + }; + + // Parse leading digit. Since valid q-values are between 0.000 and 1.000, only "0" and "1" + // are allowed. + let mut value = match c.next() { + Some('0') => 0, + Some('1') => 1000, + _ => return None, + }; + + // Parse optional decimal point. + match c.next() { + Some('.') => (), + None => return Some(Self(value)), + _ => return None, + }; + + // Parse optional fractional digits. The value of each digit is multiplied by `factor`. + // Since the q-value is represented as an integer between 0 and 1000, `factor` is `100` for + // the first digit, `10` for the next, and `1` for the digit after that. + let mut factor = 100; + loop { + match c.next() { + Some(n @ '0'..='9') => { + // If `factor` is less than `1`, three digits have already been parsed. A + // q-value having more than 3 fractional digits is invalid. + if factor < 1 { + return None; + } + // Add the digit's value multiplied by `factor` to `value`. + value += factor * (n as u16 - '0' as u16); + } + None => { + // No more characters to parse. Check that the value representing the q-value is + // in the valid range. 
+ return if value <= 1000 { + Some(Self(value)) + } else { + None + }; + } + _ => return None, + }; + factor /= 10; + } + } +} + +#[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", +))] +// based on https://github.com/http-rs/accept-encoding +pub(crate) fn encodings<'a>( + headers: &'a http::HeaderMap, + supported_encoding: impl SupportedEncodings + 'a, +) -> impl Iterator + 'a { + headers + .get_all(http::header::ACCEPT_ENCODING) + .iter() + .filter_map(|hval| hval.to_str().ok()) + .flat_map(|s| s.split(',')) + .filter_map(move |v| { + let mut v = v.splitn(2, ';'); + + let encoding = match Encoding::parse(v.next().unwrap().trim(), supported_encoding) { + Some(encoding) => encoding, + None => return None, // ignore unknown encodings + }; + + let qval = if let Some(qval) = v.next() { + QValue::parse(qval.trim())? + } else { + QValue::one() + }; + + Some((encoding, qval)) + }) +} + +#[cfg(all( + test, + feature = "compression-gzip", + feature = "compression-deflate", + feature = "compression-br", + feature = "compression-zstd", +))] +mod tests { + use super::*; + + #[derive(Copy, Clone, Default)] + struct SupportedEncodingsAll; + + impl SupportedEncodings for SupportedEncodingsAll { + fn gzip(&self) -> bool { + true + } + + fn deflate(&self) -> bool { + true + } + + fn br(&self) -> bool { + true + } + + fn zstd(&self) -> bool { + true + } + } + + #[test] + fn no_accept_encoding_header() { + let encoding = Encoding::from_headers(&http::HeaderMap::new(), SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + } + + #[test] + fn accept_encoding_header_single_encoding() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn 
accept_encoding_header_two_encodings() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_gzip_x_gzip() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip,x-gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn accept_encoding_header_x_gzip_deflate() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("deflate,x-gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn accept_encoding_header_three_encodings() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip,deflate,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_two_encodings_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_three_encodings_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,deflate,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + 
#[test] + fn two_accept_encoding_headers_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn two_accept_encoding_headers_three_encodings_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,deflate"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn three_accept_encoding_headers_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("deflate"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_two_encodings_with_two_qvalues() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,br;q=0.8"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.8,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + 
assert_eq!(Encoding::Gzip, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.995,br;q=0.999"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_three_encodings_with_three_qvalues() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,deflate;q=0.6,br;q=0.8"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.8,deflate;q=0.6,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.6,deflate;q=0.8,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Deflate, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.995,deflate;q=0.997,br;q=0.999"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_invalid_encdoing() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("invalid,gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn accept_encoding_header_with_qvalue_zero() { + let mut headers = http::HeaderMap::new(); + 
headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0."), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_with_uppercase_letters() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gZiP"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,br;Q=0.8"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_with_allowed_spaces() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static(" gzip\t; q=0.5 ,\tbr ;\tq=0.8\t"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_with_invalid_spaces() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q =0.5"), + ); + let encoding = Encoding::from_headers(&headers, 
SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q= 0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + } + + #[test] + fn accept_encoding_header_with_invalid_quvalues() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=-0.1"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=00.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5000"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=1.01"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=1.001"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + 
assert_eq!(Encoding::Identity, encoding); + } +} diff --git a/.cargo-vendor/tower-http/src/cors/allow_credentials.rs b/.cargo-vendor/tower-http/src/cors/allow_credentials.rs new file mode 100644 index 0000000000..de53ffed61 --- /dev/null +++ b/.cargo-vendor/tower-http/src/cors/allow_credentials.rs @@ -0,0 +1,96 @@ +use std::{fmt, sync::Arc}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Credentials`][mdn] header. +/// +/// See [`CorsLayer::allow_credentials`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials +/// [`CorsLayer::allow_credentials`]: super::CorsLayer::allow_credentials +#[derive(Clone, Default)] +#[must_use] +pub struct AllowCredentials(AllowCredentialsInner); + +impl AllowCredentials { + /// Allow credentials for all requests + /// + /// See [`CorsLayer::allow_credentials`] for more details. + /// + /// [`CorsLayer::allow_credentials`]: super::CorsLayer::allow_credentials + pub fn yes() -> Self { + Self(AllowCredentialsInner::Yes) + } + + /// Allow credentials for some requests, based on a given predicate + /// + /// The first argument to the predicate is the request origin. + /// + /// See [`CorsLayer::allow_credentials`] for more details. 
+ /// + /// [`CorsLayer::allow_credentials`]: super::CorsLayer::allow_credentials + pub fn predicate(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> bool + Send + Sync + 'static, + { + Self(AllowCredentialsInner::Predicate(Arc::new(f))) + } + + pub(super) fn is_true(&self) -> bool { + matches!(&self.0, AllowCredentialsInner::Yes) + } + + pub(super) fn to_header( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> Option<(HeaderName, HeaderValue)> { + #[allow(clippy::declare_interior_mutable_const)] + const TRUE: HeaderValue = HeaderValue::from_static("true"); + + let allow_creds = match &self.0 { + AllowCredentialsInner::Yes => true, + AllowCredentialsInner::No => false, + AllowCredentialsInner::Predicate(c) => c(origin?, parts), + }; + + allow_creds.then_some((header::ACCESS_CONTROL_ALLOW_CREDENTIALS, TRUE)) + } +} + +impl From for AllowCredentials { + fn from(v: bool) -> Self { + match v { + true => Self(AllowCredentialsInner::Yes), + false => Self(AllowCredentialsInner::No), + } + } +} + +impl fmt::Debug for AllowCredentials { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + AllowCredentialsInner::Yes => f.debug_tuple("Yes").finish(), + AllowCredentialsInner::No => f.debug_tuple("No").finish(), + AllowCredentialsInner::Predicate(_) => f.debug_tuple("Predicate").finish(), + } + } +} + +#[derive(Clone)] +enum AllowCredentialsInner { + Yes, + No, + Predicate( + Arc Fn(&'a HeaderValue, &'a RequestParts) -> bool + Send + Sync + 'static>, + ), +} + +impl Default for AllowCredentialsInner { + fn default() -> Self { + Self::No + } +} diff --git a/.cargo-vendor/tower-http/src/cors/allow_headers.rs b/.cargo-vendor/tower-http/src/cors/allow_headers.rs new file mode 100644 index 0000000000..06c199280c --- /dev/null +++ b/.cargo-vendor/tower-http/src/cors/allow_headers.rs @@ -0,0 +1,112 @@ +use std::{array, fmt}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + 
+use super::{separated_by_commas, Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Headers`][mdn] header. +/// +/// See [`CorsLayer::allow_headers`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers +/// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers +#[derive(Clone, Default)] +#[must_use] +pub struct AllowHeaders(AllowHeadersInner); + +impl AllowHeaders { + /// Allow any headers by sending a wildcard (`*`) + /// + /// See [`CorsLayer::allow_headers`] for more details. + /// + /// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers + pub fn any() -> Self { + Self(AllowHeadersInner::Const(Some(WILDCARD))) + } + + /// Set multiple allowed headers + /// + /// See [`CorsLayer::allow_headers`] for more details. + /// + /// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers + pub fn list(headers: I) -> Self + where + I: IntoIterator, + { + Self(AllowHeadersInner::Const(separated_by_commas( + headers.into_iter().map(Into::into), + ))) + } + + /// Allow any headers, by mirroring the preflight [`Access-Control-Request-Headers`][mdn] + /// header. + /// + /// See [`CorsLayer::allow_headers`] for more details. + /// + /// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Request-Headers + pub fn mirror_request() -> Self { + Self(AllowHeadersInner::MirrorRequest) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, AllowHeadersInner::Const(Some(v)) if v == WILDCARD) + } + + pub(super) fn to_header(&self, parts: &RequestParts) -> Option<(HeaderName, HeaderValue)> { + let allow_headers = match &self.0 { + AllowHeadersInner::Const(v) => v.clone()?, + AllowHeadersInner::MirrorRequest => parts + .headers + .get(header::ACCESS_CONTROL_REQUEST_HEADERS)? 
+ .clone(), + }; + + Some((header::ACCESS_CONTROL_ALLOW_HEADERS, allow_headers)) + } +} + +impl fmt::Debug for AllowHeaders { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + AllowHeadersInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + AllowHeadersInner::MirrorRequest => f.debug_tuple("MirrorRequest").finish(), + } + } +} + +impl From for AllowHeaders { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From<[HeaderName; N]> for AllowHeaders { + fn from(arr: [HeaderName; N]) -> Self { + #[allow(deprecated)] // Can be changed when MSRV >= 1.53 + Self::list(array::IntoIter::new(arr)) + } +} + +impl From> for AllowHeaders { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum AllowHeadersInner { + Const(Option), + MirrorRequest, +} + +impl Default for AllowHeadersInner { + fn default() -> Self { + Self::Const(None) + } +} diff --git a/.cargo-vendor/tower-http/src/cors/allow_methods.rs b/.cargo-vendor/tower-http/src/cors/allow_methods.rs new file mode 100644 index 0000000000..df1a3cbde6 --- /dev/null +++ b/.cargo-vendor/tower-http/src/cors/allow_methods.rs @@ -0,0 +1,132 @@ +use std::{array, fmt}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, + Method, +}; + +use super::{separated_by_commas, Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Methods`][mdn] header. +/// +/// See [`CorsLayer::allow_methods`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods +/// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods +#[derive(Clone, Default)] +#[must_use] +pub struct AllowMethods(AllowMethodsInner); + +impl AllowMethods { + /// Allow any method by sending a wildcard (`*`) + /// + /// See [`CorsLayer::allow_methods`] for more details. 
+ /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + pub fn any() -> Self { + Self(AllowMethodsInner::Const(Some(WILDCARD))) + } + + /// Set a single allowed method + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + pub fn exact(method: Method) -> Self { + Self(AllowMethodsInner::Const(Some( + HeaderValue::from_str(method.as_str()).unwrap(), + ))) + } + + /// Set multiple allowed methods + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + pub fn list(methods: I) -> Self + where + I: IntoIterator, + { + Self(AllowMethodsInner::Const(separated_by_commas( + methods + .into_iter() + .map(|m| HeaderValue::from_str(m.as_str()).unwrap()), + ))) + } + + /// Allow any method, by mirroring the preflight [`Access-Control-Request-Method`][mdn] + /// header. + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Request-Method + pub fn mirror_request() -> Self { + Self(AllowMethodsInner::MirrorRequest) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, AllowMethodsInner::Const(Some(v)) if v == WILDCARD) + } + + pub(super) fn to_header(&self, parts: &RequestParts) -> Option<(HeaderName, HeaderValue)> { + let allow_methods = match &self.0 { + AllowMethodsInner::Const(v) => v.clone()?, + AllowMethodsInner::MirrorRequest => parts + .headers + .get(header::ACCESS_CONTROL_REQUEST_METHOD)? 
+ .clone(), + }; + + Some((header::ACCESS_CONTROL_ALLOW_METHODS, allow_methods)) + } +} + +impl fmt::Debug for AllowMethods { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + AllowMethodsInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + AllowMethodsInner::MirrorRequest => f.debug_tuple("MirrorRequest").finish(), + } + } +} + +impl From for AllowMethods { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From for AllowMethods { + fn from(method: Method) -> Self { + Self::exact(method) + } +} + +impl From<[Method; N]> for AllowMethods { + fn from(arr: [Method; N]) -> Self { + #[allow(deprecated)] // Can be changed when MSRV >= 1.53 + Self::list(array::IntoIter::new(arr)) + } +} + +impl From> for AllowMethods { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum AllowMethodsInner { + Const(Option), + MirrorRequest, +} + +impl Default for AllowMethodsInner { + fn default() -> Self { + Self::Const(None) + } +} diff --git a/.cargo-vendor/tower-http/src/cors/allow_origin.rs b/.cargo-vendor/tower-http/src/cors/allow_origin.rs new file mode 100644 index 0000000000..3d7a7f3ec3 --- /dev/null +++ b/.cargo-vendor/tower-http/src/cors/allow_origin.rs @@ -0,0 +1,156 @@ +use std::{array, fmt, sync::Arc}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +use super::{Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Origin`][mdn] header. +/// +/// See [`CorsLayer::allow_origin`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin +/// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin +#[derive(Clone, Default)] +#[must_use] +pub struct AllowOrigin(OriginInner); + +impl AllowOrigin { + /// Allow any origin by sending a wildcard (`*`) + /// + /// See [`CorsLayer::allow_origin`] for more details. 
+ /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn any() -> Self { + Self(OriginInner::Const(WILDCARD)) + } + + /// Set a single allowed origin + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn exact(origin: HeaderValue) -> Self { + Self(OriginInner::Const(origin)) + } + + /// Set multiple allowed origins + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// # Panics + /// + /// If the iterator contains a wildcard (`*`). + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + #[allow(clippy::borrow_interior_mutable_const)] + pub fn list(origins: I) -> Self + where + I: IntoIterator, + { + let origins = origins.into_iter().collect::>(); + if origins.contains(&WILDCARD) { + panic!( + "Wildcard origin (`*`) cannot be passed to `AllowOrigin::list`. \ + Use `AllowOrigin::any()` instead" + ); + } + + Self(OriginInner::List(origins)) + } + + /// Set the allowed origins from a predicate + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn predicate(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> bool + Send + Sync + 'static, + { + Self(OriginInner::Predicate(Arc::new(f))) + } + + /// Allow any origin, by mirroring the request origin + /// + /// This is equivalent to + /// [`AllowOrigin::predicate(|_, _| true)`][Self::predicate]. + /// + /// See [`CorsLayer::allow_origin`] for more details. 
+ /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn mirror_request() -> Self { + Self::predicate(|_, _| true) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, OriginInner::Const(v) if v == WILDCARD) + } + + pub(super) fn to_header( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> Option<(HeaderName, HeaderValue)> { + let allow_origin = match &self.0 { + OriginInner::Const(v) => v.clone(), + OriginInner::List(l) => origin.filter(|o| l.contains(o))?.clone(), + OriginInner::Predicate(c) => origin.filter(|origin| c(origin, parts))?.clone(), + }; + + Some((header::ACCESS_CONTROL_ALLOW_ORIGIN, allow_origin)) + } +} + +impl fmt::Debug for AllowOrigin { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + OriginInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + OriginInner::List(inner) => f.debug_tuple("List").field(inner).finish(), + OriginInner::Predicate(_) => f.debug_tuple("Predicate").finish(), + } + } +} + +impl From for AllowOrigin { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From for AllowOrigin { + fn from(val: HeaderValue) -> Self { + Self::exact(val) + } +} + +impl From<[HeaderValue; N]> for AllowOrigin { + fn from(arr: [HeaderValue; N]) -> Self { + #[allow(deprecated)] // Can be changed when MSRV >= 1.53 + Self::list(array::IntoIter::new(arr)) + } +} + +impl From> for AllowOrigin { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum OriginInner { + Const(HeaderValue), + List(Vec), + Predicate( + Arc Fn(&'a HeaderValue, &'a RequestParts) -> bool + Send + Sync + 'static>, + ), +} + +impl Default for OriginInner { + fn default() -> Self { + Self::List(Vec::new()) + } +} diff --git a/.cargo-vendor/tower-http/src/cors/allow_private_network.rs b/.cargo-vendor/tower-http/src/cors/allow_private_network.rs new file mode 100644 index 0000000000..9f97dc11fb --- 
/dev/null +++ b/.cargo-vendor/tower-http/src/cors/allow_private_network.rs @@ -0,0 +1,205 @@ +use std::{fmt, sync::Arc}; + +use http::{ + header::{HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Private-Network`][wicg] header. +/// +/// See [`CorsLayer::allow_private_network`] for more details. +/// +/// [wicg]: https://wicg.github.io/private-network-access/ +/// [`CorsLayer::allow_private_network`]: super::CorsLayer::allow_private_network +#[derive(Clone, Default)] +#[must_use] +pub struct AllowPrivateNetwork(AllowPrivateNetworkInner); + +impl AllowPrivateNetwork { + /// Allow requests via a more private network than the one used to access the origin + /// + /// See [`CorsLayer::allow_private_network`] for more details. + /// + /// [`CorsLayer::allow_private_network`]: super::CorsLayer::allow_private_network + pub fn yes() -> Self { + Self(AllowPrivateNetworkInner::Yes) + } + + /// Allow requests via private network for some requests, based on a given predicate + /// + /// The first argument to the predicate is the request origin. + /// + /// See [`CorsLayer::allow_private_network`] for more details. 
+ /// + /// [`CorsLayer::allow_private_network`]: super::CorsLayer::allow_private_network + pub fn predicate(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> bool + Send + Sync + 'static, + { + Self(AllowPrivateNetworkInner::Predicate(Arc::new(f))) + } + + #[allow( + clippy::declare_interior_mutable_const, + clippy::borrow_interior_mutable_const + )] + pub(super) fn to_header( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> Option<(HeaderName, HeaderValue)> { + #[allow(clippy::declare_interior_mutable_const)] + const REQUEST_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-request-private-network"); + + #[allow(clippy::declare_interior_mutable_const)] + const ALLOW_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-allow-private-network"); + + const TRUE: HeaderValue = HeaderValue::from_static("true"); + + // Cheapest fallback: allow_private_network hasn't been set + if let AllowPrivateNetworkInner::No = &self.0 { + return None; + } + + // Access-Control-Allow-Private-Network is only relevant if the request + // has the Access-Control-Request-Private-Network header set, else skip + if parts.headers.get(REQUEST_PRIVATE_NETWORK) != Some(&TRUE) { + return None; + } + + let allow_private_network = match &self.0 { + AllowPrivateNetworkInner::Yes => true, + AllowPrivateNetworkInner::No => false, // unreachable, but not harmful + AllowPrivateNetworkInner::Predicate(c) => c(origin?, parts), + }; + + allow_private_network.then_some((ALLOW_PRIVATE_NETWORK, TRUE)) + } +} + +impl From for AllowPrivateNetwork { + fn from(v: bool) -> Self { + match v { + true => Self(AllowPrivateNetworkInner::Yes), + false => Self(AllowPrivateNetworkInner::No), + } + } +} + +impl fmt::Debug for AllowPrivateNetwork { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + AllowPrivateNetworkInner::Yes => f.debug_tuple("Yes").finish(), + AllowPrivateNetworkInner::No => 
f.debug_tuple("No").finish(), + AllowPrivateNetworkInner::Predicate(_) => f.debug_tuple("Predicate").finish(), + } + } +} + +#[derive(Clone)] +enum AllowPrivateNetworkInner { + Yes, + No, + Predicate( + Arc Fn(&'a HeaderValue, &'a RequestParts) -> bool + Send + Sync + 'static>, + ), +} + +impl Default for AllowPrivateNetworkInner { + fn default() -> Self { + Self::No + } +} + +#[cfg(test)] +mod tests { + #![allow( + clippy::declare_interior_mutable_const, + clippy::borrow_interior_mutable_const + )] + + use super::AllowPrivateNetwork; + use crate::cors::CorsLayer; + + use crate::test_helpers::Body; + use http::{header::ORIGIN, request::Parts, HeaderName, HeaderValue, Request, Response}; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + use tower_service::Service; + + const REQUEST_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-request-private-network"); + + const ALLOW_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-allow-private-network"); + + const TRUE: HeaderValue = HeaderValue::from_static("true"); + + #[tokio::test] + async fn cors_private_network_header_is_added_correctly() { + let mut service = ServiceBuilder::new() + .layer(CorsLayer::new().allow_private_network(true)) + .service_fn(echo); + + let req = Request::builder() + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .body(Body::empty()) + .unwrap(); + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert_eq!(res.headers().get(ALLOW_PRIVATE_NETWORK).unwrap(), TRUE); + + let req = Request::builder().body(Body::empty()).unwrap(); + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(res.headers().get(ALLOW_PRIVATE_NETWORK).is_none()); + } + + #[tokio::test] + async fn cors_private_network_header_is_added_correctly_with_predicate() { + let allow_private_network = + AllowPrivateNetwork::predicate(|origin: &HeaderValue, parts: &Parts| { + parts.uri.path() == "/allow-private" && origin == "localhost" + }); + let 
mut service = ServiceBuilder::new() + .layer(CorsLayer::new().allow_private_network(allow_private_network)) + .service_fn(echo); + + let req = Request::builder() + .header(ORIGIN, "localhost") + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .uri("/allow-private") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(req).await.unwrap(); + assert_eq!(res.headers().get(ALLOW_PRIVATE_NETWORK).unwrap(), TRUE); + + let req = Request::builder() + .header(ORIGIN, "localhost") + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .uri("/other") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(res.headers().get(ALLOW_PRIVATE_NETWORK).is_none()); + + let req = Request::builder() + .header(ORIGIN, "not-localhost") + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .uri("/allow-private") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(res.headers().get(ALLOW_PRIVATE_NETWORK).is_none()); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo-vendor/tower-http/src/cors/expose_headers.rs b/.cargo-vendor/tower-http/src/cors/expose_headers.rs new file mode 100644 index 0000000000..2b1a226712 --- /dev/null +++ b/.cargo-vendor/tower-http/src/cors/expose_headers.rs @@ -0,0 +1,94 @@ +use std::{array, fmt}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +use super::{separated_by_commas, Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Expose-Headers`][mdn] header. +/// +/// See [`CorsLayer::expose_headers`] for more details. 
+/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers +/// [`CorsLayer::expose_headers`]: super::CorsLayer::expose_headers +#[derive(Clone, Default)] +#[must_use] +pub struct ExposeHeaders(ExposeHeadersInner); + +impl ExposeHeaders { + /// Expose any / all headers by sending a wildcard (`*`) + /// + /// See [`CorsLayer::expose_headers`] for more details. + /// + /// [`CorsLayer::expose_headers`]: super::CorsLayer::expose_headers + pub fn any() -> Self { + Self(ExposeHeadersInner::Const(Some(WILDCARD))) + } + + /// Set multiple exposed header names + /// + /// See [`CorsLayer::expose_headers`] for more details. + /// + /// [`CorsLayer::expose_headers`]: super::CorsLayer::expose_headers + pub fn list(headers: I) -> Self + where + I: IntoIterator, + { + Self(ExposeHeadersInner::Const(separated_by_commas( + headers.into_iter().map(Into::into), + ))) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, ExposeHeadersInner::Const(Some(v)) if v == WILDCARD) + } + + pub(super) fn to_header(&self, _parts: &RequestParts) -> Option<(HeaderName, HeaderValue)> { + let expose_headers = match &self.0 { + ExposeHeadersInner::Const(v) => v.clone()?, + }; + + Some((header::ACCESS_CONTROL_EXPOSE_HEADERS, expose_headers)) + } +} + +impl fmt::Debug for ExposeHeaders { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + ExposeHeadersInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + } + } +} + +impl From for ExposeHeaders { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From<[HeaderName; N]> for ExposeHeaders { + fn from(arr: [HeaderName; N]) -> Self { + #[allow(deprecated)] // Can be changed when MSRV >= 1.53 + Self::list(array::IntoIter::new(arr)) + } +} + +impl From> for ExposeHeaders { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum ExposeHeadersInner { + Const(Option), +} + 
+impl Default for ExposeHeadersInner { + fn default() -> Self { + ExposeHeadersInner::Const(None) + } +} diff --git a/.cargo-vendor/tower-http/src/cors/max_age.rs b/.cargo-vendor/tower-http/src/cors/max_age.rs new file mode 100644 index 0000000000..9818992633 --- /dev/null +++ b/.cargo-vendor/tower-http/src/cors/max_age.rs @@ -0,0 +1,74 @@ +use std::{fmt, sync::Arc, time::Duration}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +/// Holds configuration for how to set the [`Access-Control-Max-Age`][mdn] header. +/// +/// See [`CorsLayer::max_age`][super::CorsLayer::max_age] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age +#[derive(Clone, Default)] +#[must_use] +pub struct MaxAge(MaxAgeInner); + +impl MaxAge { + /// Set a static max-age value + /// + /// See [`CorsLayer::max_age`][super::CorsLayer::max_age] for more details. + pub fn exact(max_age: Duration) -> Self { + Self(MaxAgeInner::Exact(Some(max_age.as_secs().into()))) + } + + /// Set the max-age based on the preflight request parts + /// + /// See [`CorsLayer::max_age`][super::CorsLayer::max_age] for more details. 
+ pub fn dynamic(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> Duration + Send + Sync + 'static, + { + Self(MaxAgeInner::Fn(Arc::new(f))) + } + + pub(super) fn to_header( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> Option<(HeaderName, HeaderValue)> { + let max_age = match &self.0 { + MaxAgeInner::Exact(v) => v.clone()?, + MaxAgeInner::Fn(c) => c(origin?, parts).as_secs().into(), + }; + + Some((header::ACCESS_CONTROL_MAX_AGE, max_age)) + } +} + +impl fmt::Debug for MaxAge { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + MaxAgeInner::Exact(inner) => f.debug_tuple("Exact").field(inner).finish(), + MaxAgeInner::Fn(_) => f.debug_tuple("Fn").finish(), + } + } +} + +impl From for MaxAge { + fn from(max_age: Duration) -> Self { + Self::exact(max_age) + } +} + +#[derive(Clone)] +enum MaxAgeInner { + Exact(Option), + Fn(Arc Fn(&'a HeaderValue, &'a RequestParts) -> Duration + Send + Sync + 'static>), +} + +impl Default for MaxAgeInner { + fn default() -> Self { + Self::Exact(None) + } +} diff --git a/.cargo-vendor/tower-http/src/cors/mod.rs b/.cargo-vendor/tower-http/src/cors/mod.rs new file mode 100644 index 0000000000..156576f207 --- /dev/null +++ b/.cargo-vendor/tower-http/src/cors/mod.rs @@ -0,0 +1,748 @@ +//! Middleware which adds headers for [CORS][mdn]. +//! +//! # Example +//! +//! ``` +//! use http::{Request, Response, Method, header}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::{ServiceBuilder, ServiceExt, Service}; +//! use tower_http::cors::{Any, CorsLayer}; +//! use std::convert::Infallible; +//! +//! async fn handle(request: Request>) -> Result>, Infallible> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let cors = CorsLayer::new() +//! // allow `GET` and `POST` when accessing the resource +//! .allow_methods([Method::GET, Method::POST]) +//! // allow requests from any origin +//! 
.allow_origin(Any); +//! +//! let mut service = ServiceBuilder::new() +//! .layer(cors) +//! .service_fn(handle); +//! +//! let request = Request::builder() +//! .header(header::ORIGIN, "https://example.com") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!( +//! response.headers().get(header::ACCESS_CONTROL_ALLOW_ORIGIN).unwrap(), +//! "*", +//! ); +//! # Ok(()) +//! # } +//! ``` +//! +//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS + +#![allow(clippy::enum_variant_names)] + +use bytes::{BufMut, BytesMut}; +use http::{ + header::{self, HeaderName}, + HeaderMap, HeaderValue, Method, Request, Response, +}; +use pin_project_lite::pin_project; +use std::{ + array, + future::Future, + mem, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +mod allow_credentials; +mod allow_headers; +mod allow_methods; +mod allow_origin; +mod allow_private_network; +mod expose_headers; +mod max_age; +mod vary; + +#[cfg(test)] +mod tests; + +pub use self::{ + allow_credentials::AllowCredentials, allow_headers::AllowHeaders, allow_methods::AllowMethods, + allow_origin::AllowOrigin, allow_private_network::AllowPrivateNetwork, + expose_headers::ExposeHeaders, max_age::MaxAge, vary::Vary, +}; + +/// Layer that applies the [`Cors`] middleware which adds headers for [CORS][mdn]. +/// +/// See the [module docs](crate::cors) for an example. 
+/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS +#[derive(Debug, Clone)] +#[must_use] +pub struct CorsLayer { + allow_credentials: AllowCredentials, + allow_headers: AllowHeaders, + allow_methods: AllowMethods, + allow_origin: AllowOrigin, + allow_private_network: AllowPrivateNetwork, + expose_headers: ExposeHeaders, + max_age: MaxAge, + vary: Vary, +} + +#[allow(clippy::declare_interior_mutable_const)] +const WILDCARD: HeaderValue = HeaderValue::from_static("*"); + +impl CorsLayer { + /// Create a new `CorsLayer`. + /// + /// No headers are sent by default. Use the builder methods to customize + /// the behavior. + /// + /// You need to set at least an allowed origin for browsers to make + /// successful cross-origin requests to your service. + pub fn new() -> Self { + Self { + allow_credentials: Default::default(), + allow_headers: Default::default(), + allow_methods: Default::default(), + allow_origin: Default::default(), + allow_private_network: Default::default(), + expose_headers: Default::default(), + max_age: Default::default(), + vary: Default::default(), + } + } + + /// A permissive configuration: + /// + /// - All request headers allowed. + /// - All methods allowed. + /// - All origins allowed. + /// - All headers exposed. + pub fn permissive() -> Self { + Self::new() + .allow_headers(Any) + .allow_methods(Any) + .allow_origin(Any) + .expose_headers(Any) + } + + /// A very permissive configuration: + /// + /// - **Credentials allowed.** + /// - The method received in `Access-Control-Request-Method` is sent back + /// as an allowed method. + /// - The origin of the preflight request is sent back as an allowed origin. + /// - The header names received in `Access-Control-Request-Headers` are sent + /// back as allowed headers. + /// - No headers are currently exposed, but this may change in the future. 
+ pub fn very_permissive() -> Self { + Self::new() + .allow_credentials(true) + .allow_headers(AllowHeaders::mirror_request()) + .allow_methods(AllowMethods::mirror_request()) + .allow_origin(AllowOrigin::mirror_request()) + } + + /// Set the [`Access-Control-Allow-Credentials`][mdn] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().allow_credentials(true); + /// ``` + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials + pub fn allow_credentials(mut self, allow_credentials: T) -> Self + where + T: Into, + { + self.allow_credentials = allow_credentials.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Headers`][mdn] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// use http::header::{AUTHORIZATION, ACCEPT}; + /// + /// let layer = CorsLayer::new().allow_headers([AUTHORIZATION, ACCEPT]); + /// ``` + /// + /// All headers can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().allow_headers(Any); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. + /// + /// Also note that `Access-Control-Allow-Headers` is required for requests that have + /// `Access-Control-Request-Headers`. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers + pub fn allow_headers(mut self, headers: T) -> Self + where + T: Into, + { + self.allow_headers = headers.into(); + self + } + + /// Set the value of the [`Access-Control-Max-Age`][mdn] header. + /// + /// ``` + /// use std::time::Duration; + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().max_age(Duration::from_secs(60) * 10); + /// ``` + /// + /// By default the header will not be set which disables caching and will + /// require a preflight call for all requests. 
+ /// + /// Note that each browser has a maximum internal value that takes + /// precedence when the Access-Control-Max-Age is greater. For more details + /// see [mdn]. + /// + /// If you need more flexibility, you can use supply a function which can + /// dynamically decide the max-age based on the origin and other parts of + /// each preflight request: + /// + /// ``` + /// # struct MyServerConfig { cors_max_age: Duration } + /// use std::time::Duration; + /// + /// use http::{request::Parts as RequestParts, HeaderValue}; + /// use tower_http::cors::{CorsLayer, MaxAge}; + /// + /// let layer = CorsLayer::new().max_age(MaxAge::dynamic( + /// |_origin: &HeaderValue, parts: &RequestParts| -> Duration { + /// // Let's say you want to be able to reload your config at + /// // runtime and have another middleware that always inserts + /// // the current config into the request extensions + /// let config = parts.extensions.get::().unwrap(); + /// config.cors_max_age + /// }, + /// )); + /// ``` + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age + pub fn max_age(mut self, max_age: T) -> Self + where + T: Into, + { + self.max_age = max_age.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Methods`][mdn] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// use http::Method; + /// + /// let layer = CorsLayer::new().allow_methods([Method::GET, Method::POST]); + /// ``` + /// + /// All methods can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().allow_methods(Any); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods + pub fn allow_methods(mut self, methods: T) -> Self + where + T: Into, + { + self.allow_methods = methods.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Origin`][mdn] header. + /// + /// ``` + /// use http::HeaderValue; + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().allow_origin( + /// "http://example.com".parse::().unwrap(), + /// ); + /// ``` + /// + /// Multiple origins can be allowed with + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// + /// let origins = [ + /// "http://example.com".parse().unwrap(), + /// "http://api.example.com".parse().unwrap(), + /// ]; + /// + /// let layer = CorsLayer::new().allow_origin(origins); + /// ``` + /// + /// All origins can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().allow_origin(Any); + /// ``` + /// + /// You can also use a closure + /// + /// ``` + /// use tower_http::cors::{CorsLayer, AllowOrigin}; + /// use http::{request::Parts as RequestParts, HeaderValue}; + /// + /// let layer = CorsLayer::new().allow_origin(AllowOrigin::predicate( + /// |origin: &HeaderValue, _request_parts: &RequestParts| { + /// origin.as_bytes().ends_with(b".rust-lang.org") + /// }, + /// )); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin + pub fn allow_origin(mut self, origin: T) -> Self + where + T: Into, + { + self.allow_origin = origin.into(); + self + } + + /// Set the value of the [`Access-Control-Expose-Headers`][mdn] header. 
+ /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// use http::header::CONTENT_ENCODING; + /// + /// let layer = CorsLayer::new().expose_headers([CONTENT_ENCODING]); + /// ``` + /// + /// All headers can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().expose_headers(Any); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers + pub fn expose_headers(mut self, headers: T) -> Self + where + T: Into, + { + self.expose_headers = headers.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Private-Network`][wicg] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().allow_private_network(true); + /// ``` + /// + /// [wicg]: https://wicg.github.io/private-network-access/ + pub fn allow_private_network(mut self, allow_private_network: T) -> Self + where + T: Into, + { + self.allow_private_network = allow_private_network.into(); + self + } + + /// Set the value(s) of the [`Vary`][mdn] header. + /// + /// In contrast to the other headers, this one has a non-empty default of + /// [`preflight_request_headers()`]. + /// + /// You only need to set this is you want to remove some of these defaults, + /// or if you use a closure for one of the other headers and want to add a + /// vary header accordingly. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Vary + pub fn vary(mut self, headers: T) -> Self + where + T: Into, + { + self.vary = headers.into(); + self + } +} + +/// Represents a wildcard value (`*`) used with some CORS headers such as +/// [`CorsLayer::allow_methods`]. +#[derive(Debug, Clone, Copy)] +#[must_use] +pub struct Any; + +/// Represents a wildcard value (`*`) used with some CORS headers such as +/// [`CorsLayer::allow_methods`]. 
+#[deprecated = "Use Any as a unit struct literal instead"] +pub fn any() -> Any { + Any +} + +fn separated_by_commas(mut iter: I) -> Option +where + I: Iterator, +{ + match iter.next() { + Some(fst) => { + let mut result = BytesMut::from(fst.as_bytes()); + for val in iter { + result.reserve(val.len() + 1); + result.put_u8(b','); + result.extend_from_slice(val.as_bytes()); + } + + Some(HeaderValue::from_maybe_shared(result.freeze()).unwrap()) + } + None => None, + } +} + +impl Default for CorsLayer { + fn default() -> Self { + Self::new() + } +} + +impl Layer for CorsLayer { + type Service = Cors; + + fn layer(&self, inner: S) -> Self::Service { + ensure_usable_cors_rules(self); + + Cors { + inner, + layer: self.clone(), + } + } +} + +/// Middleware which adds headers for [CORS][mdn]. +/// +/// See the [module docs](crate::cors) for an example. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS +#[derive(Debug, Clone)] +#[must_use] +pub struct Cors { + inner: S, + layer: CorsLayer, +} + +impl Cors { + /// Create a new `Cors`. + /// + /// See [`CorsLayer::new`] for more details. + pub fn new(inner: S) -> Self { + Self { + inner, + layer: CorsLayer::new(), + } + } + + /// A permissive configuration. + /// + /// See [`CorsLayer::permissive`] for more details. + pub fn permissive(inner: S) -> Self { + Self { + inner, + layer: CorsLayer::permissive(), + } + } + + /// A very permissive configuration. + /// + /// See [`CorsLayer::very_permissive`] for more details. + pub fn very_permissive(inner: S) -> Self { + Self { + inner, + layer: CorsLayer::very_permissive(), + } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a [`Cors`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> CorsLayer { + CorsLayer::new() + } + + /// Set the [`Access-Control-Allow-Credentials`][mdn] header. + /// + /// See [`CorsLayer::allow_credentials`] for more details. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials + pub fn allow_credentials(self, allow_credentials: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_credentials(allow_credentials)) + } + + /// Set the value of the [`Access-Control-Allow-Headers`][mdn] header. + /// + /// See [`CorsLayer::allow_headers`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers + pub fn allow_headers(self, headers: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_headers(headers)) + } + + /// Set the value of the [`Access-Control-Max-Age`][mdn] header. + /// + /// See [`CorsLayer::max_age`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age + pub fn max_age(self, max_age: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.max_age(max_age)) + } + + /// Set the value of the [`Access-Control-Allow-Methods`][mdn] header. + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods + pub fn allow_methods(self, methods: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_methods(methods)) + } + + /// Set the value of the [`Access-Control-Allow-Origin`][mdn] header. + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin + pub fn allow_origin(self, origin: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_origin(origin)) + } + + /// Set the value of the [`Access-Control-Expose-Headers`][mdn] header. + /// + /// See [`CorsLayer::expose_headers`] for more details. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers + pub fn expose_headers(self, headers: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.expose_headers(headers)) + } + + /// Set the value of the [`Access-Control-Allow-Private-Network`][wicg] header. + /// + /// See [`CorsLayer::allow_private_network`] for more details. + /// + /// [wicg]: https://wicg.github.io/private-network-access/ + pub fn allow_private_network(self, allow_private_network: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_private_network(allow_private_network)) + } + + fn map_layer(mut self, f: F) -> Self + where + F: FnOnce(CorsLayer) -> CorsLayer, + { + self.layer = f(self.layer); + self + } +} + +impl Service> for Cors +where + S: Service, Response = Response>, + ResBody: Default, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + ensure_usable_cors_rules(&self.layer); + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let (parts, body) = req.into_parts(); + let origin = parts.headers.get(&header::ORIGIN); + + let mut headers = HeaderMap::new(); + + // These headers are applied to both preflight and subsequent regular CORS requests: + // https://fetch.spec.whatwg.org/#http-responses + headers.extend(self.layer.allow_origin.to_header(origin, &parts)); + headers.extend(self.layer.allow_credentials.to_header(origin, &parts)); + headers.extend(self.layer.allow_private_network.to_header(origin, &parts)); + headers.extend(self.layer.vary.to_header()); + + // Return results immediately upon preflight request + if parts.method == Method::OPTIONS { + // These headers are applied only to preflight requests + headers.extend(self.layer.allow_methods.to_header(&parts)); + headers.extend(self.layer.allow_headers.to_header(&parts)); + 
headers.extend(self.layer.max_age.to_header(origin, &parts)); + + ResponseFuture { + inner: Kind::PreflightCall { headers }, + } + } else { + // This header is applied only to non-preflight requests + headers.extend(self.layer.expose_headers.to_header(&parts)); + + let req = Request::from_parts(parts, body); + ResponseFuture { + inner: Kind::CorsCall { + future: self.inner.call(req), + headers, + }, + } + } + } +} + +pin_project! { + /// Response future for [`Cors`]. + pub struct ResponseFuture { + #[pin] + inner: Kind, + } +} + +pin_project! { + #[project = KindProj] + enum Kind { + CorsCall { + #[pin] + future: F, + headers: HeaderMap, + }, + PreflightCall { + headers: HeaderMap, + }, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Default, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().inner.project() { + KindProj::CorsCall { future, headers } => { + let mut response: Response = ready!(future.poll(cx))?; + + let response_headers = response.headers_mut(); + + // vary header can have multiple values, don't overwrite + // previously-set value(s). 
+ if let Some(vary) = headers.remove(header::VARY) { + response_headers.append(header::VARY, vary); + } + // extend will overwrite previous headers of remaining names + response_headers.extend(headers.drain()); + + Poll::Ready(Ok(response)) + } + KindProj::PreflightCall { headers } => { + let mut response = Response::new(B::default()); + mem::swap(response.headers_mut(), headers); + + Poll::Ready(Ok(response)) + } + } + } +} + +fn ensure_usable_cors_rules(layer: &CorsLayer) { + if layer.allow_credentials.is_true() { + assert!( + !layer.allow_headers.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Allow-Headers: *`" + ); + + assert!( + !layer.allow_methods.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Allow-Methods: *`" + ); + + assert!( + !layer.allow_origin.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Allow-Origin: *`" + ); + + assert!( + !layer.expose_headers.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Expose-Headers: *`" + ); + } +} + +/// Returns an iterator over the three request headers that may be involved in a CORS preflight request. 
+///
+/// This is the default set of header names returned in the `vary` header
+pub fn preflight_request_headers() -> impl Iterator<Item = HeaderName> {
+    #[allow(deprecated)] // Can be changed when MSRV >= 1.53
+    array::IntoIter::new([
+        header::ORIGIN,
+        header::ACCESS_CONTROL_REQUEST_METHOD,
+        header::ACCESS_CONTROL_REQUEST_HEADERS,
+    ])
+}
diff --git a/.cargo-vendor/tower-http/src/cors/tests.rs b/.cargo-vendor/tower-http/src/cors/tests.rs
new file mode 100644
index 0000000000..8d95df2b5d
--- /dev/null
+++ b/.cargo-vendor/tower-http/src/cors/tests.rs
@@ -0,0 +1,33 @@
+use std::convert::Infallible;
+
+use crate::test_helpers::Body;
+use http::{header, HeaderValue, Request, Response};
+use tower::{service_fn, util::ServiceExt, Layer};
+
+use crate::cors::CorsLayer;
+
+#[tokio::test]
+#[allow(
+    clippy::declare_interior_mutable_const,
+    clippy::borrow_interior_mutable_const
+)]
+async fn vary_set_by_inner_service() {
+    const CUSTOM_VARY_HEADERS: HeaderValue = HeaderValue::from_static("accept, accept-encoding");
+    const PERMISSIVE_CORS_VARY_HEADERS: HeaderValue = HeaderValue::from_static(
+        "origin, access-control-request-method, access-control-request-headers",
+    );
+
+    async fn inner_svc(_: Request<Body>) -> Result<Response<Body>, Infallible> {
+        Ok(Response::builder()
+            .header(header::VARY, CUSTOM_VARY_HEADERS)
+            .body(Body::empty())
+            .unwrap())
+    }
+
+    let svc = CorsLayer::permissive().layer(service_fn(inner_svc));
+    let res = svc.oneshot(Request::new(Body::empty())).await.unwrap();
+    let mut vary_headers = res.headers().get_all(header::VARY).into_iter();
+    assert_eq!(vary_headers.next(), Some(&CUSTOM_VARY_HEADERS));
+    assert_eq!(vary_headers.next(), Some(&PERMISSIVE_CORS_VARY_HEADERS));
+    assert_eq!(vary_headers.next(), None);
+}
diff --git a/.cargo-vendor/tower-http/src/cors/vary.rs b/.cargo-vendor/tower-http/src/cors/vary.rs
new file mode 100644
index 0000000000..1ed7e6721c
--- /dev/null
+++ b/.cargo-vendor/tower-http/src/cors/vary.rs
@@ -0,0 +1,60 @@
+use std::array;
+
+use http::header::{self,
HeaderName, HeaderValue};
+
+use super::preflight_request_headers;
+
+/// Holds configuration for how to set the [`Vary`][mdn] header.
+///
+/// See [`CorsLayer::vary`] for more details.
+///
+/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Vary
+/// [`CorsLayer::vary`]: super::CorsLayer::vary
+#[derive(Clone, Debug)]
+pub struct Vary(Vec<HeaderValue>);
+
+impl Vary {
+    /// Set the list of header names to return as vary header values
+    ///
+    /// See [`CorsLayer::vary`] for more details.
+    ///
+    /// [`CorsLayer::vary`]: super::CorsLayer::vary
+    pub fn list<I>(headers: I) -> Self
+    where
+        I: IntoIterator<Item = HeaderName>,
+    {
+        Self(headers.into_iter().map(Into::into).collect())
+    }
+
+    pub(super) fn to_header(&self) -> Option<(HeaderName, HeaderValue)> {
+        let values = &self.0;
+        let mut res = values.first()?.as_bytes().to_owned();
+        for val in &values[1..] {
+            res.extend_from_slice(b", ");
+            res.extend_from_slice(val.as_bytes());
+        }
+
+        let header_val = HeaderValue::from_bytes(&res)
+            .expect("comma-separated list of HeaderValues is always a valid HeaderValue");
+        Some((header::VARY, header_val))
+    }
+}
+
+impl Default for Vary {
+    fn default() -> Self {
+        Self::list(preflight_request_headers())
+    }
+}
+
+impl<const N: usize> From<[HeaderName; N]> for Vary {
+    fn from(arr: [HeaderName; N]) -> Self {
+        #[allow(deprecated)] // Can be changed when MSRV >= 1.53
+        Self::list(array::IntoIter::new(arr))
+    }
+}
+
+impl From<Vec<HeaderName>> for Vary {
+    fn from(vec: Vec<HeaderName>) -> Self {
+        Self::list(vec)
+    }
+}
diff --git a/.cargo-vendor/tower-http/src/decompression/body.rs b/.cargo-vendor/tower-http/src/decompression/body.rs
new file mode 100644
index 0000000000..88197bbf52
--- /dev/null
+++ b/.cargo-vendor/tower-http/src/decompression/body.rs
@@ -0,0 +1,399 @@
+#![allow(unused_imports)]
+
+use crate::compression_utils::CompressionLevel;
+use crate::{
+    compression_utils::{AsyncReadBody, BodyIntoStream, DecorateAsyncRead, WrapBody},
+    BoxError,
+};
+#[cfg(feature = "decompression-br")]
+use
async_compression::tokio::bufread::BrotliDecoder; +#[cfg(feature = "decompression-gzip")] +use async_compression::tokio::bufread::GzipDecoder; +#[cfg(feature = "decompression-deflate")] +use async_compression::tokio::bufread::ZlibDecoder; +#[cfg(feature = "decompression-zstd")] +use async_compression::tokio::bufread::ZstdDecoder; +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use http_body::Body; +use pin_project_lite::pin_project; +use std::task::Context; +use std::{ + io, + marker::PhantomData, + pin::Pin, + task::{ready, Poll}, +}; +use tokio_util::io::StreamReader; + +pin_project! { + /// Response body of [`RequestDecompression`] and [`Decompression`]. + /// + /// [`RequestDecompression`]: super::RequestDecompression + /// [`Decompression`]: super::Decompression + pub struct DecompressionBody + where + B: Body + { + #[pin] + pub(crate) inner: BodyInner, + } +} + +impl Default for DecompressionBody +where + B: Body + Default, +{ + fn default() -> Self { + Self { + inner: BodyInner::Identity { + inner: B::default(), + }, + } + } +} + +impl DecompressionBody +where + B: Body, +{ + pub(crate) fn new(inner: BodyInner) -> Self { + Self { inner } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + match &self.inner { + #[cfg(feature = "decompression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "decompression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "decompression-br")] + BodyInner::Brotli { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "decompression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + BodyInner::Identity { inner } => inner, + + // FIXME: Remove once possible; see https://github.com/rust-lang/rust/issues/51085 + #[cfg(not(feature = "decompression-gzip"))] + BodyInner::Gzip { inner } => match inner.0 {}, + 
#[cfg(not(feature = "decompression-deflate"))] + BodyInner::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInner::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInner::Zstd { inner } => match inner.0 {}, + } + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + match &mut self.inner { + #[cfg(feature = "decompression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "decompression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "decompression-br")] + BodyInner::Brotli { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "decompression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + BodyInner::Identity { inner } => inner, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInner::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInner::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInner::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInner::Zstd { inner } => match inner.0 {}, + } + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + match self.project().inner.project() { + #[cfg(feature = "decompression-gzip")] + BodyInnerProj::Gzip { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "decompression-deflate")] + BodyInnerProj::Deflate { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "decompression-br")] + BodyInnerProj::Brotli { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + 
.get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "decompression-zstd")] + BodyInnerProj::Zstd { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + BodyInnerProj::Identity { inner } => inner, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInnerProj::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInnerProj::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInnerProj::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInnerProj::Zstd { inner } => match inner.0 {}, + } + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + match self.inner { + #[cfg(feature = "decompression-gzip")] + BodyInner::Gzip { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "decompression-deflate")] + BodyInner::Deflate { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "decompression-br")] + BodyInner::Brotli { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "decompression-zstd")] + BodyInner::Zstd { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + BodyInner::Identity { inner } => inner, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInner::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInner::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInner::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInner::Zstd { inner } => match inner.0 {}, + } + } +} + +#[cfg(any( + not(feature = "decompression-gzip"), + not(feature = "decompression-deflate"), + not(feature = "decompression-br"), + not(feature = "decompression-zstd") +))] 
+pub(crate) enum Never {} + +#[cfg(feature = "decompression-gzip")] +type GzipBody = WrapBody>; +#[cfg(not(feature = "decompression-gzip"))] +type GzipBody = (Never, PhantomData); + +#[cfg(feature = "decompression-deflate")] +type DeflateBody = WrapBody>; +#[cfg(not(feature = "decompression-deflate"))] +type DeflateBody = (Never, PhantomData); + +#[cfg(feature = "decompression-br")] +type BrotliBody = WrapBody>; +#[cfg(not(feature = "decompression-br"))] +type BrotliBody = (Never, PhantomData); + +#[cfg(feature = "decompression-zstd")] +type ZstdBody = WrapBody>; +#[cfg(not(feature = "decompression-zstd"))] +type ZstdBody = (Never, PhantomData); + +pin_project! { + #[project = BodyInnerProj] + pub(crate) enum BodyInner + where + B: Body, + { + Gzip { + #[pin] + inner: GzipBody, + }, + Deflate { + #[pin] + inner: DeflateBody, + }, + Brotli { + #[pin] + inner: BrotliBody, + }, + Zstd { + #[pin] + inner: ZstdBody, + }, + Identity { + #[pin] + inner: B, + }, + } +} + +impl BodyInner { + #[cfg(feature = "decompression-gzip")] + pub(crate) fn gzip(inner: WrapBody>) -> Self { + Self::Gzip { inner } + } + + #[cfg(feature = "decompression-deflate")] + pub(crate) fn deflate(inner: WrapBody>) -> Self { + Self::Deflate { inner } + } + + #[cfg(feature = "decompression-br")] + pub(crate) fn brotli(inner: WrapBody>) -> Self { + Self::Brotli { inner } + } + + #[cfg(feature = "decompression-zstd")] + pub(crate) fn zstd(inner: WrapBody>) -> Self { + Self::Zstd { inner } + } + + pub(crate) fn identity(inner: B) -> Self { + Self::Identity { inner } + } +} + +impl Body for DecompressionBody +where + B: Body, + B::Error: Into, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project().inner.project() { + #[cfg(feature = "decompression-gzip")] + BodyInnerProj::Gzip { inner } => inner.poll_frame(cx), + #[cfg(feature = "decompression-deflate")] + BodyInnerProj::Deflate { inner } => 
inner.poll_frame(cx), + #[cfg(feature = "decompression-br")] + BodyInnerProj::Brotli { inner } => inner.poll_frame(cx), + #[cfg(feature = "decompression-zstd")] + BodyInnerProj::Zstd { inner } => inner.poll_frame(cx), + BodyInnerProj::Identity { inner } => match ready!(inner.poll_frame(cx)) { + Some(Ok(frame)) => { + let frame = frame.map_data(|mut buf| buf.copy_to_bytes(buf.remaining())); + Poll::Ready(Some(Ok(frame))) + } + Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), + None => Poll::Ready(None), + }, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInnerProj::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInnerProj::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInnerProj::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInnerProj::Zstd { inner } => match inner.0 {}, + } + } +} + +#[cfg(feature = "decompression-gzip")] +impl DecorateAsyncRead for GzipDecoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = GzipDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { + let mut decoder = GzipDecoder::new(input); + decoder.multiple_members(true); + decoder + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "decompression-deflate")] +impl DecorateAsyncRead for ZlibDecoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = ZlibDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { + ZlibDecoder::new(input) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "decompression-br")] +impl DecorateAsyncRead for BrotliDecoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = BrotliDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { 
+ BrotliDecoder::new(input) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "decompression-zstd")] +impl DecorateAsyncRead for ZstdDecoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = ZstdDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { + ZstdDecoder::new(input) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/future.rs b/.cargo-vendor/tower-http/src/decompression/future.rs new file mode 100644 index 0000000000..36867e974a --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/future.rs @@ -0,0 +1,80 @@ +#![allow(unused_imports)] + +use super::{body::BodyInner, DecompressionBody}; +use crate::compression_utils::{AcceptEncoding, CompressionLevel, WrapBody}; +use crate::content_encoding::SupportedEncodings; +use http::{header, Response}; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + +pin_project! { + /// Response future of [`Decompression`]. 
+ /// + /// [`Decompression`]: super::Decompression + #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + pub(crate) inner: F, + pub(crate) accept: AcceptEncoding, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Body, +{ + type Output = Result>, E>; + + #[allow(unreachable_code, unused_mut, unused_variables)] + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let res = ready!(self.as_mut().project().inner.poll(cx)?); + let (mut parts, body) = res.into_parts(); + + let res = + if let header::Entry::Occupied(entry) = parts.headers.entry(header::CONTENT_ENCODING) { + let body = match entry.get().as_bytes() { + #[cfg(feature = "decompression-gzip")] + b"gzip" if self.accept.gzip() => DecompressionBody::new(BodyInner::gzip( + WrapBody::new(body, CompressionLevel::default()), + )), + + #[cfg(feature = "decompression-deflate")] + b"deflate" if self.accept.deflate() => DecompressionBody::new( + BodyInner::deflate(WrapBody::new(body, CompressionLevel::default())), + ), + + #[cfg(feature = "decompression-br")] + b"br" if self.accept.br() => DecompressionBody::new(BodyInner::brotli( + WrapBody::new(body, CompressionLevel::default()), + )), + + #[cfg(feature = "decompression-zstd")] + b"zstd" if self.accept.zstd() => DecompressionBody::new(BodyInner::zstd( + WrapBody::new(body, CompressionLevel::default()), + )), + + _ => { + return Poll::Ready(Ok(Response::from_parts( + parts, + DecompressionBody::new(BodyInner::identity(body)), + ))) + } + }; + + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + + Response::from_parts(parts, body) + } else { + Response::from_parts(parts, DecompressionBody::new(BodyInner::identity(body))) + }; + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/layer.rs b/.cargo-vendor/tower-http/src/decompression/layer.rs new file mode 100644 index 0000000000..4a184c166b --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/layer.rs @@ -0,0 +1,92 @@ 
+use super::Decompression; +use crate::compression_utils::AcceptEncoding; +use tower_layer::Layer; + +/// Decompresses response bodies of the underlying service. +/// +/// This adds the `Accept-Encoding` header to requests and transparently decompresses response +/// bodies based on the `Content-Encoding` header. +/// +/// See the [module docs](crate::decompression) for more details. +#[derive(Debug, Default, Clone)] +pub struct DecompressionLayer { + accept: AcceptEncoding, +} + +impl Layer for DecompressionLayer { + type Service = Decompression; + + fn layer(&self, service: S) -> Self::Service { + Decompression { + inner: service, + accept: self.accept, + } + } +} + +impl DecompressionLayer { + /// Creates a new `DecompressionLayer`. + pub fn new() -> Self { + Default::default() + } + + /// Sets whether to request the gzip encoding. + #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to request the Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to request the Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to request the Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. 
+ /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/mod.rs b/.cargo-vendor/tower-http/src/decompression/mod.rs new file mode 100644 index 0000000000..708df4399d --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/mod.rs @@ -0,0 +1,199 @@ +//! Middleware that decompresses request and response bodies. +//! +//! # Examples +//! +//! #### Request +//! +//! ```rust +//! use bytes::Bytes; +//! use flate2::{write::GzEncoder, Compression}; +//! use http::{header, HeaderValue, Request, Response}; +//! use http_body_util::{Full, BodyExt}; +//! use std::{error::Error, io::Write}; +//! use tower::{Service, ServiceBuilder, service_fn, ServiceExt}; +//! use tower_http::{BoxError, decompression::{DecompressionBody, RequestDecompressionLayer}}; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! // A request encoded with gzip coming from some HTTP client. +//! let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); +//! encoder.write_all(b"Hello?")?; +//! let request = Request::builder() +//! .header(header::CONTENT_ENCODING, "gzip") +//! .body(Full::from(encoder.finish()?))?; +//! +//! // Our HTTP server +//! let mut server = ServiceBuilder::new() +//! // Automatically decompress request bodies. +//! .layer(RequestDecompressionLayer::new()) +//! .service(service_fn(handler)); +//! +//! // Send the request, with the gzip encoded body, to our server. +//! let _response = server.ready().await?.call(request).await?; +//! +//! // Handler receives request whose body is decoded when read +//! async fn handler( +//! mut req: Request>>, +//! ) -> Result>, BoxError>{ +//! 
let data = req.into_body().collect().await?.to_bytes(); +//! assert_eq!(&data[..], b"Hello?"); +//! Ok(Response::new(Full::from("Hello, World!"))) +//! } +//! # Ok(()) +//! # } +//! ``` +//! +//! #### Response +//! +//! ```rust +//! use bytes::Bytes; +//! use http::{Request, Response}; +//! use http_body_util::{Full, BodyExt}; +//! use std::convert::Infallible; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::{compression::Compression, decompression::DecompressionLayer, BoxError}; +//! # +//! # #[tokio::main] +//! # async fn main() -> Result<(), tower_http::BoxError> { +//! # async fn handle(req: Request>) -> Result>, Infallible> { +//! # let body = Full::from("Hello, World!"); +//! # Ok(Response::new(body)) +//! # } +//! +//! // Some opaque service that applies compression. +//! let service = Compression::new(service_fn(handle)); +//! +//! // Our HTTP client. +//! let mut client = ServiceBuilder::new() +//! // Automatically decompress response bodies. +//! .layer(DecompressionLayer::new()) +//! .service(service); +//! +//! // Call the service. +//! // +//! // `DecompressionLayer` takes care of setting `Accept-Encoding`. +//! let request = Request::new(Full::::default()); +//! +//! let response = client +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! // Read the body +//! let body = response.into_body(); +//! let bytes = body.collect().await?.to_bytes().to_vec(); +//! let body = String::from_utf8(bytes).map_err(Into::::into)?; +//! +//! assert_eq!(body, "Hello, World!"); +//! # +//! # Ok(()) +//! # } +//! 
``` + +mod request; + +mod body; +mod future; +mod layer; +mod service; + +pub use self::{ + body::DecompressionBody, future::ResponseFuture, layer::DecompressionLayer, + service::Decompression, +}; + +pub use self::request::future::RequestDecompressionFuture; +pub use self::request::layer::RequestDecompressionLayer; +pub use self::request::service::RequestDecompression; + +#[cfg(test)] +mod tests { + use std::convert::Infallible; + use std::io::Write; + + use super::*; + use crate::test_helpers::Body; + use crate::{compression::Compression, test_helpers::WithTrailers}; + use flate2::write::GzEncoder; + use http::Response; + use http::{HeaderMap, HeaderName, Request}; + use http_body_util::BodyExt; + use tower::{service_fn, Service, ServiceExt}; + + #[tokio::test] + async fn works() { + let mut client = Decompression::new(Compression::new(service_fn(handle))); + + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = client.ready().await.unwrap().call(req).await.unwrap(); + + // read the body, it will be decompressed automatically + let body = res.into_body(); + let collected = body.collect().await.unwrap(); + let trailers = collected.trailers().cloned().unwrap(); + let decompressed_data = String::from_utf8(collected.to_bytes().to_vec()).unwrap(); + + assert_eq!(decompressed_data, "Hello, World!"); + + // maintains trailers + assert_eq!(trailers["foo"], "bar"); + } + + async fn handle(_req: Request) -> Result>, Infallible> { + let mut trailers = HeaderMap::new(); + trailers.insert(HeaderName::from_static("foo"), "bar".parse().unwrap()); + let body = Body::from("Hello, World!").with_trailers(trailers); + Ok(Response::builder().body(body).unwrap()) + } + + #[tokio::test] + async fn decompress_multi_gz() { + let mut client = Decompression::new(service_fn(handle_multi_gz)); + + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = 
client.ready().await.unwrap().call(req).await.unwrap(); + + // read the body, it will be decompressed automatically + let body = res.into_body(); + let decompressed_data = + String::from_utf8(body.collect().await.unwrap().to_bytes().to_vec()).unwrap(); + + assert_eq!(decompressed_data, "Hello, World!"); + } + + async fn handle_multi_gz(_req: Request) -> Result, Infallible> { + let mut buf = Vec::new(); + let mut enc1 = GzEncoder::new(&mut buf, Default::default()); + enc1.write_all(b"Hello, ").unwrap(); + enc1.finish().unwrap(); + + let mut enc2 = GzEncoder::new(&mut buf, Default::default()); + enc2.write_all(b"World!").unwrap(); + enc2.finish().unwrap(); + + let mut res = Response::new(Body::from(buf)); + res.headers_mut() + .insert("content-encoding", "gzip".parse().unwrap()); + Ok(res) + } + + #[allow(dead_code)] + async fn is_compatible_with_hyper() { + let client = + hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new()) + .build_http(); + let mut client = Decompression::new(client); + + let req = Request::new(Body::empty()); + + let _: Response> = + client.ready().await.unwrap().call(req).await.unwrap(); + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/request/future.rs b/.cargo-vendor/tower-http/src/decompression/request/future.rs new file mode 100644 index 0000000000..bdb22f8b40 --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/request/future.rs @@ -0,0 +1,98 @@ +use crate::body::UnsyncBoxBody; +use crate::compression_utils::AcceptEncoding; +use crate::BoxError; +use bytes::Buf; +use http::{header, HeaderValue, Response, StatusCode}; +use http_body::Body; +use http_body_util::BodyExt; +use http_body_util::Empty; +use pin_project_lite::pin_project; +use std::future::Future; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +pin_project! 
{ + #[derive(Debug)] + /// Response future of [`RequestDecompression`] + pub struct RequestDecompressionFuture + where + F: Future, E>>, + B: Body + { + #[pin] + kind: Kind, + } +} + +pin_project! { + #[derive(Debug)] + #[project = StateProj] + enum Kind + where + F: Future, E>>, + B: Body + { + Inner { + #[pin] + fut: F + }, + Unsupported { + #[pin] + accept: AcceptEncoding + }, + } +} + +impl RequestDecompressionFuture +where + F: Future, E>>, + B: Body, +{ + #[must_use] + pub(super) fn unsupported_encoding(accept: AcceptEncoding) -> Self { + Self { + kind: Kind::Unsupported { accept }, + } + } + + #[must_use] + pub(super) fn inner(fut: F) -> Self { + Self { + kind: Kind::Inner { fut }, + } + } +} + +impl Future for RequestDecompressionFuture +where + F: Future, E>>, + B: Body + Send + 'static, + B::Data: Buf + 'static, + B::Error: Into + 'static, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().kind.project() { + StateProj::Inner { fut } => fut.poll(cx).map_ok(|res| { + res.map(|body| UnsyncBoxBody::new(body.map_err(Into::into).boxed_unsync())) + }), + StateProj::Unsupported { accept } => { + let res = Response::builder() + .header( + header::ACCEPT_ENCODING, + accept + .to_header_value() + .unwrap_or(HeaderValue::from_static("identity")), + ) + .status(StatusCode::UNSUPPORTED_MEDIA_TYPE) + .body(UnsyncBoxBody::new( + Empty::new().map_err(Into::into).boxed_unsync(), + )) + .unwrap(); + Poll::Ready(Ok(res)) + } + } + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/request/layer.rs b/.cargo-vendor/tower-http/src/decompression/request/layer.rs new file mode 100644 index 0000000000..71200960ed --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/request/layer.rs @@ -0,0 +1,105 @@ +use super::service::RequestDecompression; +use crate::compression_utils::AcceptEncoding; +use tower_layer::Layer; + +/// Decompresses request bodies and calls its underlying service. 
+/// +/// Transparently decompresses request bodies based on the `Content-Encoding` header. +/// When the encoding in the `Content-Encoding` header is not accepted an `Unsupported Media Type` +/// status code will be returned with the accepted encodings in the `Accept-Encoding` header. +/// +/// Enabling pass-through of unaccepted encodings will not return an `Unsupported Media Type`. But +/// will call the underlying service with the unmodified request if the encoding is not supported. +/// This is disabled by default. +/// +/// See the [module docs](crate::decompression) for more details. +#[derive(Debug, Default, Clone)] +pub struct RequestDecompressionLayer { + accept: AcceptEncoding, + pass_through_unaccepted: bool, +} + +impl Layer for RequestDecompressionLayer { + type Service = RequestDecompression; + + fn layer(&self, service: S) -> Self::Service { + RequestDecompression { + inner: service, + accept: self.accept, + pass_through_unaccepted: self.pass_through_unaccepted, + } + } +} + +impl RequestDecompressionLayer { + /// Creates a new `RequestDecompressionLayer`. + pub fn new() -> Self { + Default::default() + } + + /// Sets whether to support gzip encoding. + #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to support Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to support Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to support Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables support for gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. 
+ pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables support for Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables support for Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables support for Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } + + /// Sets whether to pass through the request even when the encoding is not supported. + pub fn pass_through_unaccepted(mut self, enable: bool) -> Self { + self.pass_through_unaccepted = enable; + self + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/request/mod.rs b/.cargo-vendor/tower-http/src/decompression/request/mod.rs new file mode 100644 index 0000000000..da3d94093c --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/request/mod.rs @@ -0,0 +1,90 @@ +pub(super) mod future; +pub(super) mod layer; +pub(super) mod service; + +#[cfg(test)] +mod tests { + use super::service::RequestDecompression; + use crate::decompression::DecompressionBody; + use crate::test_helpers::Body; + use flate2::{write::GzEncoder, Compression}; + use http::{header, Request, Response, StatusCode}; + use http_body_util::BodyExt; + use std::{convert::Infallible, io::Write}; + use tower::{service_fn, Service, ServiceExt}; + + #[tokio::test] + async fn decompress_accepted_encoding() { + let req = request_gzip(); + let mut svc = RequestDecompression::new(service_fn(assert_request_is_decompressed)); + let _ = svc.ready().await.unwrap().call(req).await.unwrap(); + } + + #[tokio::test] + async fn support_unencoded_body() { + let req = 
Request::builder().body(Body::from("Hello?")).unwrap(); + let mut svc = RequestDecompression::new(service_fn(assert_request_is_decompressed)); + let _ = svc.ready().await.unwrap().call(req).await.unwrap(); + } + + #[tokio::test] + async fn unaccepted_content_encoding_returns_unsupported_media_type() { + let req = request_gzip(); + let mut svc = RequestDecompression::new(service_fn(should_not_be_called)).gzip(false); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + assert_eq!(StatusCode::UNSUPPORTED_MEDIA_TYPE, res.status()); + } + + #[tokio::test] + async fn pass_through_unsupported_encoding_when_enabled() { + let req = request_gzip(); + let mut svc = RequestDecompression::new(service_fn(assert_request_is_passed_through)) + .pass_through_unaccepted(true) + .gzip(false); + let _ = svc.ready().await.unwrap().call(req).await.unwrap(); + } + + async fn assert_request_is_decompressed( + req: Request>, + ) -> Result, Infallible> { + let (parts, mut body) = req.into_parts(); + let body = read_body(&mut body).await; + + assert_eq!(body, b"Hello?"); + assert!(!parts.headers.contains_key(header::CONTENT_ENCODING)); + + Ok(Response::new(Body::from("Hello, World!"))) + } + + async fn assert_request_is_passed_through( + req: Request>, + ) -> Result, Infallible> { + let (parts, mut body) = req.into_parts(); + let body = read_body(&mut body).await; + + assert_ne!(body, b"Hello?"); + assert!(parts.headers.contains_key(header::CONTENT_ENCODING)); + + Ok(Response::new(Body::empty())) + } + + async fn should_not_be_called( + _: Request>, + ) -> Result, Infallible> { + panic!("Inner service should not be called"); + } + + fn request_gzip() -> Request { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(b"Hello?").unwrap(); + let body = encoder.finish().unwrap(); + Request::builder() + .header(header::CONTENT_ENCODING, "gzip") + .body(Body::from(body)) + .unwrap() + } + + async fn read_body(body: &mut DecompressionBody) -> Vec 
{ + body.collect().await.unwrap().to_bytes().to_vec() + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/request/service.rs b/.cargo-vendor/tower-http/src/decompression/request/service.rs new file mode 100644 index 0000000000..663436e5d1 --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/request/service.rs @@ -0,0 +1,198 @@ +use super::future::RequestDecompressionFuture as ResponseFuture; +use super::layer::RequestDecompressionLayer; +use crate::body::UnsyncBoxBody; +use crate::compression_utils::CompressionLevel; +use crate::{ + compression_utils::AcceptEncoding, decompression::body::BodyInner, + decompression::DecompressionBody, BoxError, +}; +use bytes::Buf; +use http::{header, Request, Response}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +#[cfg(any( + feature = "decompression-gzip", + feature = "decompression-deflate", + feature = "decompression-br", + feature = "decompression-zstd", +))] +use crate::content_encoding::SupportedEncodings; + +/// Decompresses request bodies and calls its underlying service. +/// +/// Transparently decompresses request bodies based on the `Content-Encoding` header. +/// When the encoding in the `Content-Encoding` header is not accepted an `Unsupported Media Type` +/// status code will be returned with the accepted encodings in the `Accept-Encoding` header. +/// +/// Enabling pass-through of unaccepted encodings will not return an `Unsupported Media Type` but +/// will call the underlying service with the unmodified request if the encoding is not supported. +/// This is disabled by default. +/// +/// See the [module docs](crate::decompression) for more details. 
+#[derive(Debug, Clone)] +pub struct RequestDecompression { + pub(super) inner: S, + pub(super) accept: AcceptEncoding, + pub(super) pass_through_unaccepted: bool, +} + +impl Service> for RequestDecompression +where + S: Service>, Response = Response>, + ReqBody: Body, + ResBody: Body + Send + 'static, + ::Error: Into, + D: Buf + 'static, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let (mut parts, body) = req.into_parts(); + + let body = + if let header::Entry::Occupied(entry) = parts.headers.entry(header::CONTENT_ENCODING) { + match entry.get().as_bytes() { + #[cfg(feature = "decompression-gzip")] + b"gzip" if self.accept.gzip() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::gzip(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + #[cfg(feature = "decompression-deflate")] + b"deflate" if self.accept.deflate() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::deflate(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + #[cfg(feature = "decompression-br")] + b"br" if self.accept.br() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::brotli(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + #[cfg(feature = "decompression-zstd")] + b"zstd" if self.accept.zstd() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::zstd(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + b"identity" => BodyInner::identity(body), + _ if self.pass_through_unaccepted => BodyInner::identity(body), + _ => return ResponseFuture::unsupported_encoding(self.accept), + } + } else { + BodyInner::identity(body) + }; + 
let body = DecompressionBody::new(body); + let req = Request::from_parts(parts, body); + ResponseFuture::inner(self.inner.call(req)) + } +} + +impl RequestDecompression { + /// Creates a new `RequestDecompression` wrapping the `service`. + pub fn new(service: S) -> Self { + Self { + inner: service, + accept: AcceptEncoding::default(), + pass_through_unaccepted: false, + } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `RequestDecompression` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> RequestDecompressionLayer { + RequestDecompressionLayer::new() + } + + /// Passes through the request even when the encoding is not supported. + /// + /// By default pass-through is disabled. + pub fn pass_through_unaccepted(mut self, enabled: bool) -> Self { + self.pass_through_unaccepted = enabled; + self + } + + /// Sets whether to support gzip encoding. + #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to support Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to support Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to support Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables support for gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables support for Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. 
+ pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables support for Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables support for Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } +} diff --git a/.cargo-vendor/tower-http/src/decompression/service.rs b/.cargo-vendor/tower-http/src/decompression/service.rs new file mode 100644 index 0000000000..50e8ead5fd --- /dev/null +++ b/.cargo-vendor/tower-http/src/decompression/service.rs @@ -0,0 +1,127 @@ +use super::{DecompressionBody, DecompressionLayer, ResponseFuture}; +use crate::compression_utils::AcceptEncoding; +use http::{ + header::{self, ACCEPT_ENCODING}, + Request, Response, +}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +/// Decompresses response bodies of the underlying service. +/// +/// This adds the `Accept-Encoding` header to requests and transparently decompresses response +/// bodies based on the `Content-Encoding` header. +/// +/// See the [module docs](crate::decompression) for more details. +#[derive(Debug, Clone)] +pub struct Decompression { + pub(crate) inner: S, + pub(crate) accept: AcceptEncoding, +} + +impl Decompression { + /// Creates a new `Decompression` wrapping the `service`. + pub fn new(service: S) -> Self { + Self { + inner: service, + accept: AcceptEncoding::default(), + } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `Decompression` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> DecompressionLayer { + DecompressionLayer::new() + } + + /// Sets whether to request the gzip encoding. 
+ #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to request the Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to request the Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to request the Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. 
+ pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } +} + +impl Service> for Decompression +where + S: Service, Response = Response>, + ResBody: Body, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + if let header::Entry::Vacant(entry) = req.headers_mut().entry(ACCEPT_ENCODING) { + if let Some(accept) = self.accept.to_header_value() { + entry.insert(accept); + } + } + + ResponseFuture { + inner: self.inner.call(req), + accept: self.accept, + } + } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/mod.rs b/.cargo-vendor/tower-http/src/follow_redirect/mod.rs new file mode 100644 index 0000000000..516fabf739 --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/mod.rs @@ -0,0 +1,462 @@ +//! Middleware for following redirections. +//! +//! # Overview +//! +//! The [`FollowRedirect`] middleware retries requests with the inner [`Service`] to follow HTTP +//! redirections. +//! +//! The middleware tries to clone the original [`Request`] when making a redirected request. +//! However, since [`Extensions`][http::Extensions] are `!Clone`, any extensions set by outer +//! middleware will be discarded. Also, the request body cannot always be cloned. When the +//! original body is known to be empty by [`Body::size_hint`], the middleware uses `Default` +//! implementation of the body type to create a new request body. If you know that the body can be +//! cloned in some way, you can tell the middleware to clone it by configuring a [`policy`]. +//! +//! # Examples +//! +//! ## Basic usage +//! +//! ``` +//! use http::{Request, Response}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use tower::{Service, ServiceBuilder, ServiceExt}; +//! use tower_http::follow_redirect::{FollowRedirectLayer, RequestUri}; +//! +//! 
# #[tokio::main] +//! # async fn main() -> Result<(), std::convert::Infallible> { +//! # let http_client = tower::service_fn(|req: Request<_>| async move { +//! # let dest = "https://www.rust-lang.org/"; +//! # let mut res = http::Response::builder(); +//! # if req.uri() != dest { +//! # res = res +//! # .status(http::StatusCode::MOVED_PERMANENTLY) +//! # .header(http::header::LOCATION, dest); +//! # } +//! # Ok::<_, std::convert::Infallible>(res.body(Full::::default()).unwrap()) +//! # }); +//! let mut client = ServiceBuilder::new() +//! .layer(FollowRedirectLayer::new()) +//! .service(http_client); +//! +//! let request = Request::builder() +//! .uri("https://rust-lang.org/") +//! .body(Full::::default()) +//! .unwrap(); +//! +//! let response = client.ready().await?.call(request).await?; +//! // Get the final request URI. +//! assert_eq!(response.extensions().get::().unwrap().0, "https://www.rust-lang.org/"); +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Customizing the `Policy` +//! +//! You can use a [`Policy`] value to customize how the middleware handles redirections. +//! +//! ``` +//! use http::{Request, Response}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::{Service, ServiceBuilder, ServiceExt}; +//! use tower_http::follow_redirect::{ +//! policy::{self, PolicyExt}, +//! FollowRedirectLayer, +//! }; +//! +//! #[derive(Debug)] +//! enum MyError { +//! TooManyRedirects, +//! Other(tower::BoxError), +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), MyError> { +//! # let http_client = +//! # tower::service_fn(|_: Request>| async { Ok(Response::new(Full::::default())) }); +//! let policy = policy::Limited::new(10) // Set the maximum number of redirections to 10. +//! // Return an error when the limit was reached. +//! .or::<_, (), _>(policy::redirect_fn(|_| Err(MyError::TooManyRedirects))) +//! // Do not follow cross-origin redirections, and return the redirection responses as-is. +//! 
.and::<_, (), _>(policy::SameOrigin::new()); +//! +//! let mut client = ServiceBuilder::new() +//! .layer(FollowRedirectLayer::with_policy(policy)) +//! .map_err(MyError::Other) +//! .service(http_client); +//! +//! // ... +//! # let _ = client.ready().await?.call(Request::default()).await?; +//! # Ok(()) +//! # } +//! ``` + +pub mod policy; + +use self::policy::{Action, Attempt, Policy, Standard}; +use futures_util::future::Either; +use http::{ + header::LOCATION, HeaderMap, HeaderValue, Method, Request, Response, StatusCode, Uri, Version, +}; +use http_body::Body; +use iri_string::types::{UriAbsoluteString, UriReferenceStr}; +use pin_project_lite::pin_project; +use std::{ + convert::TryFrom, + future::Future, + mem, + pin::Pin, + str, + task::{ready, Context, Poll}, +}; +use tower::util::Oneshot; +use tower_layer::Layer; +use tower_service::Service; + +/// [`Layer`] for retrying requests with a [`Service`] to follow redirection responses. +/// +/// See the [module docs](self) for more details. +#[derive(Clone, Copy, Debug, Default)] +pub struct FollowRedirectLayer

{ + policy: P, +} + +impl FollowRedirectLayer { + /// Create a new [`FollowRedirectLayer`] with a [`Standard`] redirection policy. + pub fn new() -> Self { + Self::default() + } +} + +impl

FollowRedirectLayer

{ + /// Create a new [`FollowRedirectLayer`] with the given redirection [`Policy`]. + pub fn with_policy(policy: P) -> Self { + FollowRedirectLayer { policy } + } +} + +impl Layer for FollowRedirectLayer

+where + S: Clone, + P: Clone, +{ + type Service = FollowRedirect; + + fn layer(&self, inner: S) -> Self::Service { + FollowRedirect::with_policy(inner, self.policy.clone()) + } +} + +/// Middleware that retries requests with a [`Service`] to follow redirection responses. +/// +/// See the [module docs](self) for more details. +#[derive(Clone, Copy, Debug)] +pub struct FollowRedirect { + inner: S, + policy: P, +} + +impl FollowRedirect { + /// Create a new [`FollowRedirect`] with a [`Standard`] redirection policy. + pub fn new(inner: S) -> Self { + Self::with_policy(inner, Standard::default()) + } + + /// Returns a new [`Layer`] that wraps services with a `FollowRedirect` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> FollowRedirectLayer { + FollowRedirectLayer::new() + } +} + +impl FollowRedirect +where + P: Clone, +{ + /// Create a new [`FollowRedirect`] with the given redirection [`Policy`]. + pub fn with_policy(inner: S, policy: P) -> Self { + FollowRedirect { inner, policy } + } + + /// Returns a new [`Layer`] that wraps services with a `FollowRedirect` middleware + /// with the given redirection [`Policy`]. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer_with_policy(policy: P) -> FollowRedirectLayer

{ + FollowRedirectLayer::with_policy(policy) + } + + define_inner_service_accessors!(); +} + +impl Service> for FollowRedirect +where + S: Service, Response = Response> + Clone, + ReqBody: Body + Default, + P: Policy + Clone, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + let service = self.inner.clone(); + let mut service = mem::replace(&mut self.inner, service); + let mut policy = self.policy.clone(); + let mut body = BodyRepr::None; + body.try_clone_from(req.body(), &policy); + policy.on_request(&mut req); + ResponseFuture { + method: req.method().clone(), + uri: req.uri().clone(), + version: req.version(), + headers: req.headers().clone(), + body, + future: Either::Left(service.call(req)), + service, + policy, + } + } +} + +pin_project! { + /// Response future for [`FollowRedirect`]. + #[derive(Debug)] + pub struct ResponseFuture + where + S: Service>, + { + #[pin] + future: Either>>, + service: S, + policy: P, + method: Method, + uri: Uri, + version: Version, + headers: HeaderMap, + body: BodyRepr, + } +} + +impl Future for ResponseFuture +where + S: Service, Response = Response> + Clone, + ReqBody: Body + Default, + P: Policy, +{ + type Output = Result, S::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + let mut res = ready!(this.future.as_mut().poll(cx)?); + res.extensions_mut().insert(RequestUri(this.uri.clone())); + + match res.status() { + StatusCode::MOVED_PERMANENTLY | StatusCode::FOUND => { + // User agents MAY change the request method from POST to GET + // (RFC 7231 section 6.4.2. and 6.4.3.). 
+ if *this.method == Method::POST { + *this.method = Method::GET; + *this.body = BodyRepr::Empty; + } + } + StatusCode::SEE_OTHER => { + // A user agent can perform a GET or HEAD request (RFC 7231 section 6.4.4.). + if *this.method != Method::HEAD { + *this.method = Method::GET; + } + *this.body = BodyRepr::Empty; + } + StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => {} + _ => return Poll::Ready(Ok(res)), + }; + + let body = if let Some(body) = this.body.take() { + body + } else { + return Poll::Ready(Ok(res)); + }; + + let location = res + .headers() + .get(&LOCATION) + .and_then(|loc| resolve_uri(str::from_utf8(loc.as_bytes()).ok()?, this.uri)); + let location = if let Some(loc) = location { + loc + } else { + return Poll::Ready(Ok(res)); + }; + + let attempt = Attempt { + status: res.status(), + location: &location, + previous: this.uri, + }; + match this.policy.redirect(&attempt)? { + Action::Follow => { + *this.uri = location; + this.body.try_clone_from(&body, &this.policy); + + let mut req = Request::new(body); + *req.uri_mut() = this.uri.clone(); + *req.method_mut() = this.method.clone(); + *req.version_mut() = *this.version; + *req.headers_mut() = this.headers.clone(); + this.policy.on_request(&mut req); + this.future + .set(Either::Right(Oneshot::new(this.service.clone(), req))); + + cx.waker().wake_by_ref(); + Poll::Pending + } + Action::Stop => Poll::Ready(Ok(res)), + } + } +} + +/// Response [`Extensions`][http::Extensions] value that represents the effective request URI of +/// a response returned by a [`FollowRedirect`] middleware. +/// +/// The value differs from the original request's effective URI if the middleware has followed +/// redirections. 
+#[derive(Clone)] +pub struct RequestUri(pub Uri); + +#[derive(Debug)] +enum BodyRepr { + Some(B), + Empty, + None, +} + +impl BodyRepr +where + B: Body + Default, +{ + fn take(&mut self) -> Option { + match mem::replace(self, BodyRepr::None) { + BodyRepr::Some(body) => Some(body), + BodyRepr::Empty => { + *self = BodyRepr::Empty; + Some(B::default()) + } + BodyRepr::None => None, + } + } + + fn try_clone_from(&mut self, body: &B, policy: &P) + where + P: Policy, + { + match self { + BodyRepr::Some(_) | BodyRepr::Empty => {} + BodyRepr::None => { + if let Some(body) = clone_body(policy, body) { + *self = BodyRepr::Some(body); + } + } + } + } +} + +fn clone_body(policy: &P, body: &B) -> Option +where + P: Policy, + B: Body + Default, +{ + if body.size_hint().exact() == Some(0) { + Some(B::default()) + } else { + policy.clone_body(body) + } +} + +/// Try to resolve a URI reference `relative` against a base URI `base`. +fn resolve_uri(relative: &str, base: &Uri) -> Option { + let relative = UriReferenceStr::new(relative).ok()?; + let base = UriAbsoluteString::try_from(base.to_string()).ok()?; + let uri = relative.resolve_against(&base).to_string(); + Uri::try_from(uri).ok() +} + +#[cfg(test)] +mod tests { + use super::{policy::*, *}; + use crate::test_helpers::Body; + use http::header::LOCATION; + use std::convert::Infallible; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn follows() { + let svc = ServiceBuilder::new() + .layer(FollowRedirectLayer::with_policy(Action::Follow)) + .buffer(1) + .service_fn(handle); + let req = Request::builder() + .uri("http://example.com/42") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(*res.body(), 0); + assert_eq!( + res.extensions().get::().unwrap().0, + "http://example.com/0" + ); + } + + #[tokio::test] + async fn stops() { + let svc = ServiceBuilder::new() + .layer(FollowRedirectLayer::with_policy(Action::Stop)) + .buffer(1) + .service_fn(handle); + let req = 
Request::builder() + .uri("http://example.com/42") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(*res.body(), 42); + assert_eq!( + res.extensions().get::().unwrap().0, + "http://example.com/42" + ); + } + + #[tokio::test] + async fn limited() { + let svc = ServiceBuilder::new() + .layer(FollowRedirectLayer::with_policy(Limited::new(10))) + .buffer(1) + .service_fn(handle); + let req = Request::builder() + .uri("http://example.com/42") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(*res.body(), 42 - 10); + assert_eq!( + res.extensions().get::().unwrap().0, + "http://example.com/32" + ); + } + + /// A server with an endpoint `GET /{n}` which redirects to `/{n-1}` unless `n` equals zero, + /// returning `n` as the response body. + async fn handle(req: Request) -> Result, Infallible> { + let n: u64 = req.uri().path()[1..].parse().unwrap(); + let mut res = Response::builder(); + if n > 0 { + res = res + .status(StatusCode::MOVED_PERMANENTLY) + .header(LOCATION, format!("/{}", n - 1)); + } + Ok::<_, Infallible>(res.body(n).unwrap()) + } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/and.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/and.rs new file mode 100644 index 0000000000..69d2b7da4a --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/and.rs @@ -0,0 +1,118 @@ +use super::{Action, Attempt, Policy}; +use http::Request; + +/// A redirection [`Policy`] that combines the results of two `Policy`s. +/// +/// See [`PolicyExt::and`][super::PolicyExt::and] for more details. 
+#[derive(Clone, Copy, Debug, Default)] +pub struct And { + a: A, + b: B, +} + +impl And { + pub(crate) fn new(a: A, b: B) -> Self + where + A: Policy, + B: Policy, + { + And { a, b } + } +} + +impl Policy for And +where + A: Policy, + B: Policy, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + match self.a.redirect(attempt) { + Ok(Action::Follow) => self.b.redirect(attempt), + a => a, + } + } + + fn on_request(&mut self, request: &mut Request) { + self.a.on_request(request); + self.b.on_request(request); + } + + fn clone_body(&self, body: &Bd) -> Option { + self.a.clone_body(body).or_else(|| self.b.clone_body(body)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::Uri; + + struct Taint

{ + policy: P, + used: bool, + } + + impl

Taint

{ + fn new(policy: P) -> Self { + Taint { + policy, + used: false, + } + } + } + + impl Policy for Taint

+ where + P: Policy, + { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + self.used = true; + self.policy.redirect(attempt) + } + } + + #[test] + fn redirect() { + let attempt = Attempt { + status: Default::default(), + location: &Uri::from_static("*"), + previous: &Uri::from_static("*"), + }; + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Follow); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(b.used); + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Follow); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(!b.used); // short-circuiting + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Stop); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(b.used); + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Stop); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(!b.used); + } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/clone_body_fn.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/clone_body_fn.rs new file mode 100644 index 0000000000..d7d7cb7c7f --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/clone_body_fn.rs @@ -0,0 +1,42 @@ +use super::{Action, Attempt, Policy}; +use std::fmt; + +/// A redirection [`Policy`] created from a closure. +/// +/// See [`clone_body_fn`] for more details. 
+#[derive(Clone, Copy)] +pub struct CloneBodyFn { + f: F, +} + +impl fmt::Debug for CloneBodyFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("CloneBodyFn") + .field("f", &std::any::type_name::()) + .finish() + } +} + +impl Policy for CloneBodyFn +where + F: Fn(&B) -> Option, +{ + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + Ok(Action::Follow) + } + + fn clone_body(&self, body: &B) -> Option { + (self.f)(body) + } +} + +/// Create a new redirection [`Policy`] from a closure `F: Fn(&B) -> Option`. +/// +/// [`clone_body`][Policy::clone_body] method of the returned `Policy` delegates to the wrapped +/// closure and [`redirect`][Policy::redirect] method always returns [`Action::Follow`]. +pub fn clone_body_fn(f: F) -> CloneBodyFn +where + F: Fn(&B) -> Option, +{ + CloneBodyFn { f } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/filter_credentials.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/filter_credentials.rs new file mode 100644 index 0000000000..fea80f1198 --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/filter_credentials.rs @@ -0,0 +1,161 @@ +use super::{eq_origin, Action, Attempt, Policy}; +use http::{ + header::{self, HeaderName}, + Request, +}; + +/// A redirection [`Policy`] that removes credentials from requests in redirections. +#[derive(Clone, Debug)] +pub struct FilterCredentials { + block_cross_origin: bool, + block_any: bool, + remove_blocklisted: bool, + remove_all: bool, + blocked: bool, +} + +const BLOCKLIST: &[HeaderName] = &[ + header::AUTHORIZATION, + header::COOKIE, + header::PROXY_AUTHORIZATION, +]; + +impl FilterCredentials { + /// Create a new [`FilterCredentials`] that removes blocklisted request headers in cross-origin + /// redirections. 
+ pub fn new() -> Self { + FilterCredentials { + block_cross_origin: true, + block_any: false, + remove_blocklisted: true, + remove_all: false, + blocked: false, + } + } + + /// Configure `self` to mark cross-origin redirections as "blocked". + pub fn block_cross_origin(mut self, enable: bool) -> Self { + self.block_cross_origin = enable; + self + } + + /// Configure `self` to mark every redirection as "blocked". + pub fn block_any(mut self) -> Self { + self.block_any = true; + self + } + + /// Configure `self` to mark no redirections as "blocked". + pub fn block_none(mut self) -> Self { + self.block_any = false; + self.block_cross_origin(false) + } + + /// Configure `self` to remove blocklisted headers in "blocked" redirections. + /// + /// The blocklist includes the following headers: + /// + /// - `Authorization` + /// - `Cookie` + /// - `Proxy-Authorization` + pub fn remove_blocklisted(mut self, enable: bool) -> Self { + self.remove_blocklisted = enable; + self + } + + /// Configure `self` to remove all headers in "blocked" redirections. + pub fn remove_all(mut self) -> Self { + self.remove_all = true; + self + } + + /// Configure `self` to remove no headers in "blocked" redirections. 
+ pub fn remove_none(mut self) -> Self { + self.remove_all = false; + self.remove_blocklisted(false) + } +} + +impl Default for FilterCredentials { + fn default() -> Self { + Self::new() + } +} + +impl Policy for FilterCredentials { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + self.blocked = self.block_any + || (self.block_cross_origin && !eq_origin(attempt.previous(), attempt.location())); + Ok(Action::Follow) + } + + fn on_request(&mut self, request: &mut Request) { + if self.blocked { + let headers = request.headers_mut(); + if self.remove_all { + headers.clear(); + } else if self.remove_blocklisted { + for key in BLOCKLIST { + headers.remove(key); + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::Uri; + + #[test] + fn works() { + let mut policy = FilterCredentials::default(); + + let initial = Uri::from_static("http://example.com/old"); + let same_origin = Uri::from_static("http://example.com/new"); + let cross_origin = Uri::from_static("https://example.com/new"); + + let mut request = Request::builder() + .uri(initial) + .header(header::COOKIE, "42") + .body(()) + .unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + assert!(request.headers().contains_key(header::COOKIE)); + + let attempt = Attempt { + status: Default::default(), + location: &same_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + + let mut request = Request::builder() + .uri(same_origin) + .header(header::COOKIE, "42") + .body(()) + .unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + assert!(request.headers().contains_key(header::COOKIE)); + + let attempt = Attempt { + status: Default::default(), + location: &cross_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + + let mut request = Request::builder() + .uri(cross_origin) + .header(header::COOKIE, "42") 
+ .body(()) + .unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + assert!(!request.headers().contains_key(header::COOKIE)); + } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/limited.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/limited.rs new file mode 100644 index 0000000000..a81b0d7924 --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/limited.rs @@ -0,0 +1,74 @@ +use super::{Action, Attempt, Policy}; + +/// A redirection [`Policy`] that limits the number of successive redirections. +#[derive(Clone, Copy, Debug)] +pub struct Limited { + remaining: usize, +} + +impl Limited { + /// Create a new [`Limited`] with a limit of `max` redirections. + pub fn new(max: usize) -> Self { + Limited { remaining: max } + } +} + +impl Default for Limited { + /// Returns the default [`Limited`] with a limit of `20` redirections. + fn default() -> Self { + // This is the (default) limit of Firefox and the Fetch API. + // https://hg.mozilla.org/mozilla-central/file/6264f13d54a1caa4f5b60303617a819efd91b8ee/modules/libpref/init/all.js#l1371 + // https://fetch.spec.whatwg.org/#http-redirect-fetch + Limited::new(20) + } +} + +impl Policy for Limited { + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + if self.remaining > 0 { + self.remaining -= 1; + Ok(Action::Follow) + } else { + Ok(Action::Stop) + } + } +} + +#[cfg(test)] +mod tests { + use http::{Request, Uri}; + + use super::*; + + #[test] + fn works() { + let uri = Uri::from_static("https://example.com/"); + let mut policy = Limited::new(2); + + for _ in 0..2 { + let mut request = Request::builder().uri(uri.clone()).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &uri, + previous: &uri, + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + } + + let mut request = 
Request::builder().uri(uri.clone()).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &uri, + previous: &uri, + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/mod.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/mod.rs new file mode 100644 index 0000000000..8e5d39ce04 --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/mod.rs @@ -0,0 +1,316 @@ +//! Tools for customizing the behavior of a [`FollowRedirect`][super::FollowRedirect] middleware. + +mod and; +mod clone_body_fn; +mod filter_credentials; +mod limited; +mod or; +mod redirect_fn; +mod same_origin; + +pub use self::{ + and::And, + clone_body_fn::{clone_body_fn, CloneBodyFn}, + filter_credentials::FilterCredentials, + limited::Limited, + or::Or, + redirect_fn::{redirect_fn, RedirectFn}, + same_origin::SameOrigin, +}; + +use http::{uri::Scheme, Request, StatusCode, Uri}; + +/// Trait for the policy on handling redirection responses. +/// +/// # Example +/// +/// Detecting a cyclic redirection: +/// +/// ``` +/// use http::{Request, Uri}; +/// use std::collections::HashSet; +/// use tower_http::follow_redirect::policy::{Action, Attempt, Policy}; +/// +/// #[derive(Clone)] +/// pub struct DetectCycle { +/// uris: HashSet, +/// } +/// +/// impl Policy for DetectCycle { +/// fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { +/// if self.uris.contains(attempt.location()) { +/// Ok(Action::Stop) +/// } else { +/// self.uris.insert(attempt.previous().clone()); +/// Ok(Action::Follow) +/// } +/// } +/// } +/// ``` +pub trait Policy { + /// Invoked when the service received a response with a redirection status code (`3xx`). + /// + /// This method returns an [`Action`] which indicates whether the service should follow + /// the redirection. 
+ fn redirect(&mut self, attempt: &Attempt<'_>) -> Result; + + /// Invoked right before the service makes a request, regardless of whether it is redirected + /// or not. + /// + /// This can for example be used to remove sensitive headers from the request + /// or prepare the request in other ways. + /// + /// The default implementation does nothing. + fn on_request(&mut self, _request: &mut Request) {} + + /// Try to clone a request body before the service makes a redirected request. + /// + /// If the request body cannot be cloned, return `None`. + /// + /// This is not invoked when [`B::size_hint`][http_body::Body::size_hint] returns zero, + /// in which case `B::default()` will be used to create a new request body. + /// + /// The default implementation returns `None`. + fn clone_body(&self, _body: &B) -> Option { + None + } +} + +impl Policy for &mut P +where + P: Policy + ?Sized, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + (**self).redirect(attempt) + } + + fn on_request(&mut self, request: &mut Request) { + (**self).on_request(request) + } + + fn clone_body(&self, body: &B) -> Option { + (**self).clone_body(body) + } +} + +impl Policy for Box

+where + P: Policy + ?Sized, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + (**self).redirect(attempt) + } + + fn on_request(&mut self, request: &mut Request) { + (**self).on_request(request) + } + + fn clone_body(&self, body: &B) -> Option { + (**self).clone_body(body) + } +} + +/// An extension trait for `Policy` that provides additional adapters. +pub trait PolicyExt { + /// Create a new `Policy` that returns [`Action::Follow`] only if `self` and `other` return + /// `Action::Follow`. + /// + /// [`clone_body`][Policy::clone_body] method of the returned `Policy` tries to clone the body + /// with both policies. + /// + /// # Example + /// + /// ``` + /// use bytes::Bytes; + /// use http_body_util::Full; + /// use tower_http::follow_redirect::policy::{self, clone_body_fn, Limited, PolicyExt}; + /// + /// enum MyBody { + /// Bytes(Bytes), + /// Full(Full), + /// } + /// + /// let policy = Limited::default().and::<_, _, ()>(clone_body_fn(|body| { + /// if let MyBody::Bytes(buf) = body { + /// Some(MyBody::Bytes(buf.clone())) + /// } else { + /// None + /// } + /// })); + /// ``` + fn and(self, other: P) -> And + where + Self: Policy + Sized, + P: Policy; + + /// Create a new `Policy` that returns [`Action::Follow`] if either `self` or `other` returns + /// `Action::Follow`. + /// + /// [`clone_body`][Policy::clone_body] method of the returned `Policy` tries to clone the body + /// with both policies. + /// + /// # Example + /// + /// ``` + /// use tower_http::follow_redirect::policy::{self, Action, Limited, PolicyExt}; + /// + /// #[derive(Clone)] + /// enum MyError { + /// TooManyRedirects, + /// // ... 
+ /// } + /// + /// let policy = Limited::default().or::<_, (), _>(Err(MyError::TooManyRedirects)); + /// ``` + fn or(self, other: P) -> Or + where + Self: Policy + Sized, + P: Policy; +} + +impl PolicyExt for T +where + T: ?Sized, +{ + fn and(self, other: P) -> And + where + Self: Policy + Sized, + P: Policy, + { + And::new(self, other) + } + + fn or(self, other: P) -> Or + where + Self: Policy + Sized, + P: Policy, + { + Or::new(self, other) + } +} + +/// A redirection [`Policy`] with a reasonable set of standard behavior. +/// +/// This policy limits the number of successive redirections ([`Limited`]) +/// and removes credentials from requests in cross-origin redirections ([`FilterCredentials`]). +pub type Standard = And; + +/// A type that holds information on a redirection attempt. +pub struct Attempt<'a> { + pub(crate) status: StatusCode, + pub(crate) location: &'a Uri, + pub(crate) previous: &'a Uri, +} + +impl<'a> Attempt<'a> { + /// Returns the redirection response. + pub fn status(&self) -> StatusCode { + self.status + } + + /// Returns the destination URI of the redirection. + pub fn location(&self) -> &'a Uri { + self.location + } + + /// Returns the URI of the original request. + pub fn previous(&self) -> &'a Uri { + self.previous + } +} + +/// A value returned by [`Policy::redirect`] which indicates the action +/// [`FollowRedirect`][super::FollowRedirect] should take for a redirection response. +#[derive(Clone, Copy, Debug)] +pub enum Action { + /// Follow the redirection. + Follow, + /// Do not follow the redirection, and return the redirection response as-is. + Stop, +} + +impl Action { + /// Returns `true` if the `Action` is a `Follow` value. + pub fn is_follow(&self) -> bool { + if let Action::Follow = self { + true + } else { + false + } + } + + /// Returns `true` if the `Action` is a `Stop` value. 
+ pub fn is_stop(&self) -> bool { + if let Action::Stop = self { + true + } else { + false + } + } +} + +impl Policy for Action { + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + Ok(*self) + } +} + +impl Policy for Result +where + E: Clone, +{ + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + self.clone() + } +} + +/// Compares the origins of two URIs as per RFC 6454 sections 4. through 5. +fn eq_origin(lhs: &Uri, rhs: &Uri) -> bool { + let default_port = match (lhs.scheme(), rhs.scheme()) { + (Some(l), Some(r)) if l == r => { + if l == &Scheme::HTTP { + 80 + } else if l == &Scheme::HTTPS { + 443 + } else { + return false; + } + } + _ => return false, + }; + match (lhs.host(), rhs.host()) { + (Some(l), Some(r)) if l == r => {} + _ => return false, + } + lhs.port_u16().unwrap_or(default_port) == rhs.port_u16().unwrap_or(default_port) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn eq_origin_works() { + assert!(eq_origin( + &Uri::from_static("https://example.com/1"), + &Uri::from_static("https://example.com/2") + )); + assert!(eq_origin( + &Uri::from_static("https://example.com:443/"), + &Uri::from_static("https://example.com/") + )); + assert!(eq_origin( + &Uri::from_static("https://example.com/"), + &Uri::from_static("https://user@example.com/") + )); + + assert!(!eq_origin( + &Uri::from_static("https://example.com/"), + &Uri::from_static("https://www.example.com/") + )); + assert!(!eq_origin( + &Uri::from_static("https://example.com/"), + &Uri::from_static("http://example.com/") + )); + } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/or.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/or.rs new file mode 100644 index 0000000000..858e57bd87 --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/or.rs @@ -0,0 +1,118 @@ +use super::{Action, Attempt, Policy}; +use http::Request; + +/// A redirection [`Policy`] that combines the results of two `Policy`s. 
+/// +/// See [`PolicyExt::or`][super::PolicyExt::or] for more details. +#[derive(Clone, Copy, Debug, Default)] +pub struct Or { + a: A, + b: B, +} + +impl Or { + pub(crate) fn new(a: A, b: B) -> Self + where + A: Policy, + B: Policy, + { + Or { a, b } + } +} + +impl Policy for Or +where + A: Policy, + B: Policy, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + match self.a.redirect(attempt) { + Ok(Action::Stop) | Err(_) => self.b.redirect(attempt), + a => a, + } + } + + fn on_request(&mut self, request: &mut Request) { + self.a.on_request(request); + self.b.on_request(request); + } + + fn clone_body(&self, body: &Bd) -> Option { + self.a.clone_body(body).or_else(|| self.b.clone_body(body)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::Uri; + + struct Taint

{ + policy: P, + used: bool, + } + + impl

Taint

{ + fn new(policy: P) -> Self { + Taint { + policy, + used: false, + } + } + } + + impl Policy for Taint

+ where + P: Policy, + { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + self.used = true; + self.policy.redirect(attempt) + } + } + + #[test] + fn redirect() { + let attempt = Attempt { + status: Default::default(), + location: &Uri::from_static("*"), + previous: &Uri::from_static("*"), + }; + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Follow); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(!b.used); // short-circuiting + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Follow); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(b.used); + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Stop); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(!b.used); + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Stop); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(b.used); + } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/redirect_fn.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/redirect_fn.rs new file mode 100644 index 0000000000..a16593aca9 --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/redirect_fn.rs @@ -0,0 +1,39 @@ +use super::{Action, Attempt, Policy}; +use std::fmt; + +/// A redirection [`Policy`] created from a closure. +/// +/// See [`redirect_fn`] for more details. 
+#[derive(Clone, Copy)] +pub struct RedirectFn { + f: F, +} + +impl fmt::Debug for RedirectFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RedirectFn") + .field("f", &std::any::type_name::()) + .finish() + } +} + +impl Policy for RedirectFn +where + F: FnMut(&Attempt<'_>) -> Result, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + (self.f)(attempt) + } +} + +/// Create a new redirection [`Policy`] from a closure +/// `F: FnMut(&Attempt<'_>) -> Result`. +/// +/// [`redirect`][Policy::redirect] method of the returned `Policy` delegates to +/// the wrapped closure. +pub fn redirect_fn(f: F) -> RedirectFn +where + F: FnMut(&Attempt<'_>) -> Result, +{ + RedirectFn { f } +} diff --git a/.cargo-vendor/tower-http/src/follow_redirect/policy/same_origin.rs b/.cargo-vendor/tower-http/src/follow_redirect/policy/same_origin.rs new file mode 100644 index 0000000000..cf7b7b1935 --- /dev/null +++ b/.cargo-vendor/tower-http/src/follow_redirect/policy/same_origin.rs @@ -0,0 +1,70 @@ +use super::{eq_origin, Action, Attempt, Policy}; +use std::fmt; + +/// A redirection [`Policy`] that stops cross-origin redirections. +#[derive(Clone, Copy, Default)] +pub struct SameOrigin { + _priv: (), +} + +impl SameOrigin { + /// Create a new [`SameOrigin`]. 
+ pub fn new() -> Self { + Self::default() + } +} + +impl fmt::Debug for SameOrigin { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SameOrigin").finish() + } +} + +impl Policy for SameOrigin { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + if eq_origin(attempt.previous(), attempt.location()) { + Ok(Action::Follow) + } else { + Ok(Action::Stop) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::{Request, Uri}; + + #[test] + fn works() { + let mut policy = SameOrigin::default(); + + let initial = Uri::from_static("http://example.com/old"); + let same_origin = Uri::from_static("http://example.com/new"); + let cross_origin = Uri::from_static("https://example.com/new"); + + let mut request = Request::builder().uri(initial).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &same_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + + let mut request = Request::builder().uri(same_origin).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &cross_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + } +} diff --git a/.cargo-vendor/tower-http/src/lib.rs b/.cargo-vendor/tower-http/src/lib.rs new file mode 100644 index 0000000000..4c731e8363 --- /dev/null +++ b/.cargo-vendor/tower-http/src/lib.rs @@ -0,0 +1,368 @@ +//! `async fn(HttpRequest) -> Result` +//! +//! # Overview +//! +//! tower-http is a library that provides HTTP-specific middleware and utilities built on top of +//! [tower]. +//! +//! All middleware uses the [http] and [http-body] crates as the HTTP abstractions. That means +//! 
they're compatible with any library or framework that also uses those crates, such as +//! [hyper], [tonic], and [warp]. +//! +//! # Example server +//! +//! This example shows how to apply middleware from tower-http to a [`Service`] and then run +//! that service using [hyper]. +//! +//! ```rust,no_run +//! use tower_http::{ +//! add_extension::AddExtensionLayer, +//! compression::CompressionLayer, +//! propagate_header::PropagateHeaderLayer, +//! sensitive_headers::SetSensitiveRequestHeadersLayer, +//! set_header::SetResponseHeaderLayer, +//! trace::TraceLayer, +//! validate_request::ValidateRequestHeaderLayer, +//! }; +//! use tower::{ServiceBuilder, service_fn, BoxError}; +//! use http::{Request, Response, header::{HeaderName, CONTENT_TYPE, AUTHORIZATION}}; +//! use std::{sync::Arc, net::SocketAddr, convert::Infallible, iter::once}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! # struct DatabaseConnectionPool; +//! # impl DatabaseConnectionPool { +//! # fn new() -> DatabaseConnectionPool { DatabaseConnectionPool } +//! # } +//! # fn content_length_from_response(_: &http::Response) -> Option { None } +//! # async fn update_in_flight_requests_metric(count: usize) {} +//! +//! // Our request handler. This is where we would implement the application logic +//! // for responding to HTTP requests... +//! async fn handler(request: Request>) -> Result>, BoxError> { +//! // ... +//! # todo!() +//! } +//! +//! // Shared state across all request handlers --- in this case, a pool of database connections. +//! struct State { +//! pool: DatabaseConnectionPool, +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! // Construct the shared state. +//! let state = State { +//! pool: DatabaseConnectionPool::new(), +//! }; +//! +//! // Use tower's `ServiceBuilder` API to build a stack of tower middleware +//! // wrapping our request handler. +//! let service = ServiceBuilder::new() +//! 
// Mark the `Authorization` request header as sensitive so it doesn't show in logs +//! .layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) +//! // High level logging of requests and responses +//! .layer(TraceLayer::new_for_http()) +//! // Share an `Arc` with all requests +//! .layer(AddExtensionLayer::new(Arc::new(state))) +//! // Compress responses +//! .layer(CompressionLayer::new()) +//! // Propagate `X-Request-Id`s from requests to responses +//! .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) +//! // If the response has a known size set the `Content-Length` header +//! .layer(SetResponseHeaderLayer::overriding(CONTENT_TYPE, content_length_from_response)) +//! // Authorize requests using a token +//! .layer(ValidateRequestHeaderLayer::bearer("passwordlol")) +//! // Accept only application/json, application/* and */* in a request's ACCEPT header +//! .layer(ValidateRequestHeaderLayer::accept("application/json")) +//! // Wrap a `Service` in our middleware stack +//! .service_fn(handler); +//! # let mut service = service; +//! # tower::Service::call(&mut service, Request::new(Full::default())); +//! } +//! ``` +//! +//! Keep in mind that while this example uses [hyper], tower-http supports any HTTP +//! client/server implementation that uses the [http] and [http-body] crates. +//! +//! # Example client +//! +//! tower-http middleware can also be applied to HTTP clients: +//! +//! ```rust,no_run +//! use tower_http::{ +//! decompression::DecompressionLayer, +//! set_header::SetRequestHeaderLayer, +//! trace::TraceLayer, +//! classify::StatusInRangeAsFailures, +//! }; +//! use tower::{ServiceBuilder, Service, ServiceExt}; +//! use hyper_util::{rt::TokioExecutor, client::legacy::Client}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use http::{Request, HeaderValue, header::USER_AGENT}; +//! +//! #[tokio::main] +//! async fn main() { +//! let client = Client::builder(TokioExecutor::new()).build_http(); +//! 
let mut client = ServiceBuilder::new() +//! // Add tracing and consider server errors and client +//! // errors as failures. +//! .layer(TraceLayer::new( +//! StatusInRangeAsFailures::new(400..=599).into_make_classifier() +//! )) +//! // Set a `User-Agent` header on all requests. +//! .layer(SetRequestHeaderLayer::overriding( +//! USER_AGENT, +//! HeaderValue::from_static("tower-http demo") +//! )) +//! // Decompress response bodies +//! .layer(DecompressionLayer::new()) +//! // Wrap a `Client` in our middleware stack. +//! // This is possible because `Client` implements +//! // `tower::Service`. +//! .service(client); +//! +//! // Make a request +//! let request = Request::builder() +//! .uri("http://example.com") +//! .body(Full::::default()) +//! .unwrap(); +//! +//! let response = client +//! .ready() +//! .await +//! .unwrap() +//! .call(request) +//! .await +//! .unwrap(); +//! } +//! ``` +//! +//! # Feature Flags +//! +//! All middleware are disabled by default and can be enabled using [cargo features]. +//! +//! For example, to enable the [`Trace`] middleware, add the "trace" feature flag in +//! your `Cargo.toml`: +//! +//! ```toml +//! tower-http = { version = "0.1", features = ["trace"] } +//! ``` +//! +//! You can use `"full"` to enable everything: +//! +//! ```toml +//! tower-http = { version = "0.1", features = ["full"] } +//! ``` +//! +//! # Getting Help +//! +//! If you're new to tower its [guides] might help. In the tower-http repo we also have a [number +//! of examples][examples] showing how to put everything together. You're also welcome to ask in +//! the [`#tower` Discord channel][chat] or open an [issue] with your question. +//! +//! [tower]: https://crates.io/crates/tower +//! [http]: https://crates.io/crates/http +//! [http-body]: https://crates.io/crates/http-body +//! [hyper]: https://crates.io/crates/hyper +//! [guides]: https://github.com/tower-rs/tower/tree/master/guides +//! [tonic]: https://crates.io/crates/tonic +//! 
[warp]: https://crates.io/crates/warp +//! [cargo features]: https://doc.rust-lang.org/cargo/reference/features.html +//! [`AddExtension`]: crate::add_extension::AddExtension +//! [`Service`]: https://docs.rs/tower/latest/tower/trait.Service.html +//! [chat]: https://discord.gg/tokio +//! [issue]: https://github.com/tower-rs/tower-http/issues/new +//! [`Trace`]: crate::trace::Trace +//! [examples]: https://github.com/tower-rs/tower-http/tree/master/examples + +#![warn( + clippy::all, + clippy::dbg_macro, + clippy::todo, + clippy::empty_enum, + clippy::enum_glob_use, + clippy::mem_forget, + clippy::unused_self, + clippy::filter_map_next, + clippy::needless_continue, + clippy::needless_borrow, + clippy::match_wildcard_for_single_variants, + clippy::if_let_mutex, + clippy::mismatched_target_os, + clippy::await_holding_lock, + clippy::match_on_vec_items, + clippy::imprecise_flops, + clippy::suboptimal_flops, + clippy::lossy_float_literal, + clippy::rest_pat_in_fully_bound_structs, + clippy::fn_params_excessive_bools, + clippy::exit, + clippy::inefficient_to_string, + clippy::linkedlist, + clippy::macro_use_imports, + clippy::option_option, + clippy::verbose_file_reads, + clippy::unnested_or_patterns, + rust_2018_idioms, + future_incompatible, + nonstandard_style, + missing_docs +)] +#![deny(unreachable_pub)] +#![allow( + elided_lifetimes_in_paths, + // TODO: Remove this once the MSRV bumps to 1.42.0 or above. 
+ clippy::match_like_matches_macro, + clippy::type_complexity +)] +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![cfg_attr(test, allow(clippy::float_cmp))] + +#[macro_use] +pub(crate) mod macros; + +#[cfg(test)] +mod test_helpers; + +#[cfg(feature = "auth")] +pub mod auth; + +#[cfg(feature = "set-header")] +pub mod set_header; + +#[cfg(feature = "propagate-header")] +pub mod propagate_header; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", +))] +pub mod compression; + +#[cfg(feature = "add-extension")] +pub mod add_extension; + +#[cfg(feature = "sensitive-headers")] +pub mod sensitive_headers; + +#[cfg(any( + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", +))] +pub mod decompression; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + feature = "fs" // Used for serving precompressed static files as well +))] +mod content_encoding; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", +))] +mod compression_utils; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", +))] +pub use compression_utils::CompressionLevel; + +#[cfg(feature = "map-response-body")] +pub mod map_response_body; + 
+#[cfg(feature = "map-request-body")] +pub mod map_request_body; + +#[cfg(feature = "trace")] +pub mod trace; + +#[cfg(feature = "follow-redirect")] +pub mod follow_redirect; + +#[cfg(feature = "limit")] +pub mod limit; + +#[cfg(feature = "metrics")] +pub mod metrics; + +#[cfg(feature = "cors")] +pub mod cors; + +#[cfg(feature = "request-id")] +pub mod request_id; + +#[cfg(feature = "catch-panic")] +pub mod catch_panic; + +#[cfg(feature = "set-status")] +pub mod set_status; + +#[cfg(feature = "timeout")] +pub mod timeout; + +#[cfg(feature = "normalize-path")] +pub mod normalize_path; + +pub mod classify; +pub mod services; + +#[cfg(feature = "util")] +mod builder; + +#[cfg(feature = "util")] +#[doc(inline)] +pub use self::builder::ServiceBuilderExt; + +#[cfg(feature = "validate-request")] +pub mod validate_request; + +pub mod body; + +/// The latency unit used to report latencies by middleware. +#[non_exhaustive] +#[derive(Copy, Clone, Debug)] +pub enum LatencyUnit { + /// Use seconds. + Seconds, + /// Use milliseconds. + Millis, + /// Use microseconds. + Micros, + /// Use nanoseconds. + Nanos, +} + +/// Alias for a type-erased error type. +pub type BoxError = Box; + +mod sealed { + #[allow(unreachable_pub)] + pub trait Sealed {} +} diff --git a/.cargo-vendor/tower-http/src/limit/body.rs b/.cargo-vendor/tower-http/src/limit/body.rs new file mode 100644 index 0000000000..4e540f8bea --- /dev/null +++ b/.cargo-vendor/tower-http/src/limit/body.rs @@ -0,0 +1,96 @@ +use bytes::Bytes; +use http::{HeaderValue, Response, StatusCode}; +use http_body::{Body, SizeHint}; +use http_body_util::Full; +use pin_project_lite::pin_project; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Response body for [`RequestBodyLimit`]. 
+ /// + /// [`RequestBodyLimit`]: super::RequestBodyLimit + pub struct ResponseBody { + #[pin] + inner: ResponseBodyInner + } +} + +impl ResponseBody { + fn payload_too_large() -> Self { + Self { + inner: ResponseBodyInner::PayloadTooLarge { + body: Full::from(BODY), + }, + } + } + + pub(crate) fn new(body: B) -> Self { + Self { + inner: ResponseBodyInner::Body { body }, + } + } +} + +pin_project! { + #[project = BodyProj] + enum ResponseBodyInner { + PayloadTooLarge { + #[pin] + body: Full, + }, + Body { + #[pin] + body: B + } + } +} + +impl Body for ResponseBody +where + B: Body, +{ + type Data = Bytes; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project().inner.project() { + BodyProj::PayloadTooLarge { body } => body.poll_frame(cx).map_err(|err| match err {}), + BodyProj::Body { body } => body.poll_frame(cx), + } + } + + fn is_end_stream(&self) -> bool { + match &self.inner { + ResponseBodyInner::PayloadTooLarge { body } => body.is_end_stream(), + ResponseBodyInner::Body { body } => body.is_end_stream(), + } + } + + fn size_hint(&self) -> SizeHint { + match &self.inner { + ResponseBodyInner::PayloadTooLarge { body } => body.size_hint(), + ResponseBodyInner::Body { body } => body.size_hint(), + } + } +} + +const BODY: &[u8] = b"length limit exceeded"; + +pub(crate) fn create_error_response() -> Response> +where + B: Body, +{ + let mut res = Response::new(ResponseBody::payload_too_large()); + *res.status_mut() = StatusCode::PAYLOAD_TOO_LARGE; + + #[allow(clippy::declare_interior_mutable_const)] + const TEXT_PLAIN: HeaderValue = HeaderValue::from_static("text/plain; charset=utf-8"); + res.headers_mut() + .insert(http::header::CONTENT_TYPE, TEXT_PLAIN); + + res +} diff --git a/.cargo-vendor/tower-http/src/limit/future.rs b/.cargo-vendor/tower-http/src/limit/future.rs new file mode 100644 index 0000000000..fd913c75de --- /dev/null +++ 
b/.cargo-vendor/tower-http/src/limit/future.rs @@ -0,0 +1,60 @@ +use super::body::create_error_response; +use super::ResponseBody; +use http::Response; +use http_body::Body; +use pin_project_lite::pin_project; +use std::future::Future; +use std::pin::Pin; +use std::task::{ready, Context, Poll}; + +pin_project! { + /// Response future for [`RequestBodyLimit`]. + /// + /// [`RequestBodyLimit`]: super::RequestBodyLimit + pub struct ResponseFuture { + #[pin] + inner: ResponseFutureInner, + } +} + +impl ResponseFuture { + pub(crate) fn payload_too_large() -> Self { + Self { + inner: ResponseFutureInner::PayloadTooLarge, + } + } + + pub(crate) fn new(future: F) -> Self { + Self { + inner: ResponseFutureInner::Future { future }, + } + } +} + +pin_project! { + #[project = ResFutProj] + enum ResponseFutureInner { + PayloadTooLarge, + Future { + #[pin] + future: F, + } + } +} + +impl Future for ResponseFuture +where + ResBody: Body, + F: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let res = match self.project().inner.project() { + ResFutProj::PayloadTooLarge => create_error_response(), + ResFutProj::Future { future } => ready!(future.poll(cx))?.map(ResponseBody::new), + }; + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo-vendor/tower-http/src/limit/layer.rs b/.cargo-vendor/tower-http/src/limit/layer.rs new file mode 100644 index 0000000000..2dcff71aac --- /dev/null +++ b/.cargo-vendor/tower-http/src/limit/layer.rs @@ -0,0 +1,32 @@ +use super::RequestBodyLimit; +use tower_layer::Layer; + +/// Layer that applies the [`RequestBodyLimit`] middleware that intercepts requests +/// with body lengths greater than the configured limit and converts them into +/// `413 Payload Too Large` responses. +/// +/// See the [module docs](crate::limit) for an example. 
+/// +/// [`RequestBodyLimit`]: super::RequestBodyLimit +#[derive(Clone, Copy, Debug)] +pub struct RequestBodyLimitLayer { + limit: usize, +} + +impl RequestBodyLimitLayer { + /// Create a new `RequestBodyLimitLayer` with the given body length limit. + pub fn new(limit: usize) -> Self { + Self { limit } + } +} + +impl Layer for RequestBodyLimitLayer { + type Service = RequestBodyLimit; + + fn layer(&self, inner: S) -> Self::Service { + RequestBodyLimit { + inner, + limit: self.limit, + } + } +} diff --git a/.cargo-vendor/tower-http/src/limit/mod.rs b/.cargo-vendor/tower-http/src/limit/mod.rs new file mode 100644 index 0000000000..3f2fede357 --- /dev/null +++ b/.cargo-vendor/tower-http/src/limit/mod.rs @@ -0,0 +1,142 @@ +//! Middleware for limiting request bodies. +//! +//! This layer will also intercept requests with a `Content-Length` header +//! larger than the allowable limit and return an immediate error response +//! before reading any of the body. +//! +//! Note that payload length errors can be used by adversaries in an attempt +//! to smuggle requests. When an incoming stream is dropped due to an +//! over-sized payload, servers should close the connection or resynchronize +//! by optimistically consuming some data in an attempt to reach the end of +//! the current HTTP frame. If the incoming stream cannot be resynchronized, +//! then the connection should be closed. If you're using [hyper] this is +//! automatically handled for you. +//! +//! # Examples +//! +//! ## Limiting based on `Content-Length` +//! +//! If a `Content-Length` header is present and indicates a payload that is +//! larger than the acceptable limit, then the underlying service will not +//! be called and a `413 Payload Too Large` response will be generated. +//! +//! ```rust +//! use bytes::Bytes; +//! use std::convert::Infallible; +//! use http::{Request, Response, StatusCode, HeaderValue, header::CONTENT_LENGTH}; +//! use http_body_util::{LengthLimitError}; +//! 
use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::{body::Limited, limit::RequestBodyLimitLayer}; +//! use http_body_util::Full; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>>) -> Result>, Infallible> { +//! panic!("This should not be hit") +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // Limit incoming requests to 4096 bytes. +//! .layer(RequestBodyLimitLayer::new(4096)) +//! .service_fn(handle); +//! +//! // Call the service with a header that indicates the body is too large. +//! let mut request = Request::builder() +//! .header(CONTENT_LENGTH, HeaderValue::from_static("5000")) +//! .body(Full::::default()) +//! .unwrap(); +//! +//! // let response = svc.ready().await?.call(request).await?; +//! let response = svc.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::PAYLOAD_TOO_LARGE); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Limiting without known `Content-Length` +//! +//! If a `Content-Length` header is not present, then the body will be read +//! until the configured limit has been reached. If the payload is larger than +//! the limit, the [`http_body_util::Limited`] body will return an error. This +//! error can be inspected to determine if it is a [`http_body_util::LengthLimitError`] +//! and return an appropriate response in such case. +//! +//! Note that no error will be generated if the body is never read. Similarly, +//! if the body _would be_ to large, but is never consumed beyond the length +//! limit, then no error is generated, and handling of the remaining incoming +//! data stream is left to the server implementation as described above. +//! +//! ```rust +//! # use bytes::Bytes; +//! # use std::convert::Infallible; +//! # use http::{Request, Response, StatusCode}; +//! # use http_body_util::LengthLimitError; +//! # use tower::{Service, ServiceExt, ServiceBuilder, BoxError}; +//! 
# use tower_http::{body::Limited, limit::RequestBodyLimitLayer}; +//! # use http_body_util::Full; +//! # use http_body_util::BodyExt; +//! # +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! async fn handle(req: Request>>) -> Result>, BoxError> { +//! let data = match req.into_body().collect().await { +//! Ok(collected) => collected.to_bytes(), +//! Err(err) => { +//! if let Some(_) = err.downcast_ref::() { +//! let mut resp = Response::new(Full::default()); +//! *resp.status_mut() = StatusCode::PAYLOAD_TOO_LARGE; +//! return Ok(resp); +//! } else { +//! return Err(err); +//! } +//! } +//! }; +//! +//! Ok(Response::new(Full::default())) +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // Limit incoming requests to 4096 bytes. +//! .layer(RequestBodyLimitLayer::new(4096)) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::new(Full::::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::OK); +//! +//! // Call the service with a body that is too large. +//! let request = Request::new(Full::::from(Bytes::from(vec![0u8; 4097]))); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::PAYLOAD_TOO_LARGE); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Limiting without `Content-Length` +//! +//! If enforcement of body size limits is desired without preemptively +//! handling requests with a `Content-Length` header indicating an over-sized +//! request, consider using [`MapRequestBody`] to wrap the request body with +//! [`http_body_util::Limited`] and checking for [`http_body_util::LengthLimitError`] +//! like in the previous example. +//! +//! [`MapRequestBody`]: crate::map_request_body +//! 
[hyper]: https://crates.io/crates/hyper + +mod body; +mod future; +mod layer; +mod service; + +pub use body::ResponseBody; +pub use future::ResponseFuture; +pub use layer::RequestBodyLimitLayer; +pub use service::RequestBodyLimit; diff --git a/.cargo-vendor/tower-http/src/limit/service.rs b/.cargo-vendor/tower-http/src/limit/service.rs new file mode 100644 index 0000000000..fdf65d256d --- /dev/null +++ b/.cargo-vendor/tower-http/src/limit/service.rs @@ -0,0 +1,64 @@ +use super::{RequestBodyLimitLayer, ResponseBody, ResponseFuture}; +use crate::body::Limited; +use http::{Request, Response}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +/// Middleware that intercepts requests with body lengths greater than the +/// configured limit and converts them into `413 Payload Too Large` responses. +/// +/// See the [module docs](crate::limit) for an example. +#[derive(Clone, Copy, Debug)] +pub struct RequestBodyLimit { + pub(crate) inner: S, + pub(crate) limit: usize, +} + +impl RequestBodyLimit { + /// Create a new `RequestBodyLimit` with the given body length limit. + pub fn new(inner: S, limit: usize) -> Self { + Self { inner, limit } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `RequestBodyLimit` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(limit: usize) -> RequestBodyLimitLayer { + RequestBodyLimitLayer::new(limit) + } +} + +impl Service> for RequestBodyLimit +where + ResBody: Body, + S: Service>, Response = Response>, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let content_length = req + .headers() + .get(http::header::CONTENT_LENGTH) + .and_then(|value| value.to_str().ok()?.parse::().ok()); + + let body_limit = match content_length { + Some(len) if len > self.limit => return ResponseFuture::payload_too_large(), + Some(len) => self.limit.min(len), + None => self.limit, + }; + + let req = req.map(|body| Limited::new(http_body_util::Limited::new(body, body_limit))); + + ResponseFuture::new(self.inner.call(req)) + } +} diff --git a/.cargo-vendor/tower-http/src/macros.rs b/.cargo-vendor/tower-http/src/macros.rs new file mode 100644 index 0000000000..f58d34a669 --- /dev/null +++ b/.cargo-vendor/tower-http/src/macros.rs @@ -0,0 +1,105 @@ +#[allow(unused_macros)] +macro_rules! define_inner_service_accessors { + () => { + /// Gets a reference to the underlying service. + pub fn get_ref(&self) -> &S { + &self.inner + } + + /// Gets a mutable reference to the underlying service. + pub fn get_mut(&mut self) -> &mut S { + &mut self.inner + } + + /// Consumes `self`, returning the underlying service. + pub fn into_inner(self) -> S { + self.inner + } + }; +} + +#[allow(unused_macros)] +macro_rules! opaque_body { + ($(#[$m:meta])* pub type $name:ident = $actual:ty;) => { + opaque_body! { + $(#[$m])* pub type $name<> = $actual; + } + }; + + ($(#[$m:meta])* pub type $name:ident<$($param:ident),*> = $actual:ty;) => { + pin_project_lite::pin_project! 
{ + $(#[$m])* + pub struct $name<$($param),*> { + #[pin] + pub(crate) inner: $actual + } + } + + impl<$($param),*> $name<$($param),*> { + pub(crate) fn new(inner: $actual) -> Self { + Self { inner } + } + } + + impl<$($param),*> http_body::Body for $name<$($param),*> { + type Data = <$actual as http_body::Body>::Data; + type Error = <$actual as http_body::Body>::Error; + + #[inline] + fn poll_frame( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll, Self::Error>>> { + self.project().inner.poll_frame(cx) + } + + #[inline] + fn is_end_stream(&self) -> bool { + http_body::Body::is_end_stream(&self.inner) + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + http_body::Body::size_hint(&self.inner) + } + } + }; +} + +#[allow(unused_macros)] +macro_rules! opaque_future { + ($(#[$m:meta])* pub type $name:ident<$($param:ident),+> = $actual:ty;) => { + pin_project_lite::pin_project! { + $(#[$m])* + pub struct $name<$($param),+> { + #[pin] + inner: $actual + } + } + + impl<$($param),+> $name<$($param),+> { + pub(crate) fn new(inner: $actual) -> Self { + Self { + inner + } + } + } + + impl<$($param),+> std::fmt::Debug for $name<$($param),+> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple(stringify!($name)).field(&format_args!("...")).finish() + } + } + + impl<$($param),+> std::future::Future for $name<$($param),+> + where + $actual: std::future::Future, + { + type Output = <$actual as std::future::Future>::Output; + #[inline] + fn poll(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll { + self.project().inner.poll(cx) + } + } + } +} diff --git a/.cargo-vendor/tower-http/src/map_request_body.rs b/.cargo-vendor/tower-http/src/map_request_body.rs new file mode 100644 index 0000000000..dd067e924f --- /dev/null +++ b/.cargo-vendor/tower-http/src/map_request_body.rs @@ -0,0 +1,157 @@ +//! Apply a transformation to the request body. +//! +//! 
# Example +//! +//! ``` +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use http::{Request, Response}; +//! use std::convert::Infallible; +//! use std::{pin::Pin, task::{ready, Context, Poll}}; +//! use tower::{ServiceBuilder, service_fn, ServiceExt, Service}; +//! use tower_http::map_request_body::MapRequestBodyLayer; +//! +//! // A wrapper for a `Full` +//! struct BodyWrapper { +//! inner: Full, +//! } +//! +//! impl BodyWrapper { +//! fn new(inner: Full) -> Self { +//! Self { inner } +//! } +//! } +//! +//! impl http_body::Body for BodyWrapper { +//! // ... +//! # type Data = Bytes; +//! # type Error = tower::BoxError; +//! # fn poll_frame( +//! # self: Pin<&mut Self>, +//! # cx: &mut Context<'_> +//! # ) -> Poll, Self::Error>>> { unimplemented!() } +//! # fn is_end_stream(&self) -> bool { unimplemented!() } +//! # fn size_hint(&self) -> http_body::SizeHint { unimplemented!() } +//! } +//! +//! async fn handle(_: Request) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut svc = ServiceBuilder::new() +//! // Wrap response bodies in `BodyWrapper` +//! .layer(MapRequestBodyLayer::new(BodyWrapper::new)) +//! .service_fn(handle); +//! +//! // Call the service +//! let request = Request::new(Full::default()); +//! +//! svc.ready().await?.call(request).await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use std::{ + fmt, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Apply a transformation to the request body. +/// +/// See the [module docs](crate::map_request_body) for an example. +#[derive(Clone)] +pub struct MapRequestBodyLayer { + f: F, +} + +impl MapRequestBodyLayer { + /// Create a new [`MapRequestBodyLayer`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. 
+ pub fn new(f: F) -> Self { + Self { f } + } +} + +impl Layer for MapRequestBodyLayer +where + F: Clone, +{ + type Service = MapRequestBody; + + fn layer(&self, inner: S) -> Self::Service { + MapRequestBody::new(inner, self.f.clone()) + } +} + +impl fmt::Debug for MapRequestBodyLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapRequestBodyLayer") + .field("f", &std::any::type_name::()) + .finish() + } +} + +/// Apply a transformation to the request body. +/// +/// See the [module docs](crate::map_request_body) for an example. +#[derive(Clone)] +pub struct MapRequestBody { + inner: S, + f: F, +} + +impl MapRequestBody { + /// Create a new [`MapRequestBody`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. + pub fn new(service: S, f: F) -> Self { + Self { inner: service, f } + } + + /// Returns a new [`Layer`] that wraps services with a `MapRequestBodyLayer` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(f: F) -> MapRequestBodyLayer { + MapRequestBodyLayer::new(f) + } + + define_inner_service_accessors!(); +} + +impl Service> for MapRequestBody +where + S: Service, Response = Response>, + F: FnMut(ReqBody) -> NewReqBody, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(&mut self.f); + self.inner.call(req) + } +} + +impl fmt::Debug for MapRequestBody +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapRequestBody") + .field("inner", &self.inner) + .field("f", &std::any::type_name::()) + .finish() + } +} diff --git a/.cargo-vendor/tower-http/src/map_response_body.rs b/.cargo-vendor/tower-http/src/map_response_body.rs new file mode 100644 index 0000000000..5329e5d5ec --- /dev/null +++ 
b/.cargo-vendor/tower-http/src/map_response_body.rs @@ -0,0 +1,185 @@ +//! Apply a transformation to the response body. +//! +//! # Example +//! +//! ``` +//! use bytes::Bytes; +//! use http::{Request, Response}; +//! use http_body_util::Full; +//! use std::convert::Infallible; +//! use std::{pin::Pin, task::{ready, Context, Poll}}; +//! use tower::{ServiceBuilder, service_fn, ServiceExt, Service}; +//! use tower_http::map_response_body::MapResponseBodyLayer; +//! +//! // A wrapper for a `Full` +//! struct BodyWrapper { +//! inner: Full, +//! } +//! +//! impl BodyWrapper { +//! fn new(inner: Full) -> Self { +//! Self { inner } +//! } +//! } +//! +//! impl http_body::Body for BodyWrapper { +//! // ... +//! # type Data = Bytes; +//! # type Error = tower::BoxError; +//! # fn poll_frame( +//! # self: Pin<&mut Self>, +//! # cx: &mut Context<'_> +//! # ) -> Poll, Self::Error>>> { unimplemented!() } +//! # fn is_end_stream(&self) -> bool { unimplemented!() } +//! # fn size_hint(&self) -> http_body::SizeHint { unimplemented!() } +//! } +//! +//! async fn handle(_: Request) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut svc = ServiceBuilder::new() +//! // Wrap response bodies in `BodyWrapper` +//! .layer(MapResponseBodyLayer::new(BodyWrapper::new)) +//! .service_fn(handle); +//! +//! // Call the service +//! let request = Request::new(Full::::from("foobar")); +//! +//! svc.ready().await?.call(request).await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use pin_project_lite::pin_project; +use std::future::Future; +use std::{ + fmt, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Apply a transformation to the response body. +/// +/// See the [module docs](crate::map_response_body) for an example. 
+#[derive(Clone)] +pub struct MapResponseBodyLayer { + f: F, +} + +impl MapResponseBodyLayer { + /// Create a new [`MapResponseBodyLayer`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. + pub fn new(f: F) -> Self { + Self { f } + } +} + +impl Layer for MapResponseBodyLayer +where + F: Clone, +{ + type Service = MapResponseBody; + + fn layer(&self, inner: S) -> Self::Service { + MapResponseBody::new(inner, self.f.clone()) + } +} + +impl fmt::Debug for MapResponseBodyLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapResponseBodyLayer") + .field("f", &std::any::type_name::()) + .finish() + } +} + +/// Apply a transformation to the response body. +/// +/// See the [module docs](crate::map_response_body) for an example. +#[derive(Clone)] +pub struct MapResponseBody { + inner: S, + f: F, +} + +impl MapResponseBody { + /// Create a new [`MapResponseBody`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. + pub fn new(service: S, f: F) -> Self { + Self { inner: service, f } + } + + /// Returns a new [`Layer`] that wraps services with a `MapResponseBodyLayer` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(f: F) -> MapResponseBodyLayer { + MapResponseBodyLayer::new(f) + } + + define_inner_service_accessors!(); +} + +impl Service> for MapResponseBody +where + S: Service, Response = Response>, + F: FnMut(ResBody) -> NewResBody + Clone, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseFuture { + inner: self.inner.call(req), + f: self.f.clone(), + } + } +} + +impl fmt::Debug for MapResponseBody +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapResponseBody") + .field("inner", &self.inner) + .field("f", &std::any::type_name::()) + .finish() + } +} + +pin_project! { + /// Response future for [`MapResponseBody`]. + pub struct ResponseFuture { + #[pin] + inner: Fut, + f: F, + } +} + +impl Future for ResponseFuture +where + Fut: Future, E>>, + F: FnMut(ResBody) -> NewResBody, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let res = ready!(this.inner.poll(cx)?); + Poll::Ready(Ok(res.map(this.f))) + } +} diff --git a/.cargo-vendor/tower-http/src/metrics/in_flight_requests.rs b/.cargo-vendor/tower-http/src/metrics/in_flight_requests.rs new file mode 100644 index 0000000000..dbb5e2ffc2 --- /dev/null +++ b/.cargo-vendor/tower-http/src/metrics/in_flight_requests.rs @@ -0,0 +1,327 @@ +//! Measure the number of in-flight requests. +//! +//! In-flight requests is the number of requests a service is currently processing. The processing +//! of a request starts when it is received by the service (`tower::Service::call` is called) and +//! is considered complete when the response body is consumed, dropped, or an error happens. +//! +//! # Example +//! +//! ``` +//! 
use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::metrics::InFlightRequestsLayer; +//! use http::{Request, Response}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use std::{time::Duration, convert::Infallible}; +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! async fn update_in_flight_requests_metric(count: usize) { +//! // ... +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Create a `Layer` with an associated counter. +//! let (in_flight_requests_layer, counter) = InFlightRequestsLayer::pair(); +//! +//! // Spawn a task that will receive the number of in-flight requests every 10 seconds. +//! tokio::spawn( +//! counter.run_emitter(Duration::from_secs(10), |count| async move { +//! update_in_flight_requests_metric(count).await; +//! }), +//! ); +//! +//! let mut service = ServiceBuilder::new() +//! // Keep track of the number of in-flight requests. This will increment and decrement +//! // `counter` automatically. +//! .layer(in_flight_requests_layer) +//! .service_fn(handle); +//! +//! // Call the service. +//! let response = service +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{ready, Context, Poll}, + time::Duration, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer for applying [`InFlightRequests`] which counts the number of in-flight requests. +/// +/// See the [module docs](crate::metrics::in_flight_requests) for more details. 
+#[derive(Clone, Debug)] +pub struct InFlightRequestsLayer { + counter: InFlightRequestsCounter, +} + +impl InFlightRequestsLayer { + /// Create a new `InFlightRequestsLayer` and its associated counter. + pub fn pair() -> (Self, InFlightRequestsCounter) { + let counter = InFlightRequestsCounter::new(); + let layer = Self::new(counter.clone()); + (layer, counter) + } + + /// Create a new `InFlightRequestsLayer` that will update the given counter. + pub fn new(counter: InFlightRequestsCounter) -> Self { + Self { counter } + } +} + +impl Layer for InFlightRequestsLayer { + type Service = InFlightRequests; + + fn layer(&self, inner: S) -> Self::Service { + InFlightRequests { + inner, + counter: self.counter.clone(), + } + } +} + +/// Middleware that counts the number of in-flight requests. +/// +/// See the [module docs](crate::metrics::in_flight_requests) for more details. +#[derive(Clone, Debug)] +pub struct InFlightRequests { + inner: S, + counter: InFlightRequestsCounter, +} + +impl InFlightRequests { + /// Create a new `InFlightRequests` and its associated counter. + pub fn pair(inner: S) -> (Self, InFlightRequestsCounter) { + let counter = InFlightRequestsCounter::new(); + let service = Self::new(inner, counter.clone()); + (service, counter) + } + + /// Create a new `InFlightRequests` that will update the given counter. + pub fn new(inner: S, counter: InFlightRequestsCounter) -> Self { + Self { inner, counter } + } + + define_inner_service_accessors!(); +} + +/// An atomic counter that keeps track of the number of in-flight requests. +/// +/// This will normally combined with [`InFlightRequestsLayer`] or [`InFlightRequests`] which will +/// update the counter as requests arrive. +#[derive(Debug, Clone, Default)] +pub struct InFlightRequestsCounter { + count: Arc, +} + +impl InFlightRequestsCounter { + /// Create a new `InFlightRequestsCounter`. + pub fn new() -> Self { + Self::default() + } + + /// Get the current number of in-flight requests. 
+ pub fn get(&self) -> usize { + self.count.load(Ordering::Relaxed) + } + + fn increment(&self) -> IncrementGuard { + self.count.fetch_add(1, Ordering::Relaxed); + IncrementGuard { + count: self.count.clone(), + } + } + + /// Run a future every `interval` which receives the current number of in-flight requests. + /// + /// This can be used to send the current count to your metrics system. + /// + /// This function will loop forever so normally it is called with [`tokio::spawn`]: + /// + /// ```rust,no_run + /// use tower_http::metrics::in_flight_requests::InFlightRequestsCounter; + /// use std::time::Duration; + /// + /// let counter = InFlightRequestsCounter::new(); + /// + /// tokio::spawn( + /// counter.run_emitter(Duration::from_secs(10), |count: usize| async move { + /// // Send `count` to metrics system. + /// }), + /// ); + /// ``` + pub async fn run_emitter(mut self, interval: Duration, mut emit: F) + where + F: FnMut(usize) -> Fut + Send + 'static, + Fut: Future + Send, + { + let mut interval = tokio::time::interval(interval); + + loop { + // if all producers have gone away we don't need to emit anymore + match Arc::try_unwrap(self.count) { + Ok(_) => return, + Err(shared_count) => { + self = Self { + count: shared_count, + } + } + } + + interval.tick().await; + emit(self.get()).await; + } + } +} + +struct IncrementGuard { + count: Arc, +} + +impl Drop for IncrementGuard { + fn drop(&mut self) { + self.count.fetch_sub(1, Ordering::Relaxed); + } +} + +impl Service> for InFlightRequests +where + S: Service, Response = Response>, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let guard = self.counter.increment(); + ResponseFuture { + inner: self.inner.call(req), + guard: Some(guard), + } + } +} + +pin_project! { + /// Response future for [`InFlightRequests`]. 
+ pub struct ResponseFuture { + #[pin] + inner: F, + guard: Option, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let response = ready!(this.inner.poll(cx))?; + let guard = this.guard.take().unwrap(); + let response = response.map(move |body| ResponseBody { inner: body, guard }); + + Poll::Ready(Ok(response)) + } +} + +pin_project! { + /// Response body for [`InFlightRequests`]. + pub struct ResponseBody { + #[pin] + inner: B, + guard: IncrementGuard, + } +} + +impl Body for ResponseBody +where + B: Body, +{ + type Data = B::Data; + type Error = B::Error; + + #[inline] + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + self.project().inner.poll_frame(cx) + } + + #[inline] + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::Request; + use tower::{BoxError, ServiceBuilder}; + + #[tokio::test] + async fn basic() { + let (in_flight_requests_layer, counter) = InFlightRequestsLayer::pair(); + + let mut service = ServiceBuilder::new() + .layer(in_flight_requests_layer) + .service_fn(echo); + assert_eq!(counter.get(), 0); + + // driving service to ready shouldn't increment the counter + std::future::poll_fn(|cx| service.poll_ready(cx)) + .await + .unwrap(); + assert_eq!(counter.get(), 0); + + // creating the response future should increment the count + let response_future = service.call(Request::new(Body::empty())); + assert_eq!(counter.get(), 1); + + // count shouldn't decrement until the full body has been comsumed + let response = response_future.await.unwrap(); + assert_eq!(counter.get(), 1); + + let body = response.into_body(); + 
crate::test_helpers::to_bytes(body).await.unwrap(); + assert_eq!(counter.get(), 0); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo-vendor/tower-http/src/metrics/mod.rs b/.cargo-vendor/tower-http/src/metrics/mod.rs new file mode 100644 index 0000000000..317d17b8fe --- /dev/null +++ b/.cargo-vendor/tower-http/src/metrics/mod.rs @@ -0,0 +1,12 @@ +//! Middlewares for adding metrics to services. +//! +//! Supported metrics: +//! +//! - [In-flight requests][]: Measure the number of requests a service is currently processing. +//! +//! [In-flight requests]: in_flight_requests + +pub mod in_flight_requests; + +#[doc(inline)] +pub use self::in_flight_requests::{InFlightRequests, InFlightRequestsLayer}; diff --git a/.cargo-vendor/tower-http/src/normalize_path.rs b/.cargo-vendor/tower-http/src/normalize_path.rs new file mode 100644 index 0000000000..efc7be5254 --- /dev/null +++ b/.cargo-vendor/tower-http/src/normalize_path.rs @@ -0,0 +1,230 @@ +//! Middleware that normalizes paths. +//! +//! Any trailing slashes from request paths will be removed. For example, a request with `/foo/` +//! will be changed to `/foo` before reaching the inner service. +//! +//! # Example +//! +//! ``` +//! use tower_http::normalize_path::NormalizePathLayer; +//! use http::{Request, Response, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use std::{iter::once, convert::Infallible}; +//! use tower::{ServiceBuilder, Service, ServiceExt}; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // `req.uri().path()` will not have trailing slashes +//! # Ok(Response::new(Full::default())) +//! } +//! +//! let mut service = ServiceBuilder::new() +//! // trim trailing slashes from paths +//! .layer(NormalizePathLayer::trim_trailing_slash()) +//! .service_fn(handle); +//! +//! // call the service +//! 
let request = Request::builder() +//! // `handle` will see `/foo` +//! .uri("/foo/") +//! .body(Full::default())?; +//! +//! service.ready().await?.call(request).await?; +//! # +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response, Uri}; +use std::{ + borrow::Cow, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`NormalizePath`] which normalizes paths. +/// +/// See the [module docs](self) for more details. +#[derive(Debug, Copy, Clone)] +pub struct NormalizePathLayer {} + +impl NormalizePathLayer { + /// Create a new [`NormalizePathLayer`]. + /// + /// Any trailing slashes from request paths will be removed. For example, a request with `/foo/` + /// will be changed to `/foo` before reaching the inner service. + pub fn trim_trailing_slash() -> Self { + NormalizePathLayer {} + } +} + +impl Layer for NormalizePathLayer { + type Service = NormalizePath; + + fn layer(&self, inner: S) -> Self::Service { + NormalizePath::trim_trailing_slash(inner) + } +} + +/// Middleware that normalizes paths. +/// +/// See the [module docs](self) for more details. +#[derive(Debug, Copy, Clone)] +pub struct NormalizePath { + inner: S, +} + +impl NormalizePath { + /// Create a new [`NormalizePath`]. + /// + /// Any trailing slashes from request paths will be removed. For example, a request with `/foo/` + /// will be changed to `/foo` before reaching the inner service. 
+ pub fn trim_trailing_slash(inner: S) -> Self { + Self { inner } + } + + define_inner_service_accessors!(); +} + +impl Service> for NormalizePath +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + normalize_trailing_slash(req.uri_mut()); + self.inner.call(req) + } +} + +fn normalize_trailing_slash(uri: &mut Uri) { + if !uri.path().ends_with('/') && !uri.path().starts_with("//") { + return; + } + + let new_path = format!("/{}", uri.path().trim_matches('/')); + + let mut parts = uri.clone().into_parts(); + + let new_path_and_query = if let Some(path_and_query) = &parts.path_and_query { + let new_path_and_query = if let Some(query) = path_and_query.query() { + Cow::Owned(format!("{}?{}", new_path, query)) + } else { + new_path.into() + } + .parse() + .unwrap(); + + Some(new_path_and_query) + } else { + None + }; + + parts.path_and_query = new_path_and_query; + if let Ok(new_uri) = Uri::from_parts(parts) { + *uri = new_uri; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::convert::Infallible; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn works() { + async fn handle(request: Request<()>) -> Result, Infallible> { + Ok(Response::new(request.uri().to_string())) + } + + let mut svc = ServiceBuilder::new() + .layer(NormalizePathLayer::trim_trailing_slash()) + .service_fn(handle); + + let body = svc + .ready() + .await + .unwrap() + .call(Request::builder().uri("/foo/").body(()).unwrap()) + .await + .unwrap() + .into_body(); + + assert_eq!(body, "/foo"); + } + + #[test] + fn is_noop_if_no_trailing_slash() { + let mut uri = "/foo".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/foo"); + } + + #[test] + fn maintains_query() { + let mut uri = 
"/foo/?a=a".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/foo?a=a"); + } + + #[test] + fn removes_multiple_trailing_slashes() { + let mut uri = "/foo////".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/foo"); + } + + #[test] + fn removes_multiple_trailing_slashes_even_with_query() { + let mut uri = "/foo////?a=a".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/foo?a=a"); + } + + #[test] + fn is_noop_on_index() { + let mut uri = "/".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/"); + } + + #[test] + fn removes_multiple_trailing_slashes_on_index() { + let mut uri = "////".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/"); + } + + #[test] + fn removes_multiple_trailing_slashes_on_index_even_with_query() { + let mut uri = "////?a=a".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/?a=a"); + } + + #[test] + fn removes_multiple_preceding_slashes_even_with_query() { + let mut uri = "///foo//?a=a".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/foo?a=a"); + } + + #[test] + fn removes_multiple_preceding_slashes() { + let mut uri = "///foo".parse::().unwrap(); + normalize_trailing_slash(&mut uri); + assert_eq!(uri, "/foo"); + } +} diff --git a/.cargo-vendor/tower-http/src/propagate_header.rs b/.cargo-vendor/tower-http/src/propagate_header.rs new file mode 100644 index 0000000000..6c77ec325c --- /dev/null +++ b/.cargo-vendor/tower-http/src/propagate_header.rs @@ -0,0 +1,154 @@ +//! Propagate a header from the request to the response. +//! +//! # Example +//! +//! ```rust +//! use http::{Request, Response, header::HeaderName}; +//! use std::convert::Infallible; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::propagate_header::PropagateHeaderLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! 
# #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // This will copy `x-request-id` headers from requests onto responses. +//! .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::builder() +//! .header("x-request-id", "1337") +//! .body(Full::default())?; +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["x-request-id"], "1337"); +//! # +//! # Ok(()) +//! # } +//! ``` + +use http::{header::HeaderName, HeaderValue, Request, Response}; +use pin_project_lite::pin_project; +use std::future::Future; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`PropagateHeader`] which propagates headers from requests to responses. +/// +/// If the header is present on the request it'll be applied to the response as well. This could +/// for example be used to propagate headers such as `X-Request-Id`. +/// +/// See the [module docs](crate::propagate_header) for more details. +#[derive(Clone, Debug)] +pub struct PropagateHeaderLayer { + header: HeaderName, +} + +impl PropagateHeaderLayer { + /// Create a new [`PropagateHeaderLayer`]. + pub fn new(header: HeaderName) -> Self { + Self { header } + } +} + +impl Layer for PropagateHeaderLayer { + type Service = PropagateHeader; + + fn layer(&self, inner: S) -> Self::Service { + PropagateHeader { + inner, + header: self.header.clone(), + } + } +} + +/// Middleware that propagates headers from requests to responses. +/// +/// If the header is present on the request it'll be applied to the response as well. This could +/// for example be used to propagate headers such as `X-Request-Id`. 
+/// +/// See the [module docs](crate::propagate_header) for more details. +#[derive(Clone, Debug)] +pub struct PropagateHeader { + inner: S, + header: HeaderName, +} + +impl PropagateHeader { + /// Create a new [`PropagateHeader`] that propagates the given header. + pub fn new(inner: S, header: HeaderName) -> Self { + Self { inner, header } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `PropagateHeader` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(header: HeaderName) -> PropagateHeaderLayer { + PropagateHeaderLayer::new(header) + } +} + +impl Service> for PropagateHeader +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let value = req.headers().get(&self.header).cloned(); + + ResponseFuture { + future: self.inner.call(req), + header_and_value: Some(self.header.clone()).zip(value), + } + } +} + +pin_project! { + /// Response future for [`PropagateHeader`]. + #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + future: F, + header_and_value: Option<(HeaderName, HeaderValue)>, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut res = ready!(this.future.poll(cx)?); + + if let Some((header, value)) = this.header_and_value.take() { + res.headers_mut().insert(header, value); + } + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo-vendor/tower-http/src/request_id.rs b/.cargo-vendor/tower-http/src/request_id.rs new file mode 100644 index 0000000000..1db2d02a0d --- /dev/null +++ b/.cargo-vendor/tower-http/src/request_id.rs @@ -0,0 +1,608 @@ +//! Set and propagate request ids. +//! +//! # Example +//! 
+//! ``` +//! use http::{Request, Response, header::HeaderName}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::request_id::{ +//! SetRequestIdLayer, PropagateRequestIdLayer, MakeRequestId, RequestId, +//! }; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use std::sync::{Arc, atomic::{AtomicU64, Ordering}}; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let handler = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # +//! // A `MakeRequestId` that increments an atomic counter +//! #[derive(Clone, Default)] +//! struct MyMakeRequestId { +//! counter: Arc, +//! } +//! +//! impl MakeRequestId for MyMakeRequestId { +//! fn make_request_id(&mut self, request: &Request) -> Option { +//! let request_id = self.counter +//! .fetch_add(1, Ordering::SeqCst) +//! .to_string() +//! .parse() +//! .unwrap(); +//! +//! Some(RequestId::new(request_id)) +//! } +//! } +//! +//! let x_request_id = HeaderName::from_static("x-request-id"); +//! +//! let mut svc = ServiceBuilder::new() +//! // set `x-request-id` header on all requests +//! .layer(SetRequestIdLayer::new( +//! x_request_id.clone(), +//! MyMakeRequestId::default(), +//! )) +//! // propagate `x-request-id` headers from request to response +//! .layer(PropagateRequestIdLayer::new(x_request_id)) +//! .service(handler); +//! +//! let request = Request::new(Full::default()); +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["x-request-id"], "0"); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Additional convenience methods are available on [`ServiceBuilderExt`]: +//! +//! ``` +//! use tower_http::ServiceBuilderExt; +//! # use http::{Request, Response, header::HeaderName}; +//! # use tower::{Service, ServiceExt, ServiceBuilder}; +//! # use tower_http::request_id::{ +//! 
# SetRequestIdLayer, PropagateRequestIdLayer, MakeRequestId, RequestId, +//! # }; +//! # use bytes::Bytes; +//! # use http_body_util::Full; +//! # use std::sync::{Arc, atomic::{AtomicU64, Ordering}}; +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let handler = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # #[derive(Clone, Default)] +//! # struct MyMakeRequestId { +//! # counter: Arc, +//! # } +//! # impl MakeRequestId for MyMakeRequestId { +//! # fn make_request_id(&mut self, request: &Request) -> Option { +//! # let request_id = self.counter +//! # .fetch_add(1, Ordering::SeqCst) +//! # .to_string() +//! # .parse() +//! # .unwrap(); +//! # Some(RequestId::new(request_id)) +//! # } +//! # } +//! +//! let mut svc = ServiceBuilder::new() +//! .set_x_request_id(MyMakeRequestId::default()) +//! .propagate_x_request_id() +//! .service(handler); +//! +//! let request = Request::new(Full::default()); +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["x-request-id"], "0"); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! See [`SetRequestId`] and [`PropagateRequestId`] for more details. +//! +//! # Using `Trace` +//! +//! To have request ids show up correctly in logs produced by [`Trace`] you must apply the layers +//! in this order: +//! +//! ``` +//! use tower_http::{ +//! ServiceBuilderExt, +//! trace::{TraceLayer, DefaultMakeSpan, DefaultOnResponse}, +//! }; +//! # use http::{Request, Response, header::HeaderName}; +//! # use tower::{Service, ServiceExt, ServiceBuilder}; +//! # use tower_http::request_id::{ +//! # SetRequestIdLayer, PropagateRequestIdLayer, MakeRequestId, RequestId, +//! # }; +//! # use http_body_util::Full; +//! # use bytes::Bytes; +//! # use std::sync::{Arc, atomic::{AtomicU64, Ordering}}; +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! 
# let handler = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # #[derive(Clone, Default)] +//! # struct MyMakeRequestId { +//! # counter: Arc, +//! # } +//! # impl MakeRequestId for MyMakeRequestId { +//! # fn make_request_id(&mut self, request: &Request) -> Option { +//! # let request_id = self.counter +//! # .fetch_add(1, Ordering::SeqCst) +//! # .to_string() +//! # .parse() +//! # .unwrap(); +//! # Some(RequestId::new(request_id)) +//! # } +//! # } +//! +//! let svc = ServiceBuilder::new() +//! // make sure to set request ids before the request reaches `TraceLayer` +//! .set_x_request_id(MyMakeRequestId::default()) +//! // log requests and responses +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with(DefaultMakeSpan::new().include_headers(true)) +//! .on_response(DefaultOnResponse::new().include_headers(true)) +//! ) +//! // propagate the header to the response before the response reaches `TraceLayer` +//! .propagate_x_request_id() +//! .service(handler); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! # Doesn't override existing headers +//! +//! [`SetRequestId`] and [`PropagateRequestId`] wont override request ids if its already present on +//! requests or responses. Among other things, this allows other middleware to conditionally set +//! request ids and use the middleware in this module as a fallback. +//! +//! [`ServiceBuilderExt`]: crate::ServiceBuilderExt +//! [`Uuid`]: https://crates.io/crates/uuid +//! [`Trace`]: crate::trace::Trace + +use http::{ + header::{HeaderName, HeaderValue}, + Request, Response, +}; +use pin_project_lite::pin_project; +use std::task::{ready, Context, Poll}; +use std::{future::Future, pin::Pin}; +use tower_layer::Layer; +use tower_service::Service; +use uuid::Uuid; + +pub(crate) const X_REQUEST_ID: &str = "x-request-id"; + +/// Trait for producing [`RequestId`]s. +/// +/// Used by [`SetRequestId`]. 
+pub trait MakeRequestId { + /// Try and produce a [`RequestId`] from the request. + fn make_request_id(&mut self, request: &Request) -> Option; +} + +/// An identifier for a request. +#[derive(Debug, Clone)] +pub struct RequestId(HeaderValue); + +impl RequestId { + /// Create a new `RequestId` from a [`HeaderValue`]. + pub fn new(header_value: HeaderValue) -> Self { + Self(header_value) + } + + /// Gets a reference to the underlying [`HeaderValue`]. + pub fn header_value(&self) -> &HeaderValue { + &self.0 + } + + /// Consumes `self`, returning the underlying [`HeaderValue`]. + pub fn into_header_value(self) -> HeaderValue { + self.0 + } +} + +impl From for RequestId { + fn from(value: HeaderValue) -> Self { + Self::new(value) + } +} + +/// Set request id headers and extensions on requests. +/// +/// This layer applies the [`SetRequestId`] middleware. +/// +/// See the [module docs](self) and [`SetRequestId`] for more details. +#[derive(Debug, Clone)] +pub struct SetRequestIdLayer { + header_name: HeaderName, + make_request_id: M, +} + +impl SetRequestIdLayer { + /// Create a new `SetRequestIdLayer`. + pub fn new(header_name: HeaderName, make_request_id: M) -> Self + where + M: MakeRequestId, + { + SetRequestIdLayer { + header_name, + make_request_id, + } + } + + /// Create a new `SetRequestIdLayer` that uses `x-request-id` as the header name. + pub fn x_request_id(make_request_id: M) -> Self + where + M: MakeRequestId, + { + SetRequestIdLayer::new(HeaderName::from_static(X_REQUEST_ID), make_request_id) + } +} + +impl Layer for SetRequestIdLayer +where + M: Clone + MakeRequestId, +{ + type Service = SetRequestId; + + fn layer(&self, inner: S) -> Self::Service { + SetRequestId::new( + inner, + self.header_name.clone(), + self.make_request_id.clone(), + ) + } +} + +/// Set request id headers and extensions on requests. +/// +/// See the [module docs](self) for an example. 
+/// +/// If [`MakeRequestId::make_request_id`] returns `Some(_)` and the request doesn't already have a +/// header with the same name, then the header will be inserted. +/// +/// Additionally [`RequestId`] will be inserted into [`Request::extensions`] so other +/// services can access it. +#[derive(Debug, Clone)] +pub struct SetRequestId { + inner: S, + header_name: HeaderName, + make_request_id: M, +} + +impl SetRequestId { + /// Create a new `SetRequestId`. + pub fn new(inner: S, header_name: HeaderName, make_request_id: M) -> Self + where + M: MakeRequestId, + { + Self { + inner, + header_name, + make_request_id, + } + } + + /// Create a new `SetRequestId` that uses `x-request-id` as the header name. + pub fn x_request_id(inner: S, make_request_id: M) -> Self + where + M: MakeRequestId, + { + Self::new( + inner, + HeaderName::from_static(X_REQUEST_ID), + make_request_id, + ) + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetRequestId` middleware. 
+ pub fn layer(header_name: HeaderName, make_request_id: M) -> SetRequestIdLayer + where + M: MakeRequestId, + { + SetRequestIdLayer::new(header_name, make_request_id) + } +} + +impl Service> for SetRequestId +where + S: Service, Response = Response>, + M: MakeRequestId, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + if let Some(request_id) = req.headers().get(&self.header_name) { + if req.extensions().get::().is_none() { + let request_id = request_id.clone(); + req.extensions_mut().insert(RequestId::new(request_id)); + } + } else if let Some(request_id) = self.make_request_id.make_request_id(&req) { + req.extensions_mut().insert(request_id.clone()); + req.headers_mut() + .insert(self.header_name.clone(), request_id.0); + } + + self.inner.call(req) + } +} + +/// Propagate request ids from requests to responses. +/// +/// This layer applies the [`PropagateRequestId`] middleware. +/// +/// See the [module docs](self) and [`PropagateRequestId`] for more details. +#[derive(Debug, Clone)] +pub struct PropagateRequestIdLayer { + header_name: HeaderName, +} + +impl PropagateRequestIdLayer { + /// Create a new `PropagateRequestIdLayer`. + pub fn new(header_name: HeaderName) -> Self { + PropagateRequestIdLayer { header_name } + } + + /// Create a new `PropagateRequestIdLayer` that uses `x-request-id` as the header name. + pub fn x_request_id() -> Self { + Self::new(HeaderName::from_static(X_REQUEST_ID)) + } +} + +impl Layer for PropagateRequestIdLayer { + type Service = PropagateRequestId; + + fn layer(&self, inner: S) -> Self::Service { + PropagateRequestId::new(inner, self.header_name.clone()) + } +} + +/// Propagate request ids from requests to responses. +/// +/// See the [module docs](self) for an example. 
+/// +/// If the request contains a matching header that header will be applied to responses. If a +/// [`RequestId`] extension is also present it will be propagated as well. +#[derive(Debug, Clone)] +pub struct PropagateRequestId { + inner: S, + header_name: HeaderName, +} + +impl PropagateRequestId { + /// Create a new `PropagateRequestId`. + pub fn new(inner: S, header_name: HeaderName) -> Self { + Self { inner, header_name } + } + + /// Create a new `PropagateRequestId` that uses `x-request-id` as the header name. + pub fn x_request_id(inner: S) -> Self { + Self::new(inner, HeaderName::from_static(X_REQUEST_ID)) + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `PropagateRequestId` middleware. + pub fn layer(header_name: HeaderName) -> PropagateRequestIdLayer { + PropagateRequestIdLayer::new(header_name) + } +} + +impl Service> for PropagateRequestId +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = PropagateRequestIdResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let request_id = req + .headers() + .get(&self.header_name) + .cloned() + .map(RequestId::new); + + PropagateRequestIdResponseFuture { + inner: self.inner.call(req), + header_name: self.header_name.clone(), + request_id, + } + } +} + +pin_project! { + /// Response future for [`PropagateRequestId`]. 
+ pub struct PropagateRequestIdResponseFuture { + #[pin] + inner: F, + header_name: HeaderName, + request_id: Option, + } +} + +impl Future for PropagateRequestIdResponseFuture +where + F: Future, E>>, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut response = ready!(this.inner.poll(cx))?; + + if let Some(current_id) = response.headers().get(&*this.header_name) { + if response.extensions().get::().is_none() { + let current_id = current_id.clone(); + response.extensions_mut().insert(RequestId::new(current_id)); + } + } else if let Some(request_id) = this.request_id.take() { + response + .headers_mut() + .insert(this.header_name.clone(), request_id.0.clone()); + response.extensions_mut().insert(request_id); + } + + Poll::Ready(Ok(response)) + } +} + +/// A [`MakeRequestId`] that generates `UUID`s. +#[derive(Clone, Copy, Default)] +pub struct MakeRequestUuid; + +impl MakeRequestId for MakeRequestUuid { + fn make_request_id(&mut self, _request: &Request) -> Option { + let request_id = Uuid::new_v4().to_string().parse().unwrap(); + Some(RequestId::new(request_id)) + } +} + +#[cfg(test)] +mod tests { + use crate::test_helpers::Body; + use crate::ServiceBuilderExt as _; + use http::Response; + use std::{ + convert::Infallible, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + }; + use tower::{ServiceBuilder, ServiceExt}; + + #[allow(unused_imports)] + use super::*; + + #[tokio::test] + async fn basic() { + let svc = ServiceBuilder::new() + .set_x_request_id(Counter::default()) + .propagate_x_request_id() + .service_fn(handler); + + // header on response + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "0"); + + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "1"); + 
+ // doesn't override if header is already there + let req = Request::builder() + .header("x-request-id", "foo") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "foo"); + + // extension propagated + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.extensions().get::().unwrap().0, "2"); + } + + #[tokio::test] + async fn other_middleware_setting_request_id() { + let svc = ServiceBuilder::new() + .override_request_header( + HeaderName::from_static("x-request-id"), + HeaderValue::from_str("foo").unwrap(), + ) + .set_x_request_id(Counter::default()) + .map_request(|request: Request<_>| { + // `set_x_request_id` should set the extension if its missing + assert_eq!(request.extensions().get::().unwrap().0, "foo"); + request + }) + .propagate_x_request_id() + .service_fn(handler); + + let req = Request::builder() + .header( + "x-request-id", + "this-will-be-overriden-by-override_request_header-middleware", + ) + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "foo"); + assert_eq!(res.extensions().get::().unwrap().0, "foo"); + } + + #[tokio::test] + async fn other_middleware_setting_request_id_on_response() { + let svc = ServiceBuilder::new() + .set_x_request_id(Counter::default()) + .propagate_x_request_id() + .override_response_header( + HeaderName::from_static("x-request-id"), + HeaderValue::from_str("foo").unwrap(), + ) + .service_fn(handler); + + let req = Request::builder() + .header("x-request-id", "foo") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "foo"); + assert_eq!(res.extensions().get::().unwrap().0, "foo"); + } + + #[derive(Clone, Default)] + struct Counter(Arc); + + impl MakeRequestId for Counter { + fn make_request_id(&mut self, 
_request: &Request) -> Option { + let id = + HeaderValue::from_str(&self.0.fetch_add(1, Ordering::SeqCst).to_string()).unwrap(); + Some(RequestId::new(id)) + } + } + + async fn handler(_: Request) -> Result, Infallible> { + Ok(Response::new(Body::empty())) + } + + #[tokio::test] + async fn uuid() { + let svc = ServiceBuilder::new() + .set_x_request_id(MakeRequestUuid) + .propagate_x_request_id() + .service_fn(handler); + + // header on response + let req = Request::builder().body(Body::empty()).unwrap(); + let mut res = svc.clone().oneshot(req).await.unwrap(); + let id = res.headers_mut().remove("x-request-id").unwrap(); + id.to_str().unwrap().parse::().unwrap(); + } +} diff --git a/.cargo-vendor/tower-http/src/sensitive_headers.rs b/.cargo-vendor/tower-http/src/sensitive_headers.rs new file mode 100644 index 0000000000..3bd081db69 --- /dev/null +++ b/.cargo-vendor/tower-http/src/sensitive_headers.rs @@ -0,0 +1,448 @@ +//! Middlewares that mark headers as [sensitive]. +//! +//! [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +//! +//! # Example +//! +//! ``` +//! use tower_http::sensitive_headers::SetSensitiveHeadersLayer; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use http::{Request, Response, header::AUTHORIZATION}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use std::{iter::once, convert::Infallible}; +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut service = ServiceBuilder::new() +//! // Mark the `Authorization` header as sensitive so it doesn't show in logs +//! // +//! // `SetSensitiveHeadersLayer` will mark the header as sensitive on both the +//! // request and response. +//! // +//! // The middleware is constructed from an iterator of headers to easily mark +//! // multiple headers at once. +//! 
.layer(SetSensitiveHeadersLayer::new(once(AUTHORIZATION))) +//! .service(service_fn(handle)); +//! +//! // Call the service. +//! let response = service +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! Its important to think about the order in which requests and responses arrive at your +//! middleware. For example to hide headers both on requests and responses when using +//! [`TraceLayer`] you have to apply [`SetSensitiveRequestHeadersLayer`] before [`TraceLayer`] +//! and [`SetSensitiveResponseHeadersLayer`] afterwards. +//! +//! ``` +//! use tower_http::{ +//! trace::TraceLayer, +//! sensitive_headers::{ +//! SetSensitiveRequestHeadersLayer, +//! SetSensitiveResponseHeadersLayer, +//! }, +//! }; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use http::header; +//! use std::sync::Arc; +//! # use http::{Request, Response}; +//! # use bytes::Bytes; +//! # use http_body_util::Full; +//! # use std::convert::Infallible; +//! # async fn handle(req: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::default())) +//! # } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let headers: Arc<[_]> = Arc::new([ +//! header::AUTHORIZATION, +//! header::PROXY_AUTHORIZATION, +//! header::COOKIE, +//! header::SET_COOKIE, +//! ]); +//! +//! let service = ServiceBuilder::new() +//! .layer(SetSensitiveRequestHeadersLayer::from_shared(Arc::clone(&headers))) +//! .layer(TraceLayer::new_for_http()) +//! .layer(SetSensitiveResponseHeadersLayer::from_shared(headers)) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! 
[`TraceLayer`]: crate::trace::TraceLayer + +use http::{header::HeaderName, Request, Response}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Mark headers as [sensitive] on both requests and responses. +/// +/// Produces [`SetSensitiveHeaders`] services. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveHeadersLayer { + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveHeadersLayer { + /// Create a new [`SetSensitiveHeadersLayer`]. + pub fn new(headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(headers.into()) + } + + /// Create a new [`SetSensitiveHeadersLayer`] from a shared slice of headers. + pub fn from_shared(headers: Arc<[HeaderName]>) -> Self { + Self { headers } + } +} + +impl Layer for SetSensitiveHeadersLayer { + type Service = SetSensitiveHeaders; + + fn layer(&self, inner: S) -> Self::Service { + SetSensitiveRequestHeaders::from_shared( + SetSensitiveResponseHeaders::from_shared(inner, self.headers.clone()), + self.headers.clone(), + ) + } +} + +/// Mark headers as [sensitive] on both requests and responses. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +pub type SetSensitiveHeaders = SetSensitiveRequestHeaders>; + +/// Mark request headers as [sensitive]. +/// +/// Produces [`SetSensitiveRequestHeaders`] services. +/// +/// See the [module docs](crate::sensitive_headers) for more details. 
+/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveRequestHeadersLayer { + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveRequestHeadersLayer { + /// Create a new [`SetSensitiveRequestHeadersLayer`]. + pub fn new(headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(headers.into()) + } + + /// Create a new [`SetSensitiveRequestHeadersLayer`] from a shared slice of headers. + pub fn from_shared(headers: Arc<[HeaderName]>) -> Self { + Self { headers } + } +} + +impl Layer for SetSensitiveRequestHeadersLayer { + type Service = SetSensitiveRequestHeaders; + + fn layer(&self, inner: S) -> Self::Service { + SetSensitiveRequestHeaders { + inner, + headers: self.headers.clone(), + } + } +} + +/// Mark request headers as [sensitive]. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveRequestHeaders { + inner: S, + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveRequestHeaders { + /// Create a new [`SetSensitiveRequestHeaders`]. + pub fn new(inner: S, headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(inner, headers.into()) + } + + /// Create a new [`SetSensitiveRequestHeaders`] from a shared slice of headers. + pub fn from_shared(inner: S, headers: Arc<[HeaderName]>) -> Self { + Self { inner, headers } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetSensitiveRequestHeaders` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(headers: I) -> SetSensitiveRequestHeadersLayer + where + I: IntoIterator, + { + SetSensitiveRequestHeadersLayer::new(headers) + } +} + +impl Service> for SetSensitiveRequestHeaders +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + let headers = req.headers_mut(); + for header in &*self.headers { + if let http::header::Entry::Occupied(mut entry) = headers.entry(header) { + for value in entry.iter_mut() { + value.set_sensitive(true); + } + } + } + + self.inner.call(req) + } +} + +/// Mark response headers as [sensitive]. +/// +/// Produces [`SetSensitiveResponseHeaders`] services. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveResponseHeadersLayer { + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveResponseHeadersLayer { + /// Create a new [`SetSensitiveResponseHeadersLayer`]. + pub fn new(headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(headers.into()) + } + + /// Create a new [`SetSensitiveResponseHeadersLayer`] from a shared slice of headers. + pub fn from_shared(headers: Arc<[HeaderName]>) -> Self { + Self { headers } + } +} + +impl Layer for SetSensitiveResponseHeadersLayer { + type Service = SetSensitiveResponseHeaders; + + fn layer(&self, inner: S) -> Self::Service { + SetSensitiveResponseHeaders { + inner, + headers: self.headers.clone(), + } + } +} + +/// Mark response headers as [sensitive]. +/// +/// See the [module docs](crate::sensitive_headers) for more details. 
+/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveResponseHeaders { + inner: S, + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveResponseHeaders { + /// Create a new [`SetSensitiveResponseHeaders`]. + pub fn new(inner: S, headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(inner, headers.into()) + } + + /// Create a new [`SetSensitiveResponseHeaders`] from a shared slice of headers. + pub fn from_shared(inner: S, headers: Arc<[HeaderName]>) -> Self { + Self { inner, headers } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetSensitiveResponseHeaders` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(headers: I) -> SetSensitiveResponseHeadersLayer + where + I: IntoIterator, + { + SetSensitiveResponseHeadersLayer::new(headers) + } +} + +impl Service> for SetSensitiveResponseHeaders +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = SetSensitiveResponseHeadersResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + SetSensitiveResponseHeadersResponseFuture { + future: self.inner.call(req), + headers: self.headers.clone(), + } + } +} + +pin_project! { + /// Response future for [`SetSensitiveResponseHeaders`]. 
+ #[derive(Debug)] + pub struct SetSensitiveResponseHeadersResponseFuture { + #[pin] + future: F, + headers: Arc<[HeaderName]>, + } +} + +impl Future for SetSensitiveResponseHeadersResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut res = ready!(this.future.poll(cx)?); + + let headers = res.headers_mut(); + for header in &**this.headers { + if let http::header::Entry::Occupied(mut entry) = headers.entry(header) { + for value in entry.iter_mut() { + value.set_sensitive(true); + } + } + } + + Poll::Ready(Ok(res)) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use http::header; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn multiple_value_header() { + async fn response_set_cookie(req: http::Request<()>) -> Result, ()> { + let mut iter = req.headers().get_all(header::COOKIE).iter().peekable(); + + assert!(iter.peek().is_some()); + + for value in iter { + assert!(value.is_sensitive()) + } + + let mut resp = http::Response::new(()); + resp.headers_mut().append( + header::CONTENT_TYPE, + http::HeaderValue::from_static("text/html"), + ); + resp.headers_mut().append( + header::SET_COOKIE, + http::HeaderValue::from_static("cookie-1"), + ); + resp.headers_mut().append( + header::SET_COOKIE, + http::HeaderValue::from_static("cookie-2"), + ); + resp.headers_mut().append( + header::SET_COOKIE, + http::HeaderValue::from_static("cookie-3"), + ); + Ok(resp) + } + + let mut service = ServiceBuilder::new() + .layer(SetSensitiveRequestHeadersLayer::new(vec![header::COOKIE])) + .layer(SetSensitiveResponseHeadersLayer::new(vec![ + header::SET_COOKIE, + ])) + .service_fn(response_set_cookie); + + let mut req = http::Request::new(()); + req.headers_mut() + .append(header::COOKIE, http::HeaderValue::from_static("cookie+1")); + req.headers_mut() + .append(header::COOKIE, 
http::HeaderValue::from_static("cookie+2")); + + let resp = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(!resp + .headers() + .get(header::CONTENT_TYPE) + .unwrap() + .is_sensitive()); + + let mut iter = resp.headers().get_all(header::SET_COOKIE).iter().peekable(); + + assert!(iter.peek().is_some()); + + for value in iter { + assert!(value.is_sensitive()) + } + } +} diff --git a/.cargo-vendor/tower-http/src/services/fs/mod.rs b/.cargo-vendor/tower-http/src/services/fs/mod.rs new file mode 100644 index 0000000000..32dd6f1cc3 --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/fs/mod.rs @@ -0,0 +1,79 @@ +//! File system related services. + +use bytes::Bytes; +use futures_util::Stream; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, +}; +use tokio::io::{AsyncRead, AsyncReadExt, Take}; +use tokio_util::io::ReaderStream; + +mod serve_dir; +mod serve_file; + +pub use self::{ + serve_dir::{ + future::ResponseFuture as ServeFileSystemResponseFuture, + DefaultServeDirFallback, + // The response body and future are used for both ServeDir and ServeFile + ResponseBody as ServeFileSystemResponseBody, + ServeDir, + }, + serve_file::ServeFile, +}; + +pin_project! { + // NOTE: This could potentially be upstreamed to `http-body`. + /// Adapter that turns an [`impl AsyncRead`][tokio::io::AsyncRead] to an [`impl Body`][http_body::Body]. 
+ #[derive(Debug)] + pub struct AsyncReadBody { + #[pin] + reader: ReaderStream, + } +} + +impl AsyncReadBody +where + T: AsyncRead, +{ + /// Create a new [`AsyncReadBody`] wrapping the given reader, + /// with a specific read buffer capacity + fn with_capacity(read: T, capacity: usize) -> Self { + Self { + reader: ReaderStream::with_capacity(read, capacity), + } + } + + fn with_capacity_limited( + read: T, + capacity: usize, + max_read_bytes: u64, + ) -> AsyncReadBody> { + AsyncReadBody { + reader: ReaderStream::with_capacity(read.take(max_read_bytes), capacity), + } + } +} + +impl Body for AsyncReadBody +where + T: AsyncRead, +{ + type Data = Bytes; + type Error = io::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match std::task::ready!(self.project().reader.poll_next(cx)) { + Some(Ok(chunk)) => Poll::Ready(Some(Ok(Frame::data(chunk)))), + Some(Err(err)) => Poll::Ready(Some(Err(err))), + None => Poll::Ready(None), + } + } +} diff --git a/.cargo-vendor/tower-http/src/services/fs/serve_dir/future.rs b/.cargo-vendor/tower-http/src/services/fs/serve_dir/future.rs new file mode 100644 index 0000000000..8b255f7294 --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/fs/serve_dir/future.rs @@ -0,0 +1,319 @@ +use super::{ + open_file::{FileOpened, FileRequestExtent, OpenFileOutput}, + DefaultServeDirFallback, ResponseBody, +}; +use crate::{ + body::UnsyncBoxBody, content_encoding::Encoding, services::fs::AsyncReadBody, BoxError, +}; +use bytes::Bytes; +use futures_util::future::{BoxFuture, FutureExt, TryFutureExt}; +use http::{ + header::{self, ALLOW}, + HeaderValue, Request, Response, StatusCode, +}; +use http_body_util::{BodyExt, Empty, Full}; +use pin_project_lite::pin_project; +use std::{ + convert::Infallible, + future::Future, + io, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_service::Service; + +pin_project! 
{ + /// Response future of [`ServeDir::try_call()`][`super::ServeDir::try_call()`]. + pub struct ResponseFuture { + #[pin] + pub(super) inner: ResponseFutureInner, + } +} + +impl ResponseFuture { + pub(super) fn open_file_future( + future: BoxFuture<'static, io::Result>, + fallback_and_request: Option<(F, Request)>, + ) -> Self { + Self { + inner: ResponseFutureInner::OpenFileFuture { + future, + fallback_and_request, + }, + } + } + + pub(super) fn invalid_path(fallback_and_request: Option<(F, Request)>) -> Self { + Self { + inner: ResponseFutureInner::InvalidPath { + fallback_and_request, + }, + } + } + + pub(super) fn method_not_allowed() -> Self { + Self { + inner: ResponseFutureInner::MethodNotAllowed, + } + } +} + +pin_project! { + #[project = ResponseFutureInnerProj] + pub(super) enum ResponseFutureInner { + OpenFileFuture { + #[pin] + future: BoxFuture<'static, io::Result>, + fallback_and_request: Option<(F, Request)>, + }, + FallbackFuture { + future: BoxFuture<'static, Result, Infallible>>, + }, + InvalidPath { + fallback_and_request: Option<(F, Request)>, + }, + MethodNotAllowed, + } +} + +impl Future for ResponseFuture +where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + ResBody: http_body::Body + Send + 'static, + ResBody::Error: Into>, +{ + type Output = io::Result>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + loop { + let mut this = self.as_mut().project(); + + let new_state = match this.inner.as_mut().project() { + ResponseFutureInnerProj::OpenFileFuture { + future: open_file_future, + fallback_and_request, + } => match ready!(open_file_future.poll(cx)) { + Ok(OpenFileOutput::FileOpened(file_output)) => { + break Poll::Ready(Ok(build_response(*file_output))); + } + + Ok(OpenFileOutput::Redirect { location }) => { + let mut res = response_with_status(StatusCode::TEMPORARY_REDIRECT); + res.headers_mut().insert(http::header::LOCATION, location); + break Poll::Ready(Ok(res)); + 
} + + Ok(OpenFileOutput::FileNotFound) => { + if let Some((mut fallback, request)) = fallback_and_request.take() { + call_fallback(&mut fallback, request) + } else { + break Poll::Ready(Ok(not_found())); + } + } + + Ok(OpenFileOutput::PreconditionFailed) => { + break Poll::Ready(Ok(response_with_status( + StatusCode::PRECONDITION_FAILED, + ))); + } + + Ok(OpenFileOutput::NotModified) => { + break Poll::Ready(Ok(response_with_status(StatusCode::NOT_MODIFIED))); + } + + Err(err) => { + #[cfg(unix)] + // 20 = libc::ENOTDIR => "not a directory + // when `io_error_more` landed, this can be changed + // to checking for `io::ErrorKind::NotADirectory`. + // https://github.com/rust-lang/rust/issues/86442 + let error_is_not_a_directory = err.raw_os_error() == Some(20); + #[cfg(not(unix))] + let error_is_not_a_directory = false; + + if matches!( + err.kind(), + io::ErrorKind::NotFound | io::ErrorKind::PermissionDenied + ) || error_is_not_a_directory + { + if let Some((mut fallback, request)) = fallback_and_request.take() { + call_fallback(&mut fallback, request) + } else { + break Poll::Ready(Ok(not_found())); + } + } else { + break Poll::Ready(Err(err)); + } + } + }, + + ResponseFutureInnerProj::FallbackFuture { future } => { + break Pin::new(future).poll(cx).map_err(|err| match err {}) + } + + ResponseFutureInnerProj::InvalidPath { + fallback_and_request, + } => { + if let Some((mut fallback, request)) = fallback_and_request.take() { + call_fallback(&mut fallback, request) + } else { + break Poll::Ready(Ok(not_found())); + } + } + + ResponseFutureInnerProj::MethodNotAllowed => { + let mut res = response_with_status(StatusCode::METHOD_NOT_ALLOWED); + res.headers_mut() + .insert(ALLOW, HeaderValue::from_static("GET,HEAD")); + break Poll::Ready(Ok(res)); + } + }; + + this.inner.set(new_state); + } + } +} + +fn response_with_status(status: StatusCode) -> Response { + Response::builder() + .status(status) + .body(empty_body()) + .unwrap() +} + +fn not_found() -> Response { + 
response_with_status(StatusCode::NOT_FOUND) +} + +pub(super) fn call_fallback( + fallback: &mut F, + req: Request, +) -> ResponseFutureInner +where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + FResBody: http_body::Body + Send + 'static, + FResBody::Error: Into, +{ + let future = fallback + .call(req) + .map_ok(|response| { + response + .map(|body| { + UnsyncBoxBody::new( + body.map_err(|err| match err.into().downcast::() { + Ok(err) => *err, + Err(err) => io::Error::new(io::ErrorKind::Other, err), + }) + .boxed_unsync(), + ) + }) + .map(ResponseBody::new) + }) + .boxed(); + + ResponseFutureInner::FallbackFuture { future } +} + +fn build_response(output: FileOpened) -> Response { + let (maybe_file, size) = match output.extent { + FileRequestExtent::Full(file, meta) => (Some(file), meta.len()), + FileRequestExtent::Head(meta) => (None, meta.len()), + }; + + let mut builder = Response::builder() + .header(header::CONTENT_TYPE, output.mime_header_value) + .header(header::ACCEPT_RANGES, "bytes"); + + if let Some(encoding) = output + .maybe_encoding + .filter(|encoding| *encoding != Encoding::Identity) + { + builder = builder.header(header::CONTENT_ENCODING, encoding.into_header_value()); + } + + if let Some(last_modified) = output.last_modified { + builder = builder.header(header::LAST_MODIFIED, last_modified.0.to_string()); + } + + match output.maybe_range { + Some(Ok(ranges)) => { + if let Some(range) = ranges.first() { + if ranges.len() > 1 { + builder + .header(header::CONTENT_RANGE, format!("bytes */{}", size)) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .body(body_from_bytes(Bytes::from( + "Cannot serve multipart range requests", + ))) + .unwrap() + } else { + let body = if let Some(file) = maybe_file { + let range_size = range.end() - range.start() + 1; + ResponseBody::new(UnsyncBoxBody::new( + AsyncReadBody::with_capacity_limited( + file, + output.chunk_size, + range_size, + ) + .boxed_unsync(), + )) + } else 
{ + empty_body() + }; + + builder + .header( + header::CONTENT_RANGE, + format!("bytes {}-{}/{}", range.start(), range.end(), size), + ) + .header(header::CONTENT_LENGTH, range.end() - range.start() + 1) + .status(StatusCode::PARTIAL_CONTENT) + .body(body) + .unwrap() + } + } else { + builder + .header(header::CONTENT_RANGE, format!("bytes */{}", size)) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .body(body_from_bytes(Bytes::from( + "No range found after parsing range header, please file an issue", + ))) + .unwrap() + } + } + + Some(Err(_)) => builder + .header(header::CONTENT_RANGE, format!("bytes */{}", size)) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .body(empty_body()) + .unwrap(), + + // Not a range request + None => { + let body = if let Some(file) = maybe_file { + ResponseBody::new(UnsyncBoxBody::new( + AsyncReadBody::with_capacity(file, output.chunk_size).boxed_unsync(), + )) + } else { + empty_body() + }; + + builder + .header(header::CONTENT_LENGTH, size.to_string()) + .body(body) + .unwrap() + } + } +} + +fn body_from_bytes(bytes: Bytes) -> ResponseBody { + let body = Full::from(bytes).map_err(|err| match err {}).boxed_unsync(); + ResponseBody::new(UnsyncBoxBody::new(body)) +} + +fn empty_body() -> ResponseBody { + let body = Empty::new().map_err(|err| match err {}).boxed_unsync(); + ResponseBody::new(UnsyncBoxBody::new(body)) +} diff --git a/.cargo-vendor/tower-http/src/services/fs/serve_dir/headers.rs b/.cargo-vendor/tower-http/src/services/fs/serve_dir/headers.rs new file mode 100644 index 0000000000..e9e809073f --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/fs/serve_dir/headers.rs @@ -0,0 +1,45 @@ +use http::header::HeaderValue; +use httpdate::HttpDate; +use std::time::SystemTime; + +pub(super) struct LastModified(pub(super) HttpDate); + +impl From for LastModified { + fn from(time: SystemTime) -> Self { + LastModified(time.into()) + } +} + +pub(super) struct IfModifiedSince(HttpDate); + +impl IfModifiedSince { + /// Check if the 
supplied time means the resource has been modified. + pub(super) fn is_modified(&self, last_modified: &LastModified) -> bool { + self.0 < last_modified.0 + } + + /// convert a header value into a IfModifiedSince, invalid values are silentely ignored + pub(super) fn from_header_value(value: &HeaderValue) -> Option { + std::str::from_utf8(value.as_bytes()) + .ok() + .and_then(|value| httpdate::parse_http_date(value).ok()) + .map(|time| IfModifiedSince(time.into())) + } +} + +pub(super) struct IfUnmodifiedSince(HttpDate); + +impl IfUnmodifiedSince { + /// Check if the supplied time passes the precondtion. + pub(super) fn precondition_passes(&self, last_modified: &LastModified) -> bool { + self.0 >= last_modified.0 + } + + /// Convert a header value into a IfModifiedSince, invalid values are silentely ignored + pub(super) fn from_header_value(value: &HeaderValue) -> Option { + std::str::from_utf8(value.as_bytes()) + .ok() + .and_then(|value| httpdate::parse_http_date(value).ok()) + .map(|time| IfUnmodifiedSince(time.into())) + } +} diff --git a/.cargo-vendor/tower-http/src/services/fs/serve_dir/mod.rs b/.cargo-vendor/tower-http/src/services/fs/serve_dir/mod.rs new file mode 100644 index 0000000000..61b956d13c --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/fs/serve_dir/mod.rs @@ -0,0 +1,541 @@ +use self::future::ResponseFuture; +use crate::{ + body::UnsyncBoxBody, + content_encoding::{encodings, SupportedEncodings}, + set_status::SetStatus, +}; +use bytes::Bytes; +use futures_util::FutureExt; +use http::{header, HeaderValue, Method, Request, Response, StatusCode}; +use http_body_util::{BodyExt, Empty}; +use percent_encoding::percent_decode; +use std::{ + convert::Infallible, + io, + path::{Component, Path, PathBuf}, + task::{Context, Poll}, +}; +use tower_service::Service; + +pub(crate) mod future; +mod headers; +mod open_file; + +#[cfg(test)] +mod tests; + +// default capacity 64KiB +const DEFAULT_CAPACITY: usize = 65536; + +/// Service that serves files 
from a given directory and all its sub directories. +/// +/// The `Content-Type` will be guessed from the file extension. +/// +/// An empty response with status `404 Not Found` will be returned if: +/// +/// - The file doesn't exist +/// - Any segment of the path contains `..` +/// - Any segment of the path contains a backslash +/// - On unix, any segment of the path referenced as directory is actually an +/// existing file (`/file.html/something`) +/// - We don't have necessary permissions to read the file +/// +/// # Example +/// +/// ``` +/// use tower_http::services::ServeDir; +/// +/// // This will serve files in the "assets" directory and +/// // its subdirectories +/// let service = ServeDir::new("assets"); +/// ``` +#[derive(Clone, Debug)] +pub struct ServeDir { + base: PathBuf, + buf_chunk_size: usize, + precompressed_variants: Option, + // This is used to specialise implementation for + // single files + variant: ServeVariant, + fallback: Option, + call_fallback_on_method_not_allowed: bool, +} + +impl ServeDir { + /// Create a new [`ServeDir`]. + pub fn new

(path: P) -> Self + where + P: AsRef, + { + let mut base = PathBuf::from("."); + base.push(path.as_ref()); + + Self { + base, + buf_chunk_size: DEFAULT_CAPACITY, + precompressed_variants: None, + variant: ServeVariant::Directory { + append_index_html_on_directories: true, + }, + fallback: None, + call_fallback_on_method_not_allowed: false, + } + } + + pub(crate) fn new_single_file

(path: P, mime: HeaderValue) -> Self + where + P: AsRef, + { + Self { + base: path.as_ref().to_owned(), + buf_chunk_size: DEFAULT_CAPACITY, + precompressed_variants: None, + variant: ServeVariant::SingleFile { mime }, + fallback: None, + call_fallback_on_method_not_allowed: false, + } + } +} + +impl ServeDir { + /// If the requested path is a directory append `index.html`. + /// + /// This is useful for static sites. + /// + /// Defaults to `true`. + pub fn append_index_html_on_directories(mut self, append: bool) -> Self { + match &mut self.variant { + ServeVariant::Directory { + append_index_html_on_directories, + } => { + *append_index_html_on_directories = append; + self + } + ServeVariant::SingleFile { mime: _ } => self, + } + } + + /// Set a specific read buffer chunk size. + /// + /// The default capacity is 64kb. + pub fn with_buf_chunk_size(mut self, chunk_size: usize) -> Self { + self.buf_chunk_size = chunk_size; + self + } + + /// Informs the service that it should also look for a precompressed gzip + /// version of _any_ file in the directory. + /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the gzip encoding + /// will receive the file `dir/foo.txt.gz` instead of `dir/foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_gzip(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .gzip = true; + self + } + + /// Informs the service that it should also look for a precompressed brotli + /// version of _any_ file in the directory. 
+ /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the brotli encoding + /// will receive the file `dir/foo.txt.br` instead of `dir/foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_br(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .br = true; + self + } + + /// Informs the service that it should also look for a precompressed deflate + /// version of _any_ file in the directory. + /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the deflate encoding + /// will receive the file `dir/foo.txt.zz` instead of `dir/foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_deflate(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .deflate = true; + self + } + + /// Informs the service that it should also look for a precompressed zstd + /// version of _any_ file in the directory. + /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the zstd encoding + /// will receive the file `dir/foo.txt.zst` instead of `dir/foo.txt`. 
+ /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_zstd(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .zstd = true; + self + } + + /// Set the fallback service. + /// + /// This service will be called if there is no file at the path of the request. + /// + /// The status code returned by the fallback will not be altered. Use + /// [`ServeDir::not_found_service`] to set a fallback and always respond with `404 Not Found`. + /// + /// # Example + /// + /// This can be used to respond with a different file: + /// + /// ```rust + /// use tower_http::services::{ServeDir, ServeFile}; + /// + /// let service = ServeDir::new("assets") + /// // respond with `not_found.html` for missing files + /// .fallback(ServeFile::new("assets/not_found.html")); + /// ``` + pub fn fallback(self, new_fallback: F2) -> ServeDir { + ServeDir { + base: self.base, + buf_chunk_size: self.buf_chunk_size, + precompressed_variants: self.precompressed_variants, + variant: self.variant, + fallback: Some(new_fallback), + call_fallback_on_method_not_allowed: self.call_fallback_on_method_not_allowed, + } + } + + /// Set the fallback service and override the fallback's status code to `404 Not Found`. + /// + /// This service will be called if there is no file at the path of the request. 
+ /// + /// # Example + /// + /// This can be used to respond with a different file: + /// + /// ```rust + /// use tower_http::services::{ServeDir, ServeFile}; + /// + /// let service = ServeDir::new("assets") + /// // respond with `404 Not Found` and the contents of `not_found.html` for missing files + /// .not_found_service(ServeFile::new("assets/not_found.html")); + /// ``` + /// + /// Setups like this are often found in single page applications. + pub fn not_found_service(self, new_fallback: F2) -> ServeDir> { + self.fallback(SetStatus::new(new_fallback, StatusCode::NOT_FOUND)) + } + + /// Customize whether or not to call the fallback for requests that aren't `GET` or `HEAD`. + /// + /// Defaults to not calling the fallback and instead returning `405 Method Not Allowed`. + pub fn call_fallback_on_method_not_allowed(mut self, call_fallback: bool) -> Self { + self.call_fallback_on_method_not_allowed = call_fallback; + self + } + + /// Call the service and get a future that contains any `std::io::Error` that might have + /// happened. + /// + /// By default `>::call` will handle IO errors and convert them into + /// responses. It does that by converting [`std::io::ErrorKind::NotFound`] and + /// [`std::io::ErrorKind::PermissionDenied`] to `404 Not Found` and any other error to `500 + /// Internal Server Error`. The error will also be logged with `tracing`. + /// + /// If you want to manually control how the error response is generated you can make a new + /// service that wraps a `ServeDir` and calls `try_call` instead of `call`. 
+ /// + /// # Example + /// + /// ``` + /// use tower_http::services::ServeDir; + /// use std::{io, convert::Infallible}; + /// use http::{Request, Response, StatusCode}; + /// use http_body::Body as _; + /// use http_body_util::{Full, BodyExt, combinators::UnsyncBoxBody}; + /// use bytes::Bytes; + /// use tower::{service_fn, ServiceExt, BoxError}; + /// + /// async fn serve_dir( + /// request: Request> + /// ) -> Result>, Infallible> { + /// let mut service = ServeDir::new("assets"); + /// + /// // You only need to worry about backpressure, and thus call `ServiceExt::ready`, if + /// // your adding a fallback to `ServeDir` that cares about backpressure. + /// // + /// // Its shown here for demonstration but you can do `service.try_call(request)` + /// // otherwise + /// let ready_service = match ServiceExt::>>::ready(&mut service).await { + /// Ok(ready_service) => ready_service, + /// Err(infallible) => match infallible {}, + /// }; + /// + /// match ready_service.try_call(request).await { + /// Ok(response) => { + /// Ok(response.map(|body| body.map_err(Into::into).boxed_unsync())) + /// } + /// Err(err) => { + /// let body = Full::from("Something went wrong...") + /// .map_err(Into::into) + /// .boxed_unsync(); + /// let response = Response::builder() + /// .status(StatusCode::INTERNAL_SERVER_ERROR) + /// .body(body) + /// .unwrap(); + /// Ok(response) + /// } + /// } + /// } + /// ``` + pub fn try_call( + &mut self, + req: Request, + ) -> ResponseFuture + where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + FResBody: http_body::Body + Send + 'static, + FResBody::Error: Into>, + { + if req.method() != Method::GET && req.method() != Method::HEAD { + if self.call_fallback_on_method_not_allowed { + if let Some(fallback) = &mut self.fallback { + return ResponseFuture { + inner: future::call_fallback(fallback, req), + }; + } + } else { + return ResponseFuture::method_not_allowed(); + } + } + + // `ServeDir` doesn't 
care about the request body but the fallback might. So move out the + // body and pass it to the fallback, leaving an empty body in its place + // + // this is necessary because we cannot clone bodies + let (mut parts, body) = req.into_parts(); + // same goes for extensions + let extensions = std::mem::take(&mut parts.extensions); + let req = Request::from_parts(parts, Empty::::new()); + + let fallback_and_request = self.fallback.as_mut().map(|fallback| { + let mut fallback_req = Request::new(body); + *fallback_req.method_mut() = req.method().clone(); + *fallback_req.uri_mut() = req.uri().clone(); + *fallback_req.headers_mut() = req.headers().clone(); + *fallback_req.extensions_mut() = extensions; + + // get the ready fallback and leave a non-ready clone in its place + let clone = fallback.clone(); + let fallback = std::mem::replace(fallback, clone); + + (fallback, fallback_req) + }); + + let path_to_file = match self + .variant + .build_and_validate_path(&self.base, req.uri().path()) + { + Some(path_to_file) => path_to_file, + None => { + return ResponseFuture::invalid_path(fallback_and_request); + } + }; + + let buf_chunk_size = self.buf_chunk_size; + let range_header = req + .headers() + .get(header::RANGE) + .and_then(|value| value.to_str().ok()) + .map(|s| s.to_owned()); + + let negotiated_encodings: Vec<_> = encodings( + req.headers(), + self.precompressed_variants.unwrap_or_default(), + ) + .collect(); + + let variant = self.variant.clone(); + + let open_file_future = Box::pin(open_file::open_file( + variant, + path_to_file, + req, + negotiated_encodings, + range_header, + buf_chunk_size, + )); + + ResponseFuture::open_file_future(open_file_future, fallback_and_request) + } +} + +impl Service> for ServeDir +where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + FResBody: http_body::Body + Send + 'static, + FResBody::Error: Into>, +{ + type Response = Response; + type Error = Infallible; + type Future = 
InfallibleResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + if let Some(fallback) = &mut self.fallback { + fallback.poll_ready(cx) + } else { + Poll::Ready(Ok(())) + } + } + + fn call(&mut self, req: Request) -> Self::Future { + let future = self + .try_call(req) + .map(|result: Result<_, _>| -> Result<_, Infallible> { + let response = result.unwrap_or_else(|err| { + tracing::error!(error = %err, "Failed to read file"); + + let body = ResponseBody::new(UnsyncBoxBody::new( + Empty::new().map_err(|err| match err {}).boxed_unsync(), + )); + Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(body) + .unwrap() + }); + Ok(response) + } as _); + + InfallibleResponseFuture::new(future) + } +} + +opaque_future! { + /// Response future of [`ServeDir`]. + pub type InfallibleResponseFuture = + futures_util::future::Map< + ResponseFuture, + fn(Result, io::Error>) -> Result, Infallible>, + >; +} + +// Allow the ServeDir service to be used in the ServeFile service +// with almost no overhead +#[derive(Clone, Debug)] +enum ServeVariant { + Directory { + append_index_html_on_directories: bool, + }, + SingleFile { + mime: HeaderValue, + }, +} + +impl ServeVariant { + fn build_and_validate_path(&self, base_path: &Path, requested_path: &str) -> Option { + match self { + ServeVariant::Directory { + append_index_html_on_directories: _, + } => { + let path = requested_path.trim_start_matches('/'); + + let path_decoded = percent_decode(path.as_ref()).decode_utf8().ok()?; + let path_decoded = Path::new(&*path_decoded); + + let mut path_to_file = base_path.to_path_buf(); + for component in path_decoded.components() { + match component { + Component::Normal(comp) => { + // protect against paths like `/foo/c:/bar/baz` (#204) + if Path::new(&comp) + .components() + .all(|c| matches!(c, Component::Normal(_))) + { + path_to_file.push(comp) + } else { + return None; + } + } + Component::CurDir => {} + Component::Prefix(_) | 
Component::RootDir | Component::ParentDir => { + return None; + } + } + } + Some(path_to_file) + } + ServeVariant::SingleFile { mime: _ } => Some(base_path.to_path_buf()), + } + } +} + +opaque_body! { + /// Response body for [`ServeDir`] and [`ServeFile`][super::ServeFile]. + #[derive(Default)] + pub type ResponseBody = UnsyncBoxBody; +} + +/// The default fallback service used with [`ServeDir`]. +#[derive(Debug, Clone, Copy)] +pub struct DefaultServeDirFallback(Infallible); + +impl Service> for DefaultServeDirFallback +where + ReqBody: Send + 'static, +{ + type Response = Response; + type Error = Infallible; + type Future = InfallibleResponseFuture; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + match self.0 {} + } + + fn call(&mut self, _req: Request) -> Self::Future { + match self.0 {} + } +} + +#[derive(Clone, Copy, Debug, Default)] +struct PrecompressedVariants { + gzip: bool, + deflate: bool, + br: bool, + zstd: bool, +} + +impl SupportedEncodings for PrecompressedVariants { + fn gzip(&self) -> bool { + self.gzip + } + + fn deflate(&self) -> bool { + self.deflate + } + + fn br(&self) -> bool { + self.br + } + + fn zstd(&self) -> bool { + self.zstd + } +} diff --git a/.cargo-vendor/tower-http/src/services/fs/serve_dir/open_file.rs b/.cargo-vendor/tower-http/src/services/fs/serve_dir/open_file.rs new file mode 100644 index 0000000000..01c1e2f9c1 --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/fs/serve_dir/open_file.rs @@ -0,0 +1,321 @@ +use super::{ + headers::{IfModifiedSince, IfUnmodifiedSince, LastModified}, + ServeVariant, +}; +use crate::content_encoding::{Encoding, QValue}; +use bytes::Bytes; +use http::{header, HeaderValue, Method, Request, Uri}; +use http_body_util::Empty; +use http_range_header::RangeUnsatisfiableError; +use std::{ + ffi::OsStr, + fs::Metadata, + io::{self, SeekFrom}, + ops::RangeInclusive, + path::{Path, PathBuf}, +}; +use tokio::{fs::File, io::AsyncSeekExt}; + +pub(super) enum OpenFileOutput { + 
FileOpened(Box), + Redirect { location: HeaderValue }, + FileNotFound, + PreconditionFailed, + NotModified, +} + +pub(super) struct FileOpened { + pub(super) extent: FileRequestExtent, + pub(super) chunk_size: usize, + pub(super) mime_header_value: HeaderValue, + pub(super) maybe_encoding: Option, + pub(super) maybe_range: Option>, RangeUnsatisfiableError>>, + pub(super) last_modified: Option, +} + +pub(super) enum FileRequestExtent { + Full(File, Metadata), + Head(Metadata), +} + +pub(super) async fn open_file( + variant: ServeVariant, + mut path_to_file: PathBuf, + req: Request>, + negotiated_encodings: Vec<(Encoding, QValue)>, + range_header: Option, + buf_chunk_size: usize, +) -> io::Result { + let if_unmodified_since = req + .headers() + .get(header::IF_UNMODIFIED_SINCE) + .and_then(IfUnmodifiedSince::from_header_value); + + let if_modified_since = req + .headers() + .get(header::IF_MODIFIED_SINCE) + .and_then(IfModifiedSince::from_header_value); + + let mime = match variant { + ServeVariant::Directory { + append_index_html_on_directories, + } => { + // Might already at this point know a redirect or not found result should be + // returned which corresponds to a Some(output). Otherwise the path might be + // modified and proceed to the open file/metadata future. 
+ if let Some(output) = maybe_redirect_or_append_path( + &mut path_to_file, + req.uri(), + append_index_html_on_directories, + ) + .await + { + return Ok(output); + } + + mime_guess::from_path(&path_to_file) + .first_raw() + .map(HeaderValue::from_static) + .unwrap_or_else(|| { + HeaderValue::from_str(mime::APPLICATION_OCTET_STREAM.as_ref()).unwrap() + }) + } + + ServeVariant::SingleFile { mime } => mime, + }; + + if req.method() == Method::HEAD { + let (meta, maybe_encoding) = + file_metadata_with_fallback(path_to_file, negotiated_encodings).await?; + + let last_modified = meta.modified().ok().map(LastModified::from); + if let Some(output) = check_modified_headers( + last_modified.as_ref(), + if_unmodified_since, + if_modified_since, + ) { + return Ok(output); + } + + let maybe_range = try_parse_range(range_header.as_deref(), meta.len()); + + Ok(OpenFileOutput::FileOpened(Box::new(FileOpened { + extent: FileRequestExtent::Head(meta), + chunk_size: buf_chunk_size, + mime_header_value: mime, + maybe_encoding, + maybe_range, + last_modified, + }))) + } else { + let (mut file, maybe_encoding) = + open_file_with_fallback(path_to_file, negotiated_encodings).await?; + let meta = file.metadata().await?; + let last_modified = meta.modified().ok().map(LastModified::from); + if let Some(output) = check_modified_headers( + last_modified.as_ref(), + if_unmodified_since, + if_modified_since, + ) { + return Ok(output); + } + + let maybe_range = try_parse_range(range_header.as_deref(), meta.len()); + if let Some(Ok(ranges)) = maybe_range.as_ref() { + // if there is any other amount of ranges than 1 we'll return an + // unsatisfiable later as there isn't yet support for multipart ranges + if ranges.len() == 1 { + file.seek(SeekFrom::Start(*ranges[0].start())).await?; + } + } + + Ok(OpenFileOutput::FileOpened(Box::new(FileOpened { + extent: FileRequestExtent::Full(file, meta), + chunk_size: buf_chunk_size, + mime_header_value: mime, + maybe_encoding, + maybe_range, + last_modified, 
+ }))) + } +} + +fn check_modified_headers( + modified: Option<&LastModified>, + if_unmodified_since: Option, + if_modified_since: Option, +) -> Option { + if let Some(since) = if_unmodified_since { + let precondition = modified + .as_ref() + .map(|time| since.precondition_passes(time)) + .unwrap_or(false); + + if !precondition { + return Some(OpenFileOutput::PreconditionFailed); + } + } + + if let Some(since) = if_modified_since { + let unmodified = modified + .as_ref() + .map(|time| !since.is_modified(time)) + // no last_modified means its always modified + .unwrap_or(false); + if unmodified { + return Some(OpenFileOutput::NotModified); + } + } + + None +} + +// Returns the preferred_encoding encoding and modifies the path extension +// to the corresponding file extension for the encoding. +fn preferred_encoding( + path: &mut PathBuf, + negotiated_encoding: &[(Encoding, QValue)], +) -> Option { + let preferred_encoding = Encoding::preferred_encoding(negotiated_encoding.iter().copied()); + + if let Some(file_extension) = + preferred_encoding.and_then(|encoding| encoding.to_file_extension()) + { + let new_extension = path + .extension() + .map(|extension| { + let mut os_string = extension.to_os_string(); + os_string.push(file_extension); + os_string + }) + .unwrap_or_else(|| file_extension.to_os_string()); + + path.set_extension(new_extension); + } + + preferred_encoding +} + +// Attempts to open the file with any of the possible negotiated_encodings in the +// preferred order. If none of the negotiated_encodings have a corresponding precompressed +// file the uncompressed file is used as a fallback. +async fn open_file_with_fallback( + mut path: PathBuf, + mut negotiated_encoding: Vec<(Encoding, QValue)>, +) -> io::Result<(File, Option)> { + let (file, encoding) = loop { + // Get the preferred encoding among the negotiated ones. 
+ let encoding = preferred_encoding(&mut path, &negotiated_encoding); + match (File::open(&path).await, encoding) { + (Ok(file), maybe_encoding) => break (file, maybe_encoding), + (Err(err), Some(encoding)) if err.kind() == io::ErrorKind::NotFound => { + // Remove the extension corresponding to a precompressed file (.gz, .br, .zz) + // to reset the path before the next iteration. + path.set_extension(OsStr::new("")); + // Remove the encoding from the negotiated_encodings since the file doesn't exist + negotiated_encoding + .retain(|(negotiated_encoding, _)| *negotiated_encoding != encoding); + continue; + } + (Err(err), _) => return Err(err), + }; + }; + Ok((file, encoding)) +} + +// Attempts to get the file metadata with any of the possible negotiated_encodings in the +// preferred order. If none of the negotiated_encodings have a corresponding precompressed +// file the uncompressed file is used as a fallback. +async fn file_metadata_with_fallback( + mut path: PathBuf, + mut negotiated_encoding: Vec<(Encoding, QValue)>, +) -> io::Result<(Metadata, Option)> { + let (file, encoding) = loop { + // Get the preferred encoding among the negotiated ones. + let encoding = preferred_encoding(&mut path, &negotiated_encoding); + match (tokio::fs::metadata(&path).await, encoding) { + (Ok(file), maybe_encoding) => break (file, maybe_encoding), + (Err(err), Some(encoding)) if err.kind() == io::ErrorKind::NotFound => { + // Remove the extension corresponding to a precompressed file (.gz, .br, .zz) + // to reset the path before the next iteration. 
+ path.set_extension(OsStr::new("")); + // Remove the encoding from the negotiated_encodings since the file doesn't exist + negotiated_encoding + .retain(|(negotiated_encoding, _)| *negotiated_encoding != encoding); + continue; + } + (Err(err), _) => return Err(err), + }; + }; + Ok((file, encoding)) +} + +async fn maybe_redirect_or_append_path( + path_to_file: &mut PathBuf, + uri: &Uri, + append_index_html_on_directories: bool, +) -> Option { + if !is_dir(path_to_file).await { + return None; + } + + if !append_index_html_on_directories { + return Some(OpenFileOutput::FileNotFound); + } + + if uri.path().ends_with('/') { + path_to_file.push("index.html"); + None + } else { + let location = + HeaderValue::from_str(&append_slash_on_path(uri.clone()).to_string()).unwrap(); + Some(OpenFileOutput::Redirect { location }) + } +} + +fn try_parse_range( + maybe_range_ref: Option<&str>, + file_size: u64, +) -> Option>, RangeUnsatisfiableError>> { + maybe_range_ref.map(|header_value| { + http_range_header::parse_range_header(header_value) + .and_then(|first_pass| first_pass.validate(file_size)) + }) +} + +async fn is_dir(path_to_file: &Path) -> bool { + tokio::fs::metadata(path_to_file) + .await + .map_or(false, |meta_data| meta_data.is_dir()) +} + +fn append_slash_on_path(uri: Uri) -> Uri { + let http::uri::Parts { + scheme, + authority, + path_and_query, + .. 
+ } = uri.into_parts(); + + let mut uri_builder = Uri::builder(); + + if let Some(scheme) = scheme { + uri_builder = uri_builder.scheme(scheme); + } + + if let Some(authority) = authority { + uri_builder = uri_builder.authority(authority); + } + + let uri_builder = if let Some(path_and_query) = path_and_query { + if let Some(query) = path_and_query.query() { + uri_builder.path_and_query(format!("{}/?{}", path_and_query.path(), query)) + } else { + uri_builder.path_and_query(format!("{}/", path_and_query.path())) + } + } else { + uri_builder.path_and_query("/") + }; + + uri_builder.build().unwrap() +} diff --git a/.cargo-vendor/tower-http/src/services/fs/serve_dir/tests.rs b/.cargo-vendor/tower-http/src/services/fs/serve_dir/tests.rs new file mode 100644 index 0000000000..d0d3952c7d --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/fs/serve_dir/tests.rs @@ -0,0 +1,768 @@ +use crate::services::{ServeDir, ServeFile}; +use crate::test_helpers::{to_bytes, Body}; +use brotli::BrotliDecompress; +use bytes::Bytes; +use flate2::bufread::{DeflateDecoder, GzDecoder}; +use http::header::ALLOW; +use http::{header, Method, Response}; +use http::{Request, StatusCode}; +use http_body::Body as HttpBody; +use http_body_util::BodyExt; +use std::convert::Infallible; +use std::io::Read; +use tower::{service_fn, ServiceExt}; + +#[tokio::test] +async fn basic() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("../README.md").unwrap(); + assert_eq!(body, contents); +} + +#[tokio::test] +async fn basic_with_index() { + let svc = ServeDir::new("../test-files"); + + let req = Request::new(Body::empty()); + let res = svc.oneshot(req).await.unwrap(); + + 
assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()[header::CONTENT_TYPE], "text/html"); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "HTML!\n"); +} + +#[tokio::test] +async fn head_request() { + let svc = ServeDir::new("../test-files"); + + let req = Request::builder() + .uri("/precompressed.txt") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "23"); + + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn precompresed_head_request() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let req = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + assert_eq!(res.headers()["content-length"], "59"); + + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn with_custom_chunk_size() { + let svc = ServeDir::new("..").with_buf_chunk_size(1024 * 32); + + let req = Request::builder() + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("../README.md").unwrap(); + assert_eq!(body, contents); +} + +#[tokio::test] +async fn precompressed_gzip() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let req = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + 
assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn precompressed_br() { + let svc = ServeDir::new("../test-files").precompressed_br(); + + let req = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn precompressed_deflate() { + let svc = ServeDir::new("../test-files").precompressed_deflate(); + let request = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "deflate,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "deflate"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = DeflateDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn unsupported_precompression_alogrithm_fallbacks_to_uncompressed() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request 
= Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert!(res.headers().get("content-encoding").is_none()); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn only_precompressed_variant_existing() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/only_gzipped.txt") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + // Should reply with gzipped file if client supports it + let request = Request::builder() + .uri("/only_gzipped.txt") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file\"")); +} + +#[tokio::test] +async fn missing_precompressed_variant_fallbacks_to_uncompressed() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/missing_precompressed.txt") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); 
+ + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("Test file!")); +} + +#[tokio::test] +async fn missing_precompressed_variant_fallbacks_to_uncompressed_for_head_request() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/missing_precompressed.txt") + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "11"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); + + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn access_to_sub_dirs() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .uri("/tower-http/Cargo.toml") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/x-toml"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("Cargo.toml").unwrap(); + assert_eq!(body, contents); +} + +#[tokio::test] +async fn not_found() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .uri("/not-found") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[cfg(unix)] +#[tokio::test] +async fn not_found_when_not_a_directory() { + let svc = ServeDir::new("../test-files"); + + // `index.html` is a file, and we are trying to request + // it as a directory. 
+ let req = Request::builder() + .uri("/index.html/some_file") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + // This should lead to a 404 + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[tokio::test] +async fn not_found_precompressed() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let req = Request::builder() + .uri("/not-found") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[tokio::test] +async fn fallbacks_to_different_precompressed_variant_if_not_found_for_head_request() { + let svc = ServeDir::new("../test-files") + .precompressed_gzip() + .precompressed_br(); + + let req = Request::builder() + .uri("/precompressed_br.txt") + .header("Accept-Encoding", "gzip,br,deflate") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + assert_eq!(res.headers()["content-length"], "15"); + + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn fallbacks_to_different_precompressed_variant_if_not_found() { + let svc = ServeDir::new("../test-files") + .precompressed_gzip() + .precompressed_br(); + + let req = Request::builder() + .uri("/precompressed_br.txt") + .header("Accept-Encoding", "gzip,br,deflate") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); 
+ + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("Test file")); +} + +#[tokio::test] +async fn redirect_to_trailing_slash_on_dir() { + let svc = ServeDir::new("."); + + let req = Request::builder().uri("/src").body(Body::empty()).unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::TEMPORARY_REDIRECT); + + let location = &res.headers()[http::header::LOCATION]; + assert_eq!(location, "/src/"); +} + +#[tokio::test] +async fn empty_directory_without_index() { + let svc = ServeDir::new(".").append_index_html_on_directories(false); + + let req = Request::new(Body::empty()); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[tokio::test] +async fn empty_directory_without_index_no_information_leak() { + let svc = ServeDir::new("..").append_index_html_on_directories(false); + + let req = Request::builder() + .uri("/test-files") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +async fn body_into_text(body: B) -> String +where + B: HttpBody + Unpin, + B::Error: std::fmt::Debug, +{ + let bytes = to_bytes(body).await.unwrap(); + String::from_utf8(bytes.to_vec()).unwrap() +} + +#[tokio::test] +async fn access_cjk_percent_encoded_uri_path() { + // percent encoding present of 你好世界.txt + let cjk_filename_encoded = "%E4%BD%A0%E5%A5%BD%E4%B8%96%E7%95%8C.txt"; + + let svc = 
ServeDir::new("../test-files"); + + let req = Request::builder() + .uri(format!("/{}", cjk_filename_encoded)) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/plain"); +} + +#[tokio::test] +async fn access_space_percent_encoded_uri_path() { + let encoded_filename = "filename%20with%20space.txt"; + + let svc = ServeDir::new("../test-files"); + + let req = Request::builder() + .uri(format!("/{}", encoded_filename)) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/plain"); +} + +#[tokio::test] +async fn read_partial_in_bounds() { + let svc = ServeDir::new(".."); + let bytes_start_incl = 9; + let bytes_end_incl = 1023; + + let req = Request::builder() + .uri("/README.md") + .header( + "Range", + format!("bytes={}-{}", bytes_start_incl, bytes_end_incl), + ) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + let file_contents = std::fs::read("../README.md").unwrap(); + assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT); + assert_eq!( + res.headers()["content-length"], + (bytes_end_incl - bytes_start_incl + 1).to_string() + ); + assert!(res.headers()["content-range"] + .to_str() + .unwrap() + .starts_with(&format!( + "bytes {}-{}/{}", + bytes_start_incl, + bytes_end_incl, + file_contents.len() + ))); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = to_bytes(res.into_body()).await.ok().unwrap(); + let source = Bytes::from(file_contents[bytes_start_incl..=bytes_end_incl].to_vec()); + assert_eq!(body, source); +} + +#[tokio::test] +async fn read_partial_accepts_out_of_bounds_range() { + let svc = ServeDir::new(".."); + let bytes_start_incl = 0; + let bytes_end_excl = 9999999; + let requested_len = bytes_end_excl - bytes_start_incl; + + let req = 
Request::builder() + .uri("/README.md") + .header( + "Range", + format!("bytes={}-{}", bytes_start_incl, requested_len - 1), + ) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT); + let file_contents = std::fs::read("../README.md").unwrap(); + // Out of bounds range gives all bytes + assert_eq!( + res.headers()["content-range"], + &format!( + "bytes 0-{}/{}", + file_contents.len() - 1, + file_contents.len() + ) + ) +} + +#[tokio::test] +async fn read_partial_errs_on_garbage_header() { + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header("Range", "bad_format") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::RANGE_NOT_SATISFIABLE); + let file_contents = std::fs::read("../README.md").unwrap(); + assert_eq!( + res.headers()["content-range"], + &format!("bytes */{}", file_contents.len()) + ) +} + +#[tokio::test] +async fn read_partial_errs_on_bad_range() { + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header("Range", "bytes=-1-15") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::RANGE_NOT_SATISFIABLE); + let file_contents = std::fs::read("../README.md").unwrap(); + assert_eq!( + res.headers()["content-range"], + &format!("bytes */{}", file_contents.len()) + ) +} + +#[tokio::test] +async fn accept_encoding_identity() { + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header("Accept-Encoding", "identity") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + // Identity encoding should not be included in the response headers + assert!(res.headers().get("content-encoding").is_none()); +} + +#[tokio::test] +async fn last_modified() { + let svc = 
ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + + let last_modified = res + .headers() + .get(header::LAST_MODIFIED) + .expect("Missing last modified header!"); + + // -- If-Modified-Since + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_MODIFIED_SINCE, last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::NOT_MODIFIED); + assert!(res.into_body().frame().await.is_none()); + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_MODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let readme_bytes = include_bytes!("../../../../../README.md"); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + // -- If-Unmodified-Since + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_UNMODIFIED_SINCE, last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_UNMODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::PRECONDITION_FAILED); + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn with_fallback_svc() { + async fn fallback(req: Request) -> Result, Infallible> 
{ + Ok(Response::new(Body::from(format!( + "from fallback {}", + req.uri().path() + )))) + } + + let svc = ServeDir::new("..").fallback(tower::service_fn(fallback)); + + let req = Request::builder() + .uri("/doesnt-exist") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "from fallback /doesnt-exist"); +} + +#[tokio::test] +async fn with_fallback_serve_file() { + let svc = ServeDir::new("..").fallback(ServeFile::new("../README.md")); + + let req = Request::builder() + .uri("/doesnt-exist") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("../README.md").unwrap(); + assert_eq!(body, contents); +} + +#[tokio::test] +async fn method_not_allowed() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .method(Method::POST) + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(res.headers()[ALLOW], "GET,HEAD"); +} + +#[tokio::test] +async fn calling_fallback_on_not_allowed() { + async fn fallback(req: Request) -> Result, Infallible> { + Ok(Response::new(Body::from(format!( + "from fallback {}", + req.uri().path() + )))) + } + + let svc = ServeDir::new("..") + .call_fallback_on_method_not_allowed(true) + .fallback(tower::service_fn(fallback)); + + let req = Request::builder() + .method(Method::POST) + .uri("/doesnt-exist") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "from fallback /doesnt-exist"); +} + 
+#[tokio::test] +async fn with_fallback_svc_and_not_append_index_html_on_directories() { + async fn fallback(req: Request) -> Result, Infallible> { + Ok(Response::new(Body::from(format!( + "from fallback {}", + req.uri().path() + )))) + } + + let svc = ServeDir::new("..") + .append_index_html_on_directories(false) + .fallback(tower::service_fn(fallback)); + + let req = Request::builder().uri("/").body(Body::empty()).unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "from fallback /"); +} + +// https://github.com/tower-rs/tower-http/issues/308 +#[tokio::test] +async fn calls_fallback_on_invalid_paths() { + async fn fallback(_: T) -> Result, Infallible> { + let mut res = Response::new(Body::empty()); + res.headers_mut() + .insert("from-fallback", "1".parse().unwrap()); + Ok(res) + } + + let svc = ServeDir::new("..").fallback(service_fn(fallback)); + + let req = Request::builder() + .uri("/weird_%c3%28_path") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["from-fallback"], "1"); +} diff --git a/.cargo-vendor/tower-http/src/services/fs/serve_file.rs b/.cargo-vendor/tower-http/src/services/fs/serve_file.rs new file mode 100644 index 0000000000..ade3cd151b --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/fs/serve_file.rs @@ -0,0 +1,560 @@ +//! Service that serves a file. + +use super::ServeDir; +use http::{HeaderValue, Request}; +use mime::Mime; +use std::{ + path::Path, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// Service that serves a file. +#[derive(Clone, Debug)] +pub struct ServeFile(ServeDir); + +// Note that this is just a special case of ServeDir +impl ServeFile { + /// Create a new [`ServeFile`]. + /// + /// The `Content-Type` will be guessed from the file extension. 
+ pub fn new>(path: P) -> Self { + let guess = mime_guess::from_path(path.as_ref()); + let mime = guess + .first_raw() + .map(HeaderValue::from_static) + .unwrap_or_else(|| { + HeaderValue::from_str(mime::APPLICATION_OCTET_STREAM.as_ref()).unwrap() + }); + + Self(ServeDir::new_single_file(path, mime)) + } + + /// Create a new [`ServeFile`] with a specific mime type. + /// + /// # Panics + /// + /// Will panic if the mime type isn't a valid [header value]. + /// + /// [header value]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html + pub fn new_with_mime>(path: P, mime: &Mime) -> Self { + let mime = HeaderValue::from_str(mime.as_ref()).expect("mime isn't a valid header value"); + Self(ServeDir::new_single_file(path, mime)) + } + + /// Informs the service that it should also look for a precompressed gzip + /// version of the file. + /// + /// If the client has an `Accept-Encoding` header that allows the gzip encoding, + /// the file `foo.txt.gz` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. + pub fn precompressed_gzip(self) -> Self { + Self(self.0.precompressed_gzip()) + } + + /// Informs the service that it should also look for a precompressed brotli + /// version of the file. + /// + /// If the client has an `Accept-Encoding` header that allows the brotli encoding, + /// the file `foo.txt.br` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. 
+ pub fn precompressed_br(self) -> Self { + Self(self.0.precompressed_br()) + } + + /// Informs the service that it should also look for a precompressed deflate + /// version of the file. + /// + /// If the client has an `Accept-Encoding` header that allows the deflate encoding, + /// the file `foo.txt.zz` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. + pub fn precompressed_deflate(self) -> Self { + Self(self.0.precompressed_deflate()) + } + + /// Informs the service that it should also look for a precompressed zstd + /// version of the file. + /// + /// If the client has an `Accept-Encoding` header that allows the zstd encoding, + /// the file `foo.txt.zst` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. + pub fn precompressed_zstd(self) -> Self { + Self(self.0.precompressed_zstd()) + } + + /// Set a specific read buffer chunk size. + /// + /// The default capacity is 64kb. + pub fn with_buf_chunk_size(self, chunk_size: usize) -> Self { + Self(self.0.with_buf_chunk_size(chunk_size)) + } + + /// Call the service and get a future that contains any `std::io::Error` that might have + /// happened. + /// + /// See [`ServeDir::try_call`] for more details. 
+ pub fn try_call( + &mut self, + req: Request, + ) -> super::serve_dir::future::ResponseFuture + where + ReqBody: Send + 'static, + { + self.0.try_call(req) + } +} + +impl Service> for ServeFile +where + ReqBody: Send + 'static, +{ + type Error = >>::Error; + type Response = >>::Response; + type Future = >>::Future; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + self.0.call(req) + } +} + +#[cfg(test)] +mod tests { + use crate::services::ServeFile; + use crate::test_helpers::Body; + use async_compression::tokio::bufread::ZstdDecoder; + use brotli::BrotliDecompress; + use flate2::bufread::DeflateDecoder; + use flate2::bufread::GzDecoder; + use http::header; + use http::Method; + use http::{Request, StatusCode}; + use http_body_util::BodyExt; + use mime::Mime; + use std::io::Read; + use std::str::FromStr; + use tokio::io::AsyncReadExt; + use tower::ServiceExt; + + #[tokio::test] + async fn basic() { + let svc = ServeFile::new("../README.md"); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + + assert!(body.starts_with("# Tower HTTP")); + } + + #[tokio::test] + async fn basic_with_mime() { + let svc = ServeFile::new_with_mime("../README.md", &Mime::from_str("image/jpg").unwrap()); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "image/jpg"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + + assert!(body.starts_with("# Tower HTTP")); + } + + #[tokio::test] + async fn head_request() { + let svc = ServeFile::new("../test-files/precompressed.txt"); + + let mut request = Request::new(Body::empty()); + 
*request.method_mut() = Method::HEAD; + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "23"); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn precompresed_head_request() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + assert_eq!(res.headers()["content-length"], "59"); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn precompressed_gzip() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn unsupported_precompression_alogrithm_fallbacks_to_uncompressed() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert!(res.headers().get("content-encoding").is_none()); + + let body = 
res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn missing_precompressed_variant_fallbacks_to_uncompressed() { + let svc = ServeFile::new("../test-files/missing_precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("Test file!")); + } + + #[tokio::test] + async fn missing_precompressed_variant_fallbacks_to_uncompressed_head_request() { + let svc = ServeFile::new("../test-files/missing_precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "11"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn only_precompressed_variant_existing() { + let svc = ServeFile::new("../test-files/only_gzipped.txt").precompressed_gzip(); + + let request = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + // Should reply with gzipped file if client supports it + let request = Request::builder() + 
.header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file\"")); + } + + #[tokio::test] + async fn precompressed_br() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn precompressed_deflate() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_deflate(); + let request = Request::builder() + .header("Accept-Encoding", "deflate,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "deflate"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = DeflateDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn precompressed_zstd() { 
+ let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_zstd(); + let request = Request::builder() + .header("Accept-Encoding", "zstd,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "zstd"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = ZstdDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).await.unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn multi_precompressed() { + let svc = ServeFile::new("../test-files/precompressed.txt") + .precompressed_gzip() + .precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + + let request = Request::builder() + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn 
with_custom_chunk_size() { + let svc = ServeFile::new("../README.md").with_buf_chunk_size(1024 * 32); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + + assert!(body.starts_with("# Tower HTTP")); + } + + #[tokio::test] + async fn fallbacks_to_different_precompressed_variant_if_not_found() { + let svc = ServeFile::new("../test-files/precompressed_br.txt") + .precompressed_gzip() + .precompressed_deflate() + .precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip,deflate,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("Test file")); + } + + #[tokio::test] + async fn fallbacks_to_different_precompressed_variant_if_not_found_head_request() { + let svc = ServeFile::new("../test-files/precompressed_br.txt") + .precompressed_gzip() + .precompressed_deflate() + .precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip,deflate,br") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "15"); + assert_eq!(res.headers()["content-encoding"], "br"); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn returns_404_if_file_doesnt_exist() { + let svc = 
ServeFile::new("../this-doesnt-exist.md"); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + } + + #[tokio::test] + async fn returns_404_if_file_doesnt_exist_when_precompression_is_used() { + let svc = ServeFile::new("../this-doesnt-exist.md").precompressed_deflate(); + + let request = Request::builder() + .header("Accept-Encoding", "deflate") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + } + + #[tokio::test] + async fn last_modified() { + let svc = ServeFile::new("../README.md"); + + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let last_modified = res + .headers() + .get(header::LAST_MODIFIED) + .expect("Missing last modified header!"); + + // -- If-Modified-Since + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_MODIFIED_SINCE, last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::NOT_MODIFIED); + assert!(res.into_body().frame().await.is_none()); + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_MODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let readme_bytes = include_bytes!("../../../../README.md"); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + // -- If-Unmodified-Since + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_UNMODIFIED_SINCE, 
last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_UNMODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::PRECONDITION_FAILED); + assert!(res.into_body().frame().await.is_none()); + } +} diff --git a/.cargo-vendor/tower-http/src/services/mod.rs b/.cargo-vendor/tower-http/src/services/mod.rs new file mode 100644 index 0000000000..737d2fa195 --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/mod.rs @@ -0,0 +1,21 @@ +//! [`Service`]s that return responses without wrapping other [`Service`]s. +//! +//! These kinds of services are also referred to as "leaf services" since they sit at the leaves of +//! a [tree] of services. +//! +//! [`Service`]: https://docs.rs/tower/latest/tower/trait.Service.html +//! [tree]: https://en.wikipedia.org/wiki/Tree_(data_structure) + +#[cfg(feature = "redirect")] +pub mod redirect; + +#[cfg(feature = "redirect")] +#[doc(inline)] +pub use self::redirect::Redirect; + +#[cfg(feature = "fs")] +pub mod fs; + +#[cfg(feature = "fs")] +#[doc(inline)] +pub use self::fs::{ServeDir, ServeFile}; diff --git a/.cargo-vendor/tower-http/src/services/redirect.rs b/.cargo-vendor/tower-http/src/services/redirect.rs new file mode 100644 index 0000000000..020927c921 --- /dev/null +++ b/.cargo-vendor/tower-http/src/services/redirect.rs @@ -0,0 +1,159 @@ +//! Service that redirects all requests. +//! +//! # Example +//! +//! Imagine that we run `example.com` and want to redirect all requests using `HTTP` to `HTTPS`. +//! That can be done like so: +//! +//! ```rust +//! use http::{Request, Uri, StatusCode}; +//! use http_body_util::Full; +//! 
use bytes::Bytes; +//! use tower::{Service, ServiceExt}; +//! use tower_http::services::Redirect; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let uri: Uri = "https://example.com/".parse().unwrap(); +//! let mut service: Redirect> = Redirect::permanent(uri); +//! +//! let request = Request::builder() +//! .uri("http://example.com") +//! .body(Full::::default()) +//! .unwrap(); +//! +//! let response = service.oneshot(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::PERMANENT_REDIRECT); +//! assert_eq!(response.headers()["location"], "https://example.com/"); +//! # +//! # Ok(()) +//! # } +//! ``` + +use http::{header, HeaderValue, Response, StatusCode, Uri}; +use std::{ + convert::{Infallible, TryFrom}, + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// Service that redirects all requests. +/// +/// See the [module docs](crate::services::redirect) for more details. +pub struct Redirect { + status_code: StatusCode, + location: HeaderValue, + // Covariant over ResBody, no dropping of ResBody + _marker: PhantomData ResBody>, +} + +impl Redirect { + /// Create a new [`Redirect`] that uses a [`307 Temporary Redirect`][mdn] status code. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/307 + pub fn temporary(uri: Uri) -> Self { + Self::with_status_code(StatusCode::TEMPORARY_REDIRECT, uri) + } + + /// Create a new [`Redirect`] that uses a [`308 Permanent Redirect`][mdn] status code. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308 + pub fn permanent(uri: Uri) -> Self { + Self::with_status_code(StatusCode::PERMANENT_REDIRECT, uri) + } + + /// Create a new [`Redirect`] that uses the given status code. + /// + /// # Panics + /// + /// - If `status_code` isn't a [redirection status code][mdn] (3xx). + /// - If `uri` isn't a valid [`HeaderValue`]. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#redirection_messages + pub fn with_status_code(status_code: StatusCode, uri: Uri) -> Self { + assert!( + status_code.is_redirection(), + "not a redirection status code" + ); + + Self { + status_code, + location: HeaderValue::try_from(uri.to_string()) + .expect("URI isn't a valid header value"), + _marker: PhantomData, + } + } +} + +impl Service for Redirect +where + ResBody: Default, +{ + type Response = Response; + type Error = Infallible; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: R) -> Self::Future { + ResponseFuture { + status_code: self.status_code, + location: Some(self.location.clone()), + _marker: PhantomData, + } + } +} + +impl fmt::Debug for Redirect { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Redirect") + .field("status_code", &self.status_code) + .field("location", &self.location) + .finish() + } +} + +impl Clone for Redirect { + fn clone(&self) -> Self { + Self { + status_code: self.status_code, + location: self.location.clone(), + _marker: PhantomData, + } + } +} + +/// Response future of [`Redirect`]. 
+#[derive(Debug)] +pub struct ResponseFuture { + location: Option, + status_code: StatusCode, + // Covariant over ResBody, no dropping of ResBody + _marker: PhantomData ResBody>, +} + +impl Future for ResponseFuture +where + ResBody: Default, +{ + type Output = Result, Infallible>; + + fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + let mut res = Response::default(); + + *res.status_mut() = self.status_code; + + res.headers_mut() + .insert(header::LOCATION, self.location.take().unwrap()); + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo-vendor/tower-http/src/set_header/mod.rs b/.cargo-vendor/tower-http/src/set_header/mod.rs new file mode 100644 index 0000000000..396527ef44 --- /dev/null +++ b/.cargo-vendor/tower-http/src/set_header/mod.rs @@ -0,0 +1,110 @@ +//! Middleware for setting headers on requests and responses. +//! +//! See [request] and [response] for more details. + +use http::{header::HeaderName, HeaderMap, HeaderValue, Request, Response}; + +pub mod request; +pub mod response; + +#[doc(inline)] +pub use self::{ + request::{SetRequestHeader, SetRequestHeaderLayer}, + response::{SetResponseHeader, SetResponseHeaderLayer}, +}; + +/// Trait for producing header values. +/// +/// Used by [`SetRequestHeader`] and [`SetResponseHeader`]. +/// +/// This trait is implemented for closures with the correct type signature. Typically users will +/// not have to implement this trait for their own types. +/// +/// It is also implemented directly for [`HeaderValue`]. When a fixed header value should be added +/// to all responses, it can be supplied directly to the middleware. +pub trait MakeHeaderValue { + /// Try to create a header value from the request or response. 
+ fn make_header_value(&mut self, message: &T) -> Option; +} + +impl MakeHeaderValue for F +where + F: FnMut(&T) -> Option, +{ + fn make_header_value(&mut self, message: &T) -> Option { + self(message) + } +} + +impl MakeHeaderValue for HeaderValue { + fn make_header_value(&mut self, _message: &T) -> Option { + Some(self.clone()) + } +} + +impl MakeHeaderValue for Option { + fn make_header_value(&mut self, _message: &T) -> Option { + self.clone() + } +} + +#[derive(Debug, Clone, Copy)] +enum InsertHeaderMode { + Override, + Append, + IfNotPresent, +} + +impl InsertHeaderMode { + fn apply(self, header_name: &HeaderName, target: &mut T, make: &mut M) + where + T: Headers, + M: MakeHeaderValue, + { + match self { + InsertHeaderMode::Override => { + if let Some(value) = make.make_header_value(target) { + target.headers_mut().insert(header_name.clone(), value); + } + } + InsertHeaderMode::IfNotPresent => { + if !target.headers().contains_key(header_name) { + if let Some(value) = make.make_header_value(target) { + target.headers_mut().insert(header_name.clone(), value); + } + } + } + InsertHeaderMode::Append => { + if let Some(value) = make.make_header_value(target) { + target.headers_mut().append(header_name.clone(), value); + } + } + } + } +} + +trait Headers { + fn headers(&self) -> &HeaderMap; + + fn headers_mut(&mut self) -> &mut HeaderMap; +} + +impl Headers for Request { + fn headers(&self) -> &HeaderMap { + Request::headers(self) + } + + fn headers_mut(&mut self) -> &mut HeaderMap { + Request::headers_mut(self) + } +} + +impl Headers for Response { + fn headers(&self) -> &HeaderMap { + Response::headers(self) + } + + fn headers_mut(&mut self) -> &mut HeaderMap { + Response::headers_mut(self) + } +} diff --git a/.cargo-vendor/tower-http/src/set_header/request.rs b/.cargo-vendor/tower-http/src/set_header/request.rs new file mode 100644 index 0000000000..4032e23a78 --- /dev/null +++ b/.cargo-vendor/tower-http/src/set_header/request.rs @@ -0,0 +1,254 @@ +//! 
Set a header on the request. +//! +//! The header value to be set may be provided as a fixed value when the +//! middleware is constructed, or determined dynamically based on the request +//! by a closure. See the [`MakeHeaderValue`] trait for details. +//! +//! # Example +//! +//! Setting a header from a fixed value provided when the middleware is constructed: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetRequestHeaderLayer; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let http_client = tower::service_fn(|_: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(Full::::default())) +//! # }); +//! # +//! let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `User-Agent: my very cool app` on requests. +//! // +//! // `if_not_present` will only insert the header if it does not already +//! // have a value. +//! SetRequestHeaderLayer::if_not_present( +//! header::USER_AGENT, +//! HeaderValue::from_static("my very cool app"), +//! ) +//! ) +//! .service(http_client); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Setting a header based on a value determined dynamically from the request: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetRequestHeaderLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let http_client = tower::service_fn(|_: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(Full::::default())) +//! # }); +//! fn date_header_value() -> HeaderValue { +//! 
// ... +//! # HeaderValue::from_static("now") +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `Date` to the current date and time. +//! // +//! // `overriding` will insert the header and override any previous values it +//! // may have. +//! SetRequestHeaderLayer::overriding( +//! header::DATE, +//! |request: &Request>| { +//! Some(date_header_value()) +//! } +//! ) +//! ) +//! .service(http_client); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! # +//! # Ok(()) +//! # } +//! ``` + +use super::{InsertHeaderMode, MakeHeaderValue}; +use http::{header::HeaderName, Request, Response}; +use std::{ + fmt, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`SetRequestHeader`] which adds a request header. +/// +/// See [`SetRequestHeader`] for more details. +pub struct SetRequestHeaderLayer { + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl fmt::Debug for SetRequestHeaderLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetRequestHeaderLayer") + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl SetRequestHeaderLayer { + /// Create a new [`SetRequestHeaderLayer`]. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + pub fn overriding(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetRequestHeaderLayer`]. + /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. 
+ pub fn appending(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetRequestHeaderLayer`]. + /// + /// If a previous value exists for the header, the new value is not inserted. + pub fn if_not_present(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + make, + header_name, + mode, + } + } +} + +impl Layer for SetRequestHeaderLayer +where + M: Clone, +{ + type Service = SetRequestHeader; + + fn layer(&self, inner: S) -> Self::Service { + SetRequestHeader { + inner, + header_name: self.header_name.clone(), + make: self.make.clone(), + mode: self.mode, + } + } +} + +impl Clone for SetRequestHeaderLayer +where + M: Clone, +{ + fn clone(&self) -> Self { + Self { + make: self.make.clone(), + header_name: self.header_name.clone(), + mode: self.mode, + } + } +} + +/// Middleware that sets a header on the request. +#[derive(Clone)] +pub struct SetRequestHeader { + inner: S, + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl SetRequestHeader { + /// Create a new [`SetRequestHeader`]. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + pub fn overriding(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetRequestHeader`]. + /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. + pub fn appending(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetRequestHeader`]. + /// + /// If a previous value exists for the header, the new value is not inserted. 
+ pub fn if_not_present(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(inner: S, header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + inner, + header_name, + make, + mode, + } + } + + define_inner_service_accessors!(); +} + +impl fmt::Debug for SetRequestHeader +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetRequestHeader") + .field("inner", &self.inner) + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl Service> for SetRequestHeader +where + S: Service, Response = Response>, + M: MakeHeaderValue>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + self.mode.apply(&self.header_name, &mut req, &mut self.make); + self.inner.call(req) + } +} diff --git a/.cargo-vendor/tower-http/src/set_header/response.rs b/.cargo-vendor/tower-http/src/set_header/response.rs new file mode 100644 index 0000000000..c7b8ea8440 --- /dev/null +++ b/.cargo-vendor/tower-http/src/set_header/response.rs @@ -0,0 +1,391 @@ +//! Set a header on the response. +//! +//! The header value to be set may be provided as a fixed value when the +//! middleware is constructed, or determined dynamically based on the response +//! by a closure. See the [`MakeHeaderValue`] trait for details. +//! +//! # Example +//! +//! Setting a header from a fixed value provided when the middleware is constructed: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetResponseHeaderLayer; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! 
+//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let render_html = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # +//! let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `Content-Type: text/html` on responses. +//! // +//! // `if_not_present` will only insert the header if it does not already +//! // have a value. +//! SetResponseHeaderLayer::if_not_present( +//! header::CONTENT_TYPE, +//! HeaderValue::from_static("text/html"), +//! ) +//! ) +//! .service(render_html); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["content-type"], "text/html"); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Setting a header based on a value determined dynamically from the response: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetResponseHeaderLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use http_body::Body as _; // for `Body::size_hint` +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let render_html = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(Full::from("1234567890"))) +//! # }); +//! # +//! let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `Content-Length` if the body has a known size. +//! // Bodies with streaming responses wont have a known size. +//! // +//! // `overriding` will insert the header and override any previous values it +//! // may have. +//! SetResponseHeaderLayer::overriding( +//! header::CONTENT_LENGTH, +//! |response: &Response>| { +//! if let Some(size) = response.body().size_hint().exact() { +//! 
// If the response body has a known size, returning `Some` will +//! // set the `Content-Length` header to that value. +//! Some(HeaderValue::from_str(&size.to_string()).unwrap()) +//! } else { +//! // If the response body doesn't have a known size, return `None` +//! // to skip setting the header on this response. +//! None +//! } +//! } +//! ) +//! ) +//! .service(render_html); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["content-length"], "10"); +//! # +//! # Ok(()) +//! # } +//! ``` + +use super::{InsertHeaderMode, MakeHeaderValue}; +use http::{header::HeaderName, Request, Response}; +use pin_project_lite::pin_project; +use std::{ + fmt, + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`SetResponseHeader`] which adds a response header. +/// +/// See [`SetResponseHeader`] for more details. +pub struct SetResponseHeaderLayer { + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl fmt::Debug for SetResponseHeaderLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetResponseHeaderLayer") + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl SetResponseHeaderLayer { + /// Create a new [`SetResponseHeaderLayer`]. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + pub fn overriding(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetResponseHeaderLayer`]. + /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. 
+ pub fn appending(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetResponseHeaderLayer`]. + /// + /// If a previous value exists for the header, the new value is not inserted. + pub fn if_not_present(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + make, + header_name, + mode, + } + } +} + +impl Layer for SetResponseHeaderLayer +where + M: Clone, +{ + type Service = SetResponseHeader; + + fn layer(&self, inner: S) -> Self::Service { + SetResponseHeader { + inner, + header_name: self.header_name.clone(), + make: self.make.clone(), + mode: self.mode, + } + } +} + +impl Clone for SetResponseHeaderLayer +where + M: Clone, +{ + fn clone(&self) -> Self { + Self { + make: self.make.clone(), + header_name: self.header_name.clone(), + mode: self.mode, + } + } +} + +/// Middleware that sets a header on the response. +#[derive(Clone)] +pub struct SetResponseHeader { + inner: S, + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl SetResponseHeader { + /// Create a new [`SetResponseHeader`]. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + pub fn overriding(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetResponseHeader`]. + /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. + pub fn appending(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetResponseHeader`]. + /// + /// If a previous value exists for the header, the new value is not inserted. 
+ pub fn if_not_present(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(inner: S, header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + inner, + header_name, + make, + mode, + } + } + + define_inner_service_accessors!(); +} + +impl fmt::Debug for SetResponseHeader +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetResponseHeader") + .field("inner", &self.inner) + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl Service> for SetResponseHeader +where + S: Service, Response = Response>, + M: MakeHeaderValue> + Clone, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseFuture { + future: self.inner.call(req), + header_name: self.header_name.clone(), + make: self.make.clone(), + mode: self.mode, + } + } +} + +pin_project! { + /// Response future for [`SetResponseHeader`]. 
+ #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + future: F, + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + M: MakeHeaderValue>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut res = ready!(this.future.poll(cx)?); + + this.mode.apply(this.header_name, &mut res, &mut *this.make); + + Poll::Ready(Ok(res)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::Body; + use http::{header, HeaderValue}; + use std::convert::Infallible; + use tower::{service_fn, ServiceExt}; + + #[tokio::test] + async fn test_override_mode() { + let svc = SetResponseHeader::overriding( + service_fn(|_req: Request| async { + let res = Response::builder() + .header(header::CONTENT_TYPE, "good-content") + .body(Body::empty()) + .unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "text/html"); + assert_eq!(values.next(), None); + } + + #[tokio::test] + async fn test_append_mode() { + let svc = SetResponseHeader::appending( + service_fn(|_req: Request| async { + let res = Response::builder() + .header(header::CONTENT_TYPE, "good-content") + .body(Body::empty()) + .unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "good-content"); + assert_eq!(values.next().unwrap(), "text/html"); + assert_eq!(values.next(), None); + } + + #[tokio::test] + async fn test_skip_if_present_mode() { + let svc = 
SetResponseHeader::if_not_present( + service_fn(|_req: Request| async { + let res = Response::builder() + .header(header::CONTENT_TYPE, "good-content") + .body(Body::empty()) + .unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "good-content"); + assert_eq!(values.next(), None); + } + + #[tokio::test] + async fn test_skip_if_present_mode_when_not_present() { + let svc = SetResponseHeader::if_not_present( + service_fn(|_req: Request| async { + let res = Response::builder().body(Body::empty()).unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "text/html"); + assert_eq!(values.next(), None); + } +} diff --git a/.cargo-vendor/tower-http/src/set_status.rs b/.cargo-vendor/tower-http/src/set_status.rs new file mode 100644 index 0000000000..65f5405e47 --- /dev/null +++ b/.cargo-vendor/tower-http/src/set_status.rs @@ -0,0 +1,137 @@ +//! Middleware to override status codes. +//! +//! # Example +//! +//! ``` +//! use tower_http::set_status::SetStatusLayer; +//! use http::{Request, Response, StatusCode}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use std::{iter::once, convert::Infallible}; +//! use tower::{ServiceBuilder, Service, ServiceExt}; +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut service = ServiceBuilder::new() +//! // change the status to `404 Not Found` regardless what the inner service returns +//! 
.layer(SetStatusLayer::new(StatusCode::NOT_FOUND)) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::builder().body(Full::default())?; +//! +//! let response = service.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::NOT_FOUND); +//! # +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response, StatusCode}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`SetStatus`] which overrides the status codes. +#[derive(Debug, Clone, Copy)] +pub struct SetStatusLayer { + status: StatusCode, +} + +impl SetStatusLayer { + /// Create a new [`SetStatusLayer`]. + /// + /// The response status code will be `status` regardless of what the inner service returns. + pub fn new(status: StatusCode) -> Self { + SetStatusLayer { status } + } +} + +impl Layer for SetStatusLayer { + type Service = SetStatus; + + fn layer(&self, inner: S) -> Self::Service { + SetStatus::new(inner, self.status) + } +} + +/// Middleware to override status codes. +/// +/// See the [module docs](self) for more details. +#[derive(Debug, Clone, Copy)] +pub struct SetStatus { + inner: S, + status: StatusCode, +} + +impl SetStatus { + /// Create a new [`SetStatus`]. + /// + /// The response status code will be `status` regardless of what the inner service returns. + pub fn new(inner: S, status: StatusCode) -> Self { + Self { status, inner } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetStatus` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(status: StatusCode) -> SetStatusLayer { + SetStatusLayer::new(status) + } +} + +impl Service> for SetStatus +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseFuture { + inner: self.inner.call(req), + status: Some(self.status), + } + } +} + +pin_project! { + /// Response future for [`SetStatus`]. + pub struct ResponseFuture { + #[pin] + inner: F, + status: Option, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut response = ready!(this.inner.poll(cx)?); + *response.status_mut() = this.status.take().expect("future polled after completion"); + Poll::Ready(Ok(response)) + } +} diff --git a/.cargo-vendor/tower-http/src/test_helpers.rs b/.cargo-vendor/tower-http/src/test_helpers.rs new file mode 100644 index 0000000000..6add4233e9 --- /dev/null +++ b/.cargo-vendor/tower-http/src/test_helpers.rs @@ -0,0 +1,166 @@ +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::Bytes; +use futures_util::TryStream; +use http::HeaderMap; +use http_body::Frame; +use http_body_util::BodyExt; +use pin_project_lite::pin_project; +use sync_wrapper::SyncWrapper; +use tower::BoxError; + +type BoxBody = http_body_util::combinators::UnsyncBoxBody; + +#[derive(Debug)] +pub(crate) struct Body(BoxBody); + +impl Body { + pub(crate) fn new(body: B) -> Self + where + B: http_body::Body + Send + 'static, + B::Error: Into, + { + Self(body.map_err(Into::into).boxed_unsync()) + } + + pub(crate) fn empty() -> Self { + Self::new(http_body_util::Empty::new()) + } + + pub(crate) fn from_stream(stream: S) -> Self + where + S: TryStream + Send + 'static, 
+ S::Ok: Into, + S::Error: Into, + { + Self::new(StreamBody { + stream: SyncWrapper::new(stream), + }) + } + + pub(crate) fn with_trailers(self, trailers: HeaderMap) -> WithTrailers { + WithTrailers { + inner: self, + trailers: Some(trailers), + } + } +} + +impl Default for Body { + fn default() -> Self { + Self::empty() + } +} + +macro_rules! body_from_impl { + ($ty:ty) => { + impl From<$ty> for Body { + fn from(buf: $ty) -> Self { + Self::new(http_body_util::Full::from(buf)) + } + } + }; +} + +body_from_impl!(&'static [u8]); +body_from_impl!(std::borrow::Cow<'static, [u8]>); +body_from_impl!(Vec); + +body_from_impl!(&'static str); +body_from_impl!(std::borrow::Cow<'static, str>); +body_from_impl!(String); + +body_from_impl!(Bytes); + +impl http_body::Body for Body { + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Pin::new(&mut self.0).poll_frame(cx) + } + + fn size_hint(&self) -> http_body::SizeHint { + self.0.size_hint() + } + + fn is_end_stream(&self) -> bool { + self.0.is_end_stream() + } +} + +pin_project! { + struct StreamBody { + #[pin] + stream: SyncWrapper, + } +} + +impl http_body::Body for StreamBody +where + S: TryStream, + S::Ok: Into, + S::Error: Into, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let stream = self.project().stream.get_pin_mut(); + match std::task::ready!(stream.try_poll_next(cx)) { + Some(Ok(chunk)) => Poll::Ready(Some(Ok(Frame::data(chunk.into())))), + Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), + None => Poll::Ready(None), + } + } +} + +pub(crate) async fn to_bytes(body: T) -> Result +where + T: http_body::Body, +{ + futures_util::pin_mut!(body); + Ok(body.collect().await?.to_bytes()) +} + +pin_project! 
{ + pub(crate) struct WithTrailers { + #[pin] + inner: B, + trailers: Option, + } +} + +impl http_body::Body for WithTrailers +where + B: http_body::Body, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + match std::task::ready!(this.inner.poll_frame(cx)) { + Some(frame) => Poll::Ready(Some(frame)), + None => { + if let Some(trailers) = this.trailers.take() { + Poll::Ready(Some(Ok(Frame::trailers(trailers)))) + } else { + Poll::Ready(None) + } + } + } + } +} diff --git a/.cargo-vendor/tower-http/src/timeout/body.rs b/.cargo-vendor/tower-http/src/timeout/body.rs new file mode 100644 index 0000000000..3705d1c07e --- /dev/null +++ b/.cargo-vendor/tower-http/src/timeout/body.rs @@ -0,0 +1,192 @@ +use crate::BoxError; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; +use tokio::time::{sleep, Sleep}; + +pin_project! { + /// Middleware that applies a timeout to request and response bodies. + /// + /// Wrapper around a [`http_body::Body`] to time out if data is not ready within the specified duration. + /// + /// Bodies must produce data at most within the specified timeout. + /// If the body does not produce a requested data frame within the timeout period, it will return an error. + /// + /// # Differences from [`Timeout`][crate::timeout::Timeout] + /// + /// [`Timeout`][crate::timeout::Timeout] applies a timeout to the request future, not body. + /// That timeout is not reset when bytes are handled, whether the request is active or not. + /// Bodies are handled asynchronously outside of the tower stack's future and thus needs an additional timeout. + /// + /// This middleware will return a [`TimeoutError`]. 
+ /// + /// # Example + /// + /// ``` + /// use http::{Request, Response}; + /// use bytes::Bytes; + /// use http_body_util::Full; + /// use std::time::Duration; + /// use tower::ServiceBuilder; + /// use tower_http::timeout::RequestBodyTimeoutLayer; + /// + /// async fn handle(_: Request>) -> Result>, std::convert::Infallible> { + /// // ... + /// # todo!() + /// } + /// + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// let svc = ServiceBuilder::new() + /// // Timeout bodies after 30 seconds of inactivity + /// .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(30))) + /// .service_fn(handle); + /// # Ok(()) + /// # } + /// ``` + pub struct TimeoutBody { + timeout: Duration, + #[pin] + sleep: Option, + #[pin] + body: B, + } +} + +impl TimeoutBody { + /// Creates a new [`TimeoutBody`]. + pub fn new(timeout: Duration, body: B) -> Self { + TimeoutBody { + timeout, + sleep: None, + body, + } + } +} + +impl Body for TimeoutBody +where + B: Body, + B::Error: Into, +{ + type Data = B::Data; + type Error = Box; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + + // Start the `Sleep` if not active. + let sleep_pinned = if let Some(some) = this.sleep.as_mut().as_pin_mut() { + some + } else { + this.sleep.set(Some(sleep(*this.timeout))); + this.sleep.as_mut().as_pin_mut().unwrap() + }; + + // Error if the timeout has expired. + if let Poll::Ready(()) = sleep_pinned.poll(cx) { + return Poll::Ready(Some(Err(Box::new(TimeoutError(()))))); + } + + // Check for body data. + let frame = ready!(this.body.poll_frame(cx)); + // A frame is ready. Reset the `Sleep`... + this.sleep.set(None); + + Poll::Ready(frame.transpose().map_err(Into::into).transpose()) + } +} + +/// Error for [`TimeoutBody`]. 
+#[derive(Debug)] +pub struct TimeoutError(()); + +impl std::error::Error for TimeoutError {} + +impl std::fmt::Display for TimeoutError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "data was not received within the designated timeout") + } +} +#[cfg(test)] +mod tests { + use super::*; + + use bytes::Bytes; + use http_body::Frame; + use http_body_util::BodyExt; + use pin_project_lite::pin_project; + use std::{error::Error, fmt::Display}; + + #[derive(Debug)] + struct MockError; + + impl Error for MockError {} + + impl Display for MockError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "mock error") + } + } + + pin_project! { + struct MockBody { + #[pin] + sleep: Sleep + } + } + + impl Body for MockBody { + type Data = Bytes; + type Error = MockError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + this.sleep + .poll(cx) + .map(|_| Some(Ok(Frame::data(vec![].into())))) + } + } + + #[tokio::test] + async fn test_body_available_within_timeout() { + let mock_sleep = Duration::from_secs(1); + let timeout_sleep = Duration::from_secs(2); + + let mock_body = MockBody { + sleep: sleep(mock_sleep), + }; + let timeout_body = TimeoutBody::new(timeout_sleep, mock_body); + + assert!(timeout_body + .boxed() + .frame() + .await + .expect("no frame") + .is_ok()); + } + + #[tokio::test] + async fn test_body_unavailable_within_timeout_error() { + let mock_sleep = Duration::from_secs(2); + let timeout_sleep = Duration::from_secs(1); + + let mock_body = MockBody { + sleep: sleep(mock_sleep), + }; + let timeout_body = TimeoutBody::new(timeout_sleep, mock_body); + + assert!(timeout_body.boxed().frame().await.unwrap().is_err()); + } +} diff --git a/.cargo-vendor/tower-http/src/timeout/mod.rs b/.cargo-vendor/tower-http/src/timeout/mod.rs new file mode 100644 index 0000000000..facb6a920e --- /dev/null +++ 
b/.cargo-vendor/tower-http/src/timeout/mod.rs @@ -0,0 +1,50 @@ +//! Middleware that applies a timeout to requests. +//! +//! If the request does not complete within the specified timeout it will be aborted and a `408 +//! Request Timeout` response will be sent. +//! +//! # Differences from `tower::timeout` +//! +//! tower's [`Timeout`](tower::timeout::Timeout) middleware uses an error to signal timeout, i.e. +//! it changes the error type to [`BoxError`](tower::BoxError). For HTTP services that is rarely +//! what you want as returning errors will terminate the connection without sending a response. +//! +//! This middleware won't change the error type and instead return a `408 Request Timeout` +//! response. That means if your service's error type is [`Infallible`] it will still be +//! [`Infallible`] after applying this middleware. +//! +//! # Example +//! +//! ``` +//! use http::{Request, Response}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use std::{convert::Infallible, time::Duration}; +//! use tower::ServiceBuilder; +//! use tower_http::timeout::TimeoutLayer; +//! +//! async fn handle(_: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let svc = ServiceBuilder::new() +//! // Timeout requests after 30 seconds +//! .layer(TimeoutLayer::new(Duration::from_secs(30))) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! 
[`Infallible`]: std::convert::Infallible + +mod body; +mod service; + +pub use body::{TimeoutBody, TimeoutError}; +pub use service::{ + RequestBodyTimeout, RequestBodyTimeoutLayer, ResponseBodyTimeout, ResponseBodyTimeoutLayer, + Timeout, TimeoutLayer, +}; diff --git a/.cargo-vendor/tower-http/src/timeout/service.rs b/.cargo-vendor/tower-http/src/timeout/service.rs new file mode 100644 index 0000000000..8371b03ffe --- /dev/null +++ b/.cargo-vendor/tower-http/src/timeout/service.rs @@ -0,0 +1,273 @@ +use crate::timeout::body::TimeoutBody; +use http::{Request, Response, StatusCode}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; +use tokio::time::Sleep; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies the [`Timeout`] middleware which apply a timeout to requests. +/// +/// See the [module docs](super) for an example. +#[derive(Debug, Clone, Copy)] +pub struct TimeoutLayer { + timeout: Duration, +} + +impl TimeoutLayer { + /// Creates a new [`TimeoutLayer`]. + pub fn new(timeout: Duration) -> Self { + TimeoutLayer { timeout } + } +} + +impl Layer for TimeoutLayer { + type Service = Timeout; + + fn layer(&self, inner: S) -> Self::Service { + Timeout::new(inner, self.timeout) + } +} + +/// Middleware which apply a timeout to requests. +/// +/// If the request does not complete within the specified timeout it will be aborted and a `408 +/// Request Timeout` response will be sent. +/// +/// See the [module docs](super) for an example. +#[derive(Debug, Clone, Copy)] +pub struct Timeout { + inner: S, + timeout: Duration, +} + +impl Timeout { + /// Creates a new [`Timeout`]. + pub fn new(inner: S, timeout: Duration) -> Self { + Self { inner, timeout } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `Timeout` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(timeout: Duration) -> TimeoutLayer { + TimeoutLayer::new(timeout) + } +} + +impl Service> for Timeout +where + S: Service, Response = Response>, + ResBody: Default, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let sleep = tokio::time::sleep(self.timeout); + ResponseFuture { + inner: self.inner.call(req), + sleep, + } + } +} + +pin_project! { + /// Response future for [`Timeout`]. + pub struct ResponseFuture { + #[pin] + inner: F, + #[pin] + sleep: Sleep, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Default, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + + if this.sleep.poll(cx).is_ready() { + let mut res = Response::new(B::default()); + *res.status_mut() = StatusCode::REQUEST_TIMEOUT; + return Poll::Ready(Ok(res)); + } + + this.inner.poll(cx) + } +} + +/// Applies a [`TimeoutBody`] to the request body. +#[derive(Clone, Debug)] +pub struct RequestBodyTimeoutLayer { + timeout: Duration, +} + +impl RequestBodyTimeoutLayer { + /// Creates a new [`RequestBodyTimeoutLayer`]. + pub fn new(timeout: Duration) -> Self { + Self { timeout } + } +} + +impl Layer for RequestBodyTimeoutLayer { + type Service = RequestBodyTimeout; + + fn layer(&self, inner: S) -> Self::Service { + RequestBodyTimeout::new(inner, self.timeout) + } +} + +/// Applies a [`TimeoutBody`] to the request body. +#[derive(Clone, Debug)] +pub struct RequestBodyTimeout { + inner: S, + timeout: Duration, +} + +impl RequestBodyTimeout { + /// Creates a new [`RequestBodyTimeout`]. 
+ pub fn new(service: S, timeout: Duration) -> Self { + Self { + inner: service, + timeout, + } + } + + /// Returns a new [`Layer`] that wraps services with a [`RequestBodyTimeoutLayer`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(timeout: Duration) -> RequestBodyTimeoutLayer { + RequestBodyTimeoutLayer::new(timeout) + } + + define_inner_service_accessors!(); +} + +impl Service> for RequestBodyTimeout +where + S: Service>>, + S::Error: Into>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(|body| TimeoutBody::new(self.timeout, body)); + self.inner.call(req) + } +} + +/// Applies a [`TimeoutBody`] to the response body. +#[derive(Clone)] +pub struct ResponseBodyTimeoutLayer { + timeout: Duration, +} + +impl ResponseBodyTimeoutLayer { + /// Creates a new [`ResponseBodyTimeoutLayer`]. + pub fn new(timeout: Duration) -> Self { + Self { timeout } + } +} + +impl Layer for ResponseBodyTimeoutLayer { + type Service = ResponseBodyTimeout; + + fn layer(&self, inner: S) -> Self::Service { + ResponseBodyTimeout::new(inner, self.timeout) + } +} + +/// Applies a [`TimeoutBody`] to the response body. +#[derive(Clone)] +pub struct ResponseBodyTimeout { + inner: S, + timeout: Duration, +} + +impl Service> for ResponseBodyTimeout +where + S: Service, Response = Response>, + S::Error: Into>, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseBodyTimeoutFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseBodyTimeoutFuture { + inner: self.inner.call(req), + timeout: self.timeout, + } + } +} + +impl ResponseBodyTimeout { + /// Creates a new [`ResponseBodyTimeout`]. 
+ pub fn new(service: S, timeout: Duration) -> Self { + Self { + inner: service, + timeout, + } + } + + /// Returns a new [`Layer`] that wraps services with a [`ResponseBodyTimeoutLayer`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(timeout: Duration) -> ResponseBodyTimeoutLayer { + ResponseBodyTimeoutLayer::new(timeout) + } + + define_inner_service_accessors!(); +} + +pin_project! { + /// Response future for [`ResponseBodyTimeout`]. + pub struct ResponseBodyTimeoutFuture { + #[pin] + inner: Fut, + timeout: Duration, + } +} + +impl Future for ResponseBodyTimeoutFuture +where + Fut: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let timeout = self.timeout; + let this = self.project(); + let res = ready!(this.inner.poll(cx))?; + Poll::Ready(Ok(res.map(|body| TimeoutBody::new(timeout, body)))) + } +} diff --git a/.cargo-vendor/tower-http/src/trace/body.rs b/.cargo-vendor/tower-http/src/trace/body.rs new file mode 100644 index 0000000000..d713f2432c --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/body.rs @@ -0,0 +1,102 @@ +use super::{DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, OnBodyChunk, OnEos, OnFailure}; +use crate::classify::ClassifyEos; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + fmt, + pin::Pin, + task::{ready, Context, Poll}, + time::Instant, +}; +use tracing::Span; + +pin_project! { + /// Response body for [`Trace`]. 
+ /// + /// [`Trace`]: super::Trace + pub struct ResponseBody { + #[pin] + pub(crate) inner: B, + pub(crate) classify_eos: Option, + pub(crate) on_eos: Option<(OnEos, Instant)>, + pub(crate) on_body_chunk: OnBodyChunk, + pub(crate) on_failure: Option, + pub(crate) start: Instant, + pub(crate) span: Span, + } +} + +impl Body + for ResponseBody +where + B: Body, + B::Error: fmt::Display + 'static, + C: ClassifyEos, + OnEosT: OnEos, + OnBodyChunkT: OnBodyChunk, + OnFailureT: OnFailure, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + let _guard = this.span.enter(); + let result = ready!(this.inner.poll_frame(cx)); + + let latency = this.start.elapsed(); + *this.start = Instant::now(); + + match result { + Some(Ok(frame)) => { + let frame = match frame.into_data() { + Ok(chunk) => { + this.on_body_chunk.on_body_chunk(&chunk, latency, this.span); + Frame::data(chunk) + } + Err(frame) => frame, + }; + + let frame = match frame.into_trailers() { + Ok(trailers) => { + if let Some((on_eos, stream_start)) = this.on_eos.take() { + on_eos.on_eos(Some(&trailers), stream_start.elapsed(), this.span); + } + Frame::trailers(trailers) + } + Err(frame) => frame, + }; + + Poll::Ready(Some(Ok(frame))) + } + Some(Err(err)) => { + if let Some((classify_eos, mut on_failure)) = + this.classify_eos.take().zip(this.on_failure.take()) + { + let failure_class = classify_eos.classify_error(&err); + on_failure.on_failure(failure_class, latency, this.span); + } + + Poll::Ready(Some(Err(err))) + } + None => { + if let Some((on_eos, stream_start)) = this.on_eos.take() { + on_eos.on_eos(None, stream_start.elapsed(), this.span); + } + + Poll::Ready(None) + } + } + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} diff --git a/.cargo-vendor/tower-http/src/trace/future.rs 
b/.cargo-vendor/tower-http/src/trace/future.rs new file mode 100644 index 0000000000..e205ea32c3 --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/future.rs @@ -0,0 +1,116 @@ +use super::{ + DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, DefaultOnResponse, OnBodyChunk, OnEos, + OnFailure, OnResponse, ResponseBody, +}; +use crate::classify::{ClassifiedResponse, ClassifyResponse}; +use http::Response; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, + time::Instant, +}; +use tracing::Span; + +pin_project! { + /// Response future for [`Trace`]. + /// + /// [`Trace`]: super::Trace + pub struct ResponseFuture { + #[pin] + pub(crate) inner: F, + pub(crate) span: Span, + pub(crate) classifier: Option, + pub(crate) on_response: Option, + pub(crate) on_body_chunk: Option, + pub(crate) on_eos: Option, + pub(crate) on_failure: Option, + pub(crate) start: Instant, + } +} + +impl Future + for ResponseFuture +where + Fut: Future, E>>, + ResBody: Body, + ResBody::Error: std::fmt::Display + 'static, + E: std::fmt::Display + 'static, + C: ClassifyResponse, + OnResponseT: OnResponse, + OnFailureT: OnFailure, + OnBodyChunkT: OnBodyChunk, + OnEosT: OnEos, +{ + type Output = Result< + Response>, + E, + >; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let _guard = this.span.enter(); + let result = ready!(this.inner.poll(cx)); + let latency = this.start.elapsed(); + + let classifier = this.classifier.take().unwrap(); + let on_eos = this.on_eos.take(); + let on_body_chunk = this.on_body_chunk.take().unwrap(); + let mut on_failure = this.on_failure.take().unwrap(); + + match result { + Ok(res) => { + let classification = classifier.classify_response(&res); + let start = *this.start; + + this.on_response + .take() + .unwrap() + .on_response(&res, latency, this.span); + + match classification { + ClassifiedResponse::Ready(classification) => { + if 
let Err(failure_class) = classification { + on_failure.on_failure(failure_class, latency, this.span); + } + + let span = this.span.clone(); + let res = res.map(|body| ResponseBody { + inner: body, + classify_eos: None, + on_eos: None, + on_body_chunk, + on_failure: Some(on_failure), + start, + span, + }); + + Poll::Ready(Ok(res)) + } + ClassifiedResponse::RequiresEos(classify_eos) => { + let span = this.span.clone(); + let res = res.map(|body| ResponseBody { + inner: body, + classify_eos: Some(classify_eos), + on_eos: on_eos.zip(Some(Instant::now())), + on_body_chunk, + on_failure: Some(on_failure), + start, + span, + }); + + Poll::Ready(Ok(res)) + } + } + } + Err(err) => { + let failure_class = classifier.classify_error(&err); + on_failure.on_failure(failure_class, latency, this.span); + + Poll::Ready(Err(err)) + } + } + } +} diff --git a/.cargo-vendor/tower-http/src/trace/layer.rs b/.cargo-vendor/tower-http/src/trace/layer.rs new file mode 100644 index 0000000000..21ff321c1d --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/layer.rs @@ -0,0 +1,236 @@ +use super::{ + DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, DefaultOnRequest, + DefaultOnResponse, GrpcMakeClassifier, HttpMakeClassifier, Trace, +}; +use crate::classify::{ + GrpcErrorsAsFailures, MakeClassifier, ServerErrorsAsFailures, SharedClassifier, +}; +use tower_layer::Layer; + +/// [`Layer`] that adds high level [tracing] to a [`Service`]. +/// +/// See the [module docs](crate::trace) for more details. 
+/// +/// [`Layer`]: tower_layer::Layer +/// [tracing]: https://crates.io/crates/tracing +/// [`Service`]: tower_service::Service +#[derive(Debug, Copy, Clone)] +pub struct TraceLayer< + M, + MakeSpan = DefaultMakeSpan, + OnRequest = DefaultOnRequest, + OnResponse = DefaultOnResponse, + OnBodyChunk = DefaultOnBodyChunk, + OnEos = DefaultOnEos, + OnFailure = DefaultOnFailure, +> { + pub(crate) make_classifier: M, + pub(crate) make_span: MakeSpan, + pub(crate) on_request: OnRequest, + pub(crate) on_response: OnResponse, + pub(crate) on_body_chunk: OnBodyChunk, + pub(crate) on_eos: OnEos, + pub(crate) on_failure: OnFailure, +} + +impl TraceLayer { + /// Create a new [`TraceLayer`] using the given [`MakeClassifier`]. + pub fn new(make_classifier: M) -> Self + where + M: MakeClassifier, + { + Self { + make_classifier, + make_span: DefaultMakeSpan::new(), + on_failure: DefaultOnFailure::default(), + on_request: DefaultOnRequest::default(), + on_eos: DefaultOnEos::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_response: DefaultOnResponse::default(), + } + } +} + +impl + TraceLayer +{ + /// Customize what to do when a request is received. + /// + /// `NewOnRequest` is expected to implement [`OnRequest`]. + /// + /// [`OnRequest`]: super::OnRequest + pub fn on_request( + self, + new_on_request: NewOnRequest, + ) -> TraceLayer { + TraceLayer { + on_request: new_on_request, + on_failure: self.on_failure, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been produced. + /// + /// `NewOnResponse` is expected to implement [`OnResponse`]. 
+ /// + /// [`OnResponse`]: super::OnResponse + pub fn on_response( + self, + new_on_response: NewOnResponse, + ) -> TraceLayer { + TraceLayer { + on_response: new_on_response, + on_request: self.on_request, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + on_failure: self.on_failure, + make_span: self.make_span, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a body chunk has been sent. + /// + /// `NewOnBodyChunk` is expected to implement [`OnBodyChunk`]. + /// + /// [`OnBodyChunk`]: super::OnBodyChunk + pub fn on_body_chunk( + self, + new_on_body_chunk: NewOnBodyChunk, + ) -> TraceLayer { + TraceLayer { + on_body_chunk: new_on_body_chunk, + on_eos: self.on_eos, + on_failure: self.on_failure, + on_request: self.on_request, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a streaming response has closed. + /// + /// `NewOnEos` is expected to implement [`OnEos`]. + /// + /// [`OnEos`]: super::OnEos + pub fn on_eos( + self, + new_on_eos: NewOnEos, + ) -> TraceLayer { + TraceLayer { + on_eos: new_on_eos, + on_body_chunk: self.on_body_chunk, + on_failure: self.on_failure, + on_request: self.on_request, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been classified as a failure. + /// + /// `NewOnFailure` is expected to implement [`OnFailure`]. + /// + /// [`OnFailure`]: super::OnFailure + pub fn on_failure( + self, + new_on_failure: NewOnFailure, + ) -> TraceLayer { + TraceLayer { + on_failure: new_on_failure, + on_request: self.on_request, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize how to make [`Span`]s that all request handling will be wrapped in. 
+ /// + /// `NewMakeSpan` is expected to implement [`MakeSpan`]. + /// + /// [`MakeSpan`]: super::MakeSpan + /// [`Span`]: tracing::Span + pub fn make_span_with( + self, + new_make_span: NewMakeSpan, + ) -> TraceLayer { + TraceLayer { + make_span: new_make_span, + on_request: self.on_request, + on_failure: self.on_failure, + on_body_chunk: self.on_body_chunk, + on_eos: self.on_eos, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } +} + +impl TraceLayer { + /// Create a new [`TraceLayer`] using [`ServerErrorsAsFailures`] which supports classifying + /// regular HTTP responses based on the status code. + pub fn new_for_http() -> Self { + Self { + make_classifier: SharedClassifier::new(ServerErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_response: DefaultOnResponse::default(), + on_request: DefaultOnRequest::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl TraceLayer { + /// Create a new [`TraceLayer`] using [`GrpcErrorsAsFailures`] which supports classifying + /// gRPC responses and streams based on the `grpc-status` header. 
+ pub fn new_for_grpc() -> Self { + Self { + make_classifier: SharedClassifier::new(GrpcErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_response: DefaultOnResponse::default(), + on_request: DefaultOnRequest::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl Layer + for TraceLayer +where + M: Clone, + MakeSpan: Clone, + OnRequest: Clone, + OnResponse: Clone, + OnEos: Clone, + OnBodyChunk: Clone, + OnFailure: Clone, +{ + type Service = Trace; + + fn layer(&self, inner: S) -> Self::Service { + Trace { + inner, + make_classifier: self.make_classifier.clone(), + make_span: self.make_span.clone(), + on_request: self.on_request.clone(), + on_eos: self.on_eos.clone(), + on_body_chunk: self.on_body_chunk.clone(), + on_response: self.on_response.clone(), + on_failure: self.on_failure.clone(), + } + } +} diff --git a/.cargo-vendor/tower-http/src/trace/make_span.rs b/.cargo-vendor/tower-http/src/trace/make_span.rs new file mode 100644 index 0000000000..bf558d3b36 --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/make_span.rs @@ -0,0 +1,113 @@ +use http::Request; +use tracing::{Level, Span}; + +use super::DEFAULT_MESSAGE_LEVEL; + +/// Trait used to generate [`Span`]s from requests. [`Trace`] wraps all request handling in this +/// span. +/// +/// [`Span`]: tracing::Span +/// [`Trace`]: super::Trace +pub trait MakeSpan { + /// Make a span from a request. + fn make_span(&mut self, request: &Request) -> Span; +} + +impl MakeSpan for Span { + fn make_span(&mut self, _request: &Request) -> Span { + self.clone() + } +} + +impl MakeSpan for F +where + F: FnMut(&Request) -> Span, +{ + fn make_span(&mut self, request: &Request) -> Span { + self(request) + } +} + +/// The default way [`Span`]s will be created for [`Trace`]. 
+/// +/// [`Span`]: tracing::Span +/// [`Trace`]: super::Trace +#[derive(Debug, Clone)] +pub struct DefaultMakeSpan { + level: Level, + include_headers: bool, +} + +impl DefaultMakeSpan { + /// Create a new `DefaultMakeSpan`. + pub fn new() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + include_headers: false, + } + } + + /// Set the [`Level`] used for the [tracing span]. + /// + /// Defaults to [`Level::DEBUG`]. + /// + /// [tracing span]: https://docs.rs/tracing/latest/tracing/#spans + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Include request headers on the [`Span`]. + /// + /// By default headers are not included. + /// + /// [`Span`]: tracing::Span + pub fn include_headers(mut self, include_headers: bool) -> Self { + self.include_headers = include_headers; + self + } +} + +impl Default for DefaultMakeSpan { + fn default() -> Self { + Self::new() + } +} + +impl MakeSpan for DefaultMakeSpan { + fn make_span(&mut self, request: &Request) -> Span { + // This ugly macro is needed, unfortunately, because `tracing::span!` + // required the level argument to be static. Meaning we can't just pass + // `self.level`. + macro_rules! 
make_span { + ($level:expr) => { + if self.include_headers { + tracing::span!( + $level, + "request", + method = %request.method(), + uri = %request.uri(), + version = ?request.version(), + headers = ?request.headers(), + ) + } else { + tracing::span!( + $level, + "request", + method = %request.method(), + uri = %request.uri(), + version = ?request.version(), + ) + } + } + } + + match self.level { + Level::ERROR => make_span!(Level::ERROR), + Level::WARN => make_span!(Level::WARN), + Level::INFO => make_span!(Level::INFO), + Level::DEBUG => make_span!(Level::DEBUG), + Level::TRACE => make_span!(Level::TRACE), + } + } +} diff --git a/.cargo-vendor/tower-http/src/trace/mod.rs b/.cargo-vendor/tower-http/src/trace/mod.rs new file mode 100644 index 0000000000..65734a4241 --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/mod.rs @@ -0,0 +1,635 @@ +//! Middleware that adds high level [tracing] to a [`Service`]. +//! +//! # Example +//! +//! Adding tracing to your service can be as simple as: +//! +//! ```rust +//! use http::{Request, Response}; +//! use tower::{ServiceBuilder, ServiceExt, Service}; +//! use tower_http::trace::TraceLayer; +//! use std::convert::Infallible; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! async fn handle(request: Request>) -> Result>, Infallible> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Setup tracing +//! tracing_subscriber::fmt::init(); +//! +//! let mut service = ServiceBuilder::new() +//! .layer(TraceLayer::new_for_http()) +//! .service_fn(handle); +//! +//! let request = Request::new(Full::from("foo")); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! If you run this application with `RUST_LOG=tower_http=trace cargo run` you should see logs like: +//! +//! ```text +//! 
Mar 05 20:50:28.523 DEBUG request{method=GET path="/foo"}: tower_http::trace::on_request: started processing request +//! Mar 05 20:50:28.524 DEBUG request{method=GET path="/foo"}: tower_http::trace::on_response: finished processing request latency=1 ms status=200 +//! ``` +//! +//! # Customization +//! +//! [`Trace`] comes with good defaults but also supports customizing many aspects of the output. +//! +//! The default behaviour supports some customization: +//! +//! ```rust +//! use http::{Request, Response, HeaderMap, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::ServiceBuilder; +//! use tracing::Level; +//! use tower_http::{ +//! LatencyUnit, +//! trace::{TraceLayer, DefaultMakeSpan, DefaultOnRequest, DefaultOnResponse}, +//! }; +//! use std::time::Duration; +//! # use tower::{ServiceExt, Service}; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with( +//! DefaultMakeSpan::new().include_headers(true) +//! ) +//! .on_request( +//! DefaultOnRequest::new().level(Level::INFO) +//! ) +//! .on_response( +//! DefaultOnResponse::new() +//! .level(Level::INFO) +//! .latency_unit(LatencyUnit::Micros) +//! ) +//! // on so on for `on_eos`, `on_body_chunk`, and `on_failure` +//! ) +//! .service_fn(handle); +//! # let mut service = service; +//! # let response = service +//! # .ready() +//! # .await? +//! # .call(Request::new(Full::from("foo"))) +//! # .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! However for maximum control you can provide callbacks: +//! +//! ```rust +//! use http::{Request, Response, HeaderMap, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! 
use tower::ServiceBuilder; +//! use tower_http::{classify::ServerErrorsFailureClass, trace::TraceLayer}; +//! use std::time::Duration; +//! use tracing::Span; +//! # use tower::{ServiceExt, Service}; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with(|request: &Request>| { +//! tracing::debug_span!("http-request") +//! }) +//! .on_request(|request: &Request>, _span: &Span| { +//! tracing::debug!("started {} {}", request.method(), request.uri().path()) +//! }) +//! .on_response(|response: &Response>, latency: Duration, _span: &Span| { +//! tracing::debug!("response generated in {:?}", latency) +//! }) +//! .on_body_chunk(|chunk: &Bytes, latency: Duration, _span: &Span| { +//! tracing::debug!("sending {} bytes", chunk.len()) +//! }) +//! .on_eos(|trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span| { +//! tracing::debug!("stream closed after {:?}", stream_duration) +//! }) +//! .on_failure(|error: ServerErrorsFailureClass, latency: Duration, _span: &Span| { +//! tracing::debug!("something went wrong") +//! }) +//! ) +//! .service_fn(handle); +//! # let mut service = service; +//! # let response = service +//! # .ready() +//! # .await? +//! # .call(Request::new(Full::from("foo"))) +//! # .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Disabling something +//! +//! Setting the behaviour to `()` will be disable that particular step: +//! +//! ```rust +//! use http::StatusCode; +//! use tower::ServiceBuilder; +//! use tower_http::{classify::ServerErrorsFailureClass, trace::TraceLayer}; +//! use std::time::Duration; +//! use tracing::Span; +//! # use tower::{ServiceExt, Service}; +//! # use http_body_util::Full; +//! 
# use bytes::Bytes; +//! # use http::{Response, Request}; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! // This configuration will only emit events on failures +//! TraceLayer::new_for_http() +//! .on_request(()) +//! .on_response(()) +//! .on_body_chunk(()) +//! .on_eos(()) +//! .on_failure(|error: ServerErrorsFailureClass, latency: Duration, _span: &Span| { +//! tracing::debug!("something went wrong") +//! }) +//! ) +//! .service_fn(handle); +//! # let mut service = service; +//! # let response = service +//! # .ready() +//! # .await? +//! # .call(Request::new(Full::from("foo"))) +//! # .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! # When the callbacks are called +//! +//! ### `on_request` +//! +//! The `on_request` callback is called when the request arrives at the +//! middleware in [`Service::call`] just prior to passing the request to the +//! inner service. +//! +//! ### `on_response` +//! +//! The `on_response` callback is called when the inner service's response +//! future completes with `Ok(response)` regardless if the response is +//! classified as a success or a failure. +//! +//! For example if you're using [`ServerErrorsAsFailures`] as your classifier +//! and the inner service responds with `500 Internal Server Error` then the +//! `on_response` callback is still called. `on_failure` would _also_ be called +//! in this case since the response was classified as a failure. +//! +//! ### `on_body_chunk` +//! +//! The `on_body_chunk` callback is called when the response body produces a new +//! chunk, that is when [`Body::poll_frame`] returns a data frame. +//! +//! `on_body_chunk` is called even if the chunk is empty. +//! +//! ### `on_eos` +//! +//! 
The `on_eos` callback is called when a streaming response body ends, that is +//! when [`Body::poll_frame`] returns a trailers frame. +//! +//! `on_eos` is called even if the trailers produced are `None`. +//! +//! ### `on_failure` +//! +//! The `on_failure` callback is called when: +//! +//! - The inner [`Service`]'s response future resolves to an error. +//! - A response is classified as a failure. +//! - [`Body::poll_frame`] returns an error. +//! - An end-of-stream is classified as a failure. +//! +//! # Recording fields on the span +//! +//! All callbacks receive a reference to the [tracing] [`Span`], corresponding to this request, +//! produced by the closure passed to [`TraceLayer::make_span_with`]. It can be used to [record +//! field values][record] that weren't known when the span was created. +//! +//! ```rust +//! use http::{Request, Response, HeaderMap, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::ServiceBuilder; +//! use tower_http::trace::TraceLayer; +//! use tracing::Span; +//! use std::time::Duration; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with(|request: &Request>| { +//! tracing::debug_span!( +//! "http-request", +//! status_code = tracing::field::Empty, +//! ) +//! }) +//! .on_response(|response: &Response>, _latency: Duration, span: &Span| { +//! span.record("status_code", &tracing::field::display(response.status())); +//! +//! tracing::debug!("response generated") +//! }) +//! ) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! # Providing classifiers +//! +//! Tracing requires determining if a response is a success or failure. 
[`MakeClassifier`] is used +//! to create a classifier for the incoming request. See the docs for [`MakeClassifier`] and +//! [`ClassifyResponse`] for more details on classification. +//! +//! A [`MakeClassifier`] can be provided when creating a [`TraceLayer`]: +//! +//! ```rust +//! use http::{Request, Response}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::ServiceBuilder; +//! use tower_http::{ +//! trace::TraceLayer, +//! classify::{ +//! MakeClassifier, ClassifyResponse, ClassifiedResponse, NeverClassifyEos, +//! SharedClassifier, +//! }, +//! }; +//! use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! // Our `MakeClassifier` that always crates `MyClassifier` classifiers. +//! #[derive(Copy, Clone)] +//! struct MyMakeClassify; +//! +//! impl MakeClassifier for MyMakeClassify { +//! type Classifier = MyClassifier; +//! type FailureClass = &'static str; +//! type ClassifyEos = NeverClassifyEos<&'static str>; +//! +//! fn make_classifier(&self, req: &Request) -> Self::Classifier { +//! MyClassifier +//! } +//! } +//! +//! // A classifier that classifies failures as `"something went wrong..."`. +//! #[derive(Copy, Clone)] +//! struct MyClassifier; +//! +//! impl ClassifyResponse for MyClassifier { +//! type FailureClass = &'static str; +//! type ClassifyEos = NeverClassifyEos<&'static str>; +//! +//! fn classify_response( +//! self, +//! res: &Response +//! ) -> ClassifiedResponse { +//! // Classify based on the status code. +//! if res.status().is_server_error() { +//! ClassifiedResponse::Ready(Err("something went wrong...")) +//! } else { +//! ClassifiedResponse::Ready(Ok(())) +//! } +//! } +//! +//! fn classify_error(self, error: &E) -> Self::FailureClass +//! where +//! E: std::fmt::Display + 'static, +//! { +//! 
"something went wrong..." +//! } +//! } +//! +//! let service = ServiceBuilder::new() +//! // Create a trace layer that uses our classifier. +//! .layer(TraceLayer::new(MyMakeClassify)) +//! .service_fn(handle); +//! +//! // Since `MyClassifier` is `Clone` we can also use `SharedClassifier` +//! // to avoid having to define a separate `MakeClassifier`. +//! let service = ServiceBuilder::new() +//! .layer(TraceLayer::new(SharedClassifier::new(MyClassifier))) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! [`TraceLayer`] comes with convenience methods for using common classifiers: +//! +//! - [`TraceLayer::new_for_http`] classifies based on the status code. It doesn't consider +//! streaming responses. +//! - [`TraceLayer::new_for_grpc`] classifies based on the gRPC protocol and supports streaming +//! responses. +//! +//! [tracing]: https://crates.io/crates/tracing +//! [`Service`]: tower_service::Service +//! [`Service::call`]: tower_service::Service::call +//! [`MakeClassifier`]: crate::classify::MakeClassifier +//! [`ClassifyResponse`]: crate::classify::ClassifyResponse +//! [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record +//! [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with +//! [`Span`]: tracing::Span +//! [`ServerErrorsAsFailures`]: crate::classify::ServerErrorsAsFailures +//! 
[`Body::poll_frame`]: http_body::Body::poll_frame + +use std::{fmt, time::Duration}; + +use tracing::Level; + +pub use self::{ + body::ResponseBody, + future::ResponseFuture, + layer::TraceLayer, + make_span::{DefaultMakeSpan, MakeSpan}, + on_body_chunk::{DefaultOnBodyChunk, OnBodyChunk}, + on_eos::{DefaultOnEos, OnEos}, + on_failure::{DefaultOnFailure, OnFailure}, + on_request::{DefaultOnRequest, OnRequest}, + on_response::{DefaultOnResponse, OnResponse}, + service::Trace, +}; +use crate::{ + classify::{GrpcErrorsAsFailures, ServerErrorsAsFailures, SharedClassifier}, + LatencyUnit, +}; + +/// MakeClassifier for HTTP requests. +pub type HttpMakeClassifier = SharedClassifier; + +/// MakeClassifier for gRPC requests. +pub type GrpcMakeClassifier = SharedClassifier; + +macro_rules! event_dynamic_lvl { + ( $(target: $target:expr,)? $(parent: $parent:expr,)? $lvl:expr, $($tt:tt)* ) => { + match $lvl { + tracing::Level::ERROR => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::ERROR, + $($tt)* + ); + } + tracing::Level::WARN => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::WARN, + $($tt)* + ); + } + tracing::Level::INFO => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::INFO, + $($tt)* + ); + } + tracing::Level::DEBUG => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::DEBUG, + $($tt)* + ); + } + tracing::Level::TRACE => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? 
+ tracing::Level::TRACE, + $($tt)* + ); + } + } + }; +} + +mod body; +mod future; +mod layer; +mod make_span; +mod on_body_chunk; +mod on_eos; +mod on_failure; +mod on_request; +mod on_response; +mod service; + +const DEFAULT_MESSAGE_LEVEL: Level = Level::DEBUG; +const DEFAULT_ERROR_LEVEL: Level = Level::ERROR; + +struct Latency { + unit: LatencyUnit, + duration: Duration, +} + +impl fmt::Display for Latency { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.unit { + LatencyUnit::Seconds => write!(f, "{} s", self.duration.as_secs_f64()), + LatencyUnit::Millis => write!(f, "{} ms", self.duration.as_millis()), + LatencyUnit::Micros => write!(f, "{} μs", self.duration.as_micros()), + LatencyUnit::Nanos => write!(f, "{} ns", self.duration.as_nanos()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::classify::ServerErrorsFailureClass; + use crate::test_helpers::Body; + use bytes::Bytes; + use http::{HeaderMap, Request, Response}; + use once_cell::sync::Lazy; + use std::{ + sync::atomic::{AtomicU32, Ordering}, + time::Duration, + }; + use tower::{BoxError, Service, ServiceBuilder, ServiceExt}; + use tracing::Span; + + #[tokio::test] + async fn unary_request() { + static ON_REQUEST_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_RESPONSE_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_BODY_CHUNK_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_EOS: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_FAILURE: Lazy = Lazy::new(|| AtomicU32::new(0)); + + let trace_layer = TraceLayer::new_for_http() + .make_span_with(|_req: &Request| { + tracing::info_span!("test-span", foo = tracing::field::Empty) + }) + .on_request(|_req: &Request, span: &Span| { + span.record("foo", &42); + ON_REQUEST_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_response(|_res: &Response, _latency: Duration, _span: &Span| { + ON_RESPONSE_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_body_chunk(|_chunk: &Bytes, _latency: 
Duration, _span: &Span| { + ON_BODY_CHUNK_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_eos( + |_trailers: Option<&HeaderMap>, _latency: Duration, _span: &Span| { + ON_EOS.fetch_add(1, Ordering::SeqCst); + }, + ) + .on_failure( + |_class: ServerErrorsFailureClass, _latency: Duration, _span: &Span| { + ON_FAILURE.fetch_add(1, Ordering::SeqCst); + }, + ); + + let mut svc = ServiceBuilder::new().layer(trace_layer).service_fn(echo); + + let res = svc + .ready() + .await + .unwrap() + .call(Request::new(Body::from("foobar"))) + .await + .unwrap(); + + assert_eq!(1, ON_REQUEST_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(1, ON_RESPONSE_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(0, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(0, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + + crate::test_helpers::to_bytes(res.into_body()) + .await + .unwrap(); + assert_eq!(1, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(0, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + } + + #[tokio::test] + async fn streaming_response() { + static ON_REQUEST_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_RESPONSE_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_BODY_CHUNK_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_EOS: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_FAILURE: Lazy = Lazy::new(|| AtomicU32::new(0)); + + let trace_layer = TraceLayer::new_for_http() + .on_request(|_req: &Request, _span: &Span| { + ON_REQUEST_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_response(|_res: &Response, _latency: Duration, _span: &Span| { + ON_RESPONSE_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_body_chunk(|_chunk: &Bytes, _latency: Duration, _span: &Span| { + ON_BODY_CHUNK_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_eos( + |_trailers: Option<&HeaderMap>, _latency: 
Duration, _span: &Span| { + ON_EOS.fetch_add(1, Ordering::SeqCst); + }, + ) + .on_failure( + |_class: ServerErrorsFailureClass, _latency: Duration, _span: &Span| { + ON_FAILURE.fetch_add(1, Ordering::SeqCst); + }, + ); + + let mut svc = ServiceBuilder::new() + .layer(trace_layer) + .service_fn(streaming_body); + + let res = svc + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(1, ON_REQUEST_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(1, ON_RESPONSE_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(0, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(0, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + + crate::test_helpers::to_bytes(res.into_body()) + .await + .unwrap(); + assert_eq!(3, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(0, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } + + async fn streaming_body(_req: Request) -> Result, BoxError> { + use futures_util::stream::iter; + + let stream = iter(vec![ + Ok::<_, BoxError>(Bytes::from("one")), + Ok::<_, BoxError>(Bytes::from("two")), + Ok::<_, BoxError>(Bytes::from("three")), + ]); + + let body = Body::from_stream(stream); + + Ok(Response::new(body)) + } +} diff --git a/.cargo-vendor/tower-http/src/trace/on_body_chunk.rs b/.cargo-vendor/tower-http/src/trace/on_body_chunk.rs new file mode 100644 index 0000000000..543f2a6365 --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/on_body_chunk.rs @@ -0,0 +1,64 @@ +use std::time::Duration; +use tracing::Span; + +/// Trait used to tell [`Trace`] what to do when a body chunk has been sent. +/// +/// See the [module docs](../trace/index.html#on_body_chunk) for details on exactly when the +/// `on_body_chunk` callback is called. 
+/// +/// [`Trace`]: super::Trace +pub trait OnBodyChunk { + /// Do the thing. + /// + /// `latency` is the duration since the response was sent or since the last body chunk as sent. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. + /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// + /// If you're using [hyper] as your server `B` will most likely be [`Bytes`]. + /// + /// [hyper]: https://hyper.rs + /// [`Bytes`]: https://docs.rs/bytes/latest/bytes/struct.Bytes.html + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_body_chunk(&mut self, chunk: &B, latency: Duration, span: &Span); +} + +impl OnBodyChunk for F +where + F: FnMut(&B, Duration, &Span), +{ + fn on_body_chunk(&mut self, chunk: &B, latency: Duration, span: &Span) { + self(chunk, latency, span) + } +} + +impl OnBodyChunk for () { + #[inline] + fn on_body_chunk(&mut self, _: &B, _: Duration, _: &Span) {} +} + +/// The default [`OnBodyChunk`] implementation used by [`Trace`]. +/// +/// Simply does nothing. +/// +/// [`Trace`]: super::Trace +#[derive(Debug, Default, Clone)] +pub struct DefaultOnBodyChunk { + _priv: (), +} + +impl DefaultOnBodyChunk { + /// Create a new `DefaultOnBodyChunk`. 
+ pub fn new() -> Self { + Self { _priv: () } + } +} + +impl OnBodyChunk for DefaultOnBodyChunk { + #[inline] + fn on_body_chunk(&mut self, _: &B, _: Duration, _: &Span) {} +} diff --git a/.cargo-vendor/tower-http/src/trace/on_eos.rs b/.cargo-vendor/tower-http/src/trace/on_eos.rs new file mode 100644 index 0000000000..ab90fc9c0d --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/on_eos.rs @@ -0,0 +1,107 @@ +use super::{Latency, DEFAULT_MESSAGE_LEVEL}; +use crate::{classify::grpc_errors_as_failures::ParsedGrpcStatus, LatencyUnit}; +use http::header::HeaderMap; +use std::time::Duration; +use tracing::{Level, Span}; + +/// Trait used to tell [`Trace`] what to do when a stream closes. +/// +/// See the [module docs](../trace/index.html#on_eos) for details on exactly when the `on_eos` +/// callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnEos { + /// Do the thing. + /// + /// `stream_duration` is the duration since the response was sent. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. + /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_eos(self, trailers: Option<&HeaderMap>, stream_duration: Duration, span: &Span); +} + +impl OnEos for () { + #[inline] + fn on_eos(self, _: Option<&HeaderMap>, _: Duration, _: &Span) {} +} + +impl OnEos for F +where + F: FnOnce(Option<&HeaderMap>, Duration, &Span), +{ + fn on_eos(self, trailers: Option<&HeaderMap>, stream_duration: Duration, span: &Span) { + self(trailers, stream_duration, span) + } +} + +/// The default [`OnEos`] implementation used by [`Trace`]. 
+/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnEos { + level: Level, + latency_unit: LatencyUnit, +} + +impl Default for DefaultOnEos { + fn default() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + latency_unit: LatencyUnit::Millis, + } + } +} + +impl DefaultOnEos { + /// Create a new [`DefaultOnEos`]. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Defaults to [`Level::DEBUG`]. + /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + /// [`Level::DEBUG`]: https://docs.rs/tracing/latest/tracing/struct.Level.html#associatedconstant.DEBUG + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Set the [`LatencyUnit`] latencies will be reported in. + /// + /// Defaults to [`LatencyUnit::Millis`]. + pub fn latency_unit(mut self, latency_unit: LatencyUnit) -> Self { + self.latency_unit = latency_unit; + self + } +} + +impl OnEos for DefaultOnEos { + fn on_eos(self, trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span) { + let stream_duration = Latency { + unit: self.latency_unit, + duration: stream_duration, + }; + let status = trailers.and_then(|trailers| { + match crate::classify::grpc_errors_as_failures::classify_grpc_metadata( + trailers, + crate::classify::GrpcCode::Ok.into_bitmask(), + ) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => Some(0), + ParsedGrpcStatus::NonSuccess(status) => Some(status.get()), + ParsedGrpcStatus::GrpcStatusHeaderMissing => None, + } + }); + + event_dynamic_lvl!(self.level, %stream_duration, status, "end of stream"); + } +} diff --git a/.cargo-vendor/tower-http/src/trace/on_failure.rs b/.cargo-vendor/tower-http/src/trace/on_failure.rs new file mode 100644 index 0000000000..7dfa186dc6 --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/on_failure.rs @@ -0,0 +1,100 @@ +use super::{Latency, 
DEFAULT_ERROR_LEVEL}; +use crate::LatencyUnit; +use std::{fmt, time::Duration}; +use tracing::{Level, Span}; + +/// Trait used to tell [`Trace`] what to do when a request fails. +/// +/// See the [module docs](../trace/index.html#on_failure) for details on exactly when the +/// `on_failure` callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnFailure { + /// Do the thing. + /// + /// `latency` is the duration since the request was received. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. + /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_failure(&mut self, failure_classification: FailureClass, latency: Duration, span: &Span); +} + +impl OnFailure for () { + #[inline] + fn on_failure(&mut self, _: FailureClass, _: Duration, _: &Span) {} +} + +impl OnFailure for F +where + F: FnMut(FailureClass, Duration, &Span), +{ + fn on_failure(&mut self, failure_classification: FailureClass, latency: Duration, span: &Span) { + self(failure_classification, latency, span) + } +} + +/// The default [`OnFailure`] implementation used by [`Trace`]. +/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnFailure { + level: Level, + latency_unit: LatencyUnit, +} + +impl Default for DefaultOnFailure { + fn default() -> Self { + Self { + level: DEFAULT_ERROR_LEVEL, + latency_unit: LatencyUnit::Millis, + } + } +} + +impl DefaultOnFailure { + /// Create a new `DefaultOnFailure`. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Defaults to [`Level::ERROR`]. 
+ /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Set the [`LatencyUnit`] latencies will be reported in. + /// + /// Defaults to [`LatencyUnit::Millis`]. + pub fn latency_unit(mut self, latency_unit: LatencyUnit) -> Self { + self.latency_unit = latency_unit; + self + } +} + +impl OnFailure for DefaultOnFailure +where + FailureClass: fmt::Display, +{ + fn on_failure(&mut self, failure_classification: FailureClass, latency: Duration, _: &Span) { + let latency = Latency { + unit: self.latency_unit, + duration: latency, + }; + event_dynamic_lvl!( + self.level, + classification = %failure_classification, + %latency, + "response failed" + ); + } +} diff --git a/.cargo-vendor/tower-http/src/trace/on_request.rs b/.cargo-vendor/tower-http/src/trace/on_request.rs new file mode 100644 index 0000000000..07de1893db --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/on_request.rs @@ -0,0 +1,82 @@ +use super::DEFAULT_MESSAGE_LEVEL; +use http::Request; +use tracing::Level; +use tracing::Span; + +/// Trait used to tell [`Trace`] what to do when a request is received. +/// +/// See the [module docs](../trace/index.html#on_request) for details on exactly when the +/// `on_request` callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnRequest { + /// Do the thing. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. 
+ /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_request(&mut self, request: &Request, span: &Span); +} + +impl OnRequest for () { + #[inline] + fn on_request(&mut self, _: &Request, _: &Span) {} +} + +impl OnRequest for F +where + F: FnMut(&Request, &Span), +{ + fn on_request(&mut self, request: &Request, span: &Span) { + self(request, span) + } +} + +/// The default [`OnRequest`] implementation used by [`Trace`]. +/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnRequest { + level: Level, +} + +impl Default for DefaultOnRequest { + fn default() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + } + } +} + +impl DefaultOnRequest { + /// Create a new `DefaultOnRequest`. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Please note that while this will set the level for the tracing events + /// themselves, it might cause them to lack expected information, like + /// request method or path. You can address this using + /// [`DefaultMakeSpan::level`]. + /// + /// Defaults to [`Level::DEBUG`]. 
+ /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + /// [`DefaultMakeSpan::level`]: crate::trace::DefaultMakeSpan::level + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } +} + +impl OnRequest for DefaultOnRequest { + fn on_request(&mut self, _: &Request, _: &Span) { + event_dynamic_lvl!(self.level, "started processing request"); + } +} diff --git a/.cargo-vendor/tower-http/src/trace/on_response.rs b/.cargo-vendor/tower-http/src/trace/on_response.rs new file mode 100644 index 0000000000..c6ece840dd --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/on_response.rs @@ -0,0 +1,161 @@ +use super::{Latency, DEFAULT_MESSAGE_LEVEL}; +use crate::LatencyUnit; +use http::Response; +use std::time::Duration; +use tracing::Level; +use tracing::Span; + +/// Trait used to tell [`Trace`] what to do when a response has been produced. +/// +/// See the [module docs](../trace/index.html#on_response) for details on exactly when the +/// `on_response` callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnResponse { + /// Do the thing. + /// + /// `latency` is the duration since the request was received. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. 
+ /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_response(self, response: &Response, latency: Duration, span: &Span); +} + +impl OnResponse for () { + #[inline] + fn on_response(self, _: &Response, _: Duration, _: &Span) {} +} + +impl OnResponse for F +where + F: FnOnce(&Response, Duration, &Span), +{ + fn on_response(self, response: &Response, latency: Duration, span: &Span) { + self(response, latency, span) + } +} + +/// The default [`OnResponse`] implementation used by [`Trace`]. +/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnResponse { + level: Level, + latency_unit: LatencyUnit, + include_headers: bool, +} + +impl Default for DefaultOnResponse { + fn default() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + latency_unit: LatencyUnit::Millis, + include_headers: false, + } + } +} + +impl DefaultOnResponse { + /// Create a new `DefaultOnResponse`. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Please note that while this will set the level for the tracing events + /// themselves, it might cause them to lack expected information, like + /// request method or path. You can address this using + /// [`DefaultMakeSpan::level`]. + /// + /// Defaults to [`Level::DEBUG`]. + /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + /// [`DefaultMakeSpan::level`]: crate::trace::DefaultMakeSpan::level + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Set the [`LatencyUnit`] latencies will be reported in. + /// + /// Defaults to [`LatencyUnit::Millis`]. 
+ pub fn latency_unit(mut self, latency_unit: LatencyUnit) -> Self { + self.latency_unit = latency_unit; + self + } + + /// Include response headers on the [`Event`]. + /// + /// By default headers are not included. + /// + /// [`Event`]: tracing::Event + pub fn include_headers(mut self, include_headers: bool) -> Self { + self.include_headers = include_headers; + self + } +} + +impl OnResponse for DefaultOnResponse { + fn on_response(self, response: &Response, latency: Duration, _: &Span) { + let latency = Latency { + unit: self.latency_unit, + duration: latency, + }; + let response_headers = self + .include_headers + .then(|| tracing::field::debug(response.headers())); + + event_dynamic_lvl!( + self.level, + %latency, + status = status(response), + response_headers, + "finished processing request" + ); + } +} + +fn status(res: &Response) -> Option { + use crate::classify::grpc_errors_as_failures::ParsedGrpcStatus; + + // gRPC-over-HTTP2 uses the "application/grpc[+format]" content type, and gRPC-Web uses + // "application/grpc-web[+format]" or "application/grpc-web-text[+format]", where "format" is + // the message format, e.g. +proto, +json. + // + // So, valid grpc content types include (but are not limited to): + // - application/grpc + // - application/grpc+proto + // - application/grpc-web+proto + // - application/grpc-web-text+proto + // + // For simplicity, we simply check that the content type starts with "application/grpc". 
+ let is_grpc = res + .headers() + .get(http::header::CONTENT_TYPE) + .map_or(false, |value| { + value.as_bytes().starts_with("application/grpc".as_bytes()) + }); + + if is_grpc { + match crate::classify::grpc_errors_as_failures::classify_grpc_metadata( + res.headers(), + crate::classify::GrpcCode::Ok.into_bitmask(), + ) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => Some(0), + ParsedGrpcStatus::NonSuccess(status) => Some(status.get()), + // if `grpc-status` is missing then its a streaming response and there is no status + // _yet_, so its neither success nor error + ParsedGrpcStatus::GrpcStatusHeaderMissing => None, + } + } else { + Some(res.status().as_u16().into()) + } +} diff --git a/.cargo-vendor/tower-http/src/trace/service.rs b/.cargo-vendor/tower-http/src/trace/service.rs new file mode 100644 index 0000000000..1ab4c1f001 --- /dev/null +++ b/.cargo-vendor/tower-http/src/trace/service.rs @@ -0,0 +1,325 @@ +use super::{ + DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, DefaultOnRequest, + DefaultOnResponse, GrpcMakeClassifier, HttpMakeClassifier, MakeSpan, OnBodyChunk, OnEos, + OnFailure, OnRequest, OnResponse, ResponseBody, ResponseFuture, TraceLayer, +}; +use crate::classify::{ + GrpcErrorsAsFailures, MakeClassifier, ServerErrorsAsFailures, SharedClassifier, +}; +use http::{Request, Response}; +use http_body::Body; +use std::{ + fmt, + task::{Context, Poll}, + time::Instant, +}; +use tower_service::Service; + +/// Middleware that adds high level [tracing] to a [`Service`]. +/// +/// See the [module docs](crate::trace) for an example. 
+/// +/// [tracing]: https://crates.io/crates/tracing +/// [`Service`]: tower_service::Service +#[derive(Debug, Clone, Copy)] +pub struct Trace< + S, + M, + MakeSpan = DefaultMakeSpan, + OnRequest = DefaultOnRequest, + OnResponse = DefaultOnResponse, + OnBodyChunk = DefaultOnBodyChunk, + OnEos = DefaultOnEos, + OnFailure = DefaultOnFailure, +> { + pub(crate) inner: S, + pub(crate) make_classifier: M, + pub(crate) make_span: MakeSpan, + pub(crate) on_request: OnRequest, + pub(crate) on_response: OnResponse, + pub(crate) on_body_chunk: OnBodyChunk, + pub(crate) on_eos: OnEos, + pub(crate) on_failure: OnFailure, +} + +impl Trace { + /// Create a new [`Trace`] using the given [`MakeClassifier`]. + pub fn new(inner: S, make_classifier: M) -> Self + where + M: MakeClassifier, + { + Self { + inner, + make_classifier, + make_span: DefaultMakeSpan::new(), + on_request: DefaultOnRequest::default(), + on_response: DefaultOnResponse::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } + + /// Returns a new [`Layer`] that wraps services with a [`TraceLayer`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(make_classifier: M) -> TraceLayer + where + M: MakeClassifier, + { + TraceLayer::new(make_classifier) + } +} + +impl + Trace +{ + define_inner_service_accessors!(); + + /// Customize what to do when a request is received. + /// + /// `NewOnRequest` is expected to implement [`OnRequest`]. + /// + /// [`OnRequest`]: super::OnRequest + pub fn on_request( + self, + new_on_request: NewOnRequest, + ) -> Trace { + Trace { + on_request: new_on_request, + inner: self.inner, + on_failure: self.on_failure, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been produced. 
+ /// + /// `NewOnResponse` is expected to implement [`OnResponse`]. + /// + /// [`OnResponse`]: super::OnResponse + pub fn on_response( + self, + new_on_response: NewOnResponse, + ) -> Trace { + Trace { + on_response: new_on_response, + inner: self.inner, + on_request: self.on_request, + on_failure: self.on_failure, + on_body_chunk: self.on_body_chunk, + on_eos: self.on_eos, + make_span: self.make_span, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a body chunk has been sent. + /// + /// `NewOnBodyChunk` is expected to implement [`OnBodyChunk`]. + /// + /// [`OnBodyChunk`]: super::OnBodyChunk + pub fn on_body_chunk( + self, + new_on_body_chunk: NewOnBodyChunk, + ) -> Trace { + Trace { + on_body_chunk: new_on_body_chunk, + on_eos: self.on_eos, + make_span: self.make_span, + inner: self.inner, + on_failure: self.on_failure, + on_request: self.on_request, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a streaming response has closed. + /// + /// `NewOnEos` is expected to implement [`OnEos`]. + /// + /// [`OnEos`]: super::OnEos + pub fn on_eos( + self, + new_on_eos: NewOnEos, + ) -> Trace { + Trace { + on_eos: new_on_eos, + make_span: self.make_span, + inner: self.inner, + on_failure: self.on_failure, + on_request: self.on_request, + on_body_chunk: self.on_body_chunk, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been classified as a failure. + /// + /// `NewOnFailure` is expected to implement [`OnFailure`]. 
+ /// + /// [`OnFailure`]: super::OnFailure + pub fn on_failure( + self, + new_on_failure: NewOnFailure, + ) -> Trace { + Trace { + on_failure: new_on_failure, + inner: self.inner, + make_span: self.make_span, + on_body_chunk: self.on_body_chunk, + on_request: self.on_request, + on_eos: self.on_eos, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize how to make [`Span`]s that all request handling will be wrapped in. + /// + /// `NewMakeSpan` is expected to implement [`MakeSpan`]. + /// + /// [`MakeSpan`]: super::MakeSpan + /// [`Span`]: tracing::Span + pub fn make_span_with( + self, + new_make_span: NewMakeSpan, + ) -> Trace { + Trace { + make_span: new_make_span, + inner: self.inner, + on_failure: self.on_failure, + on_request: self.on_request, + on_body_chunk: self.on_body_chunk, + on_response: self.on_response, + on_eos: self.on_eos, + make_classifier: self.make_classifier, + } + } +} + +impl + Trace< + S, + HttpMakeClassifier, + DefaultMakeSpan, + DefaultOnRequest, + DefaultOnResponse, + DefaultOnBodyChunk, + DefaultOnEos, + DefaultOnFailure, + > +{ + /// Create a new [`Trace`] using [`ServerErrorsAsFailures`] which supports classifying + /// regular HTTP responses based on the status code. + pub fn new_for_http(inner: S) -> Self { + Self { + inner, + make_classifier: SharedClassifier::new(ServerErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_request: DefaultOnRequest::default(), + on_response: DefaultOnResponse::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl + Trace< + S, + GrpcMakeClassifier, + DefaultMakeSpan, + DefaultOnRequest, + DefaultOnResponse, + DefaultOnBodyChunk, + DefaultOnEos, + DefaultOnFailure, + > +{ + /// Create a new [`Trace`] using [`GrpcErrorsAsFailures`] which supports classifying + /// gRPC responses and streams based on the `grpc-status` header. 
+ pub fn new_for_grpc(inner: S) -> Self { + Self { + inner, + make_classifier: SharedClassifier::new(GrpcErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_request: DefaultOnRequest::default(), + on_response: DefaultOnResponse::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl< + S, + ReqBody, + ResBody, + M, + OnRequestT, + OnResponseT, + OnFailureT, + OnBodyChunkT, + OnEosT, + MakeSpanT, + > Service> + for Trace +where + S: Service, Response = Response>, + ReqBody: Body, + ResBody: Body, + ResBody::Error: fmt::Display + 'static, + S::Error: fmt::Display + 'static, + M: MakeClassifier, + M::Classifier: Clone, + MakeSpanT: MakeSpan, + OnRequestT: OnRequest, + OnResponseT: OnResponse + Clone, + OnBodyChunkT: OnBodyChunk + Clone, + OnEosT: OnEos + Clone, + OnFailureT: OnFailure + Clone, +{ + type Response = + Response>; + type Error = S::Error; + type Future = + ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let start = Instant::now(); + + let span = self.make_span.make_span(&req); + + let classifier = self.make_classifier.make_classifier(&req); + + let future = { + let _guard = span.enter(); + self.on_request.on_request(&req, &span); + self.inner.call(req) + }; + + ResponseFuture { + inner: future, + span, + classifier: Some(classifier), + on_response: Some(self.on_response.clone()), + on_body_chunk: Some(self.on_body_chunk.clone()), + on_eos: Some(self.on_eos.clone()), + on_failure: Some(self.on_failure.clone()), + start, + } + } +} diff --git a/.cargo-vendor/tower-http/src/validate_request.rs b/.cargo-vendor/tower-http/src/validate_request.rs new file mode 100644 index 0000000000..327266af73 --- /dev/null +++ b/.cargo-vendor/tower-http/src/validate_request.rs @@ -0,0 +1,588 @@ +//! Middleware that validates requests. +//! 
+//! # Example +//! +//! ``` +//! use tower_http::validate_request::ValidateRequestHeaderLayer; +//! use http::{Request, Response, StatusCode, header::ACCEPT}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let mut service = ServiceBuilder::new() +//! // Require the `Accept` header to be `application/json`, `*/*` or `application/*` +//! .layer(ValidateRequestHeaderLayer::accept("application/json")) +//! .service_fn(handle); +//! +//! // Requests with the correct value are allowed through +//! let request = Request::builder() +//! .header(ACCEPT, "application/json") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::OK, response.status()); +//! +//! // Requests with an invalid value get a `406 Not Acceptable` response +//! let request = Request::builder() +//! .header(ACCEPT, "text/strings") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::NOT_ACCEPTABLE, response.status()); +//! # Ok(()) +//! # } +//! ``` +//! +//! Custom validation can be made by implementing [`ValidateRequest`]: +//! +//! ``` +//! use tower_http::validate_request::{ValidateRequestHeaderLayer, ValidateRequest}; +//! use http::{Request, Response, StatusCode, header::ACCEPT}; +//! use http_body_util::Full; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use bytes::Bytes; +//! +//! #[derive(Clone, Copy)] +//! pub struct MyHeader { /* ... */ } +//! +//! impl ValidateRequest for MyHeader { +//! type ResponseBody = Full; +//! +//! fn validate( +//! 
&mut self, +//! request: &mut Request, +//! ) -> Result<(), Response> { +//! // validate the request... +//! # unimplemented!() +//! } +//! } +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let service = ServiceBuilder::new() +//! // Validate requests using `MyHeader` +//! .layer(ValidateRequestHeaderLayer::custom(MyHeader { /* ... */ })) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! Or using a closure: +//! +//! ``` +//! use tower_http::validate_request::{ValidateRequestHeaderLayer, ValidateRequest}; +//! use http::{Request, Response, StatusCode, header::ACCEPT}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! # todo!(); +//! // ... +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let service = ServiceBuilder::new() +//! .layer(ValidateRequestHeaderLayer::custom(|request: &mut Request>| { +//! // Validate the request +//! # Ok::<_, Response>>(()) +//! })) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` + +use http::{header, Request, Response, StatusCode}; +use http_body::Body; +use mime::{Mime, MimeIter}; +use pin_project_lite::pin_project; +use std::{ + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`ValidateRequestHeader`] which validates all requests. +/// +/// See the [module docs](crate::validate_request) for an example. +#[derive(Debug, Clone)] +pub struct ValidateRequestHeaderLayer { + validate: T, +} + +impl ValidateRequestHeaderLayer> { + /// Validate requests have the required Accept header. 
+ /// + /// The `Accept` header is required to be `*/*`, `type/*` or `type/subtype`, + /// as configured. + /// + /// # Panics + /// + /// Panics if `header_value` is not in the form: `type/subtype`, such as `application/json` + /// See `AcceptHeader::new` for when this method panics. + /// + /// # Example + /// + /// ``` + /// use http_body_util::Full; + /// use bytes::Bytes; + /// use tower_http::validate_request::{AcceptHeader, ValidateRequestHeaderLayer}; + /// + /// let layer = ValidateRequestHeaderLayer::>>::accept("application/json"); + /// ``` + /// + /// [`Accept`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept + pub fn accept(value: &str) -> Self + where + ResBody: Body + Default, + { + Self::custom(AcceptHeader::new(value)) + } +} + +impl ValidateRequestHeaderLayer { + /// Validate requests using a custom method. + pub fn custom(validate: T) -> ValidateRequestHeaderLayer { + Self { validate } + } +} + +impl Layer for ValidateRequestHeaderLayer +where + T: Clone, +{ + type Service = ValidateRequestHeader; + + fn layer(&self, inner: S) -> Self::Service { + ValidateRequestHeader::new(inner, self.validate.clone()) + } +} + +/// Middleware that validates requests. +/// +/// See the [module docs](crate::validate_request) for an example. +#[derive(Clone, Debug)] +pub struct ValidateRequestHeader { + inner: S, + validate: T, +} + +impl ValidateRequestHeader { + fn new(inner: S, validate: T) -> Self { + Self::custom(inner, validate) + } + + define_inner_service_accessors!(); +} + +impl ValidateRequestHeader> { + /// Validate requests have the required Accept header. + /// + /// The `Accept` header is required to be `*/*`, `type/*` or `type/subtype`, + /// as configured. + /// + /// # Panics + /// + /// See `AcceptHeader::new` for when this method panics. 
+ pub fn accept(inner: S, value: &str) -> Self + where + ResBody: Body + Default, + { + Self::custom(inner, AcceptHeader::new(value)) + } +} + +impl ValidateRequestHeader { + /// Validate requests using a custom method. + pub fn custom(inner: S, validate: T) -> ValidateRequestHeader { + Self { inner, validate } + } +} + +impl Service> for ValidateRequestHeader +where + V: ValidateRequest, + S: Service, Response = Response>, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + match self.validate.validate(&mut req) { + Ok(_) => ResponseFuture::future(self.inner.call(req)), + Err(res) => ResponseFuture::invalid_header_value(res), + } + } +} + +pin_project! { + /// Response future for [`ValidateRequestHeader`]. + pub struct ResponseFuture { + #[pin] + kind: Kind, + } +} + +impl ResponseFuture { + fn future(future: F) -> Self { + Self { + kind: Kind::Future { future }, + } + } + + fn invalid_header_value(res: Response) -> Self { + Self { + kind: Kind::Error { + response: Some(res), + }, + } + } +} + +pin_project! { + #[project = KindProj] + enum Kind { + Future { + #[pin] + future: F, + }, + Error { + response: Option>, + }, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().kind.project() { + KindProj::Future { future } => future.poll(cx), + KindProj::Error { response } => { + let response = response.take().expect("future polled after completion"); + Poll::Ready(Ok(response)) + } + } + } +} + +/// Trait for validating requests. +pub trait ValidateRequest { + /// The body type used for responses to unvalidated requests. + type ResponseBody; + + /// Validate the request. 
+ /// + /// If `Ok(())` is returned then the request is allowed through, otherwise not. + fn validate(&mut self, request: &mut Request) -> Result<(), Response>; +} + +impl ValidateRequest for F +where + F: FnMut(&mut Request) -> Result<(), Response>, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, request: &mut Request) -> Result<(), Response> { + self(request) + } +} + +/// Type that performs validation of the Accept header. +pub struct AcceptHeader { + header_value: Arc, + _ty: PhantomData ResBody>, +} + +impl AcceptHeader { + /// Create a new `AcceptHeader`. + /// + /// # Panics + /// + /// Panics if `header_value` is not in the form: `type/subtype`, such as `application/json` + fn new(header_value: &str) -> Self + where + ResBody: Body + Default, + { + Self { + header_value: Arc::new( + header_value + .parse::() + .expect("value is not a valid header value"), + ), + _ty: PhantomData, + } + } +} + +impl Clone for AcceptHeader { + fn clone(&self) -> Self { + Self { + header_value: self.header_value.clone(), + _ty: PhantomData, + } + } +} + +impl fmt::Debug for AcceptHeader { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AcceptHeader") + .field("header_value", &self.header_value) + .finish() + } +} + +impl ValidateRequest for AcceptHeader +where + ResBody: Body + Default, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, req: &mut Request) -> Result<(), Response> { + if !req.headers().contains_key(header::ACCEPT) { + return Ok(()); + } + if req + .headers() + .get_all(header::ACCEPT) + .into_iter() + .filter_map(|header| header.to_str().ok()) + .any(|h| { + MimeIter::new(h) + .map(|mim| { + if let Ok(mim) = mim { + let typ = self.header_value.type_(); + let subtype = self.header_value.subtype(); + match (mim.type_(), mim.subtype()) { + (t, s) if t == typ && s == subtype => true, + (t, mime::STAR) if t == typ => true, + (mime::STAR, mime::STAR) => true, + _ => false, + } + } else { + false + } + }) + 
.reduce(|acc, mim| acc || mim) + .unwrap_or(false) + }) + { + return Ok(()); + } + let mut res = Response::new(ResBody::default()); + *res.status_mut() = StatusCode::NOT_ACCEPTABLE; + Err(res) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::header; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn valid_accept_header() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "application/json") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn valid_accept_header_accept_all_json() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "application/*") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn valid_accept_header_accept_all() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "*/*") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn invalid_accept_header() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "invalid") + .body(Body::empty()) + .unwrap(); + + let res = 
service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + #[tokio::test] + async fn not_accepted_accept_header_subtype() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "application/strings") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + + #[tokio::test] + async fn not_accepted_accept_header() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "text/strings") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + + #[tokio::test] + async fn accepted_multiple_header_value() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "text/strings") + .header(header::ACCEPT, "invalid, application/json") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn accepted_inner_header_value() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "text/strings, invalid, application/json") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn 
accepted_header_with_quotes_valid() { + let value = "foo/bar; parisien=\"baguette, text/html, jambon, fromage\", application/*"; + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/xml")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, value) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn accepted_header_with_quotes_invalid() { + let value = "foo/bar; parisien=\"baguette, text/html, jambon, fromage\""; + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("text/html")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, value) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo-vendor/tungstenite/.cargo-checksum.json b/.cargo-vendor/tungstenite/.cargo-checksum.json new file mode 100644 index 0000000000..9c9ac07a8b --- /dev/null +++ b/.cargo-vendor/tungstenite/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"983d5f14ef39ef3662e843dc9b6411c176a8d085c15c35740d043c7bb47dee0f","Cargo.lock":"4a39b4dc027e931a05182eb15728e3c1ccb12a7790325ece5470da88b38428b8","Cargo.toml":"58aa0d3b733e4bf0232c5bddf3d79988037359c4b2eae4875d16657a2492fd29","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7fea0ee51a4ca5d5cea7464135fd55e8b09caf3a61da3d451ac8a22af95c033f","README.md":"0a8b44c900be104faf8bb8d24e419ca1569e32d7bfae4f1528bc27f9f01d4b33","benches/buffer.rs":"615d66b4e5d5f11758aa111f2c3274e82dba5afeca3863bf39138d906c67344c","benches/write.rs":"921c72b4f1b219873fd2c69baa15fcb064373d55d0a5a47f29fc7f03eebde767","examples/autobahn-client.rs":"9d525f68cab5cc1791aa8ba376bea77f27534f3e953f3a486bb4a515b9b3fbf4","examples/autobahn-server.rs":"4b50ff21113ca920a2d91a37f8f3452ecc64ee3dcf0d557905d3bcd1746a92e3","examples/callback-error.rs":"fe732374176dfe4e5d1f36a3fa09d6d6e3cc95885e920480ef5bcd94206835e0","examples/client.rs":"348a78efba691d502c5d2cb2c02f1704c513b2a752a2fa36e2e18d01f234a2bb","examples/server.rs":"676d6cc3b3042b106def38a5a5f65f2be9f43d6156f9164046b0a666ed06f048","examples/srv_accept_unmasked_frames.rs":"9a898510c622d2b2d3a28505d45b9187272186ae487fd74b827fe280c8f6a5f9","src/buffer.rs":"c346e4282b294b856d29a17ea078ebd2580b3142f531e1e10d545e4af0baba2b","src/client.rs":"4691e64343a38879367cafe45a999789fc143938e5e71a4d34e96d924df5ece3","src/error.rs":"4fa76e7d681a464da9e144b6fcc7022b00591607cac04107474b35e46ba50307","src/handshake/client.rs":"ee777c8e23f3503213d0fc5d0e35cee1c4f2112bfb2902543dda64e963b8257f","src/handshake/headers.rs":"0e6d3e15faa8b82bd2a239f5094335368e578acef2f17ee74825de606f529b75","src/handshake/machine.rs":"d8d07e8797ab93516154abc904cbc44dc72b49f6ce9601b4e559c7fc1ae065f5","src/handshake/mod.rs":"783b05cb7a45fcc4ff3d06eec5f17d87f7d1cc32db9dd836d1b8fcd2d9018436","src/handshake/server.rs":"000d888d2d5ba140a57db1e1520f844441b403fda61f7e674c4730e1fe15a202","src/lib.rs":"f5e1e6ae7e1af292f5d56011333f8d
c28de18cd5a81d0da580a0c73b98ab7d5e","src/protocol/frame/coding.rs":"dfa740428a7ae63a308b0d22258354f5234690a105e10963075d4af02d8d5450","src/protocol/frame/frame.rs":"efebb2b48d17d7cecf090805ccb8c1930471210b74276c5c62ce941d8188a402","src/protocol/frame/mask.rs":"ab2d387f9e2dbfda29b26d3147facacaeb7b91de1ae5d07d742fb9b9d297f8b1","src/protocol/frame/mod.rs":"7ae23d5c389fb1ff789b7f2597ff390d7846dfd5d654bccf4ec3682ad1741055","src/protocol/message.rs":"446cfc9c28ac8cc91214699b62044d0a1ab1d8c62bee1f101758ae8560db5223","src/protocol/mod.rs":"e24aaa577263eeb890b5b0a3cf0ad023c1c8ccc01080abf441a3d8fd1cb88a47","src/server.rs":"f01a418588a7d924dcfc0cdc632aec7f483a4ab5409c1b5563b2205d97cf8a49","src/stream.rs":"af00ffb61ce504a83fbe88b2ceb5780824008cff4a876007e969ec1455a7b723","src/tls.rs":"8260cf1ac17f404d5c985f3defb3ca799324449c3a98ff438c6d2a7805335049","src/util.rs":"ada1383900811e7c367f20d02ebf87145b423c3b8fcee1d64329578d80064f8d"},"package":"9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1"} \ No newline at end of file diff --git a/.cargo-vendor/tungstenite/CHANGELOG.md b/.cargo-vendor/tungstenite/CHANGELOG.md new file mode 100644 index 0000000000..5c0348e489 --- /dev/null +++ b/.cargo-vendor/tungstenite/CHANGELOG.md @@ -0,0 +1,95 @@ +# Unreleased +- Fix read-predominant auto pong responses not flushing when hitting WouldBlock errors. +- Improve `FrameHeader::format` write correctness. +- Up minimum _rustls_ to `0.21.6`. +- Update _webpki-roots_ to `0.26`. + +# 0.20.1 +- Fixes [CVE-2023-43669](https://github.com/snapview/tungstenite-rs/pull/379). + +# 0.20.0 +- Remove many implicit flushing behaviours. In general reading and writing messages will no + longer flush until calling `flush`. An exception is automatic responses (e.g. pongs) + which will continue to be written and flushed when reading and writing. + This allows writing a batch of messages and flushing once, improving performance. +- Add `WebSocket::read`, `write`, `send`, `flush`. 
Deprecate `read_message`, `write_message`, `write_pending`. +- Add `FrameSocket::read`, `write`, `send`, `flush`. Remove `read_frame`, `write_frame`, `write_pending`. + Note: Previous use of `write_frame` may be replaced with `send`. +- Add `WebSocketContext::read`, `write`, `flush`. Remove `read_message`, `write_message`, `write_pending`. + Note: Previous use of `write_message` may be replaced with `write` + `flush`. +- Remove `send_queue`, replaced with using the frame write buffer to achieve similar results. + * Add `WebSocketConfig::max_write_buffer_size`. Deprecate `max_send_queue`. + * Add `Error::WriteBufferFull`. Remove `Error::SendQueueFull`. + Note: `WriteBufferFull` returns the message that could not be written as a `Message::Frame`. +- Add ability to buffer multiple writes before writing to the underlying stream, controlled by + `WebSocketConfig::write_buffer_size` (default 128 KiB). Improves batch message write performance. +- Panic on receiving invalid `WebSocketConfig`. + +# 0.19.0 + +- Update TLS dependencies. +- Exchanging `base64` for `data-encoding`. + +# 0.18.0 + +- Make handshake dependencies optional with a new `handshake` feature (now a default one!). +- Return HTTP error responses (their HTTP body) upon non 101 status codes. + +# 0.17.3 + +- Respect the case-sentitivity of the "Origin" header to keep compatibility with the older servers that use case-sensitive comparison. + +# 0.17.2 + +- Fix panic when invalid manually constructed `http::Request` is passed to `tungstenite`. +- Downgrade the MSRV to `1.56` due to some other crates that rely on us not being quite ready for `1.58`. + +# 0.17.1 + +- Specify the minimum required Rust version. + +# 0.17.0 + +- Update of dependencies (primarily `sha1`). +- Add support of the fragmented messages (allow the user to send the frames without composing the full message). +- Overhaul of the client's request generation process. 
Now the users are able to pass the constructed `http::Request` "as is" to `tungstenite-rs`, letting the library to check the correctness of the request and specifying their own headers (including its own key if necessary). No changes for those ones who used the client in a normal way by connecting using a URL/URI (most common use-case). + +# 0.16.0 + +- Update of dependencies (primarily `rustls`, `webpki-roots`, `rustls-native-certs`). +- When the close frame is received, the reply that is automatically sent to the initiator has the same code (so we just echo the frame back). Previously a new close frame was created (i.e. the close code / reason was always the same regardless of what code / reason specified by the initiator). Now it’s more symmetrical and arguably more intuitive behavior (see [#246](https://github.com/snapview/tungstenite-rs/pull/246) for more context). +- The internal `ReadBuffer` implementation uses heap instead of stack to store the buffer. This should solve issues with possible stack overflows in some scenarios (see [#241](https://github.com/snapview/tungstenite-rs/pull/241) for more context). + +# 0.15.0 + +- Allow selecting the method of loading root certificates if `rustls` is used as TLS implementation. + - Two new feature flags `rustls-tls-native-roots` and `rustls-tls-webpki-roots` have been added + that activate the respective method to load certificates. + - The `rustls-tls` flag was removed to raise awareness of this change. Otherwise, compilation + would have continue to work and potential errors (due to different or missing certificates) + only occurred at runtime. + - The new feature flags are additive. If both are enabled, both methods will be used to add + certificates to the TLS configuration. +- Allow specifying a connector (for more fine-grained configuration of the TLS). + +# 0.14.0 + +- Use `rustls-native-certs` instead of `webpki-root` when `rustls-tls` feature is enabled. 
+- Don't use `native-tls` as a default feature (see #202 for more details). +- New fast and safe implementation of the reading buffer (replacement for the `input_buffer`). +- Remove some errors from the `Error` enum that can't be triggered anymore with the new buffer implementation. + +# 0.13.0 + +- Add `CapacityError`, `UrlError`, and `ProtocolError` types to represent the different types of capacity, URL, and protocol errors respectively. +- Modify variants `Error::Capacity`, `Error::Url`, and `Error::Protocol` to hold the above errors types instead of string error messages. +- Add `handshake::derive_accept_key` to facilitate external handshakes. +- Add support for `rustls` as TLS backend. The previous `tls` feature flag is now removed in favor + of `native-tls` and `rustls-tls`, which allows to pick the TLS backend. The error API surface had + to be changed to support the new error types coming from rustls related crates. + +# 0.12.0 + +- Add facilities to allow clients to follow HTTP 3XX redirects. +- Allow accepting unmasked clients on the server side to be compatible with some legacy / invalid clients. +- Update of dependencies and documentation fixes. diff --git a/.cargo-vendor/tungstenite/Cargo.lock b/.cargo-vendor/tungstenite/Cargo.lock new file mode 100644 index 0000000000..7ec88dc39c --- /dev/null +++ b/.cargo-vendor/tungstenite/Cargo.lock @@ -0,0 +1,1349 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" + +[[package]] +name = "ciborium-ll" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.4.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "cpufeatures" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +dependencies = [ + "libc", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "data-encoding" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" + +[[package]] +name = "env_logger" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "errno" +version = "0.3.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + +[[package]] +name = "hermit-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "input_buffer" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acee673b88a760f5d1f7b2677a90ab797878282ca36ebd0ed8d560361bee9810" +dependencies = [ + "bytes", +] + +[[package]] +name = "is-terminal" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi", + "rustix", + "windows-sys 0.48.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.9" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" + +[[package]] +name = "js-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" + +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + 
"security-framework-sys", + "tempfile", +] + +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "openssl" +version = "0.10.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-src" +version = "300.1.6+3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" +dependencies = [ + "cc", +] + +[[package]] +name = "openssl-sys" +version = "0.9.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +dependencies = [ + "cc", + "libc", + "openssl-src", 
+ "pkg-config", + "vcpkg", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rayon" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "regex" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +dependencies = [ + "aho-corasick", + 
"memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "ring" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" +dependencies = [ + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bc238b76c51bbc449c55ffbc39d03772a057cc8cf783c49d4af4c2537b74a8b" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +dependencies = [ + "base64", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7673e0aa20ee4937c6aacfc12bb8341cfbf054cdd21df6bec5fd0629fe9339b" + +[[package]] +name = "rustls-webpki" 
+version = "0.102.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "ryu" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +dependencies = [ + "serde_derive", +] + +[[package]] +name 
= "serde_derive" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "2.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +dependencies = [ + "cfg-if", + "fastrand", + "redox_syscall", + "rustix", + "windows-sys 0.48.0", +] + 
+[[package]] +name = "termcolor" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tungstenite" +version = "0.21.0" +dependencies = [ + "byteorder", + "bytes", + "criterion", + "data-encoding", + "env_logger", + "http", + "httparse", + "input_buffer", + "log", + "native-tls", + "rand", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "sha1", + "socket2", + "thiserror", + "url", + "utf-8", + "webpki-roots", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-bidi" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "walkdir" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" + +[[package]] +name = "web-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +dependencies = [ + "js-sys", + 
"wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + 
"windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = 
"windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" diff --git a/.cargo-vendor/tungstenite/Cargo.toml b/.cargo-vendor/tungstenite/Cargo.toml new file mode 100644 
index 0000000000..3d8cca5d43 --- /dev/null +++ b/.cargo-vendor/tungstenite/Cargo.toml @@ -0,0 +1,179 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.51" +name = "tungstenite" +version = "0.21.0" +authors = [ + "Alexey Galakhov", + "Daniel Abramov", +] +include = [ + "benches/**/*", + "src/**/*", + "examples/**/*", + "LICENSE-*", + "README.md", + "CHANGELOG.md", +] +description = "Lightweight stream-based WebSocket implementation" +homepage = "https://github.com/snapview/tungstenite-rs" +documentation = "https://docs.rs/tungstenite/0.21.0" +readme = "README.md" +keywords = [ + "websocket", + "io", + "web", +] +categories = [ + "web-programming::websocket", + "network-programming", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/snapview/tungstenite-rs" + +[package.metadata.docs.rs] +all-features = true + +[[example]] +name = "client" +required-features = ["handshake"] + +[[example]] +name = "server" +required-features = ["handshake"] + +[[example]] +name = "autobahn-client" +required-features = ["handshake"] + +[[example]] +name = "autobahn-server" +required-features = ["handshake"] + +[[example]] +name = "callback-error" +required-features = ["handshake"] + +[[example]] +name = "srv_accept_unmasked_frames" +required-features = ["handshake"] + +[[bench]] +name = "buffer" +harness = false + +[[bench]] +name = "write" +harness = false + +[dependencies.byteorder] +version = "1.3.2" + +[dependencies.bytes] +version = "1.0" + +[dependencies.data-encoding] +version = "2" 
+optional = true + +[dependencies.http] +version = "1.0" +optional = true + +[dependencies.httparse] +version = "1.3.4" +optional = true + +[dependencies.log] +version = "0.4.8" + +[dependencies.native-tls-crate] +version = "0.2.3" +optional = true +package = "native-tls" + +[dependencies.rand] +version = "0.8.0" + +[dependencies.rustls] +version = "0.22.0" +optional = true + +[dependencies.rustls-native-certs] +version = "0.7.0" +optional = true + +[dependencies.rustls-pki-types] +version = "1.0" +optional = true + +[dependencies.sha1] +version = "0.10" +optional = true + +[dependencies.thiserror] +version = "1.0.23" + +[dependencies.url] +version = "2.1.0" +optional = true + +[dependencies.utf-8] +version = "0.7.5" + +[dependencies.webpki-roots] +version = "0.26" +optional = true + +[dev-dependencies.criterion] +version = "0.5.0" + +[dev-dependencies.env_logger] +version = "0.10.0" + +[dev-dependencies.input_buffer] +version = "0.5.0" + +[dev-dependencies.rand] +version = "0.8.4" + +[dev-dependencies.socket2] +version = "0.5.5" + +[features] +__rustls-tls = [ + "rustls", + "rustls-pki-types", +] +default = ["handshake"] +handshake = [ + "data-encoding", + "http", + "httparse", + "sha1", + "url", +] +native-tls = ["native-tls-crate"] +native-tls-vendored = [ + "native-tls", + "native-tls-crate/vendored", +] +rustls-tls-native-roots = [ + "__rustls-tls", + "rustls-native-certs", +] +rustls-tls-webpki-roots = [ + "__rustls-tls", + "webpki-roots", +] diff --git a/.cargo-vendor/tungstenite/LICENSE-APACHE b/.cargo-vendor/tungstenite/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/.cargo-vendor/tungstenite/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/.cargo-vendor/tungstenite/LICENSE-MIT b/.cargo-vendor/tungstenite/LICENSE-MIT new file mode 100644 index 0000000000..dfb98c6579 --- /dev/null +++ b/.cargo-vendor/tungstenite/LICENSE-MIT @@ -0,0 +1,20 @@ +Copyright (c) 2017 Alexey Galakhov +Copyright (c) 2016 Jason Housley + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/.cargo-vendor/tungstenite/README.md b/.cargo-vendor/tungstenite/README.md new file mode 100644 index 0000000000..8c1b2a2b36 --- /dev/null +++ b/.cargo-vendor/tungstenite/README.md @@ -0,0 +1,86 @@ +# Tungstenite + +Lightweight stream-based WebSocket implementation for [Rust](https://www.rust-lang.org/). 
+ +```rust +use std::net::TcpListener; +use std::thread::spawn; +use tungstenite::accept; + +/// A WebSocket echo server +fn main () { + let server = TcpListener::bind("127.0.0.1:9001").unwrap(); + for stream in server.incoming() { + spawn (move || { + let mut websocket = accept(stream.unwrap()).unwrap(); + loop { + let msg = websocket.read().unwrap(); + + // We do not want to send back ping/pong messages. + if msg.is_binary() || msg.is_text() { + websocket.send(msg).unwrap(); + } + } + }); + } +} +``` + +Take a look at the examples section to see how to write a simple client/server. + +**NOTE:** `tungstenite-rs` is more like a barebone to build reliable modern networking applications +using WebSockets. If you're looking for a modern production-ready "batteries included" WebSocket +library that allows you to efficiently use non-blocking sockets and do "full-duplex" communication, +take a look at [`tokio-tungstenite`](https://github.com/snapview/tokio-tungstenite). + +[![MIT licensed](https://img.shields.io/badge/License-MIT-blue.svg)](./LICENSE-MIT) +[![Apache-2.0 licensed](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](./LICENSE-APACHE) +[![Crates.io](https://img.shields.io/crates/v/tungstenite.svg?maxAge=2592000)](https://crates.io/crates/tungstenite) +[![Build Status](https://github.com/snapview/tungstenite-rs/actions/workflows/ci.yml/badge.svg)](https://github.com/snapview/tungstenite-rs/actions) + +[Documentation](https://docs.rs/tungstenite) + +Introduction +------------ +This library provides an implementation of WebSockets, +[RFC6455](https://tools.ietf.org/html/rfc6455). It allows for both synchronous (like TcpStream) +and asynchronous usage and is easy to integrate into any third-party event loops including +[MIO](https://github.com/tokio-rs/mio). The API design abstracts away all the internals of the +WebSocket protocol but still makes them accessible for those who wants full control over the +network. + +Why Tungstenite? 
+---------------- + +It's formerly WS2, the 2nd implementation of WS. WS2 is the chemical formula of +tungsten disulfide, the tungstenite mineral. + +Features +-------- + +Tungstenite provides a complete implementation of the WebSocket specification. +TLS is supported on all platforms using `native-tls` or `rustls`. The following +features are available: + +* `native-tls` +* `native-tls-vendored` +* `rustls-tls-native-roots` +* `rustls-tls-webpki-roots` + +Choose the one that is appropriate for your needs. + +By default **no TLS feature is activated**, so make sure you use one of the TLS features, +otherwise you won't be able to communicate with the TLS endpoints. + +There is no support for permessage-deflate at the moment, but the PRs are welcome :wink: + +Testing +------- + +Tungstenite is thoroughly tested and passes the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) for +WebSockets. It is also covered by internal unit tests as well as possible. + +Contributing +------------ + +Please report bugs and make feature requests [here](https://github.com/snapview/tungstenite-rs/issues). diff --git a/.cargo-vendor/tungstenite/benches/buffer.rs b/.cargo-vendor/tungstenite/benches/buffer.rs new file mode 100644 index 0000000000..9f15d1384b --- /dev/null +++ b/.cargo-vendor/tungstenite/benches/buffer.rs @@ -0,0 +1,126 @@ +use std::io::{Cursor, Read, Result as IoResult}; + +use bytes::Buf; +use criterion::*; +use input_buffer::InputBuffer; + +use tungstenite::buffer::ReadBuffer; + +const CHUNK_SIZE: usize = 4096; + +/// A FIFO buffer for reading packets from the network. +#[derive(Debug)] +pub struct StackReadBuffer { + storage: Cursor>, + chunk: [u8; CHUNK_SIZE], +} + +impl StackReadBuffer { + /// Create a new empty input buffer. + pub fn new() -> Self { + Self::with_capacity(CHUNK_SIZE) + } + + /// Create a new empty input buffer with a given `capacity`. 
+ pub fn with_capacity(capacity: usize) -> Self { + Self::from_partially_read(Vec::with_capacity(capacity)) + } + + /// Create a input buffer filled with previously read data. + pub fn from_partially_read(part: Vec) -> Self { + Self { storage: Cursor::new(part), chunk: [0; CHUNK_SIZE] } + } + + /// Get a cursor to the data storage. + pub fn as_cursor(&self) -> &Cursor> { + &self.storage + } + + /// Get a cursor to the mutable data storage. + pub fn as_cursor_mut(&mut self) -> &mut Cursor> { + &mut self.storage + } + + /// Consume the `ReadBuffer` and get the internal storage. + pub fn into_vec(mut self) -> Vec { + // Current implementation of `tungstenite-rs` expects that the `into_vec()` drains + // the data from the container that has already been read by the cursor. + self.clean_up(); + + // Now we can safely return the internal container. + self.storage.into_inner() + } + + /// Read next portion of data from the given input stream. + pub fn read_from(&mut self, stream: &mut S) -> IoResult { + self.clean_up(); + let size = stream.read(&mut self.chunk)?; + self.storage.get_mut().extend_from_slice(&self.chunk[..size]); + Ok(size) + } + + /// Cleans ups the part of the vector that has been already read by the cursor. 
+ fn clean_up(&mut self) { + let pos = self.storage.position() as usize; + self.storage.get_mut().drain(0..pos).count(); + self.storage.set_position(0); + } +} + +impl Buf for StackReadBuffer { + fn remaining(&self) -> usize { + Buf::remaining(self.as_cursor()) + } + + fn chunk(&self) -> &[u8] { + Buf::chunk(self.as_cursor()) + } + + fn advance(&mut self, cnt: usize) { + Buf::advance(self.as_cursor_mut(), cnt) + } +} + +impl Default for StackReadBuffer { + fn default() -> Self { + Self::new() + } +} + +#[inline] +fn input_buffer(mut stream: impl Read) { + let mut buffer = InputBuffer::with_capacity(CHUNK_SIZE); + while buffer.read_from(&mut stream).unwrap() != 0 {} +} + +#[inline] +fn stack_read_buffer(mut stream: impl Read) { + let mut buffer = StackReadBuffer::::new(); + while buffer.read_from(&mut stream).unwrap() != 0 {} +} + +#[inline] +fn heap_read_buffer(mut stream: impl Read) { + let mut buffer = ReadBuffer::::new(); + while buffer.read_from(&mut stream).unwrap() != 0 {} +} + +fn benchmark(c: &mut Criterion) { + const STREAM_SIZE: usize = 1024 * 1024 * 4; + let data: Vec = (0..STREAM_SIZE).map(|_| rand::random()).collect(); + let stream = Cursor::new(data); + + let mut group = c.benchmark_group("buffers"); + group.throughput(Throughput::Bytes(STREAM_SIZE as u64)); + group.bench_function("InputBuffer", |b| b.iter(|| input_buffer(black_box(stream.clone())))); + group.bench_function("ReadBuffer (stack)", |b| { + b.iter(|| stack_read_buffer(black_box(stream.clone()))) + }); + group.bench_function("ReadBuffer (heap)", |b| { + b.iter(|| heap_read_buffer(black_box(stream.clone()))) + }); + group.finish(); +} + +criterion_group!(benches, benchmark); +criterion_main!(benches); diff --git a/.cargo-vendor/tungstenite/benches/write.rs b/.cargo-vendor/tungstenite/benches/write.rs new file mode 100644 index 0000000000..7908818395 --- /dev/null +++ b/.cargo-vendor/tungstenite/benches/write.rs @@ -0,0 +1,75 @@ +//! Benchmarks for write performance. 
+use criterion::{BatchSize, Criterion}; +use std::{ + hint, + io::{self, Read, Write}, + time::{Duration, Instant}, +}; +use tungstenite::{Message, WebSocket}; + +const MOCK_WRITE_LEN: usize = 8 * 1024 * 1024; + +/// `Write` impl that simulates slowish writes and slow flushes. +/// +/// Each `write` can buffer up to 8 MiB before flushing but takes an additional **~80ns** +/// to simulate stuff going on in the underlying stream. +/// Each `flush` takes **~8µs** to simulate flush io. +struct MockWrite(Vec); + +impl Read for MockWrite { + fn read(&mut self, _: &mut [u8]) -> io::Result { + Err(io::Error::new(io::ErrorKind::WouldBlock, "reads not supported")) + } +} +impl Write for MockWrite { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.0.len() + buf.len() > MOCK_WRITE_LEN { + self.flush()?; + } + // simulate io + spin(Duration::from_nanos(80)); + self.0.extend(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + if !self.0.is_empty() { + // simulate io + spin(Duration::from_micros(8)); + self.0.clear(); + } + Ok(()) + } +} + +fn spin(duration: Duration) { + let a = Instant::now(); + while a.elapsed() < duration { + hint::spin_loop(); + } +} + +fn benchmark(c: &mut Criterion) { + // Writes 100k small json text messages then flushes + c.bench_function("write 100k small texts then flush", |b| { + let mut ws = WebSocket::from_raw_socket( + MockWrite(Vec::with_capacity(MOCK_WRITE_LEN)), + tungstenite::protocol::Role::Server, + None, + ); + + b.iter_batched( + || (0..100_000).map(|i| Message::Text(format!("{{\"id\":{i}}}"))), + |batch| { + for msg in batch { + ws.write(msg).unwrap(); + } + ws.flush().unwrap(); + }, + BatchSize::SmallInput, + ) + }); +} + +criterion::criterion_group!(write_benches, benchmark); +criterion::criterion_main!(write_benches); diff --git a/.cargo-vendor/tungstenite/examples/autobahn-client.rs b/.cargo-vendor/tungstenite/examples/autobahn-client.rs new file mode 100644 index 0000000000..ac7a7d1ab1 --- /dev/null +++ 
b/.cargo-vendor/tungstenite/examples/autobahn-client.rs @@ -0,0 +1,53 @@ +use log::*; +use url::Url; + +use tungstenite::{connect, Error, Message, Result}; + +const AGENT: &str = "Tungstenite"; + +fn get_case_count() -> Result { + let (mut socket, _) = connect(Url::parse("ws://localhost:9001/getCaseCount").unwrap())?; + let msg = socket.read()?; + socket.close(None)?; + Ok(msg.into_text()?.parse::().unwrap()) +} + +fn update_reports() -> Result<()> { + let (mut socket, _) = connect( + Url::parse(&format!("ws://localhost:9001/updateReports?agent={}", AGENT)).unwrap(), + )?; + socket.close(None)?; + Ok(()) +} + +fn run_test(case: u32) -> Result<()> { + info!("Running test case {}", case); + let case_url = + Url::parse(&format!("ws://localhost:9001/runCase?case={}&agent={}", case, AGENT)).unwrap(); + let (mut socket, _) = connect(case_url)?; + loop { + match socket.read()? { + msg @ Message::Text(_) | msg @ Message::Binary(_) => { + socket.send(msg)?; + } + Message::Ping(_) | Message::Pong(_) | Message::Close(_) | Message::Frame(_) => {} + } + } +} + +fn main() { + env_logger::init(); + + let total = get_case_count().unwrap(); + + for case in 1..=total { + if let Err(e) = run_test(case) { + match e { + Error::ConnectionClosed | Error::Protocol(_) | Error::Utf8 => (), + err => error!("test: {}", err), + } + } + } + + update_reports().unwrap(); +} diff --git a/.cargo-vendor/tungstenite/examples/autobahn-server.rs b/.cargo-vendor/tungstenite/examples/autobahn-server.rs new file mode 100644 index 0000000000..dafe37bd3a --- /dev/null +++ b/.cargo-vendor/tungstenite/examples/autobahn-server.rs @@ -0,0 +1,47 @@ +use std::{ + net::{TcpListener, TcpStream}, + thread::spawn, +}; + +use log::*; +use tungstenite::{accept, handshake::HandshakeRole, Error, HandshakeError, Message, Result}; + +fn must_not_block(err: HandshakeError) -> Error { + match err { + HandshakeError::Interrupted(_) => panic!("Bug: blocking socket would block"), + HandshakeError::Failure(f) => f, + } +} + +fn 
handle_client(stream: TcpStream) -> Result<()> { + let mut socket = accept(stream).map_err(must_not_block)?; + info!("Running test"); + loop { + match socket.read()? { + msg @ Message::Text(_) | msg @ Message::Binary(_) => { + socket.send(msg)?; + } + Message::Ping(_) | Message::Pong(_) | Message::Close(_) | Message::Frame(_) => {} + } + } +} + +fn main() { + env_logger::init(); + + let server = TcpListener::bind("127.0.0.1:9002").unwrap(); + + for stream in server.incoming() { + spawn(move || match stream { + Ok(stream) => { + if let Err(err) = handle_client(stream) { + match err { + Error::ConnectionClosed | Error::Protocol(_) | Error::Utf8 => (), + e => error!("test: {}", e), + } + } + } + Err(e) => error!("Error accepting stream: {}", e), + }); + } +} diff --git a/.cargo-vendor/tungstenite/examples/callback-error.rs b/.cargo-vendor/tungstenite/examples/callback-error.rs new file mode 100644 index 0000000000..cf78a2eedc --- /dev/null +++ b/.cargo-vendor/tungstenite/examples/callback-error.rs @@ -0,0 +1,23 @@ +use std::{net::TcpListener, thread::spawn}; + +use tungstenite::{ + accept_hdr, + handshake::server::{Request, Response}, + http::StatusCode, +}; + +fn main() { + let server = TcpListener::bind("127.0.0.1:3012").unwrap(); + for stream in server.incoming() { + spawn(move || { + let callback = |_req: &Request, _resp| { + let resp = Response::builder() + .status(StatusCode::FORBIDDEN) + .body(Some("Access denied".into())) + .unwrap(); + Err(resp) + }; + accept_hdr(stream.unwrap(), callback).unwrap_err(); + }); + } +} diff --git a/.cargo-vendor/tungstenite/examples/client.rs b/.cargo-vendor/tungstenite/examples/client.rs new file mode 100644 index 0000000000..a24f3161a4 --- /dev/null +++ b/.cargo-vendor/tungstenite/examples/client.rs @@ -0,0 +1,23 @@ +use tungstenite::{connect, Message}; +use url::Url; + +fn main() { + env_logger::init(); + + let (mut socket, response) = + connect(Url::parse("ws://localhost:3012/socket").unwrap()).expect("Can't connect"); + + 
println!("Connected to the server"); + println!("Response HTTP code: {}", response.status()); + println!("Response contains the following headers:"); + for (ref header, _value) in response.headers() { + println!("* {}", header); + } + + socket.send(Message::Text("Hello WebSocket".into())).unwrap(); + loop { + let msg = socket.read().expect("Error reading message"); + println!("Received: {}", msg); + } + // socket.close(None); +} diff --git a/.cargo-vendor/tungstenite/examples/server.rs b/.cargo-vendor/tungstenite/examples/server.rs new file mode 100644 index 0000000000..2183b9615f --- /dev/null +++ b/.cargo-vendor/tungstenite/examples/server.rs @@ -0,0 +1,38 @@ +use std::{net::TcpListener, thread::spawn}; + +use tungstenite::{ + accept_hdr, + handshake::server::{Request, Response}, +}; + +fn main() { + env_logger::init(); + let server = TcpListener::bind("127.0.0.1:3012").unwrap(); + for stream in server.incoming() { + spawn(move || { + let callback = |req: &Request, mut response: Response| { + println!("Received a new ws handshake"); + println!("The request's path is: {}", req.uri().path()); + println!("The request's headers are:"); + for (ref header, _value) in req.headers() { + println!("* {}", header); + } + + // Let's add an additional header to our response to the client. 
+ let headers = response.headers_mut(); + headers.append("MyCustomHeader", ":)".parse().unwrap()); + headers.append("SOME_TUNGSTENITE_HEADER", "header_value".parse().unwrap()); + + Ok(response) + }; + let mut websocket = accept_hdr(stream.unwrap(), callback).unwrap(); + + loop { + let msg = websocket.read().unwrap(); + if msg.is_binary() || msg.is_text() { + websocket.send(msg).unwrap(); + } + } + }); + } +} diff --git a/.cargo-vendor/tungstenite/examples/srv_accept_unmasked_frames.rs b/.cargo-vendor/tungstenite/examples/srv_accept_unmasked_frames.rs new file mode 100644 index 0000000000..b65e4f7054 --- /dev/null +++ b/.cargo-vendor/tungstenite/examples/srv_accept_unmasked_frames.rs @@ -0,0 +1,48 @@ +use std::{net::TcpListener, thread::spawn}; +use tungstenite::{ + accept_hdr_with_config, + handshake::server::{Request, Response}, + protocol::WebSocketConfig, +}; + +fn main() { + env_logger::init(); + let server = TcpListener::bind("127.0.0.1:3012").unwrap(); + for stream in server.incoming() { + spawn(move || { + let callback = |req: &Request, mut response: Response| { + println!("Received a new ws handshake"); + println!("The request's path is: {}", req.uri().path()); + println!("The request's headers are:"); + for (ref header, _value) in req.headers() { + println!("* {}", header); + } + + // Let's add an additional header to our response to the client. 
+ let headers = response.headers_mut(); + headers.append("MyCustomHeader", ":)".parse().unwrap()); + headers.append("SOME_TUNGSTENITE_HEADER", "header_value".parse().unwrap()); + + Ok(response) + }; + + let config = Some(WebSocketConfig { + // This setting allows to accept client frames which are not masked + // This is not in compliance with RFC 6455 but might be handy in some + // rare cases where it is necessary to integrate with existing/legacy + // clients which are sending unmasked frames + accept_unmasked_frames: true, + ..<_>::default() + }); + + let mut websocket = accept_hdr_with_config(stream.unwrap(), callback, config).unwrap(); + + loop { + let msg = websocket.read().unwrap(); + if msg.is_binary() || msg.is_text() { + println!("received message {}", msg); + } + } + }); + } +} diff --git a/.cargo-vendor/tungstenite/src/buffer.rs b/.cargo-vendor/tungstenite/src/buffer.rs new file mode 100644 index 0000000000..a5e749052b --- /dev/null +++ b/.cargo-vendor/tungstenite/src/buffer.rs @@ -0,0 +1,125 @@ +//! A buffer for reading data from the network. +//! +//! The `ReadBuffer` is a buffer of bytes similar to a first-in, first-out queue. +//! It is filled by reading from a stream supporting `Read` and is then +//! accessible as a cursor for reading bytes. + +use std::io::{Cursor, Read, Result as IoResult}; + +use bytes::Buf; + +/// A FIFO buffer for reading packets from the network. +#[derive(Debug)] +pub struct ReadBuffer { + storage: Cursor>, + chunk: Box<[u8; CHUNK_SIZE]>, +} + +impl ReadBuffer { + /// Create a new empty input buffer. + pub fn new() -> Self { + Self::with_capacity(CHUNK_SIZE) + } + + /// Create a new empty input buffer with a given `capacity`. + pub fn with_capacity(capacity: usize) -> Self { + Self::from_partially_read(Vec::with_capacity(capacity)) + } + + /// Create a input buffer filled with previously read data. 
+ pub fn from_partially_read(part: Vec) -> Self { + Self { storage: Cursor::new(part), chunk: Box::new([0; CHUNK_SIZE]) } + } + + /// Get a cursor to the data storage. + pub fn as_cursor(&self) -> &Cursor> { + &self.storage + } + + /// Get a cursor to the mutable data storage. + pub fn as_cursor_mut(&mut self) -> &mut Cursor> { + &mut self.storage + } + + /// Consume the `ReadBuffer` and get the internal storage. + pub fn into_vec(mut self) -> Vec { + // Current implementation of `tungstenite-rs` expects that the `into_vec()` drains + // the data from the container that has already been read by the cursor. + self.clean_up(); + + // Now we can safely return the internal container. + self.storage.into_inner() + } + + /// Read next portion of data from the given input stream. + pub fn read_from(&mut self, stream: &mut S) -> IoResult { + self.clean_up(); + let size = stream.read(&mut *self.chunk)?; + self.storage.get_mut().extend_from_slice(&self.chunk[..size]); + Ok(size) + } + + /// Cleans ups the part of the vector that has been already read by the cursor. 
+ fn clean_up(&mut self) { + let pos = self.storage.position() as usize; + self.storage.get_mut().drain(0..pos).count(); + self.storage.set_position(0); + } +} + +impl Buf for ReadBuffer { + fn remaining(&self) -> usize { + Buf::remaining(self.as_cursor()) + } + + fn chunk(&self) -> &[u8] { + Buf::chunk(self.as_cursor()) + } + + fn advance(&mut self, cnt: usize) { + Buf::advance(self.as_cursor_mut(), cnt) + } +} + +impl Default for ReadBuffer { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn simple_reading() { + let mut input = Cursor::new(b"Hello World!".to_vec()); + let mut buffer = ReadBuffer::<4096>::new(); + let size = buffer.read_from(&mut input).unwrap(); + assert_eq!(size, 12); + assert_eq!(buffer.chunk(), b"Hello World!"); + } + + #[test] + fn reading_in_chunks() { + let mut inp = Cursor::new(b"Hello World!".to_vec()); + let mut buf = ReadBuffer::<4>::new(); + + let size = buf.read_from(&mut inp).unwrap(); + assert_eq!(size, 4); + assert_eq!(buf.chunk(), b"Hell"); + + buf.advance(2); + assert_eq!(buf.chunk(), b"ll"); + assert_eq!(buf.storage.get_mut(), b"Hell"); + + let size = buf.read_from(&mut inp).unwrap(); + assert_eq!(size, 4); + assert_eq!(buf.chunk(), b"llo Wo"); + assert_eq!(buf.storage.get_mut(), b"llo Wo"); + + let size = buf.read_from(&mut inp).unwrap(); + assert_eq!(size, 4); + assert_eq!(buf.chunk(), b"llo World!"); + } +} diff --git a/.cargo-vendor/tungstenite/src/client.rs b/.cargo-vendor/tungstenite/src/client.rs new file mode 100644 index 0000000000..9b30037f67 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/client.rs @@ -0,0 +1,267 @@ +//! Methods to connect to a WebSocket as a client. 
+ +use std::{ + io::{Read, Write}, + net::{SocketAddr, TcpStream, ToSocketAddrs}, + result::Result as StdResult, +}; + +use http::{request::Parts, Uri}; +use log::*; + +use url::Url; + +use crate::{ + handshake::client::{generate_key, Request, Response}, + protocol::WebSocketConfig, + stream::MaybeTlsStream, +}; + +use crate::{ + error::{Error, Result, UrlError}, + handshake::{client::ClientHandshake, HandshakeError}, + protocol::WebSocket, + stream::{Mode, NoDelay}, +}; + +/// Connect to the given WebSocket in blocking mode. +/// +/// Uses a websocket configuration passed as an argument to the function. Calling it with `None` is +/// equal to calling `connect()` function. +/// +/// The URL may be either ws:// or wss://. +/// To support wss:// URLs, you must activate the TLS feature on the crate level. Please refer to the +/// project's [README][readme] for more information on available features. +/// +/// This function "just works" for those who wants a simple blocking solution +/// similar to `std::net::TcpStream`. If you want a non-blocking or other +/// custom stream, call `client` instead. +/// +/// This function uses `native_tls` or `rustls` to do TLS depending on the feature flags enabled. If +/// you want to use other TLS libraries, use `client` instead. There is no need to enable any of +/// the `*-tls` features if you don't call `connect` since it's the only function that uses them. 
+/// +/// [readme]: https://github.com/snapview/tungstenite-rs/#features +pub fn connect_with_config( + request: Req, + config: Option, + max_redirects: u8, +) -> Result<(WebSocket>, Response)> { + fn try_client_handshake( + request: Request, + config: Option, + ) -> Result<(WebSocket>, Response)> { + let uri = request.uri(); + let mode = uri_mode(uri)?; + let host = request.uri().host().ok_or(Error::Url(UrlError::NoHostName))?; + let host = if host.starts_with('[') { &host[1..host.len() - 1] } else { host }; + let port = uri.port_u16().unwrap_or(match mode { + Mode::Plain => 80, + Mode::Tls => 443, + }); + let addrs = (host, port).to_socket_addrs()?; + let mut stream = connect_to_some(addrs.as_slice(), request.uri())?; + NoDelay::set_nodelay(&mut stream, true)?; + + #[cfg(not(any(feature = "native-tls", feature = "__rustls-tls")))] + let client = client_with_config(request, MaybeTlsStream::Plain(stream), config); + #[cfg(any(feature = "native-tls", feature = "__rustls-tls"))] + let client = crate::tls::client_tls_with_config(request, stream, config, None); + + client.map_err(|e| match e { + HandshakeError::Failure(f) => f, + HandshakeError::Interrupted(_) => panic!("Bug: blocking handshake not blocked"), + }) + } + + fn create_request(parts: &Parts, uri: &Uri) -> Request { + let mut builder = + Request::builder().uri(uri.clone()).method(parts.method.clone()).version(parts.version); + *builder.headers_mut().expect("Failed to create `Request`") = parts.headers.clone(); + builder.body(()).expect("Failed to create `Request`") + } + + let (parts, _) = request.into_client_request()?.into_parts(); + let mut uri = parts.uri.clone(); + + for attempt in 0..(max_redirects + 1) { + let request = create_request(&parts, &uri); + + match try_client_handshake(request, config) { + Err(Error::Http(res)) if res.status().is_redirection() && attempt < max_redirects => { + if let Some(location) = res.headers().get("Location") { + uri = location.to_str()?.parse::()?; + 
debug!("Redirecting to {:?}", uri); + continue; + } else { + warn!("No `Location` found in redirect"); + return Err(Error::Http(res)); + } + } + other => return other, + } + } + + unreachable!("Bug in a redirect handling logic") +} + +/// Connect to the given WebSocket in blocking mode. +/// +/// The URL may be either ws:// or wss://. +/// To support wss:// URLs, feature `native-tls` or `rustls-tls` must be turned on. +/// +/// This function "just works" for those who wants a simple blocking solution +/// similar to `std::net::TcpStream`. If you want a non-blocking or other +/// custom stream, call `client` instead. +/// +/// This function uses `native_tls` or `rustls` to do TLS depending on the feature flags enabled. If +/// you want to use other TLS libraries, use `client` instead. There is no need to enable any of +/// the `*-tls` features if you don't call `connect` since it's the only function that uses them. +pub fn connect( + request: Req, +) -> Result<(WebSocket>, Response)> { + connect_with_config(request, None, 3) +} + +fn connect_to_some(addrs: &[SocketAddr], uri: &Uri) -> Result { + for addr in addrs { + debug!("Trying to contact {} at {}...", uri, addr); + if let Ok(stream) = TcpStream::connect(addr) { + return Ok(stream); + } + } + Err(Error::Url(UrlError::UnableToConnect(uri.to_string()))) +} + +/// Get the mode of the given URL. +/// +/// This function may be used to ease the creation of custom TLS streams +/// in non-blocking algorithms or for use with TLS libraries other than `native_tls` or `rustls`. +pub fn uri_mode(uri: &Uri) -> Result { + match uri.scheme_str() { + Some("ws") => Ok(Mode::Plain), + Some("wss") => Ok(Mode::Tls), + _ => Err(Error::Url(UrlError::UnsupportedUrlScheme)), + } +} + +/// Do the client handshake over the given stream given a web socket configuration. Passing `None` +/// as configuration is equal to calling `client()` function. 
+/// +/// Use this function if you need a nonblocking handshake support or if you +/// want to use a custom stream like `mio::net::TcpStream` or `openssl::ssl::SslStream`. +/// Any stream supporting `Read + Write` will do. +pub fn client_with_config( + request: Req, + stream: Stream, + config: Option, +) -> StdResult<(WebSocket, Response), HandshakeError>> +where + Stream: Read + Write, + Req: IntoClientRequest, +{ + ClientHandshake::start(stream, request.into_client_request()?, config)?.handshake() +} + +/// Do the client handshake over the given stream. +/// +/// Use this function if you need a nonblocking handshake support or if you +/// want to use a custom stream like `mio::net::TcpStream` or `openssl::ssl::SslStream`. +/// Any stream supporting `Read + Write` will do. +pub fn client( + request: Req, + stream: Stream, +) -> StdResult<(WebSocket, Response), HandshakeError>> +where + Stream: Read + Write, + Req: IntoClientRequest, +{ + client_with_config(request, stream, None) +} + +/// Trait for converting various types into HTTP requests used for a client connection. +/// +/// This trait is implemented by default for string slices, strings, `url::Url`, `http::Uri` and +/// `http::Request<()>`. Note that the implementation for `http::Request<()>` is trivial and will +/// simply take your request and pass it as is further without altering any headers or URLs, so +/// be aware of this. If you just want to connect to the endpoint with a certain URL, better pass +/// a regular string containing the URL in which case `tungstenite-rs` will take care for generating +/// the proper `http::Request<()>` for you. +pub trait IntoClientRequest { + /// Convert into a `Request` that can be used for a client connection. 
+ fn into_client_request(self) -> Result; +} + +impl<'a> IntoClientRequest for &'a str { + fn into_client_request(self) -> Result { + self.parse::()?.into_client_request() + } +} + +impl<'a> IntoClientRequest for &'a String { + fn into_client_request(self) -> Result { + <&str as IntoClientRequest>::into_client_request(self) + } +} + +impl IntoClientRequest for String { + fn into_client_request(self) -> Result { + <&str as IntoClientRequest>::into_client_request(&self) + } +} + +impl<'a> IntoClientRequest for &'a Uri { + fn into_client_request(self) -> Result { + self.clone().into_client_request() + } +} + +impl IntoClientRequest for Uri { + fn into_client_request(self) -> Result { + let authority = self.authority().ok_or(Error::Url(UrlError::NoHostName))?.as_str(); + let host = authority + .find('@') + .map(|idx| authority.split_at(idx + 1).1) + .unwrap_or_else(|| authority); + + if host.is_empty() { + return Err(Error::Url(UrlError::EmptyHostName)); + } + + let req = Request::builder() + .method("GET") + .header("Host", host) + .header("Connection", "Upgrade") + .header("Upgrade", "websocket") + .header("Sec-WebSocket-Version", "13") + .header("Sec-WebSocket-Key", generate_key()) + .uri(self) + .body(())?; + Ok(req) + } +} + +impl<'a> IntoClientRequest for &'a Url { + fn into_client_request(self) -> Result { + self.as_str().into_client_request() + } +} + +impl IntoClientRequest for Url { + fn into_client_request(self) -> Result { + self.as_str().into_client_request() + } +} + +impl IntoClientRequest for Request { + fn into_client_request(self) -> Result { + Ok(self) + } +} + +impl<'h, 'b> IntoClientRequest for httparse::Request<'h, 'b> { + fn into_client_request(self) -> Result { + use crate::handshake::headers::FromHttparse; + Request::from_httparse(self) + } +} diff --git a/.cargo-vendor/tungstenite/src/error.rs b/.cargo-vendor/tungstenite/src/error.rs new file mode 100644 index 0000000000..faea80bf0c --- /dev/null +++ b/.cargo-vendor/tungstenite/src/error.rs @@ 
-0,0 +1,281 @@ +//! Error handling. + +use std::{io, result, str, string}; + +use crate::protocol::{frame::coding::Data, Message}; +#[cfg(feature = "handshake")] +use http::{header::HeaderName, Response}; +use thiserror::Error; + +/// Result type of all Tungstenite library calls. +pub type Result = result::Result; + +/// Possible WebSocket errors. +#[derive(Error, Debug)] +pub enum Error { + /// WebSocket connection closed normally. This informs you of the close. + /// It's not an error as such and nothing wrong happened. + /// + /// This is returned as soon as the close handshake is finished (we have both sent and + /// received a close frame) on the server end and as soon as the server has closed the + /// underlying connection if this endpoint is a client. + /// + /// Thus when you receive this, it is safe to drop the underlying connection. + /// + /// Receiving this error means that the WebSocket object is not usable anymore and the + /// only meaningful action with it is dropping it. + #[error("Connection closed normally")] + ConnectionClosed, + /// Trying to work with already closed connection. + /// + /// Trying to read or write after receiving `ConnectionClosed` causes this. + /// + /// As opposed to `ConnectionClosed`, this indicates your code tries to operate on the + /// connection when it really shouldn't anymore, so this really indicates a programmer + /// error on your part. + #[error("Trying to work with closed connection")] + AlreadyClosed, + /// Input-output error. Apart from WouldBlock, these are generally errors with the + /// underlying connection and you should probably consider them fatal. + #[error("IO error: {0}")] + Io(#[from] io::Error), + /// TLS error. + /// + /// Note that this error variant is enabled unconditionally even if no TLS feature is enabled, + /// to provide a feature-agnostic API surface. + #[error("TLS error: {0}")] + Tls(#[from] TlsError), + /// - When reading: buffer capacity exhausted. 
+ /// - When writing: your message is bigger than the configured max message size + /// (64MB by default). + #[error("Space limit exceeded: {0}")] + Capacity(#[from] CapacityError), + /// Protocol violation. + #[error("WebSocket protocol error: {0}")] + Protocol(#[from] ProtocolError), + /// Message write buffer is full. + #[error("Write buffer is full")] + WriteBufferFull(Message), + /// UTF coding error. + #[error("UTF-8 encoding error")] + Utf8, + /// Attack attempt detected. + #[error("Attack attempt detected")] + AttackAttempt, + /// Invalid URL. + #[error("URL error: {0}")] + Url(#[from] UrlError), + /// HTTP error. + #[error("HTTP error: {}", .0.status())] + #[cfg(feature = "handshake")] + Http(Response>>), + /// HTTP format error. + #[error("HTTP format error: {0}")] + #[cfg(feature = "handshake")] + HttpFormat(#[from] http::Error), +} + +impl From for Error { + fn from(_: str::Utf8Error) -> Self { + Error::Utf8 + } +} + +impl From for Error { + fn from(_: string::FromUtf8Error) -> Self { + Error::Utf8 + } +} + +#[cfg(feature = "handshake")] +impl From for Error { + fn from(err: http::header::InvalidHeaderValue) -> Self { + Error::HttpFormat(err.into()) + } +} + +#[cfg(feature = "handshake")] +impl From for Error { + fn from(err: http::header::InvalidHeaderName) -> Self { + Error::HttpFormat(err.into()) + } +} + +#[cfg(feature = "handshake")] +impl From for Error { + fn from(_: http::header::ToStrError) -> Self { + Error::Utf8 + } +} + +#[cfg(feature = "handshake")] +impl From for Error { + fn from(err: http::uri::InvalidUri) -> Self { + Error::HttpFormat(err.into()) + } +} + +#[cfg(feature = "handshake")] +impl From for Error { + fn from(err: http::status::InvalidStatusCode) -> Self { + Error::HttpFormat(err.into()) + } +} + +#[cfg(feature = "handshake")] +impl From for Error { + fn from(err: httparse::Error) -> Self { + match err { + httparse::Error::TooManyHeaders => Error::Capacity(CapacityError::TooManyHeaders), + e => 
Error::Protocol(ProtocolError::HttparseError(e)), + } + } +} + +/// Indicates the specific type/cause of a capacity error. +#[derive(Error, Debug, PartialEq, Eq, Clone, Copy)] +pub enum CapacityError { + /// Too many headers provided (see [`httparse::Error::TooManyHeaders`]). + #[error("Too many headers")] + TooManyHeaders, + /// Received header is too long. + /// Message is bigger than the maximum allowed size. + #[error("Message too long: {size} > {max_size}")] + MessageTooLong { + /// The size of the message. + size: usize, + /// The maximum allowed message size. + max_size: usize, + }, +} + +/// Indicates the specific type/cause of a protocol error. +#[allow(missing_copy_implementations)] +#[derive(Error, Debug, PartialEq, Eq, Clone)] +pub enum ProtocolError { + /// Use of the wrong HTTP method (the WebSocket protocol requires the GET method be used). + #[error("Unsupported HTTP method used - only GET is allowed")] + WrongHttpMethod, + /// Wrong HTTP version used (the WebSocket protocol requires version 1.1 or higher). + #[error("HTTP version must be 1.1 or higher")] + WrongHttpVersion, + /// Missing `Connection: upgrade` HTTP header. + #[error("No \"Connection: upgrade\" header")] + MissingConnectionUpgradeHeader, + /// Missing `Upgrade: websocket` HTTP header. + #[error("No \"Upgrade: websocket\" header")] + MissingUpgradeWebSocketHeader, + /// Missing `Sec-WebSocket-Version: 13` HTTP header. + #[error("No \"Sec-WebSocket-Version: 13\" header")] + MissingSecWebSocketVersionHeader, + /// Missing `Sec-WebSocket-Key` HTTP header. + #[error("No \"Sec-WebSocket-Key\" header")] + MissingSecWebSocketKey, + /// The `Sec-WebSocket-Accept` header is either not present or does not specify the correct key value. + #[error("Key mismatch in \"Sec-WebSocket-Accept\" header")] + SecWebSocketAcceptKeyMismatch, + /// Garbage data encountered after client request. + #[error("Junk after client request")] + JunkAfterRequest, + /// Custom responses must be unsuccessful. 
+ #[error("Custom response must not be successful")] + CustomResponseSuccessful, + /// Invalid header is passed. Or the header is missing in the request. Or not present at all. Check the request that you pass. + #[error("Missing, duplicated or incorrect header {0}")] + #[cfg(feature = "handshake")] + InvalidHeader(HeaderName), + /// No more data while still performing handshake. + #[error("Handshake not finished")] + HandshakeIncomplete, + /// Wrapper around a [`httparse::Error`] value. + #[error("httparse error: {0}")] + #[cfg(feature = "handshake")] + HttparseError(#[from] httparse::Error), + /// Not allowed to send after having sent a closing frame. + #[error("Sending after closing is not allowed")] + SendAfterClosing, + /// Remote sent data after sending a closing frame. + #[error("Remote sent after having closed")] + ReceivedAfterClosing, + /// Reserved bits in frame header are non-zero. + #[error("Reserved bits are non-zero")] + NonZeroReservedBits, + /// The server must close the connection when an unmasked frame is received. + #[error("Received an unmasked frame from client")] + UnmaskedFrameFromClient, + /// The client must close the connection when a masked frame is received. + #[error("Received a masked frame from server")] + MaskedFrameFromServer, + /// Control frames must not be fragmented. + #[error("Fragmented control frame")] + FragmentedControlFrame, + /// Control frames must have a payload of 125 bytes or less. + #[error("Control frame too big (payload must be 125 bytes or less)")] + ControlFrameTooBig, + /// Type of control frame not recognised. + #[error("Unknown control frame type: {0}")] + UnknownControlFrameType(u8), + /// Type of data frame not recognised. + #[error("Unknown data frame type: {0}")] + UnknownDataFrameType(u8), + /// Received a continue frame despite there being nothing to continue. + #[error("Continue frame but nothing to continue")] + UnexpectedContinueFrame, + /// Received data while waiting for more fragments. 
+ #[error("While waiting for more fragments received: {0}")] + ExpectedFragment(Data), + /// Connection closed without performing the closing handshake. + #[error("Connection reset without closing handshake")] + ResetWithoutClosingHandshake, + /// Encountered an invalid opcode. + #[error("Encountered invalid opcode: {0}")] + InvalidOpcode(u8), + /// The payload for the closing frame is invalid. + #[error("Invalid close sequence")] + InvalidCloseSequence, +} + +/// Indicates the specific type/cause of URL error. +#[derive(Error, Debug, PartialEq, Eq)] +pub enum UrlError { + /// TLS is used despite not being compiled with the TLS feature enabled. + #[error("TLS support not compiled in")] + TlsFeatureNotEnabled, + /// The URL does not include a host name. + #[error("No host name in the URL")] + NoHostName, + /// Failed to connect with this URL. + #[error("Unable to connect to {0}")] + UnableToConnect(String), + /// Unsupported URL scheme used (only `ws://` or `wss://` may be used). + #[error("URL scheme not supported")] + UnsupportedUrlScheme, + /// The URL host name, though included, is empty. + #[error("URL contains empty host name")] + EmptyHostName, + /// The URL does not include a path/query. + #[error("No path/query in URL")] + NoPathOrQuery, +} + +/// TLS errors. +/// +/// Note that even if you enable only the rustls-based TLS support, the error at runtime could still +/// be `Native`, as another crate in the dependency graph may enable native TLS support. +#[allow(missing_copy_implementations)] +#[derive(Error, Debug)] +#[non_exhaustive] +pub enum TlsError { + /// Native TLS error. + #[cfg(feature = "native-tls")] + #[error("native-tls error: {0}")] + Native(#[from] native_tls_crate::Error), + /// Rustls error. + #[cfg(feature = "__rustls-tls")] + #[error("rustls error: {0}")] + Rustls(#[from] rustls::Error), + /// DNS name resolution error. 
+ #[cfg(feature = "__rustls-tls")] + #[error("Invalid DNS name")] + InvalidDnsName, +} diff --git a/.cargo-vendor/tungstenite/src/handshake/client.rs b/.cargo-vendor/tungstenite/src/handshake/client.rs new file mode 100644 index 0000000000..08cc7b29b8 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/handshake/client.rs @@ -0,0 +1,359 @@ +//! Client handshake machine. + +use std::{ + io::{Read, Write}, + marker::PhantomData, +}; + +use http::{ + header::HeaderName, HeaderMap, Request as HttpRequest, Response as HttpResponse, StatusCode, +}; +use httparse::Status; +use log::*; + +use super::{ + derive_accept_key, + headers::{FromHttparse, MAX_HEADERS}, + machine::{HandshakeMachine, StageResult, TryParse}, + HandshakeRole, MidHandshake, ProcessingResult, +}; +use crate::{ + error::{Error, ProtocolError, Result, UrlError}, + protocol::{Role, WebSocket, WebSocketConfig}, +}; + +/// Client request type. +pub type Request = HttpRequest<()>; + +/// Client response type. +pub type Response = HttpResponse>>; + +/// Client handshake role. +#[derive(Debug)] +pub struct ClientHandshake { + verify_data: VerifyData, + config: Option, + _marker: PhantomData, +} + +impl ClientHandshake { + /// Initiate a client handshake. + pub fn start( + stream: S, + request: Request, + config: Option, + ) -> Result> { + if request.method() != http::Method::GET { + return Err(Error::Protocol(ProtocolError::WrongHttpMethod)); + } + + if request.version() < http::Version::HTTP_11 { + return Err(Error::Protocol(ProtocolError::WrongHttpVersion)); + } + + // Check the URI scheme: only ws or wss are supported + let _ = crate::client::uri_mode(request.uri())?; + + // Convert and verify the `http::Request` and turn it into the request as per RFC. + // Also extract the key from it (it must be present in a correct request). 
+ let (request, key) = generate_request(request)?; + + let machine = HandshakeMachine::start_write(stream, request); + + let client = { + let accept_key = derive_accept_key(key.as_ref()); + ClientHandshake { verify_data: VerifyData { accept_key }, config, _marker: PhantomData } + }; + + trace!("Client handshake initiated."); + Ok(MidHandshake { role: client, machine }) + } +} + +impl HandshakeRole for ClientHandshake { + type IncomingData = Response; + type InternalStream = S; + type FinalResult = (WebSocket, Response); + fn stage_finished( + &mut self, + finish: StageResult, + ) -> Result> { + Ok(match finish { + StageResult::DoneWriting(stream) => { + ProcessingResult::Continue(HandshakeMachine::start_read(stream)) + } + StageResult::DoneReading { stream, result, tail } => { + let result = match self.verify_data.verify_response(result) { + Ok(r) => r, + Err(Error::Http(mut e)) => { + *e.body_mut() = Some(tail); + return Err(Error::Http(e)); + } + Err(e) => return Err(e), + }; + + debug!("Client handshake done."); + let websocket = + WebSocket::from_partially_read(stream, tail, Role::Client, self.config); + ProcessingResult::Done((websocket, result)) + } + }) + } +} + +/// Verifies and generates a client WebSocket request from the original request and extracts a WebSocket key from it. +pub fn generate_request(mut request: Request) -> Result<(Vec, String)> { + let mut req = Vec::new(); + write!( + req, + "GET {path} {version:?}\r\n", + path = request.uri().path_and_query().ok_or(Error::Url(UrlError::NoPathOrQuery))?.as_str(), + version = request.version() + ) + .unwrap(); + + // Headers that must be present in a correct request. + const KEY_HEADERNAME: &str = "Sec-WebSocket-Key"; + const WEBSOCKET_HEADERS: [&str; 5] = + ["Host", "Connection", "Upgrade", "Sec-WebSocket-Version", KEY_HEADERNAME]; + + // We must extract a WebSocket key from a properly formed request or fail if it's not present. 
+ let key = request + .headers() + .get(KEY_HEADERNAME) + .ok_or_else(|| { + Error::Protocol(ProtocolError::InvalidHeader( + HeaderName::from_bytes(KEY_HEADERNAME.as_bytes()).unwrap(), + )) + })? + .to_str()? + .to_owned(); + + // We must check that all necessary headers for a valid request are present. Note that we have to + // deal with the fact that some apps seem to have a case-sensitive check for headers which is not + // correct and should not considered the correct behavior, but it seems like some apps ignore it. + // `http` by default writes all headers in lower-case which is fine (and does not violate the RFC) + // but some servers seem to be poorely written and ignore RFC. + // + // See similar problem in `hyper`: https://github.com/hyperium/hyper/issues/1492 + let headers = request.headers_mut(); + for &header in &WEBSOCKET_HEADERS { + let value = headers.remove(header).ok_or_else(|| { + Error::Protocol(ProtocolError::InvalidHeader( + HeaderName::from_bytes(header.as_bytes()).unwrap(), + )) + })?; + write!(req, "{header}: {value}\r\n", header = header, value = value.to_str()?).unwrap(); + } + + // Now we must ensure that the headers that we've written once are not anymore present in the map. + // If they do, then the request is invalid (some headers are duplicated there for some reason). + let insensitive: Vec = + WEBSOCKET_HEADERS.iter().map(|h| h.to_ascii_lowercase()).collect(); + for (k, v) in headers { + let mut name = k.as_str(); + + // We have already written the necessary headers once (above) and removed them from the map. + // If we encounter them again, then the request is considered invalid and error is returned. + // Note that we can't use `.contains()`, since `&str` does not coerce to `&String` in Rust. 
+ if insensitive.iter().any(|x| x == name) { + return Err(Error::Protocol(ProtocolError::InvalidHeader(k.clone()))); + } + + // Relates to the issue of some servers treating headers in a case-sensitive way, please see: + // https://github.com/snapview/tungstenite-rs/pull/119 (original fix of the problem) + if name == "sec-websocket-protocol" { + name = "Sec-WebSocket-Protocol"; + } + + if name == "origin" { + name = "Origin"; + } + + writeln!(req, "{}: {}\r", name, v.to_str()?).unwrap(); + } + + writeln!(req, "\r").unwrap(); + trace!("Request: {:?}", String::from_utf8_lossy(&req)); + Ok((req, key)) +} + +/// Information for handshake verification. +#[derive(Debug)] +struct VerifyData { + /// Accepted server key. + accept_key: String, +} + +impl VerifyData { + pub fn verify_response(&self, response: Response) -> Result { + // 1. If the status code received from the server is not 101, the + // client handles the response per HTTP [RFC2616] procedures. (RFC 6455) + if response.status() != StatusCode::SWITCHING_PROTOCOLS { + return Err(Error::Http(response)); + } + + let headers = response.headers(); + + // 2. If the response lacks an |Upgrade| header field or the |Upgrade| + // header field contains a value that is not an ASCII case- + // insensitive match for the value "websocket", the client MUST + // _Fail the WebSocket Connection_. (RFC 6455) + if !headers + .get("Upgrade") + .and_then(|h| h.to_str().ok()) + .map(|h| h.eq_ignore_ascii_case("websocket")) + .unwrap_or(false) + { + return Err(Error::Protocol(ProtocolError::MissingUpgradeWebSocketHeader)); + } + // 3. If the response lacks a |Connection| header field or the + // |Connection| header field doesn't contain a token that is an + // ASCII case-insensitive match for the value "Upgrade", the client + // MUST _Fail the WebSocket Connection_. 
(RFC 6455) + if !headers + .get("Connection") + .and_then(|h| h.to_str().ok()) + .map(|h| h.eq_ignore_ascii_case("Upgrade")) + .unwrap_or(false) + { + return Err(Error::Protocol(ProtocolError::MissingConnectionUpgradeHeader)); + } + // 4. If the response lacks a |Sec-WebSocket-Accept| header field or + // the |Sec-WebSocket-Accept| contains a value other than the + // base64-encoded SHA-1 of ... the client MUST _Fail the WebSocket + // Connection_. (RFC 6455) + if !headers.get("Sec-WebSocket-Accept").map(|h| h == &self.accept_key).unwrap_or(false) { + return Err(Error::Protocol(ProtocolError::SecWebSocketAcceptKeyMismatch)); + } + // 5. If the response includes a |Sec-WebSocket-Extensions| header + // field and this header field indicates the use of an extension + // that was not present in the client's handshake (the server has + // indicated an extension not requested by the client), the client + // MUST _Fail the WebSocket Connection_. (RFC 6455) + // TODO + + // 6. If the response includes a |Sec-WebSocket-Protocol| header field + // and this header field indicates the use of a subprotocol that was + // not present in the client's handshake (the server has indicated a + // subprotocol not requested by the client), the client MUST _Fail + // the WebSocket Connection_. (RFC 6455) + // TODO + + Ok(response) + } +} + +impl TryParse for Response { + fn try_parse(buf: &[u8]) -> Result> { + let mut hbuffer = [httparse::EMPTY_HEADER; MAX_HEADERS]; + let mut req = httparse::Response::new(&mut hbuffer); + Ok(match req.parse(buf)? 
{ + Status::Partial => None, + Status::Complete(size) => Some((size, Response::from_httparse(req)?)), + }) + } +} + +impl<'h, 'b: 'h> FromHttparse> for Response { + fn from_httparse(raw: httparse::Response<'h, 'b>) -> Result { + if raw.version.expect("Bug: no HTTP version") < /*1.*/1 { + return Err(Error::Protocol(ProtocolError::WrongHttpVersion)); + } + + let headers = HeaderMap::from_httparse(raw.headers)?; + + let mut response = Response::new(None); + *response.status_mut() = StatusCode::from_u16(raw.code.expect("Bug: no HTTP status code"))?; + *response.headers_mut() = headers; + // TODO: httparse only supports HTTP 0.9/1.0/1.1 but not HTTP 2.0 + // so the only valid value we could get in the response would be 1.1. + *response.version_mut() = http::Version::HTTP_11; + + Ok(response) + } +} + +/// Generate a random key for the `Sec-WebSocket-Key` header. +pub fn generate_key() -> String { + // a base64-encoded (see Section 4 of [RFC4648]) value that, + // when decoded, is 16 bytes in length (RFC 6455) + let r: [u8; 16] = rand::random(); + data_encoding::BASE64.encode(&r) +} + +#[cfg(test)] +mod tests { + use super::{super::machine::TryParse, generate_key, generate_request, Response}; + use crate::client::IntoClientRequest; + + #[test] + fn random_keys() { + let k1 = generate_key(); + println!("Generated random key 1: {}", k1); + let k2 = generate_key(); + println!("Generated random key 2: {}", k2); + assert_ne!(k1, k2); + assert_eq!(k1.len(), k2.len()); + assert_eq!(k1.len(), 24); + assert_eq!(k2.len(), 24); + assert!(k1.ends_with("==")); + assert!(k2.ends_with("==")); + assert!(k1[..22].find('=').is_none()); + assert!(k2[..22].find('=').is_none()); + } + + fn construct_expected(host: &str, key: &str) -> Vec { + format!( + "\ + GET /getCaseCount HTTP/1.1\r\n\ + Host: {host}\r\n\ + Connection: Upgrade\r\n\ + Upgrade: websocket\r\n\ + Sec-WebSocket-Version: 13\r\n\ + Sec-WebSocket-Key: {key}\r\n\ + \r\n", + host = host, + key = key + ) + .into_bytes() + } + + 
#[test] + fn request_formatting() { + let request = "ws://localhost/getCaseCount".into_client_request().unwrap(); + let (request, key) = generate_request(request).unwrap(); + let correct = construct_expected("localhost", &key); + assert_eq!(&request[..], &correct[..]); + } + + #[test] + fn request_formatting_with_host() { + let request = "wss://localhost:9001/getCaseCount".into_client_request().unwrap(); + let (request, key) = generate_request(request).unwrap(); + let correct = construct_expected("localhost:9001", &key); + assert_eq!(&request[..], &correct[..]); + } + + #[test] + fn request_formatting_with_at() { + let request = "wss://user:pass@localhost:9001/getCaseCount".into_client_request().unwrap(); + let (request, key) = generate_request(request).unwrap(); + let correct = construct_expected("localhost:9001", &key); + assert_eq!(&request[..], &correct[..]); + } + + #[test] + fn response_parsing() { + const DATA: &[u8] = b"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n"; + let (_, resp) = Response::try_parse(DATA).unwrap().unwrap(); + assert_eq!(resp.status(), http::StatusCode::OK); + assert_eq!(resp.headers().get("Content-Type").unwrap(), &b"text/html"[..],); + } + + #[test] + fn invalid_custom_request() { + let request = http::Request::builder().method("GET").body(()).unwrap(); + assert!(generate_request(request).is_err()); + } +} diff --git a/.cargo-vendor/tungstenite/src/handshake/headers.rs b/.cargo-vendor/tungstenite/src/handshake/headers.rs new file mode 100644 index 0000000000..f336c65c72 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/handshake/headers.rs @@ -0,0 +1,81 @@ +//! HTTP Request and response header handling. + +use http::header::{HeaderMap, HeaderName, HeaderValue}; +use httparse::Status; + +use super::machine::TryParse; +use crate::error::Result; + +/// Limit for the number of header lines. +pub const MAX_HEADERS: usize = 124; + +/// Trait to convert raw objects into HTTP parseables. 
+pub(crate) trait FromHttparse: Sized { + /// Convert raw object into parsed HTTP headers. + fn from_httparse(raw: T) -> Result; +} + +impl<'b: 'h, 'h> FromHttparse<&'b [httparse::Header<'h>]> for HeaderMap { + fn from_httparse(raw: &'b [httparse::Header<'h>]) -> Result { + let mut headers = HeaderMap::new(); + for h in raw { + headers.append( + HeaderName::from_bytes(h.name.as_bytes())?, + HeaderValue::from_bytes(h.value)?, + ); + } + + Ok(headers) + } +} +impl TryParse for HeaderMap { + fn try_parse(buf: &[u8]) -> Result> { + let mut hbuffer = [httparse::EMPTY_HEADER; MAX_HEADERS]; + Ok(match httparse::parse_headers(buf, &mut hbuffer)? { + Status::Partial => None, + Status::Complete((size, hdr)) => Some((size, HeaderMap::from_httparse(hdr)?)), + }) + } +} + +#[cfg(test)] +mod tests { + + use super::{super::machine::TryParse, HeaderMap}; + + #[test] + fn headers() { + const DATA: &[u8] = b"Host: foo.com\r\n\ + Connection: Upgrade\r\n\ + Upgrade: websocket\r\n\ + \r\n"; + let (_, hdr) = HeaderMap::try_parse(DATA).unwrap().unwrap(); + assert_eq!(hdr.get("Host").unwrap(), &b"foo.com"[..]); + assert_eq!(hdr.get("Upgrade").unwrap(), &b"websocket"[..]); + assert_eq!(hdr.get("Connection").unwrap(), &b"Upgrade"[..]); + } + + #[test] + fn headers_iter() { + const DATA: &[u8] = b"Host: foo.com\r\n\ + Sec-WebSocket-Extensions: permessage-deflate\r\n\ + Connection: Upgrade\r\n\ + Sec-WebSocket-ExtenSIONS: permessage-unknown\r\n\ + Upgrade: websocket\r\n\ + \r\n"; + let (_, hdr) = HeaderMap::try_parse(DATA).unwrap().unwrap(); + let mut iter = hdr.get_all("Sec-WebSocket-Extensions").iter(); + assert_eq!(iter.next().unwrap(), &b"permessage-deflate"[..]); + assert_eq!(iter.next().unwrap(), &b"permessage-unknown"[..]); + assert_eq!(iter.next(), None); + } + + #[test] + fn headers_incomplete() { + const DATA: &[u8] = b"Host: foo.com\r\n\ + Connection: Upgrade\r\n\ + Upgrade: websocket\r\n"; + let hdr = HeaderMap::try_parse(DATA).unwrap(); + assert!(hdr.is_none()); + } +} diff --git 
a/.cargo-vendor/tungstenite/src/handshake/machine.rs b/.cargo-vendor/tungstenite/src/handshake/machine.rs new file mode 100644 index 0000000000..2e3f2cbbda --- /dev/null +++ b/.cargo-vendor/tungstenite/src/handshake/machine.rs @@ -0,0 +1,178 @@ +//! WebSocket handshake machine. + +use bytes::Buf; +use log::*; +use std::io::{Cursor, Read, Write}; + +use crate::{ + error::{Error, ProtocolError, Result}, + util::NonBlockingResult, + ReadBuffer, +}; + +/// A generic handshake state machine. +#[derive(Debug)] +pub struct HandshakeMachine { + stream: Stream, + state: HandshakeState, +} + +impl HandshakeMachine { + /// Start reading data from the peer. + pub fn start_read(stream: Stream) -> Self { + Self { stream, state: HandshakeState::Reading(ReadBuffer::new(), AttackCheck::new()) } + } + /// Start writing data to the peer. + pub fn start_write>>(stream: Stream, data: D) -> Self { + HandshakeMachine { stream, state: HandshakeState::Writing(Cursor::new(data.into())) } + } + /// Returns a shared reference to the inner stream. + pub fn get_ref(&self) -> &Stream { + &self.stream + } + /// Returns a mutable reference to the inner stream. + pub fn get_mut(&mut self) -> &mut Stream { + &mut self.stream + } +} + +impl HandshakeMachine { + /// Perform a single handshake round. + pub fn single_round(mut self) -> Result> { + trace!("Doing handshake round."); + match self.state { + HandshakeState::Reading(mut buf, mut attack_check) => { + let read = buf.read_from(&mut self.stream).no_block()?; + match read { + Some(0) => Err(Error::Protocol(ProtocolError::HandshakeIncomplete)), + Some(count) => { + attack_check.check_incoming_packet_size(count)?; + // TODO: this is slow for big headers with too many small packets. + // The parser has to be reworked in order to work on streams instead + // of buffers. + Ok(if let Some((size, obj)) = Obj::try_parse(Buf::chunk(&buf))? 
{ + buf.advance(size); + RoundResult::StageFinished(StageResult::DoneReading { + result: obj, + stream: self.stream, + tail: buf.into_vec(), + }) + } else { + RoundResult::Incomplete(HandshakeMachine { + state: HandshakeState::Reading(buf, attack_check), + ..self + }) + }) + } + None => Ok(RoundResult::WouldBlock(HandshakeMachine { + state: HandshakeState::Reading(buf, attack_check), + ..self + })), + } + } + HandshakeState::Writing(mut buf) => { + assert!(buf.has_remaining()); + if let Some(size) = self.stream.write(Buf::chunk(&buf)).no_block()? { + assert!(size > 0); + buf.advance(size); + Ok(if buf.has_remaining() { + RoundResult::Incomplete(HandshakeMachine { + state: HandshakeState::Writing(buf), + ..self + }) + } else { + RoundResult::StageFinished(StageResult::DoneWriting(self.stream)) + }) + } else { + Ok(RoundResult::WouldBlock(HandshakeMachine { + state: HandshakeState::Writing(buf), + ..self + })) + } + } + } + } +} + +/// The result of the round. +#[derive(Debug)] +pub enum RoundResult { + /// Round not done, I/O would block. + WouldBlock(HandshakeMachine), + /// Round done, state unchanged. + Incomplete(HandshakeMachine), + /// Stage complete. + StageFinished(StageResult), +} + +/// The result of the stage. +#[derive(Debug)] +pub enum StageResult { + /// Reading round finished. + #[allow(missing_docs)] + DoneReading { result: Obj, stream: Stream, tail: Vec }, + /// Writing round finished. + DoneWriting(Stream), +} + +/// The parseable object. +pub trait TryParse: Sized { + /// Return Ok(None) if incomplete, Err on syntax error. + fn try_parse(data: &[u8]) -> Result>; +} + +/// The handshake state. +#[derive(Debug)] +enum HandshakeState { + /// Reading data from the peer. + Reading(ReadBuffer, AttackCheck), + /// Sending data to the peer. + Writing(Cursor>), +} + +/// Attack mitigation. Contains counters needed to prevent DoS attacks +/// and reject valid but useless headers. 
+#[derive(Debug)] +pub(crate) struct AttackCheck { + /// Number of HTTP header successful reads (TCP packets). + number_of_packets: usize, + /// Total number of bytes in HTTP header. + number_of_bytes: usize, +} + +impl AttackCheck { + /// Initialize attack checking for incoming buffer. + fn new() -> Self { + Self { number_of_packets: 0, number_of_bytes: 0 } + } + + /// Check the size of an incoming packet. To be called immediately after `read()` + /// passing its returned bytes count as `size`. + fn check_incoming_packet_size(&mut self, size: usize) -> Result<()> { + self.number_of_packets += 1; + self.number_of_bytes += size; + + // TODO: these values are hardcoded. Instead of making them configurable, + // rework the way HTTP header is parsed to remove this check at all. + const MAX_BYTES: usize = 65536; + const MAX_PACKETS: usize = 512; + const MIN_PACKET_SIZE: usize = 128; + const MIN_PACKET_CHECK_THRESHOLD: usize = 64; + + if self.number_of_bytes > MAX_BYTES { + return Err(Error::AttackAttempt); + } + + if self.number_of_packets > MAX_PACKETS { + return Err(Error::AttackAttempt); + } + + if self.number_of_packets > MIN_PACKET_CHECK_THRESHOLD + && self.number_of_packets * MIN_PACKET_SIZE > self.number_of_bytes + { + return Err(Error::AttackAttempt); + } + + Ok(()) + } +} diff --git a/.cargo-vendor/tungstenite/src/handshake/mod.rs b/.cargo-vendor/tungstenite/src/handshake/mod.rs new file mode 100644 index 0000000000..a8db9a98e4 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/handshake/mod.rs @@ -0,0 +1,135 @@ +//! WebSocket handshake control. + +pub mod client; +pub mod headers; +pub mod machine; +pub mod server; + +use std::{ + error::Error as ErrorTrait, + fmt, + io::{Read, Write}, +}; + +use sha1::{Digest, Sha1}; + +use self::machine::{HandshakeMachine, RoundResult, StageResult, TryParse}; +use crate::error::Error; + +/// A WebSocket handshake. 
+#[derive(Debug)] +pub struct MidHandshake { + role: Role, + machine: HandshakeMachine, +} + +impl MidHandshake { + /// Allow access to machine + pub fn get_ref(&self) -> &HandshakeMachine { + &self.machine + } + + /// Allow mutable access to machine + pub fn get_mut(&mut self) -> &mut HandshakeMachine { + &mut self.machine + } + + /// Restarts the handshake process. + pub fn handshake(mut self) -> Result> { + let mut mach = self.machine; + loop { + mach = match mach.single_round()? { + RoundResult::WouldBlock(m) => { + return Err(HandshakeError::Interrupted(MidHandshake { machine: m, ..self })) + } + RoundResult::Incomplete(m) => m, + RoundResult::StageFinished(s) => match self.role.stage_finished(s)? { + ProcessingResult::Continue(m) => m, + ProcessingResult::Done(result) => return Ok(result), + }, + } + } + } +} + +/// A handshake result. +pub enum HandshakeError { + /// Handshake was interrupted (would block). + Interrupted(MidHandshake), + /// Handshake failed. + Failure(Error), +} + +impl fmt::Debug for HandshakeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + HandshakeError::Interrupted(_) => write!(f, "HandshakeError::Interrupted(...)"), + HandshakeError::Failure(ref e) => write!(f, "HandshakeError::Failure({:?})", e), + } + } +} + +impl fmt::Display for HandshakeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + HandshakeError::Interrupted(_) => write!(f, "Interrupted handshake (WouldBlock)"), + HandshakeError::Failure(ref e) => write!(f, "{}", e), + } + } +} + +impl ErrorTrait for HandshakeError {} + +impl From for HandshakeError { + fn from(err: Error) -> Self { + HandshakeError::Failure(err) + } +} + +/// Handshake role. 
+pub trait HandshakeRole { + #[doc(hidden)] + type IncomingData: TryParse; + #[doc(hidden)] + type InternalStream: Read + Write; + #[doc(hidden)] + type FinalResult; + #[doc(hidden)] + fn stage_finished( + &mut self, + finish: StageResult, + ) -> Result, Error>; +} + +/// Stage processing result. +#[doc(hidden)] +#[derive(Debug)] +pub enum ProcessingResult { + Continue(HandshakeMachine), + Done(FinalResult), +} + +/// Derive the `Sec-WebSocket-Accept` response header from a `Sec-WebSocket-Key` request header. +/// +/// This function can be used to perform a handshake before passing a raw TCP stream to +/// [`WebSocket::from_raw_socket`][crate::protocol::WebSocket::from_raw_socket]. +pub fn derive_accept_key(request_key: &[u8]) -> String { + // ... field is constructed by concatenating /key/ ... + // ... with the string "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" (RFC 6455) + const WS_GUID: &[u8] = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; + let mut sha1 = Sha1::default(); + sha1.update(request_key); + sha1.update(WS_GUID); + data_encoding::BASE64.encode(&sha1.finalize()) +} + +#[cfg(test)] +mod tests { + use super::derive_accept_key; + + #[test] + fn key_conversion() { + // example from RFC 6455 + assert_eq!(derive_accept_key(b"dGhlIHNhbXBsZSBub25jZQ=="), "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="); + } +} diff --git a/.cargo-vendor/tungstenite/src/handshake/server.rs b/.cargo-vendor/tungstenite/src/handshake/server.rs new file mode 100644 index 0000000000..bc072ce9ed --- /dev/null +++ b/.cargo-vendor/tungstenite/src/handshake/server.rs @@ -0,0 +1,324 @@ +//! Server handshake machine. 
+ +use std::{ + io::{self, Read, Write}, + marker::PhantomData, + result::Result as StdResult, +}; + +use http::{ + response::Builder, HeaderMap, Request as HttpRequest, Response as HttpResponse, StatusCode, +}; +use httparse::Status; +use log::*; + +use super::{ + derive_accept_key, + headers::{FromHttparse, MAX_HEADERS}, + machine::{HandshakeMachine, StageResult, TryParse}, + HandshakeRole, MidHandshake, ProcessingResult, +}; +use crate::{ + error::{Error, ProtocolError, Result}, + protocol::{Role, WebSocket, WebSocketConfig}, +}; + +/// Server request type. +pub type Request = HttpRequest<()>; + +/// Server response type. +pub type Response = HttpResponse<()>; + +/// Server error response type. +pub type ErrorResponse = HttpResponse>; + +fn create_parts(request: &HttpRequest) -> Result { + if request.method() != http::Method::GET { + return Err(Error::Protocol(ProtocolError::WrongHttpMethod)); + } + + if request.version() < http::Version::HTTP_11 { + return Err(Error::Protocol(ProtocolError::WrongHttpVersion)); + } + + if !request + .headers() + .get("Connection") + .and_then(|h| h.to_str().ok()) + .map(|h| h.split(|c| c == ' ' || c == ',').any(|p| p.eq_ignore_ascii_case("Upgrade"))) + .unwrap_or(false) + { + return Err(Error::Protocol(ProtocolError::MissingConnectionUpgradeHeader)); + } + + if !request + .headers() + .get("Upgrade") + .and_then(|h| h.to_str().ok()) + .map(|h| h.eq_ignore_ascii_case("websocket")) + .unwrap_or(false) + { + return Err(Error::Protocol(ProtocolError::MissingUpgradeWebSocketHeader)); + } + + if !request.headers().get("Sec-WebSocket-Version").map(|h| h == "13").unwrap_or(false) { + return Err(Error::Protocol(ProtocolError::MissingSecWebSocketVersionHeader)); + } + + let key = request + .headers() + .get("Sec-WebSocket-Key") + .ok_or(Error::Protocol(ProtocolError::MissingSecWebSocketKey))?; + + let builder = Response::builder() + .status(StatusCode::SWITCHING_PROTOCOLS) + .version(request.version()) + .header("Connection", "Upgrade") + 
.header("Upgrade", "websocket") + .header("Sec-WebSocket-Accept", derive_accept_key(key.as_bytes())); + + Ok(builder) +} + +/// Create a response for the request. +pub fn create_response(request: &Request) -> Result { + Ok(create_parts(request)?.body(())?) +} + +/// Create a response for the request with a custom body. +pub fn create_response_with_body( + request: &HttpRequest, + generate_body: impl FnOnce() -> T, +) -> Result> { + Ok(create_parts(request)?.body(generate_body())?) +} + +/// Write `response` to the stream `w`. +pub fn write_response(mut w: impl io::Write, response: &HttpResponse) -> Result<()> { + writeln!( + w, + "{version:?} {status}\r", + version = response.version(), + status = response.status() + )?; + + for (k, v) in response.headers() { + writeln!(w, "{}: {}\r", k, v.to_str()?)?; + } + + writeln!(w, "\r")?; + + Ok(()) +} + +impl TryParse for Request { + fn try_parse(buf: &[u8]) -> Result> { + let mut hbuffer = [httparse::EMPTY_HEADER; MAX_HEADERS]; + let mut req = httparse::Request::new(&mut hbuffer); + Ok(match req.parse(buf)? { + Status::Partial => None, + Status::Complete(size) => Some((size, Request::from_httparse(req)?)), + }) + } +} + +impl<'h, 'b: 'h> FromHttparse> for Request { + fn from_httparse(raw: httparse::Request<'h, 'b>) -> Result { + if raw.method.expect("Bug: no method in header") != "GET" { + return Err(Error::Protocol(ProtocolError::WrongHttpMethod)); + } + + if raw.version.expect("Bug: no HTTP version") < /*1.*/1 { + return Err(Error::Protocol(ProtocolError::WrongHttpVersion)); + } + + let headers = HeaderMap::from_httparse(raw.headers)?; + + let mut request = Request::new(()); + *request.method_mut() = http::Method::GET; + *request.headers_mut() = headers; + *request.uri_mut() = raw.path.expect("Bug: no path in header").parse()?; + // TODO: httparse only supports HTTP 0.9/1.0/1.1 but not HTTP 2.0 + // so the only valid value we could get in the response would be 1.1. 
+ *request.version_mut() = http::Version::HTTP_11; + + Ok(request) + } +} + +/// The callback trait. +/// +/// The callback is called when the server receives an incoming WebSocket +/// handshake request from the client. Specifying a callback allows you to analyze incoming headers +/// and add additional headers to the response that server sends to the client and/or reject the +/// connection based on the incoming headers. +pub trait Callback: Sized { + /// Called whenever the server read the request from the client and is ready to reply to it. + /// May return additional reply headers. + /// Returning an error resulting in rejecting the incoming connection. + fn on_request( + self, + request: &Request, + response: Response, + ) -> StdResult; +} + +impl Callback for F +where + F: FnOnce(&Request, Response) -> StdResult, +{ + fn on_request( + self, + request: &Request, + response: Response, + ) -> StdResult { + self(request, response) + } +} + +/// Stub for callback that does nothing. +#[derive(Clone, Copy, Debug)] +pub struct NoCallback; + +impl Callback for NoCallback { + fn on_request( + self, + _request: &Request, + response: Response, + ) -> StdResult { + Ok(response) + } +} + +/// Server handshake role. +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub struct ServerHandshake { + /// Callback which is called whenever the server read the request from the client and is ready + /// to reply to it. The callback returns an optional headers which will be added to the reply + /// which the server sends to the user. + callback: Option, + /// WebSocket configuration. + config: Option, + /// Error code/flag. If set, an error will be returned after sending response to the client. + error_response: Option, + /// Internal stream type. + _marker: PhantomData, +} + +impl ServerHandshake { + /// Start server handshake. 
`callback` specifies a custom callback which the user can pass to + /// the handshake, this callback will be called when the a websocket client connects to the + /// server, you can specify the callback if you want to add additional header to the client + /// upon join based on the incoming headers. + pub fn start(stream: S, callback: C, config: Option) -> MidHandshake { + trace!("Server handshake initiated."); + MidHandshake { + machine: HandshakeMachine::start_read(stream), + role: ServerHandshake { + callback: Some(callback), + config, + error_response: None, + _marker: PhantomData, + }, + } + } +} + +impl HandshakeRole for ServerHandshake { + type IncomingData = Request; + type InternalStream = S; + type FinalResult = WebSocket; + + fn stage_finished( + &mut self, + finish: StageResult, + ) -> Result> { + Ok(match finish { + StageResult::DoneReading { stream, result, tail } => { + if !tail.is_empty() { + return Err(Error::Protocol(ProtocolError::JunkAfterRequest)); + } + + let response = create_response(&result)?; + let callback_result = if let Some(callback) = self.callback.take() { + callback.on_request(&result, response) + } else { + Ok(response) + }; + + match callback_result { + Ok(response) => { + let mut output = vec![]; + write_response(&mut output, &response)?; + ProcessingResult::Continue(HandshakeMachine::start_write(stream, output)) + } + + Err(resp) => { + if resp.status().is_success() { + return Err(Error::Protocol(ProtocolError::CustomResponseSuccessful)); + } + + self.error_response = Some(resp); + let resp = self.error_response.as_ref().unwrap(); + + let mut output = vec![]; + write_response(&mut output, resp)?; + + if let Some(body) = resp.body() { + output.extend_from_slice(body.as_bytes()); + } + + ProcessingResult::Continue(HandshakeMachine::start_write(stream, output)) + } + } + } + + StageResult::DoneWriting(stream) => { + if let Some(err) = self.error_response.take() { + debug!("Server handshake failed."); + + let (parts, body) = 
err.into_parts(); + let body = body.map(|b| b.as_bytes().to_vec()); + return Err(Error::Http(http::Response::from_parts(parts, body))); + } else { + debug!("Server handshake done."); + let websocket = WebSocket::from_raw_socket(stream, Role::Server, self.config); + ProcessingResult::Done(websocket) + } + } + }) + } +} + +#[cfg(test)] +mod tests { + use super::{super::machine::TryParse, create_response, Request}; + + #[test] + fn request_parsing() { + const DATA: &[u8] = b"GET /script.ws HTTP/1.1\r\nHost: foo.com\r\n\r\n"; + let (_, req) = Request::try_parse(DATA).unwrap().unwrap(); + assert_eq!(req.uri().path(), "/script.ws"); + assert_eq!(req.headers().get("Host").unwrap(), &b"foo.com"[..]); + } + + #[test] + fn request_replying() { + const DATA: &[u8] = b"\ + GET /script.ws HTTP/1.1\r\n\ + Host: foo.com\r\n\ + Connection: upgrade\r\n\ + Upgrade: websocket\r\n\ + Sec-WebSocket-Version: 13\r\n\ + Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n\ + \r\n"; + let (_, req) = Request::try_parse(DATA).unwrap().unwrap(); + let response = create_response(&req).unwrap(); + + assert_eq!( + response.headers().get("Sec-WebSocket-Accept").unwrap(), + b"s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".as_ref() + ); + } +} diff --git a/.cargo-vendor/tungstenite/src/lib.rs b/.cargo-vendor/tungstenite/src/lib.rs new file mode 100644 index 0000000000..4fdf0a666a --- /dev/null +++ b/.cargo-vendor/tungstenite/src/lib.rs @@ -0,0 +1,48 @@ +//! Lightweight, flexible WebSockets for Rust. 
+#![deny( + missing_docs, + missing_copy_implementations, + missing_debug_implementations, + trivial_casts, + trivial_numeric_casts, + unstable_features, + unused_must_use, + unused_mut, + unused_imports, + unused_import_braces +)] + +#[cfg(feature = "handshake")] +pub use http; + +pub mod buffer; +#[cfg(feature = "handshake")] +pub mod client; +pub mod error; +#[cfg(feature = "handshake")] +pub mod handshake; +pub mod protocol; +#[cfg(feature = "handshake")] +mod server; +pub mod stream; +#[cfg(all(any(feature = "native-tls", feature = "__rustls-tls"), feature = "handshake"))] +mod tls; +pub mod util; + +const READ_BUFFER_CHUNK_SIZE: usize = 4096; +type ReadBuffer = buffer::ReadBuffer; + +pub use crate::{ + error::{Error, Result}, + protocol::{Message, WebSocket}, +}; + +#[cfg(feature = "handshake")] +pub use crate::{ + client::{client, connect}, + handshake::{client::ClientHandshake, server::ServerHandshake, HandshakeError}, + server::{accept, accept_hdr, accept_hdr_with_config, accept_with_config}, +}; + +#[cfg(all(any(feature = "native-tls", feature = "__rustls-tls"), feature = "handshake"))] +pub use tls::{client_tls, client_tls_with_config, Connector}; diff --git a/.cargo-vendor/tungstenite/src/protocol/frame/coding.rs b/.cargo-vendor/tungstenite/src/protocol/frame/coding.rs new file mode 100644 index 0000000000..827b7caeee --- /dev/null +++ b/.cargo-vendor/tungstenite/src/protocol/frame/coding.rs @@ -0,0 +1,291 @@ +//! Various codes defined in RFC 6455. + +use std::{ + convert::{From, Into}, + fmt, +}; + +/// WebSocket message opcode as in RFC 6455. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum OpCode { + /// Data (text or binary). + Data(Data), + /// Control message (close, ping, pong). 
+ Control(Control), +} + +/// Data opcodes as in RFC 6455 +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum Data { + /// 0x0 denotes a continuation frame + Continue, + /// 0x1 denotes a text frame + Text, + /// 0x2 denotes a binary frame + Binary, + /// 0x3-7 are reserved for further non-control frames + Reserved(u8), +} + +/// Control opcodes as in RFC 6455 +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum Control { + /// 0x8 denotes a connection close + Close, + /// 0x9 denotes a ping + Ping, + /// 0xa denotes a pong + Pong, + /// 0xb-f are reserved for further control frames + Reserved(u8), +} + +impl fmt::Display for Data { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Data::Continue => write!(f, "CONTINUE"), + Data::Text => write!(f, "TEXT"), + Data::Binary => write!(f, "BINARY"), + Data::Reserved(x) => write!(f, "RESERVED_DATA_{}", x), + } + } +} + +impl fmt::Display for Control { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Control::Close => write!(f, "CLOSE"), + Control::Ping => write!(f, "PING"), + Control::Pong => write!(f, "PONG"), + Control::Reserved(x) => write!(f, "RESERVED_CONTROL_{}", x), + } + } +} + +impl fmt::Display for OpCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + OpCode::Data(d) => d.fmt(f), + OpCode::Control(c) => c.fmt(f), + } + } +} + +impl From for u8 { + fn from(code: OpCode) -> Self { + use self::{ + Control::{Close, Ping, Pong}, + Data::{Binary, Continue, Text}, + OpCode::*, + }; + match code { + Data(Continue) => 0, + Data(Text) => 1, + Data(Binary) => 2, + Data(self::Data::Reserved(i)) => i, + + Control(Close) => 8, + Control(Ping) => 9, + Control(Pong) => 10, + Control(self::Control::Reserved(i)) => i, + } + } +} + +impl From for OpCode { + fn from(byte: u8) -> OpCode { + use self::{ + Control::{Close, Ping, Pong}, + Data::{Binary, Continue, Text}, + OpCode::*, + }; + match byte { + 0 => Data(Continue), + 1 => Data(Text), + 2 
=> Data(Binary), + i @ 3..=7 => Data(self::Data::Reserved(i)), + 8 => Control(Close), + 9 => Control(Ping), + 10 => Control(Pong), + i @ 11..=15 => Control(self::Control::Reserved(i)), + _ => panic!("Bug: OpCode out of range"), + } + } +} + +use self::CloseCode::*; +/// Status code used to indicate why an endpoint is closing the WebSocket connection. +#[derive(Debug, Eq, PartialEq, Clone, Copy)] +pub enum CloseCode { + /// Indicates a normal closure, meaning that the purpose for + /// which the connection was established has been fulfilled. + Normal, + /// Indicates that an endpoint is "going away", such as a server + /// going down or a browser having navigated away from a page. + Away, + /// Indicates that an endpoint is terminating the connection due + /// to a protocol error. + Protocol, + /// Indicates that an endpoint is terminating the connection + /// because it has received a type of data it cannot accept (e.g., an + /// endpoint that understands only text data MAY send this if it + /// receives a binary message). + Unsupported, + /// Indicates that no status code was included in a closing frame. This + /// close code makes it possible to use a single method, `on_close` to + /// handle even cases where no close code was provided. + Status, + /// Indicates an abnormal closure. If the abnormal closure was due to an + /// error, this close code will not be used. Instead, the `on_error` method + /// of the handler will be called with the error. However, if the connection + /// is simply dropped, without an error, this close code will be sent to the + /// handler. + Abnormal, + /// Indicates that an endpoint is terminating the connection + /// because it has received data within a message that was not + /// consistent with the type of the message (e.g., non-UTF-8 \[RFC3629\] + /// data within a text message). + Invalid, + /// Indicates that an endpoint is terminating the connection + /// because it has received a message that violates its policy. 
This + /// is a generic status code that can be returned when there is no + /// other more suitable status code (e.g., Unsupported or Size) or if there + /// is a need to hide specific details about the policy. + Policy, + /// Indicates that an endpoint is terminating the connection + /// because it has received a message that is too big for it to + /// process. + Size, + /// Indicates that an endpoint (client) is terminating the + /// connection because it has expected the server to negotiate one or + /// more extension, but the server didn't return them in the response + /// message of the WebSocket handshake. The list of extensions that + /// are needed should be given as the reason for closing. + /// Note that this status code is not used by the server, because it + /// can fail the WebSocket handshake instead. + Extension, + /// Indicates that a server is terminating the connection because + /// it encountered an unexpected condition that prevented it from + /// fulfilling the request. + Error, + /// Indicates that the server is restarting. A client may choose to reconnect, + /// and if it does, it should use a randomized delay of 5-30 seconds between attempts. + Restart, + /// Indicates that the server is overloaded and the client should either connect + /// to a different IP (when multiple targets exist), or reconnect to the same IP + /// when a user has performed an action. + Again, + #[doc(hidden)] + Tls, + #[doc(hidden)] + Reserved(u16), + #[doc(hidden)] + Iana(u16), + #[doc(hidden)] + Library(u16), + #[doc(hidden)] + Bad(u16), +} + +impl CloseCode { + /// Check if this CloseCode is allowed. 
+ pub fn is_allowed(self) -> bool { + !matches!(self, Bad(_) | Reserved(_) | Status | Abnormal | Tls) + } +} + +impl fmt::Display for CloseCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let code: u16 = self.into(); + write!(f, "{}", code) + } +} + +impl From for u16 { + fn from(code: CloseCode) -> u16 { + match code { + Normal => 1000, + Away => 1001, + Protocol => 1002, + Unsupported => 1003, + Status => 1005, + Abnormal => 1006, + Invalid => 1007, + Policy => 1008, + Size => 1009, + Extension => 1010, + Error => 1011, + Restart => 1012, + Again => 1013, + Tls => 1015, + Reserved(code) => code, + Iana(code) => code, + Library(code) => code, + Bad(code) => code, + } + } +} + +impl<'t> From<&'t CloseCode> for u16 { + fn from(code: &'t CloseCode) -> u16 { + (*code).into() + } +} + +impl From for CloseCode { + fn from(code: u16) -> CloseCode { + match code { + 1000 => Normal, + 1001 => Away, + 1002 => Protocol, + 1003 => Unsupported, + 1005 => Status, + 1006 => Abnormal, + 1007 => Invalid, + 1008 => Policy, + 1009 => Size, + 1010 => Extension, + 1011 => Error, + 1012 => Restart, + 1013 => Again, + 1015 => Tls, + 1..=999 => Bad(code), + 1016..=2999 => Reserved(code), + 3000..=3999 => Iana(code), + 4000..=4999 => Library(code), + _ => Bad(code), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn opcode_from_u8() { + let byte = 2u8; + assert_eq!(OpCode::from(byte), OpCode::Data(Data::Binary)); + } + + #[test] + fn opcode_into_u8() { + let text = OpCode::Data(Data::Text); + let byte: u8 = text.into(); + assert_eq!(byte, 1u8); + } + + #[test] + fn closecode_from_u16() { + let byte = 1008u16; + assert_eq!(CloseCode::from(byte), CloseCode::Policy); + } + + #[test] + fn closecode_into_u16() { + let text = CloseCode::Away; + let byte: u16 = text.into(); + assert_eq!(byte, 1001u16); + assert_eq!(u16::from(text), 1001u16); + } +} diff --git a/.cargo-vendor/tungstenite/src/protocol/frame/frame.rs 
b/.cargo-vendor/tungstenite/src/protocol/frame/frame.rs new file mode 100644 index 0000000000..6b797a92b1 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/protocol/frame/frame.rs @@ -0,0 +1,486 @@ +use byteorder::{NetworkEndian, ReadBytesExt}; +use log::*; +use std::{ + borrow::Cow, + default::Default, + fmt, + io::{Cursor, ErrorKind, Read, Write}, + result::Result as StdResult, + str::Utf8Error, + string::{FromUtf8Error, String}, +}; + +use super::{ + coding::{CloseCode, Control, Data, OpCode}, + mask::{apply_mask, generate_mask}, +}; +use crate::error::{Error, ProtocolError, Result}; + +/// A struct representing the close command. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct CloseFrame<'t> { + /// The reason as a code. + pub code: CloseCode, + /// The reason as text string. + pub reason: Cow<'t, str>, +} + +impl<'t> CloseFrame<'t> { + /// Convert into a owned string. + pub fn into_owned(self) -> CloseFrame<'static> { + CloseFrame { code: self.code, reason: self.reason.into_owned().into() } + } +} + +impl<'t> fmt::Display for CloseFrame<'t> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} ({})", self.reason, self.code) + } +} + +/// A struct representing a WebSocket frame header. +#[allow(missing_copy_implementations)] +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct FrameHeader { + /// Indicates that the frame is the last one of a possibly fragmented message. + pub is_final: bool, + /// Reserved for protocol extensions. + pub rsv1: bool, + /// Reserved for protocol extensions. + pub rsv2: bool, + /// Reserved for protocol extensions. + pub rsv3: bool, + /// WebSocket protocol opcode. + pub opcode: OpCode, + /// A frame mask, if any. 
+ pub mask: Option<[u8; 4]>, +} + +impl Default for FrameHeader { + fn default() -> Self { + FrameHeader { + is_final: true, + rsv1: false, + rsv2: false, + rsv3: false, + opcode: OpCode::Control(Control::Close), + mask: None, + } + } +} + +impl FrameHeader { + /// Parse a header from an input stream. + /// Returns `None` if insufficient data and does not consume anything in this case. + /// Payload size is returned along with the header. + pub fn parse(cursor: &mut Cursor>) -> Result> { + let initial = cursor.position(); + match Self::parse_internal(cursor) { + ret @ Ok(None) => { + cursor.set_position(initial); + ret + } + ret => ret, + } + } + + /// Get the size of the header formatted with given payload length. + #[allow(clippy::len_without_is_empty)] + pub fn len(&self, length: u64) -> usize { + 2 + LengthFormat::for_length(length).extra_bytes() + if self.mask.is_some() { 4 } else { 0 } + } + + /// Format a header for given payload size. + pub fn format(&self, length: u64, output: &mut impl Write) -> Result<()> { + let code: u8 = self.opcode.into(); + + let one = { + code | if self.is_final { 0x80 } else { 0 } + | if self.rsv1 { 0x40 } else { 0 } + | if self.rsv2 { 0x20 } else { 0 } + | if self.rsv3 { 0x10 } else { 0 } + }; + + let lenfmt = LengthFormat::for_length(length); + + let two = { lenfmt.length_byte() | if self.mask.is_some() { 0x80 } else { 0 } }; + + output.write_all(&[one, two])?; + match lenfmt { + LengthFormat::U8(_) => (), + LengthFormat::U16 => { + output.write_all(&(length as u16).to_be_bytes())?; + } + LengthFormat::U64 => { + output.write_all(&length.to_be_bytes())?; + } + } + + if let Some(ref mask) = self.mask { + output.write_all(mask)? + } + + Ok(()) + } + + /// Generate a random frame mask and store this in the header. + /// + /// Of course this does not change frame contents. It just generates a mask. 
+ pub(crate) fn set_random_mask(&mut self) { + self.mask = Some(generate_mask()) + } +} + +impl FrameHeader { + /// Internal parse engine. + /// Returns `None` if insufficient data. + /// Payload size is returned along with the header. + fn parse_internal(cursor: &mut impl Read) -> Result> { + let (first, second) = { + let mut head = [0u8; 2]; + if cursor.read(&mut head)? != 2 { + return Ok(None); + } + trace!("Parsed headers {:?}", head); + (head[0], head[1]) + }; + + trace!("First: {:b}", first); + trace!("Second: {:b}", second); + + let is_final = first & 0x80 != 0; + + let rsv1 = first & 0x40 != 0; + let rsv2 = first & 0x20 != 0; + let rsv3 = first & 0x10 != 0; + + let opcode = OpCode::from(first & 0x0F); + trace!("Opcode: {:?}", opcode); + + let masked = second & 0x80 != 0; + trace!("Masked: {:?}", masked); + + let length = { + let length_byte = second & 0x7F; + let length_length = LengthFormat::for_byte(length_byte).extra_bytes(); + if length_length > 0 { + match cursor.read_uint::(length_length) { + Err(ref err) if err.kind() == ErrorKind::UnexpectedEof => { + return Ok(None); + } + Err(err) => { + return Err(err.into()); + } + Ok(read) => read, + } + } else { + u64::from(length_byte) + } + }; + + let mask = if masked { + let mut mask_bytes = [0u8; 4]; + if cursor.read(&mut mask_bytes)? != 4 { + return Ok(None); + } else { + Some(mask_bytes) + } + } else { + None + }; + + // Disallow bad opcode + match opcode { + OpCode::Control(Control::Reserved(_)) | OpCode::Data(Data::Reserved(_)) => { + return Err(Error::Protocol(ProtocolError::InvalidOpcode(first & 0x0F))) + } + _ => (), + } + + let hdr = FrameHeader { is_final, rsv1, rsv2, rsv3, opcode, mask }; + + Ok(Some((hdr, length))) + } +} + +/// A struct representing a WebSocket frame. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct Frame { + header: FrameHeader, + payload: Vec, +} + +impl Frame { + /// Get the length of the frame. + /// This is the length of the header + the length of the payload. 
+ #[inline] + pub fn len(&self) -> usize { + let length = self.payload.len(); + self.header.len(length as u64) + length + } + + /// Check if the frame is empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Get a reference to the frame's header. + #[inline] + pub fn header(&self) -> &FrameHeader { + &self.header + } + + /// Get a mutable reference to the frame's header. + #[inline] + pub fn header_mut(&mut self) -> &mut FrameHeader { + &mut self.header + } + + /// Get a reference to the frame's payload. + #[inline] + pub fn payload(&self) -> &Vec { + &self.payload + } + + /// Get a mutable reference to the frame's payload. + #[inline] + pub fn payload_mut(&mut self) -> &mut Vec { + &mut self.payload + } + + /// Test whether the frame is masked. + #[inline] + pub(crate) fn is_masked(&self) -> bool { + self.header.mask.is_some() + } + + /// Generate a random mask for the frame. + /// + /// This just generates a mask, payload is not changed. The actual masking is performed + /// either on `format()` or on `apply_mask()` call. + #[inline] + pub(crate) fn set_random_mask(&mut self) { + self.header.set_random_mask() + } + + /// This method unmasks the payload and should only be called on frames that are actually + /// masked. In other words, those frames that have just been received from a client endpoint. + #[inline] + pub(crate) fn apply_mask(&mut self) { + if let Some(mask) = self.header.mask.take() { + apply_mask(&mut self.payload, mask) + } + } + + /// Consume the frame into its payload as binary. + #[inline] + pub fn into_data(self) -> Vec { + self.payload + } + + /// Consume the frame into its payload as string. + #[inline] + pub fn into_string(self) -> StdResult { + String::from_utf8(self.payload) + } + + /// Get frame payload as `&str`. + #[inline] + pub fn to_text(&self) -> Result<&str, Utf8Error> { + std::str::from_utf8(&self.payload) + } + + /// Consume the frame into a closing frame. 
+ #[inline] + pub(crate) fn into_close(self) -> Result>> { + match self.payload.len() { + 0 => Ok(None), + 1 => Err(Error::Protocol(ProtocolError::InvalidCloseSequence)), + _ => { + let mut data = self.payload; + let code = u16::from_be_bytes([data[0], data[1]]).into(); + data.drain(0..2); + let text = String::from_utf8(data)?; + Ok(Some(CloseFrame { code, reason: text.into() })) + } + } + } + + /// Create a new data frame. + #[inline] + pub fn message(data: Vec, opcode: OpCode, is_final: bool) -> Frame { + debug_assert!(matches!(opcode, OpCode::Data(_)), "Invalid opcode for data frame."); + + Frame { header: FrameHeader { is_final, opcode, ..FrameHeader::default() }, payload: data } + } + + /// Create a new Pong control frame. + #[inline] + pub fn pong(data: Vec) -> Frame { + Frame { + header: FrameHeader { + opcode: OpCode::Control(Control::Pong), + ..FrameHeader::default() + }, + payload: data, + } + } + + /// Create a new Ping control frame. + #[inline] + pub fn ping(data: Vec) -> Frame { + Frame { + header: FrameHeader { + opcode: OpCode::Control(Control::Ping), + ..FrameHeader::default() + }, + payload: data, + } + } + + /// Create a new Close control frame. + #[inline] + pub fn close(msg: Option) -> Frame { + let payload = if let Some(CloseFrame { code, reason }) = msg { + let mut p = Vec::with_capacity(reason.as_bytes().len() + 2); + p.extend(u16::from(code).to_be_bytes()); + p.extend_from_slice(reason.as_bytes()); + p + } else { + Vec::new() + }; + + Frame { header: FrameHeader::default(), payload } + } + + /// Create a frame from given header and data. 
+ pub fn from_payload(header: FrameHeader, payload: Vec) -> Self { + Frame { header, payload } + } + + /// Write a frame out to a buffer + pub fn format(mut self, output: &mut impl Write) -> Result<()> { + self.header.format(self.payload.len() as u64, output)?; + self.apply_mask(); + output.write_all(self.payload())?; + Ok(()) + } +} + +impl fmt::Display for Frame { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use std::fmt::Write; + + write!( + f, + " + +final: {} +reserved: {} {} {} +opcode: {} +length: {} +payload length: {} +payload: 0x{} + ", + self.header.is_final, + self.header.rsv1, + self.header.rsv2, + self.header.rsv3, + self.header.opcode, + // self.mask.map(|mask| format!("{:?}", mask)).unwrap_or("NONE".into()), + self.len(), + self.payload.len(), + self.payload.iter().fold(String::new(), |mut output, byte| { + _ = write!(output, "{byte:02x}"); + output + }) + ) + } +} + +/// Handling of the length format. +enum LengthFormat { + U8(u8), + U16, + U64, +} + +impl LengthFormat { + /// Get the length format for a given data size. + #[inline] + fn for_length(length: u64) -> Self { + if length < 126 { + LengthFormat::U8(length as u8) + } else if length < 65536 { + LengthFormat::U16 + } else { + LengthFormat::U64 + } + } + + /// Get the size of the length encoding. + #[inline] + fn extra_bytes(&self) -> usize { + match *self { + LengthFormat::U8(_) => 0, + LengthFormat::U16 => 2, + LengthFormat::U64 => 8, + } + } + + /// Encode the given length. + #[inline] + fn length_byte(&self) -> u8 { + match *self { + LengthFormat::U8(b) => b, + LengthFormat::U16 => 126, + LengthFormat::U64 => 127, + } + } + + /// Get the length format for a given length byte. 
+ #[inline] + fn for_byte(byte: u8) -> Self { + match byte & 0x7F { + 126 => LengthFormat::U16, + 127 => LengthFormat::U64, + b => LengthFormat::U8(b), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use super::super::coding::{Data, OpCode}; + use std::io::Cursor; + + #[test] + fn parse() { + let mut raw: Cursor> = + Cursor::new(vec![0x82, 0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07]); + let (header, length) = FrameHeader::parse(&mut raw).unwrap().unwrap(); + assert_eq!(length, 7); + let mut payload = Vec::new(); + raw.read_to_end(&mut payload).unwrap(); + let frame = Frame::from_payload(header, payload); + assert_eq!(frame.into_data(), vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07]); + } + + #[test] + fn format() { + let frame = Frame::ping(vec![0x01, 0x02]); + let mut buf = Vec::with_capacity(frame.len()); + frame.format(&mut buf).unwrap(); + assert_eq!(buf, vec![0x89, 0x02, 0x01, 0x02]); + } + + #[test] + fn display() { + let f = Frame::message("hi there".into(), OpCode::Data(Data::Text), true); + let view = format!("{}", f); + assert!(view.contains("payload:")); + } +} diff --git a/.cargo-vendor/tungstenite/src/protocol/frame/mask.rs b/.cargo-vendor/tungstenite/src/protocol/frame/mask.rs new file mode 100644 index 0000000000..ff6eb759c4 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/protocol/frame/mask.rs @@ -0,0 +1,73 @@ +/// Generate a random frame mask. +#[inline] +pub fn generate_mask() -> [u8; 4] { + rand::random() +} + +/// Mask/unmask a frame. +#[inline] +pub fn apply_mask(buf: &mut [u8], mask: [u8; 4]) { + apply_mask_fast32(buf, mask) +} + +/// A safe unoptimized mask application. +#[inline] +fn apply_mask_fallback(buf: &mut [u8], mask: [u8; 4]) { + for (i, byte) in buf.iter_mut().enumerate() { + *byte ^= mask[i & 3]; + } +} + +/// Faster version of `apply_mask()` which operates on 4-byte blocks. 
+#[inline] +pub fn apply_mask_fast32(buf: &mut [u8], mask: [u8; 4]) { + let mask_u32 = u32::from_ne_bytes(mask); + + let (prefix, words, suffix) = unsafe { buf.align_to_mut::() }; + apply_mask_fallback(prefix, mask); + let head = prefix.len() & 3; + let mask_u32 = if head > 0 { + if cfg!(target_endian = "big") { + mask_u32.rotate_left(8 * head as u32) + } else { + mask_u32.rotate_right(8 * head as u32) + } + } else { + mask_u32 + }; + for word in words.iter_mut() { + *word ^= mask_u32; + } + apply_mask_fallback(suffix, mask_u32.to_ne_bytes()); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_apply_mask() { + let mask = [0x6d, 0xb6, 0xb2, 0x80]; + let unmasked = [ + 0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17, 0x74, 0xf9, + 0x12, 0x03, + ]; + + for data_len in 0..=unmasked.len() { + let unmasked = &unmasked[0..data_len]; + // Check masking with different alignment. + for off in 0..=3 { + if unmasked.len() < off { + continue; + } + let mut masked = unmasked.to_vec(); + apply_mask_fallback(&mut masked[off..], mask); + + let mut masked_fast = unmasked.to_vec(); + apply_mask_fast32(&mut masked_fast[off..], mask); + + assert_eq!(masked, masked_fast); + } + } + } +} diff --git a/.cargo-vendor/tungstenite/src/protocol/frame/mod.rs b/.cargo-vendor/tungstenite/src/protocol/frame/mod.rs new file mode 100644 index 0000000000..7d2ee41833 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/protocol/frame/mod.rs @@ -0,0 +1,325 @@ +//! Utilities to work with raw WebSocket frames. + +pub mod coding; + +#[allow(clippy::module_inception)] +mod frame; +mod mask; + +use crate::{ + error::{CapacityError, Error, Result}, + Message, ReadBuffer, +}; +use log::*; +use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read, Write}; + +pub use self::frame::{CloseFrame, Frame, FrameHeader}; + +/// A reader and writer for WebSocket frames. +#[derive(Debug)] +pub struct FrameSocket { + /// The underlying network stream. 
+ stream: Stream, + /// Codec for reading/writing frames. + codec: FrameCodec, +} + +impl FrameSocket { + /// Create a new frame socket. + pub fn new(stream: Stream) -> Self { + FrameSocket { stream, codec: FrameCodec::new() } + } + + /// Create a new frame socket from partially read data. + pub fn from_partially_read(stream: Stream, part: Vec) -> Self { + FrameSocket { stream, codec: FrameCodec::from_partially_read(part) } + } + + /// Extract a stream from the socket. + pub fn into_inner(self) -> (Stream, Vec) { + (self.stream, self.codec.in_buffer.into_vec()) + } + + /// Returns a shared reference to the inner stream. + pub fn get_ref(&self) -> &Stream { + &self.stream + } + + /// Returns a mutable reference to the inner stream. + pub fn get_mut(&mut self) -> &mut Stream { + &mut self.stream + } +} + +impl FrameSocket +where + Stream: Read, +{ + /// Read a frame from stream. + pub fn read(&mut self, max_size: Option) -> Result> { + self.codec.read_frame(&mut self.stream, max_size) + } +} + +impl FrameSocket +where + Stream: Write, +{ + /// Writes and immediately flushes a frame. + /// Equivalent to calling [`write`](Self::write) then [`flush`](Self::flush). + pub fn send(&mut self, frame: Frame) -> Result<()> { + self.write(frame)?; + self.flush() + } + + /// Write a frame to stream. + /// + /// A subsequent call should be made to [`flush`](Self::flush) to flush writes. + /// + /// This function guarantees that the frame is queued unless [`Error::WriteBufferFull`] + /// is returned. + /// In order to handle WouldBlock or Incomplete, call [`flush`](Self::flush) afterwards. + pub fn write(&mut self, frame: Frame) -> Result<()> { + self.codec.buffer_frame(&mut self.stream, frame) + } + + /// Flush writes. + pub fn flush(&mut self) -> Result<()> { + self.codec.write_out_buffer(&mut self.stream)?; + Ok(self.stream.flush()?) + } +} + +/// A codec for WebSocket frames. +#[derive(Debug)] +pub(super) struct FrameCodec { + /// Buffer to read data from the stream. 
+ in_buffer: ReadBuffer, + /// Buffer to send packets to the network. + out_buffer: Vec, + /// Capacity limit for `out_buffer`. + max_out_buffer_len: usize, + /// Buffer target length to reach before writing to the stream + /// on calls to `buffer_frame`. + /// + /// Setting this to non-zero will buffer small writes from hitting + /// the stream. + out_buffer_write_len: usize, + /// Header and remaining size of the incoming packet being processed. + header: Option<(FrameHeader, u64)>, +} + +impl FrameCodec { + /// Create a new frame codec. + pub(super) fn new() -> Self { + Self { + in_buffer: ReadBuffer::new(), + out_buffer: Vec::new(), + max_out_buffer_len: usize::MAX, + out_buffer_write_len: 0, + header: None, + } + } + + /// Create a new frame codec from partially read data. + pub(super) fn from_partially_read(part: Vec) -> Self { + Self { + in_buffer: ReadBuffer::from_partially_read(part), + out_buffer: Vec::new(), + max_out_buffer_len: usize::MAX, + out_buffer_write_len: 0, + header: None, + } + } + + /// Sets a maximum size for the out buffer. + pub(super) fn set_max_out_buffer_len(&mut self, max: usize) { + self.max_out_buffer_len = max; + } + + /// Sets [`Self::buffer_frame`] buffer target length to reach before + /// writing to the stream. + pub(super) fn set_out_buffer_write_len(&mut self, len: usize) { + self.out_buffer_write_len = len; + } + + /// Read a frame from the provided stream. + pub(super) fn read_frame( + &mut self, + stream: &mut Stream, + max_size: Option, + ) -> Result> + where + Stream: Read, + { + let max_size = max_size.unwrap_or_else(usize::max_value); + + let payload = loop { + { + let cursor = self.in_buffer.as_cursor_mut(); + + if self.header.is_none() { + self.header = FrameHeader::parse(cursor)?; + } + + if let Some((_, ref length)) = self.header { + let length = *length; + + // Enforce frame size limit early and make sure `length` + // is not too big (fits into `usize`). 
+ if length > max_size as u64 { + return Err(Error::Capacity(CapacityError::MessageTooLong { + size: length as usize, + max_size, + })); + } + + let input_size = cursor.get_ref().len() as u64 - cursor.position(); + if length <= input_size { + // No truncation here since `length` is checked above + let mut payload = Vec::with_capacity(length as usize); + if length > 0 { + cursor.take(length).read_to_end(&mut payload)?; + } + break payload; + } + } + } + + // Not enough data in buffer. + let size = self.in_buffer.read_from(stream)?; + if size == 0 { + trace!("no frame received"); + return Ok(None); + } + }; + + let (header, length) = self.header.take().expect("Bug: no frame header"); + debug_assert_eq!(payload.len() as u64, length); + let frame = Frame::from_payload(header, payload); + trace!("received frame {}", frame); + Ok(Some(frame)) + } + + /// Writes a frame into the `out_buffer`. + /// If the out buffer size is over the `out_buffer_write_len` will also write + /// the out buffer into the provided `stream`. + /// + /// To ensure buffered frames are written call [`Self::write_out_buffer`]. + /// + /// May write to the stream, will **not** flush. + pub(super) fn buffer_frame(&mut self, stream: &mut Stream, frame: Frame) -> Result<()> + where + Stream: Write, + { + if frame.len() + self.out_buffer.len() > self.max_out_buffer_len { + return Err(Error::WriteBufferFull(Message::Frame(frame))); + } + + trace!("writing frame {}", frame); + + self.out_buffer.reserve(frame.len()); + frame.format(&mut self.out_buffer).expect("Bug: can't write to vector"); + + if self.out_buffer.len() > self.out_buffer_write_len { + self.write_out_buffer(stream) + } else { + Ok(()) + } + } + + /// Writes the out_buffer to the provided stream. + /// + /// Does **not** flush. 
+ pub(super) fn write_out_buffer(&mut self, stream: &mut Stream) -> Result<()> + where + Stream: Write, + { + while !self.out_buffer.is_empty() { + let len = stream.write(&self.out_buffer)?; + if len == 0 { + // This is the same as "Connection reset by peer" + return Err(IoError::new( + IoErrorKind::ConnectionReset, + "Connection reset while sending", + ) + .into()); + } + self.out_buffer.drain(0..len); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + + use crate::error::{CapacityError, Error}; + + use super::{Frame, FrameSocket}; + + use std::io::Cursor; + + #[test] + fn read_frames() { + let raw = Cursor::new(vec![ + 0x82, 0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x82, 0x03, 0x03, 0x02, 0x01, + 0x99, + ]); + let mut sock = FrameSocket::new(raw); + + assert_eq!( + sock.read(None).unwrap().unwrap().into_data(), + vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07] + ); + assert_eq!(sock.read(None).unwrap().unwrap().into_data(), vec![0x03, 0x02, 0x01]); + assert!(sock.read(None).unwrap().is_none()); + + let (_, rest) = sock.into_inner(); + assert_eq!(rest, vec![0x99]); + } + + #[test] + fn from_partially_read() { + let raw = Cursor::new(vec![0x02, 0x03, 0x04, 0x05, 0x06, 0x07]); + let mut sock = FrameSocket::from_partially_read(raw, vec![0x82, 0x07, 0x01]); + assert_eq!( + sock.read(None).unwrap().unwrap().into_data(), + vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07] + ); + } + + #[test] + fn write_frames() { + let mut sock = FrameSocket::new(Vec::new()); + + let frame = Frame::ping(vec![0x04, 0x05]); + sock.send(frame).unwrap(); + + let frame = Frame::pong(vec![0x01]); + sock.send(frame).unwrap(); + + let (buf, _) = sock.into_inner(); + assert_eq!(buf, vec![0x89, 0x02, 0x04, 0x05, 0x8a, 0x01, 0x01]); + } + + #[test] + fn parse_overflow() { + let raw = Cursor::new(vec![ + 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + ]); + let mut sock = FrameSocket::new(raw); + let _ = sock.read(None); // should not crash + } + + 
#[test] + fn size_limit_hit() { + let raw = Cursor::new(vec![0x82, 0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07]); + let mut sock = FrameSocket::new(raw); + assert!(matches!( + sock.read(Some(5)), + Err(Error::Capacity(CapacityError::MessageTooLong { size: 7, max_size: 5 })) + )); + } +} diff --git a/.cargo-vendor/tungstenite/src/protocol/message.rs b/.cargo-vendor/tungstenite/src/protocol/message.rs new file mode 100644 index 0000000000..2b2ed0b17d --- /dev/null +++ b/.cargo-vendor/tungstenite/src/protocol/message.rs @@ -0,0 +1,370 @@ +use std::{ + convert::{AsRef, From, Into, TryFrom}, + fmt, + result::Result as StdResult, + str, +}; + +use super::frame::{CloseFrame, Frame}; +use crate::error::{CapacityError, Error, Result}; + +mod string_collect { + use utf8::DecodeError; + + use crate::error::{Error, Result}; + + #[derive(Debug)] + pub struct StringCollector { + data: String, + incomplete: Option, + } + + impl StringCollector { + pub fn new() -> Self { + StringCollector { data: String::new(), incomplete: None } + } + + pub fn len(&self) -> usize { + self.data + .len() + .saturating_add(self.incomplete.map(|i| i.buffer_len as usize).unwrap_or(0)) + } + + pub fn extend>(&mut self, tail: T) -> Result<()> { + let mut input: &[u8] = tail.as_ref(); + + if let Some(mut incomplete) = self.incomplete.take() { + if let Some((result, rest)) = incomplete.try_complete(input) { + input = rest; + if let Ok(text) = result { + self.data.push_str(text); + } else { + return Err(Error::Utf8); + } + } else { + input = &[]; + self.incomplete = Some(incomplete); + } + } + + if !input.is_empty() { + match utf8::decode(input) { + Ok(text) => { + self.data.push_str(text); + Ok(()) + } + Err(DecodeError::Incomplete { valid_prefix, incomplete_suffix }) => { + self.data.push_str(valid_prefix); + self.incomplete = Some(incomplete_suffix); + Ok(()) + } + Err(DecodeError::Invalid { valid_prefix, .. 
}) => { + self.data.push_str(valid_prefix); + Err(Error::Utf8) + } + } + } else { + Ok(()) + } + } + + pub fn into_string(self) -> Result { + if self.incomplete.is_some() { + Err(Error::Utf8) + } else { + Ok(self.data) + } + } + } +} + +use self::string_collect::StringCollector; + +/// A struct representing the incomplete message. +#[derive(Debug)] +pub struct IncompleteMessage { + collector: IncompleteMessageCollector, +} + +#[derive(Debug)] +enum IncompleteMessageCollector { + Text(StringCollector), + Binary(Vec), +} + +impl IncompleteMessage { + /// Create new. + pub fn new(message_type: IncompleteMessageType) -> Self { + IncompleteMessage { + collector: match message_type { + IncompleteMessageType::Binary => IncompleteMessageCollector::Binary(Vec::new()), + IncompleteMessageType::Text => { + IncompleteMessageCollector::Text(StringCollector::new()) + } + }, + } + } + + /// Get the current filled size of the buffer. + pub fn len(&self) -> usize { + match self.collector { + IncompleteMessageCollector::Text(ref t) => t.len(), + IncompleteMessageCollector::Binary(ref b) => b.len(), + } + } + + /// Add more data to an existing message. + pub fn extend>(&mut self, tail: T, size_limit: Option) -> Result<()> { + // Always have a max size. This ensures an error in case of concatenating two buffers + // of more than `usize::max_value()` bytes in total. + let max_size = size_limit.unwrap_or_else(usize::max_value); + let my_size = self.len(); + let portion_size = tail.as_ref().len(); + // Be careful about integer overflows here. + if my_size > max_size || portion_size > max_size - my_size { + return Err(Error::Capacity(CapacityError::MessageTooLong { + size: my_size + portion_size, + max_size, + })); + } + + match self.collector { + IncompleteMessageCollector::Binary(ref mut v) => { + v.extend(tail.as_ref()); + Ok(()) + } + IncompleteMessageCollector::Text(ref mut t) => t.extend(tail), + } + } + + /// Convert an incomplete message into a complete one. 
+ pub fn complete(self) -> Result { + match self.collector { + IncompleteMessageCollector::Binary(v) => Ok(Message::Binary(v)), + IncompleteMessageCollector::Text(t) => { + let text = t.into_string()?; + Ok(Message::Text(text)) + } + } + } +} + +/// The type of incomplete message. +pub enum IncompleteMessageType { + Text, + Binary, +} + +/// An enum representing the various forms of a WebSocket message. +#[derive(Debug, Eq, PartialEq, Clone)] +pub enum Message { + /// A text WebSocket message + Text(String), + /// A binary WebSocket message + Binary(Vec), + /// A ping message with the specified payload + /// + /// The payload here must have a length less than 125 bytes + Ping(Vec), + /// A pong message with the specified payload + /// + /// The payload here must have a length less than 125 bytes + Pong(Vec), + /// A close message with the optional close frame. + Close(Option>), + /// Raw frame. Note, that you're not going to get this value while reading the message. + Frame(Frame), +} + +impl Message { + /// Create a new text WebSocket message from a stringable. + pub fn text(string: S) -> Message + where + S: Into, + { + Message::Text(string.into()) + } + + /// Create a new binary WebSocket message by converting to `Vec`. + pub fn binary(bin: B) -> Message + where + B: Into>, + { + Message::Binary(bin.into()) + } + + /// Indicates whether a message is a text message. + pub fn is_text(&self) -> bool { + matches!(*self, Message::Text(_)) + } + + /// Indicates whether a message is a binary message. + pub fn is_binary(&self) -> bool { + matches!(*self, Message::Binary(_)) + } + + /// Indicates whether a message is a ping message. + pub fn is_ping(&self) -> bool { + matches!(*self, Message::Ping(_)) + } + + /// Indicates whether a message is a pong message. + pub fn is_pong(&self) -> bool { + matches!(*self, Message::Pong(_)) + } + + /// Indicates whether a message is a close message. 
+ pub fn is_close(&self) -> bool { + matches!(*self, Message::Close(_)) + } + + /// Get the length of the WebSocket message. + pub fn len(&self) -> usize { + match *self { + Message::Text(ref string) => string.len(), + Message::Binary(ref data) | Message::Ping(ref data) | Message::Pong(ref data) => { + data.len() + } + Message::Close(ref data) => data.as_ref().map(|d| d.reason.len()).unwrap_or(0), + Message::Frame(ref frame) => frame.len(), + } + } + + /// Returns true if the WebSocket message has no content. + /// For example, if the other side of the connection sent an empty string. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Consume the WebSocket and return it as binary data. + pub fn into_data(self) -> Vec { + match self { + Message::Text(string) => string.into_bytes(), + Message::Binary(data) | Message::Ping(data) | Message::Pong(data) => data, + Message::Close(None) => Vec::new(), + Message::Close(Some(frame)) => frame.reason.into_owned().into_bytes(), + Message::Frame(frame) => frame.into_data(), + } + } + + /// Attempt to consume the WebSocket message and convert it to a String. + pub fn into_text(self) -> Result { + match self { + Message::Text(string) => Ok(string), + Message::Binary(data) | Message::Ping(data) | Message::Pong(data) => { + Ok(String::from_utf8(data)?) + } + Message::Close(None) => Ok(String::new()), + Message::Close(Some(frame)) => Ok(frame.reason.into_owned()), + Message::Frame(frame) => Ok(frame.into_string()?), + } + } + + /// Attempt to get a &str from the WebSocket message, + /// this will try to convert binary data to utf8. + pub fn to_text(&self) -> Result<&str> { + match *self { + Message::Text(ref string) => Ok(string), + Message::Binary(ref data) | Message::Ping(ref data) | Message::Pong(ref data) => { + Ok(str::from_utf8(data)?) 
+ } + Message::Close(None) => Ok(""), + Message::Close(Some(ref frame)) => Ok(&frame.reason), + Message::Frame(ref frame) => Ok(frame.to_text()?), + } + } +} + +impl From for Message { + fn from(string: String) -> Self { + Message::text(string) + } +} + +impl<'s> From<&'s str> for Message { + fn from(string: &'s str) -> Self { + Message::text(string) + } +} + +impl<'b> From<&'b [u8]> for Message { + fn from(data: &'b [u8]) -> Self { + Message::binary(data) + } +} + +impl From> for Message { + fn from(data: Vec) -> Self { + Message::binary(data) + } +} + +impl From for Vec { + fn from(message: Message) -> Self { + message.into_data() + } +} + +impl TryFrom for String { + type Error = Error; + + fn try_from(value: Message) -> StdResult { + value.into_text() + } +} + +impl fmt::Display for Message { + fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> { + if let Ok(string) = self.to_text() { + write!(f, "{}", string) + } else { + write!(f, "Binary Data", self.len()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn display() { + let t = Message::text("test".to_owned()); + assert_eq!(t.to_string(), "test".to_owned()); + + let bin = Message::binary(vec![0, 1, 3, 4, 241]); + assert_eq!(bin.to_string(), "Binary Data".to_owned()); + } + + #[test] + fn binary_convert() { + let bin = [6u8, 7, 8, 9, 10, 241]; + let msg = Message::from(&bin[..]); + assert!(msg.is_binary()); + assert!(msg.into_text().is_err()); + } + + #[test] + fn binary_convert_vec() { + let bin = vec![6u8, 7, 8, 9, 10, 241]; + let msg = Message::from(bin); + assert!(msg.is_binary()); + assert!(msg.into_text().is_err()); + } + + #[test] + fn binary_convert_into_vec() { + let bin = vec![6u8, 7, 8, 9, 10, 241]; + let bin_copy = bin.clone(); + let msg = Message::from(bin); + let serialized: Vec = msg.into(); + assert_eq!(bin_copy, serialized); + } + + #[test] + fn text_convert() { + let s = "kiwotsukete"; + let msg = Message::from(s); + assert!(msg.is_text()); + } +} diff 
--git a/.cargo-vendor/tungstenite/src/protocol/mod.rs b/.cargo-vendor/tungstenite/src/protocol/mod.rs new file mode 100644 index 0000000000..21c996a999 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/protocol/mod.rs @@ -0,0 +1,861 @@ +//! Generic WebSocket message stream. + +pub mod frame; + +mod message; + +pub use self::{frame::CloseFrame, message::Message}; + +use self::{ + frame::{ + coding::{CloseCode, Control as OpCtl, Data as OpData, OpCode}, + Frame, FrameCodec, + }, + message::{IncompleteMessage, IncompleteMessageType}, +}; +use crate::error::{Error, ProtocolError, Result}; +use log::*; +use std::{ + io::{self, Read, Write}, + mem::replace, +}; + +/// Indicates a Client or Server role of the websocket +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Role { + /// This socket is a server + Server, + /// This socket is a client + Client, +} + +/// The configuration for WebSocket connection. +#[derive(Debug, Clone, Copy)] +pub struct WebSocketConfig { + /// Does nothing, instead use `max_write_buffer_size`. + #[deprecated] + pub max_send_queue: Option, + /// The target minimum size of the write buffer to reach before writing the data + /// to the underlying stream. + /// The default value is 128 KiB. + /// + /// If set to `0` each message will be eagerly written to the underlying stream. + /// It is often more optimal to allow them to buffer a little, hence the default value. + /// + /// Note: [`flush`](WebSocket::flush) will always fully write the buffer regardless. + pub write_buffer_size: usize, + /// The max size of the write buffer in bytes. Setting this can provide backpressure + /// in the case the write buffer is filling up due to write errors. + /// The default value is unlimited. + /// + /// Note: The write buffer only builds up past [`write_buffer_size`](Self::write_buffer_size) + /// when writes to the underlying stream are failing. So the **write buffer can not + /// fill up if you are not observing write errors even if not flushing**. 
+ /// + /// Note: Should always be at least [`write_buffer_size + 1 message`](Self::write_buffer_size) + /// and probably a little more depending on error handling strategy. + pub max_write_buffer_size: usize, + /// The maximum size of an incoming message. `None` means no size limit. The default value is 64 MiB + /// which should be reasonably big for all normal use-cases but small enough to prevent + /// memory eating by a malicious user. + pub max_message_size: Option, + /// The maximum size of a single incoming message frame. `None` means no size limit. The limit is for + /// frame payload NOT including the frame header. The default value is 16 MiB which should + /// be reasonably big for all normal use-cases but small enough to prevent memory eating + /// by a malicious user. + pub max_frame_size: Option, + /// When set to `true`, the server will accept and handle unmasked frames + /// from the client. According to the RFC 6455, the server must close the + /// connection to the client in such cases, however it seems like there are + /// some popular libraries that are sending unmasked frames, ignoring the RFC. + /// By default this option is set to `false`, i.e. according to RFC 6455. + pub accept_unmasked_frames: bool, +} + +impl Default for WebSocketConfig { + fn default() -> Self { + #[allow(deprecated)] + WebSocketConfig { + max_send_queue: None, + write_buffer_size: 128 * 1024, + max_write_buffer_size: usize::MAX, + max_message_size: Some(64 << 20), + max_frame_size: Some(16 << 20), + accept_unmasked_frames: false, + } + } +} + +impl WebSocketConfig { + /// Panic if values are invalid. + pub(crate) fn assert_valid(&self) { + assert!( + self.max_write_buffer_size > self.write_buffer_size, + "WebSocketConfig::max_write_buffer_size must be greater than write_buffer_size, \ + see WebSocketConfig docs`" + ); + } +} + +/// WebSocket input-output stream. +/// +/// This is THE structure you want to create to be able to speak the WebSocket protocol. 
+/// It may be created by calling `connect`, `accept` or `client` functions. +/// +/// Use [`WebSocket::read`], [`WebSocket::send`] to received and send messages. +#[derive(Debug)] +pub struct WebSocket { + /// The underlying socket. + socket: Stream, + /// The context for managing a WebSocket. + context: WebSocketContext, +} + +impl WebSocket { + /// Convert a raw socket into a WebSocket without performing a handshake. + /// + /// Call this function if you're using Tungstenite as a part of a web framework + /// or together with an existing one. If you need an initial handshake, use + /// `connect()` or `accept()` functions of the crate to construct a websocket. + /// + /// # Panics + /// Panics if config is invalid e.g. `max_write_buffer_size <= write_buffer_size`. + pub fn from_raw_socket(stream: Stream, role: Role, config: Option) -> Self { + WebSocket { socket: stream, context: WebSocketContext::new(role, config) } + } + + /// Convert a raw socket into a WebSocket without performing a handshake. + /// + /// Call this function if you're using Tungstenite as a part of a web framework + /// or together with an existing one. If you need an initial handshake, use + /// `connect()` or `accept()` functions of the crate to construct a websocket. + /// + /// # Panics + /// Panics if config is invalid e.g. `max_write_buffer_size <= write_buffer_size`. + pub fn from_partially_read( + stream: Stream, + part: Vec, + role: Role, + config: Option, + ) -> Self { + WebSocket { + socket: stream, + context: WebSocketContext::from_partially_read(part, role, config), + } + } + + /// Returns a shared reference to the inner stream. + pub fn get_ref(&self) -> &Stream { + &self.socket + } + /// Returns a mutable reference to the inner stream. + pub fn get_mut(&mut self) -> &mut Stream { + &mut self.socket + } + + /// Change the configuration. + /// + /// # Panics + /// Panics if config is invalid e.g. `max_write_buffer_size <= write_buffer_size`. 
+ pub fn set_config(&mut self, set_func: impl FnOnce(&mut WebSocketConfig)) { + self.context.set_config(set_func) + } + + /// Read the configuration. + pub fn get_config(&self) -> &WebSocketConfig { + self.context.get_config() + } + + /// Check if it is possible to read messages. + /// + /// Reading is impossible after receiving `Message::Close`. It is still possible after + /// sending close frame since the peer still may send some data before confirming close. + pub fn can_read(&self) -> bool { + self.context.can_read() + } + + /// Check if it is possible to write messages. + /// + /// Writing gets impossible immediately after sending or receiving `Message::Close`. + pub fn can_write(&self) -> bool { + self.context.can_write() + } +} + +impl WebSocket { + /// Read a message from stream, if possible. + /// + /// This will also queue responses to ping and close messages. These responses + /// will be written and flushed on the next call to [`read`](Self::read), + /// [`write`](Self::write) or [`flush`](Self::flush). + /// + /// # Closing the connection + /// When the remote endpoint decides to close the connection this will return + /// the close message with an optional close frame. + /// + /// You should continue calling [`read`](Self::read), [`write`](Self::write) or + /// [`flush`](Self::flush) to drive the reply to the close frame until [`Error::ConnectionClosed`] + /// is returned. Once that happens it is safe to drop the underlying connection. + pub fn read(&mut self) -> Result { + self.context.read(&mut self.socket) + } + + /// Writes and immediately flushes a message. + /// Equivalent to calling [`write`](Self::write) then [`flush`](Self::flush). + pub fn send(&mut self, message: Message) -> Result<()> { + self.write(message)?; + self.flush() + } + + /// Write a message to the provided stream, if possible. + /// + /// A subsequent call should be made to [`flush`](Self::flush) to flush writes. 
+ /// + /// In the event of stream write failure the message frame will be stored + /// in the write buffer and will try again on the next call to [`write`](Self::write) + /// or [`flush`](Self::flush). + /// + /// If the write buffer would exceed the configured [`WebSocketConfig::max_write_buffer_size`] + /// [`Err(WriteBufferFull(msg_frame))`](Error::WriteBufferFull) is returned. + /// + /// This call will generally not flush. However, if there are queued automatic messages + /// they will be written and eagerly flushed. + /// + /// For example, upon receiving ping messages tungstenite queues pong replies automatically. + /// The next call to [`read`](Self::read), [`write`](Self::write) or [`flush`](Self::flush) + /// will write & flush the pong reply. This means you should not respond to ping frames manually. + /// + /// You can however send pong frames manually in order to indicate a unidirectional heartbeat + /// as described in [RFC 6455](https://tools.ietf.org/html/rfc6455#section-5.5.3). Note that + /// if [`read`](Self::read) returns a ping, you should [`flush`](Self::flush) before passing + /// a custom pong to [`write`](Self::write), otherwise the automatic queued response to the + /// ping will not be sent as it will be replaced by your custom pong message. + /// + /// # Errors + /// - If the WebSocket's write buffer is full, [`Error::WriteBufferFull`] will be returned + /// along with the equivalent passed message frame. + /// - If the connection is closed and should be dropped, this will return [`Error::ConnectionClosed`]. + /// - If you try again after [`Error::ConnectionClosed`] was returned either from here or from + /// [`read`](Self::read), [`Error::AlreadyClosed`] will be returned. This indicates a program + /// error on your part. + /// - [`Error::Io`] is returned if the underlying connection returns an error + /// (consider these fatal except for WouldBlock). 
+ /// - [`Error::Capacity`] if your message size is bigger than the configured max message size. + pub fn write(&mut self, message: Message) -> Result<()> { + self.context.write(&mut self.socket, message) + } + + /// Flush writes. + /// + /// Ensures all messages previously passed to [`write`](Self::write) and automatic + /// queued pong responses are written & flushed into the underlying stream. + pub fn flush(&mut self) -> Result<()> { + self.context.flush(&mut self.socket) + } + + /// Close the connection. + /// + /// This function guarantees that the close frame will be queued. + /// There is no need to call it again. Calling this function is + /// the same as calling `write(Message::Close(..))`. + /// + /// After queuing the close frame you should continue calling [`read`](Self::read) or + /// [`flush`](Self::flush) to drive the close handshake to completion. + /// + /// The websocket RFC defines that the underlying connection should be closed + /// by the server. Tungstenite takes care of this asymmetry for you. + /// + /// When the close handshake is finished (we have both sent and received + /// a close message), [`read`](Self::read) or [`flush`](Self::flush) will return + /// [Error::ConnectionClosed] if this endpoint is the server. + /// + /// If this endpoint is a client, [Error::ConnectionClosed] will only be + /// returned after the server has closed the underlying connection. + /// + /// It is thus safe to drop the underlying connection as soon as [Error::ConnectionClosed] + /// is returned from [`read`](Self::read) or [`flush`](Self::flush). + pub fn close(&mut self, code: Option) -> Result<()> { + self.context.close(&mut self.socket, code) + } + + /// Old name for [`read`](Self::read). + #[deprecated(note = "Use `read`")] + pub fn read_message(&mut self) -> Result { + self.read() + } + + /// Old name for [`send`](Self::send). 
+ #[deprecated(note = "Use `send`")] + pub fn write_message(&mut self, message: Message) -> Result<()> { + self.send(message) + } + + /// Old name for [`flush`](Self::flush). + #[deprecated(note = "Use `flush`")] + pub fn write_pending(&mut self) -> Result<()> { + self.flush() + } +} + +/// A context for managing WebSocket stream. +#[derive(Debug)] +pub struct WebSocketContext { + /// Server or client? + role: Role, + /// encoder/decoder of frame. + frame: FrameCodec, + /// The state of processing, either "active" or "closing". + state: WebSocketState, + /// Receive: an incomplete message being processed. + incomplete: Option, + /// Send in addition to regular messages E.g. "pong" or "close". + additional_send: Option, + /// True indicates there is an additional message (like a pong) + /// that failed to flush previously and we should try again. + unflushed_additional: bool, + /// The configuration for the websocket session. + config: WebSocketConfig, +} + +impl WebSocketContext { + /// Create a WebSocket context that manages a post-handshake stream. + /// + /// # Panics + /// Panics if config is invalid e.g. `max_write_buffer_size <= write_buffer_size`. + pub fn new(role: Role, config: Option) -> Self { + Self::_new(role, FrameCodec::new(), config.unwrap_or_default()) + } + + /// Create a WebSocket context that manages an post-handshake stream. + /// + /// # Panics + /// Panics if config is invalid e.g. `max_write_buffer_size <= write_buffer_size`. 
+ pub fn from_partially_read(part: Vec, role: Role, config: Option) -> Self { + Self::_new(role, FrameCodec::from_partially_read(part), config.unwrap_or_default()) + } + + fn _new(role: Role, mut frame: FrameCodec, config: WebSocketConfig) -> Self { + config.assert_valid(); + frame.set_max_out_buffer_len(config.max_write_buffer_size); + frame.set_out_buffer_write_len(config.write_buffer_size); + Self { + role, + frame, + state: WebSocketState::Active, + incomplete: None, + additional_send: None, + unflushed_additional: false, + config, + } + } + + /// Change the configuration. + /// + /// # Panics + /// Panics if config is invalid e.g. `max_write_buffer_size <= write_buffer_size`. + pub fn set_config(&mut self, set_func: impl FnOnce(&mut WebSocketConfig)) { + set_func(&mut self.config); + self.config.assert_valid(); + self.frame.set_max_out_buffer_len(self.config.max_write_buffer_size); + self.frame.set_out_buffer_write_len(self.config.write_buffer_size); + } + + /// Read the configuration. + pub fn get_config(&self) -> &WebSocketConfig { + &self.config + } + + /// Check if it is possible to read messages. + /// + /// Reading is impossible after receiving `Message::Close`. It is still possible after + /// sending close frame since the peer still may send some data before confirming close. + pub fn can_read(&self) -> bool { + self.state.can_read() + } + + /// Check if it is possible to write messages. + /// + /// Writing gets impossible immediately after sending or receiving `Message::Close`. + pub fn can_write(&self) -> bool { + self.state.is_active() + } + + /// Read a message from the provided stream, if possible. + /// + /// This function sends pong and close responses automatically. + /// However, it never blocks on write. + pub fn read(&mut self, stream: &mut Stream) -> Result + where + Stream: Read + Write, + { + // Do not read from already closed connections. 
+ self.state.check_not_terminated()?; + + loop { + if self.additional_send.is_some() || self.unflushed_additional { + // Since we may get ping or close, we need to reply to the messages even during read. + match self.flush(stream) { + Ok(_) => {} + Err(Error::Io(err)) if err.kind() == io::ErrorKind::WouldBlock => { + // If blocked continue reading, but try again later + self.unflushed_additional = true; + } + Err(err) => return Err(err), + } + } else if self.role == Role::Server && !self.state.can_read() { + self.state = WebSocketState::Terminated; + return Err(Error::ConnectionClosed); + } + + // If we get here, either write blocks or we have nothing to write. + // Thus if read blocks, just let it return WouldBlock. + if let Some(message) = self.read_message_frame(stream)? { + trace!("Received message {}", message); + return Ok(message); + } + } + } + + /// Write a message to the provided stream. + /// + /// A subsequent call should be made to [`flush`](Self::flush) to flush writes. + /// + /// In the event of stream write failure the message frame will be stored + /// in the write buffer and will try again on the next call to [`write`](Self::write) + /// or [`flush`](Self::flush). + /// + /// If the write buffer would exceed the configured [`WebSocketConfig::max_write_buffer_size`] + /// [`Err(WriteBufferFull(msg_frame))`](Error::WriteBufferFull) is returned. + pub fn write(&mut self, stream: &mut Stream, message: Message) -> Result<()> + where + Stream: Read + Write, + { + // When terminated, return AlreadyClosed. + self.state.check_not_terminated()?; + + // Do not write after sending a close frame. 
+ if !self.state.is_active() { + return Err(Error::Protocol(ProtocolError::SendAfterClosing)); + } + + let frame = match message { + Message::Text(data) => Frame::message(data.into(), OpCode::Data(OpData::Text), true), + Message::Binary(data) => Frame::message(data, OpCode::Data(OpData::Binary), true), + Message::Ping(data) => Frame::ping(data), + Message::Pong(data) => { + self.set_additional(Frame::pong(data)); + // Note: user pongs can be user flushed so no need to flush here + return self._write(stream, None).map(|_| ()); + } + Message::Close(code) => return self.close(stream, code), + Message::Frame(f) => f, + }; + + let should_flush = self._write(stream, Some(frame))?; + if should_flush { + self.flush(stream)?; + } + Ok(()) + } + + /// Flush writes. + /// + /// Ensures all messages previously passed to [`write`](Self::write) and automatically + /// queued pong responses are written & flushed into the `stream`. + #[inline] + pub fn flush(&mut self, stream: &mut Stream) -> Result<()> + where + Stream: Read + Write, + { + self._write(stream, None)?; + self.frame.write_out_buffer(stream)?; + stream.flush()?; + self.unflushed_additional = false; + Ok(()) + } + + /// Writes any data in the out_buffer, `additional_send` and given `data`. + /// + /// Does **not** flush. + /// + /// Returns true if the write contents indicate we should flush immediately. + fn _write(&mut self, stream: &mut Stream, data: Option) -> Result + where + Stream: Read + Write, + { + if let Some(data) = data { + self.buffer_frame(stream, data)?; + } + + // Upon receipt of a Ping frame, an endpoint MUST send a Pong frame in + // response, unless it already received a Close frame. It SHOULD + // respond with Pong frame as soon as is practical. 
(RFC 6455) + let should_flush = if let Some(msg) = self.additional_send.take() { + trace!("Sending pong/close"); + match self.buffer_frame(stream, msg) { + Err(Error::WriteBufferFull(Message::Frame(msg))) => { + // if an system message would exceed the buffer put it back in + // `additional_send` for retry. Otherwise returning this error + // may not make sense to the user, e.g. calling `flush`. + self.set_additional(msg); + false + } + Err(err) => return Err(err), + Ok(_) => true, + } + } else { + self.unflushed_additional + }; + + // If we're closing and there is nothing to send anymore, we should close the connection. + if self.role == Role::Server && !self.state.can_read() { + // The underlying TCP connection, in most normal cases, SHOULD be closed + // first by the server, so that it holds the TIME_WAIT state and not the + // client (as this would prevent it from re-opening the connection for 2 + // maximum segment lifetimes (2MSL), while there is no corresponding + // server impact as a TIME_WAIT connection is immediately reopened upon + // a new SYN with a higher seq number). (RFC 6455) + self.frame.write_out_buffer(stream)?; + self.state = WebSocketState::Terminated; + Err(Error::ConnectionClosed) + } else { + Ok(should_flush) + } + } + + /// Close the connection. + /// + /// This function guarantees that the close frame will be queued. + /// There is no need to call it again. Calling this function is + /// the same as calling `send(Message::Close(..))`. + pub fn close(&mut self, stream: &mut Stream, code: Option) -> Result<()> + where + Stream: Read + Write, + { + if let WebSocketState::Active = self.state { + self.state = WebSocketState::ClosedByUs; + let frame = Frame::close(code); + self._write(stream, Some(frame))?; + } + self.flush(stream) + } + + /// Try to decode one message frame. May return None. 
+ fn read_message_frame(&mut self, stream: &mut Stream) -> Result> + where + Stream: Read + Write, + { + if let Some(mut frame) = self + .frame + .read_frame(stream, self.config.max_frame_size) + .check_connection_reset(self.state)? + { + if !self.state.can_read() { + return Err(Error::Protocol(ProtocolError::ReceivedAfterClosing)); + } + // MUST be 0 unless an extension is negotiated that defines meanings + // for non-zero values. If a nonzero value is received and none of + // the negotiated extensions defines the meaning of such a nonzero + // value, the receiving endpoint MUST _Fail the WebSocket + // Connection_. + { + let hdr = frame.header(); + if hdr.rsv1 || hdr.rsv2 || hdr.rsv3 { + return Err(Error::Protocol(ProtocolError::NonZeroReservedBits)); + } + } + + match self.role { + Role::Server => { + if frame.is_masked() { + // A server MUST remove masking for data frames received from a client + // as described in Section 5.3. (RFC 6455) + frame.apply_mask() + } else if !self.config.accept_unmasked_frames { + // The server MUST close the connection upon receiving a + // frame that is not masked. (RFC 6455) + // The only exception here is if the user explicitly accepts given + // stream by setting WebSocketConfig.accept_unmasked_frames to true + return Err(Error::Protocol(ProtocolError::UnmaskedFrameFromClient)); + } + } + Role::Client => { + if frame.is_masked() { + // A client MUST close a connection if it detects a masked frame. (RFC 6455) + return Err(Error::Protocol(ProtocolError::MaskedFrameFromServer)); + } + } + } + + match frame.header().opcode { + OpCode::Control(ctl) => { + match ctl { + // All control frames MUST have a payload length of 125 bytes or less + // and MUST NOT be fragmented. 
(RFC 6455) + _ if !frame.header().is_final => { + Err(Error::Protocol(ProtocolError::FragmentedControlFrame)) + } + _ if frame.payload().len() > 125 => { + Err(Error::Protocol(ProtocolError::ControlFrameTooBig)) + } + OpCtl::Close => Ok(self.do_close(frame.into_close()?).map(Message::Close)), + OpCtl::Reserved(i) => { + Err(Error::Protocol(ProtocolError::UnknownControlFrameType(i))) + } + OpCtl::Ping => { + let data = frame.into_data(); + // No ping processing after we sent a close frame. + if self.state.is_active() { + self.set_additional(Frame::pong(data.clone())); + } + Ok(Some(Message::Ping(data))) + } + OpCtl::Pong => Ok(Some(Message::Pong(frame.into_data()))), + } + } + + OpCode::Data(data) => { + let fin = frame.header().is_final; + match data { + OpData::Continue => { + if let Some(ref mut msg) = self.incomplete { + msg.extend(frame.into_data(), self.config.max_message_size)?; + } else { + return Err(Error::Protocol( + ProtocolError::UnexpectedContinueFrame, + )); + } + if fin { + Ok(Some(self.incomplete.take().unwrap().complete()?)) + } else { + Ok(None) + } + } + c if self.incomplete.is_some() => { + Err(Error::Protocol(ProtocolError::ExpectedFragment(c))) + } + OpData::Text | OpData::Binary => { + let msg = { + let message_type = match data { + OpData::Text => IncompleteMessageType::Text, + OpData::Binary => IncompleteMessageType::Binary, + _ => panic!("Bug: message is not text nor binary"), + }; + let mut m = IncompleteMessage::new(message_type); + m.extend(frame.into_data(), self.config.max_message_size)?; + m + }; + if fin { + Ok(Some(msg.complete()?)) + } else { + self.incomplete = Some(msg); + Ok(None) + } + } + OpData::Reserved(i) => { + Err(Error::Protocol(ProtocolError::UnknownDataFrameType(i))) + } + } + } + } // match opcode + } else { + // Connection closed by peer + match replace(&mut self.state, WebSocketState::Terminated) { + WebSocketState::ClosedByPeer | WebSocketState::CloseAcknowledged => { + Err(Error::ConnectionClosed) + } + _ => 
Err(Error::Protocol(ProtocolError::ResetWithoutClosingHandshake)), + } + } + } + + /// Received a close frame. Tells if we need to return a close frame to the user. + #[allow(clippy::option_option)] + fn do_close<'t>(&mut self, close: Option>) -> Option>> { + debug!("Received close frame: {:?}", close); + match self.state { + WebSocketState::Active => { + self.state = WebSocketState::ClosedByPeer; + + let close = close.map(|frame| { + if !frame.code.is_allowed() { + CloseFrame { + code: CloseCode::Protocol, + reason: "Protocol violation".into(), + } + } else { + frame + } + }); + + let reply = Frame::close(close.clone()); + debug!("Replying to close with {:?}", reply); + self.set_additional(reply); + + Some(close) + } + WebSocketState::ClosedByPeer | WebSocketState::CloseAcknowledged => { + // It is already closed, just ignore. + None + } + WebSocketState::ClosedByUs => { + // We received a reply. + self.state = WebSocketState::CloseAcknowledged; + Some(close) + } + WebSocketState::Terminated => unreachable!(), + } + } + + /// Write a single frame into the write-buffer. + fn buffer_frame(&mut self, stream: &mut Stream, mut frame: Frame) -> Result<()> + where + Stream: Read + Write, + { + match self.role { + Role::Server => {} + Role::Client => { + // 5. If the data is being sent by the client, the frame(s) MUST be + // masked as defined in Section 5.3. (RFC 6455) + frame.set_random_mask(); + } + } + + trace!("Sending frame: {:?}", frame); + self.frame.buffer_frame(stream, frame).check_connection_reset(self.state) + } + + /// Replace `additional_send` if it is currently a `Pong` message. + fn set_additional(&mut self, add: Frame) { + let empty_or_pong = self + .additional_send + .as_ref() + .map_or(true, |f| f.header().opcode == OpCode::Control(OpCtl::Pong)); + if empty_or_pong { + self.additional_send.replace(add); + } + } +} + +/// The current connection state. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +enum WebSocketState { + /// The connection is active. 
+ Active, + /// We initiated a close handshake. + ClosedByUs, + /// The peer initiated a close handshake. + ClosedByPeer, + /// The peer replied to our close handshake. + CloseAcknowledged, + /// The connection does not exist anymore. + Terminated, +} + +impl WebSocketState { + /// Tell if we're allowed to process normal messages. + fn is_active(self) -> bool { + matches!(self, WebSocketState::Active) + } + + /// Tell if we should process incoming data. Note that if we send a close frame + /// but the remote hasn't confirmed, they might have sent data before they receive our + /// close frame, so we should still pass those to client code, hence ClosedByUs is valid. + fn can_read(self) -> bool { + matches!(self, WebSocketState::Active | WebSocketState::ClosedByUs) + } + + /// Check if the state is active, return error if not. + fn check_not_terminated(self) -> Result<()> { + match self { + WebSocketState::Terminated => Err(Error::AlreadyClosed), + _ => Ok(()), + } + } +} + +/// Translate "Connection reset by peer" into `ConnectionClosed` if appropriate. 
+trait CheckConnectionReset { + fn check_connection_reset(self, state: WebSocketState) -> Self; +} + +impl CheckConnectionReset for Result { + fn check_connection_reset(self, state: WebSocketState) -> Self { + match self { + Err(Error::Io(io_error)) => Err({ + if !state.can_read() && io_error.kind() == io::ErrorKind::ConnectionReset { + Error::ConnectionClosed + } else { + Error::Io(io_error) + } + }), + x => x, + } + } +} + +#[cfg(test)] +mod tests { + use super::{Message, Role, WebSocket, WebSocketConfig}; + use crate::error::{CapacityError, Error}; + + use std::{io, io::Cursor}; + + struct WriteMoc(Stream); + + impl io::Write for WriteMoc { + fn write(&mut self, buf: &[u8]) -> io::Result { + Ok(buf.len()) + } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } + + impl io::Read for WriteMoc { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.0.read(buf) + } + } + + #[test] + fn receive_messages() { + let incoming = Cursor::new(vec![ + 0x89, 0x02, 0x01, 0x02, 0x8a, 0x01, 0x03, 0x01, 0x07, 0x48, 0x65, 0x6c, 0x6c, 0x6f, + 0x2c, 0x20, 0x80, 0x06, 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x21, 0x82, 0x03, 0x01, 0x02, + 0x03, + ]); + let mut socket = WebSocket::from_raw_socket(WriteMoc(incoming), Role::Client, None); + assert_eq!(socket.read().unwrap(), Message::Ping(vec![1, 2])); + assert_eq!(socket.read().unwrap(), Message::Pong(vec![3])); + assert_eq!(socket.read().unwrap(), Message::Text("Hello, World!".into())); + assert_eq!(socket.read().unwrap(), Message::Binary(vec![0x01, 0x02, 0x03])); + } + + #[test] + fn size_limiting_text_fragmented() { + let incoming = Cursor::new(vec![ + 0x01, 0x07, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x2c, 0x20, 0x80, 0x06, 0x57, 0x6f, 0x72, + 0x6c, 0x64, 0x21, + ]); + let limit = WebSocketConfig { max_message_size: Some(10), ..WebSocketConfig::default() }; + let mut socket = WebSocket::from_raw_socket(WriteMoc(incoming), Role::Client, Some(limit)); + + assert!(matches!( + socket.read(), + 
Err(Error::Capacity(CapacityError::MessageTooLong { size: 13, max_size: 10 })) + )); + } + + #[test] + fn size_limiting_binary() { + let incoming = Cursor::new(vec![0x82, 0x03, 0x01, 0x02, 0x03]); + let limit = WebSocketConfig { max_message_size: Some(2), ..WebSocketConfig::default() }; + let mut socket = WebSocket::from_raw_socket(WriteMoc(incoming), Role::Client, Some(limit)); + + assert!(matches!( + socket.read(), + Err(Error::Capacity(CapacityError::MessageTooLong { size: 3, max_size: 2 })) + )); + } +} diff --git a/.cargo-vendor/tungstenite/src/server.rs b/.cargo-vendor/tungstenite/src/server.rs new file mode 100644 index 0000000000..e79bccb646 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/server.rs @@ -0,0 +1,68 @@ +//! Methods to accept an incoming WebSocket connection on a server. + +pub use crate::handshake::server::ServerHandshake; + +use crate::handshake::{ + server::{Callback, NoCallback}, + HandshakeError, +}; + +use crate::protocol::{WebSocket, WebSocketConfig}; + +use std::io::{Read, Write}; + +/// Accept the given Stream as a WebSocket. +/// +/// Uses a configuration provided as an argument. Calling it with `None` will use the default one +/// used by `accept()`. +/// +/// This function starts a server WebSocket handshake over the given stream. +/// If you want TLS support, use `native_tls::TlsStream`, `rustls::Stream` or +/// `openssl::ssl::SslStream` for the stream here. Any `Read + Write` streams are supported, +/// including those from `Mio` and others. +pub fn accept_with_config( + stream: S, + config: Option, +) -> Result, HandshakeError>> { + accept_hdr_with_config(stream, NoCallback, config) +} + +/// Accept the given Stream as a WebSocket. +/// +/// This function starts a server WebSocket handshake over the given stream. +/// If you want TLS support, use `native_tls::TlsStream`, `rustls::Stream` or +/// `openssl::ssl::SslStream` for the stream here. Any `Read + Write` streams are supported, +/// including those from `Mio` and others. 
+pub fn accept( + stream: S, +) -> Result, HandshakeError>> { + accept_with_config(stream, None) +} + +/// Accept the given Stream as a WebSocket. +/// +/// Uses a configuration provided as an argument. Calling it with `None` will use the default one +/// used by `accept_hdr()`. +/// +/// This function does the same as `accept()` but accepts an extra callback +/// for header processing. The callback receives headers of the incoming +/// requests and is able to add extra headers to the reply. +pub fn accept_hdr_with_config( + stream: S, + callback: C, + config: Option, +) -> Result, HandshakeError>> { + ServerHandshake::start(stream, callback, config).handshake() +} + +/// Accept the given Stream as a WebSocket. +/// +/// This function does the same as `accept()` but accepts an extra callback +/// for header processing. The callback receives headers of the incoming +/// requests and is able to add extra headers to the reply. +pub fn accept_hdr( + stream: S, + callback: C, +) -> Result, HandshakeError>> { + accept_hdr_with_config(stream, callback, None) +} diff --git a/.cargo-vendor/tungstenite/src/stream.rs b/.cargo-vendor/tungstenite/src/stream.rs new file mode 100644 index 0000000000..4775230b23 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/stream.rs @@ -0,0 +1,145 @@ +//! Convenience wrapper for streams to switch between plain TCP and TLS at runtime. +//! +//! There is no dependency on actual TLS implementations. Everything like +//! `native_tls` or `openssl` will work as long as there is a TLS stream supporting standard +//! `Read + Write` traits. + +#[cfg(feature = "__rustls-tls")] +use std::ops::Deref; +use std::{ + fmt::{self, Debug}, + io::{Read, Result as IoResult, Write}, +}; + +use std::net::TcpStream; + +#[cfg(feature = "native-tls")] +use native_tls_crate::TlsStream; +#[cfg(feature = "__rustls-tls")] +use rustls::StreamOwned; + +/// Stream mode, either plain TCP or TLS. +#[derive(Clone, Copy, Debug)] +pub enum Mode { + /// Plain mode (`ws://` URL). 
+ Plain, + /// TLS mode (`wss://` URL). + Tls, +} + +/// Trait to switch TCP_NODELAY. +pub trait NoDelay { + /// Set the TCP_NODELAY option to the given value. + fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()>; +} + +impl NoDelay for TcpStream { + fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()> { + TcpStream::set_nodelay(self, nodelay) + } +} + +#[cfg(feature = "native-tls")] +impl NoDelay for TlsStream { + fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()> { + self.get_mut().set_nodelay(nodelay) + } +} + +#[cfg(feature = "__rustls-tls")] +impl NoDelay for StreamOwned +where + S: Deref>, + SD: rustls::SideData, + T: Read + Write + NoDelay, +{ + fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()> { + self.sock.set_nodelay(nodelay) + } +} + +/// A stream that might be protected with TLS. +#[non_exhaustive] +pub enum MaybeTlsStream { + /// Unencrypted socket stream. + Plain(S), + #[cfg(feature = "native-tls")] + /// Encrypted socket stream using `native-tls`. + NativeTls(native_tls_crate::TlsStream), + #[cfg(feature = "__rustls-tls")] + /// Encrypted socket stream using `rustls`. 
+ Rustls(rustls::StreamOwned), +} + +impl Debug for MaybeTlsStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Plain(s) => f.debug_tuple("MaybeTlsStream::Plain").field(s).finish(), + #[cfg(feature = "native-tls")] + Self::NativeTls(s) => f.debug_tuple("MaybeTlsStream::NativeTls").field(s).finish(), + #[cfg(feature = "__rustls-tls")] + Self::Rustls(s) => { + struct RustlsStreamDebug<'a, S: Read + Write>( + &'a rustls::StreamOwned, + ); + + impl<'a, S: Read + Write + Debug> Debug for RustlsStreamDebug<'a, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("StreamOwned") + .field("conn", &self.0.conn) + .field("sock", &self.0.sock) + .finish() + } + } + + f.debug_tuple("MaybeTlsStream::Rustls").field(&RustlsStreamDebug(s)).finish() + } + } + } +} + +impl Read for MaybeTlsStream { + fn read(&mut self, buf: &mut [u8]) -> IoResult { + match *self { + MaybeTlsStream::Plain(ref mut s) => s.read(buf), + #[cfg(feature = "native-tls")] + MaybeTlsStream::NativeTls(ref mut s) => s.read(buf), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(ref mut s) => s.read(buf), + } + } +} + +impl Write for MaybeTlsStream { + fn write(&mut self, buf: &[u8]) -> IoResult { + match *self { + MaybeTlsStream::Plain(ref mut s) => s.write(buf), + #[cfg(feature = "native-tls")] + MaybeTlsStream::NativeTls(ref mut s) => s.write(buf), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(ref mut s) => s.write(buf), + } + } + + fn flush(&mut self) -> IoResult<()> { + match *self { + MaybeTlsStream::Plain(ref mut s) => s.flush(), + #[cfg(feature = "native-tls")] + MaybeTlsStream::NativeTls(ref mut s) => s.flush(), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(ref mut s) => s.flush(), + } + } +} + +impl NoDelay for MaybeTlsStream { + fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()> { + match *self { + MaybeTlsStream::Plain(ref mut s) => s.set_nodelay(nodelay), + #[cfg(feature = "native-tls")] 
+ MaybeTlsStream::NativeTls(ref mut s) => s.set_nodelay(nodelay), + #[cfg(feature = "__rustls-tls")] + MaybeTlsStream::Rustls(ref mut s) => s.set_nodelay(nodelay), + } + } +} diff --git a/.cargo-vendor/tungstenite/src/tls.rs b/.cargo-vendor/tungstenite/src/tls.rs new file mode 100644 index 0000000000..836b7aef4d --- /dev/null +++ b/.cargo-vendor/tungstenite/src/tls.rs @@ -0,0 +1,228 @@ +//! Connection helper. +use std::io::{Read, Write}; + +use crate::{ + client::{client_with_config, uri_mode, IntoClientRequest}, + error::UrlError, + handshake::client::Response, + protocol::WebSocketConfig, + stream::MaybeTlsStream, + ClientHandshake, Error, HandshakeError, Result, WebSocket, +}; + +/// A connector that can be used when establishing connections, allowing to control whether +/// `native-tls` or `rustls` is used to create a TLS connection. Or TLS can be disabled with the +/// `Plain` variant. +#[non_exhaustive] +#[allow(missing_debug_implementations)] +pub enum Connector { + /// Plain (non-TLS) connector. + Plain, + /// `native-tls` TLS connector. + #[cfg(feature = "native-tls")] + NativeTls(native_tls_crate::TlsConnector), + /// `rustls` TLS connector. 
+ #[cfg(feature = "__rustls-tls")] + Rustls(std::sync::Arc), +} + +mod encryption { + #[cfg(feature = "native-tls")] + pub mod native_tls { + use native_tls_crate::{HandshakeError as TlsHandshakeError, TlsConnector}; + + use std::io::{Read, Write}; + + use crate::{ + error::TlsError, + stream::{MaybeTlsStream, Mode}, + Error, Result, + }; + + pub fn wrap_stream( + socket: S, + domain: &str, + mode: Mode, + tls_connector: Option, + ) -> Result> + where + S: Read + Write, + { + match mode { + Mode::Plain => Ok(MaybeTlsStream::Plain(socket)), + Mode::Tls => { + let try_connector = tls_connector.map_or_else(TlsConnector::new, Ok); + let connector = try_connector.map_err(TlsError::Native)?; + let connected = connector.connect(domain, socket); + match connected { + Err(e) => match e { + TlsHandshakeError::Failure(f) => Err(Error::Tls(f.into())), + TlsHandshakeError::WouldBlock(_) => { + panic!("Bug: TLS handshake not blocked") + } + }, + Ok(s) => Ok(MaybeTlsStream::NativeTls(s)), + } + } + } + } + } + + #[cfg(feature = "__rustls-tls")] + pub mod rustls { + use rustls::{ClientConfig, ClientConnection, RootCertStore, StreamOwned}; + use rustls_pki_types::ServerName; + + use std::{ + convert::TryFrom, + io::{Read, Write}, + sync::Arc, + }; + + use crate::{ + error::TlsError, + stream::{MaybeTlsStream, Mode}, + Result, + }; + + pub fn wrap_stream( + socket: S, + domain: &str, + mode: Mode, + tls_connector: Option>, + ) -> Result> + where + S: Read + Write, + { + match mode { + Mode::Plain => Ok(MaybeTlsStream::Plain(socket)), + Mode::Tls => { + let config = match tls_connector { + Some(config) => config, + None => { + #[allow(unused_mut)] + let mut root_store = RootCertStore::empty(); + + #[cfg(feature = "rustls-tls-native-roots")] + { + let native_certs = rustls_native_certs::load_native_certs()?; + let total_number = native_certs.len(); + let (number_added, number_ignored) = + root_store.add_parsable_certificates(native_certs); + log::debug!("Added 
{number_added}/{total_number} native root certificates (ignored {number_ignored})"); + } + #[cfg(feature = "rustls-tls-webpki-roots")] + { + root_store.extend(webpki_roots::TLS_SERVER_ROOTS.iter().cloned()); + } + + Arc::new( + ClientConfig::builder() + .with_root_certificates(root_store) + .with_no_client_auth(), + ) + } + }; + let domain = ServerName::try_from(domain) + .map_err(|_| TlsError::InvalidDnsName)? + .to_owned(); + let client = ClientConnection::new(config, domain).map_err(TlsError::Rustls)?; + let stream = StreamOwned::new(client, socket); + + Ok(MaybeTlsStream::Rustls(stream)) + } + } + } + } + + pub mod plain { + use std::io::{Read, Write}; + + use crate::{ + error::UrlError, + stream::{MaybeTlsStream, Mode}, + Error, Result, + }; + + pub fn wrap_stream(socket: S, mode: Mode) -> Result> + where + S: Read + Write, + { + match mode { + Mode::Plain => Ok(MaybeTlsStream::Plain(socket)), + Mode::Tls => Err(Error::Url(UrlError::TlsFeatureNotEnabled)), + } + } + } +} + +type TlsHandshakeError = HandshakeError>>; + +/// Creates a WebSocket handshake from a request and a stream, +/// upgrading the stream to TLS if required. +pub fn client_tls( + request: R, + stream: S, +) -> Result<(WebSocket>, Response), TlsHandshakeError> +where + R: IntoClientRequest, + S: Read + Write, +{ + client_tls_with_config(request, stream, None, None) +} + +/// The same as [`client_tls()`] but one can specify a websocket configuration, +/// and an optional connector. If no connector is specified, a default one will +/// be created. +/// +/// Please refer to [`client_tls()`] for more details. 
+pub fn client_tls_with_config( + request: R, + stream: S, + config: Option, + connector: Option, +) -> Result<(WebSocket>, Response), TlsHandshakeError> +where + R: IntoClientRequest, + S: Read + Write, +{ + let request = request.into_client_request()?; + + #[cfg(any(feature = "native-tls", feature = "__rustls-tls"))] + let domain = match request.uri().host() { + Some(d) => Ok(d.to_string()), + None => Err(Error::Url(UrlError::NoHostName)), + }?; + + let mode = uri_mode(request.uri())?; + + let stream = match connector { + Some(conn) => match conn { + #[cfg(feature = "native-tls")] + Connector::NativeTls(conn) => { + self::encryption::native_tls::wrap_stream(stream, &domain, mode, Some(conn)) + } + #[cfg(feature = "__rustls-tls")] + Connector::Rustls(conn) => { + self::encryption::rustls::wrap_stream(stream, &domain, mode, Some(conn)) + } + Connector::Plain => self::encryption::plain::wrap_stream(stream, mode), + }, + None => { + #[cfg(feature = "native-tls")] + { + self::encryption::native_tls::wrap_stream(stream, &domain, mode, None) + } + #[cfg(all(feature = "__rustls-tls", not(feature = "native-tls")))] + { + self::encryption::rustls::wrap_stream(stream, &domain, mode, None) + } + #[cfg(not(any(feature = "native-tls", feature = "__rustls-tls")))] + { + self::encryption::plain::wrap_stream(stream, mode) + } + } + }?; + + client_with_config(request, stream, config) +} diff --git a/.cargo-vendor/tungstenite/src/util.rs b/.cargo-vendor/tungstenite/src/util.rs new file mode 100644 index 0000000000..f40ca43c07 --- /dev/null +++ b/.cargo-vendor/tungstenite/src/util.rs @@ -0,0 +1,58 @@ +//! Helper traits to ease non-blocking handling. + +use std::{ + io::{Error as IoError, ErrorKind as IoErrorKind}, + result::Result as StdResult, +}; + +use crate::error::Error; + +/// Non-blocking IO handling. +pub trait NonBlockingError: Sized { + /// Convert WouldBlock to None and don't touch other errors. 
+ fn into_non_blocking(self) -> Option; +} + +impl NonBlockingError for IoError { + fn into_non_blocking(self) -> Option { + match self.kind() { + IoErrorKind::WouldBlock => None, + _ => Some(self), + } + } +} + +impl NonBlockingError for Error { + fn into_non_blocking(self) -> Option { + match self { + Error::Io(e) => e.into_non_blocking().map(|e| e.into()), + x => Some(x), + } + } +} + +/// Non-blocking IO wrapper. +/// +/// This trait is implemented for `Result`. +pub trait NonBlockingResult { + /// Type of the converted result: `Result, E>` + type Result; + /// Perform the non-block conversion. + fn no_block(self) -> Self::Result; +} + +impl NonBlockingResult for StdResult +where + E: NonBlockingError, +{ + type Result = StdResult, E>; + fn no_block(self) -> Self::Result { + match self { + Ok(x) => Ok(Some(x)), + Err(e) => match e.into_non_blocking() { + Some(e) => Err(e), + None => Ok(None), + }, + } + } +} diff --git a/.cargo-vendor/utf-8/.cargo-checksum.json b/.cargo-vendor/utf-8/.cargo-checksum.json new file mode 100644 index 0000000000..b31e50cf7a --- /dev/null +++ b/.cargo-vendor/utf-8/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"e0009dd2e366cbb98b75dbadb9de9e5662c5f3fe971e99dd98eaa9adb7ecb648","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"e5cd8b3b67c2962e13b0aa95fc2af9152999e1bd333df8be8a3be5eab53e540a","benches/from_utf8_lossy.rs":"ed57fc9fca84d160a70fa06bcf6658adca9f4518cb6e0be6a52accc291736b0e","src/lib.rs":"32e657c72a7a895b26288f271e3194270002548692368bdb1ef32b5698975395","src/lossy.rs":"c7d3f193fe04b60145a5e32f5e6c55c181664f82309ef59bb15533194d69e345","src/read.rs":"6eae22954e18a5afa8f62c876498a643563c5b68d03329a417aa354a28108046","tests/unit.rs":"9e920a552549009191d61147d60196fcce9cbc7f2065d33b6d9c757e258a9edd"},"package":"09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"} \ No newline at 
end of file diff --git a/.cargo-vendor/utf-8/Cargo.toml b/.cargo-vendor/utf-8/Cargo.toml new file mode 100644 index 0000000000..c01a69d8da --- /dev/null +++ b/.cargo-vendor/utf-8/Cargo.toml @@ -0,0 +1,29 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "utf-8" +version = "0.7.6" +authors = ["Simon Sapin "] +description = "Incremental, zero-copy UTF-8 decoding with error handling" +license = "MIT OR Apache-2.0" +repository = "https://github.com/SimonSapin/rust-utf8" +[profile.bench] + +[profile.test] + +[lib] +name = "utf8" +test = false +bench = false + +[dependencies] diff --git a/.cargo-vendor/utf-8/LICENSE-APACHE b/.cargo-vendor/utf-8/LICENSE-APACHE new file mode 100644 index 0000000000..1b5ec8b78e --- /dev/null +++ b/.cargo-vendor/utf-8/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/.cargo-vendor/utf-8/LICENSE-MIT b/.cargo-vendor/utf-8/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/.cargo-vendor/utf-8/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo-vendor/utf-8/README.md b/.cargo-vendor/utf-8/README.md new file mode 100644 index 0000000000..145889b373 --- /dev/null +++ b/.cargo-vendor/utf-8/README.md @@ -0,0 +1,5 @@ +# rust-utf8 + +Incremental, zero-copy UTF-8 decoding for Rust + +[Documentation](https://docs.rs/utf-8/) diff --git a/.cargo-vendor/utf-8/benches/from_utf8_lossy.rs b/.cargo-vendor/utf-8/benches/from_utf8_lossy.rs new file mode 100644 index 0000000000..95d9edf392 --- /dev/null +++ b/.cargo-vendor/utf-8/benches/from_utf8_lossy.rs @@ -0,0 +1,30 @@ +#![feature(test)] + +extern crate test; +extern crate utf8; + +#[path = "../tests/shared/data.rs"] +mod data; + +#[path = "../tests/shared/string_from_utf8_lossy.rs"] +mod string_from_utf8_lossy; + +#[bench] +fn bench_our_string_from_utf8_lossy(bencher: &mut test::Bencher) { + bencher.bytes = data::DECODED_LOSSY.iter().map(|&(input, _expected)| input.len() as u64).sum(); + bencher.iter(|| { + for &(input, _expected) in data::DECODED_LOSSY { + test::black_box(string_from_utf8_lossy::string_from_utf8_lossy(input)); + } + }) +} + +#[bench] +fn bench_std_string_from_utf8_lossy(bencher: &mut test::Bencher) { + bencher.bytes = data::DECODED_LOSSY.iter().map(|&(input, _expected)| input.len() as u64).sum(); + bencher.iter(|| { + for &(input, _expected) in data::DECODED_LOSSY { + test::black_box(String::from_utf8_lossy(input)); + } + }) +} diff --git a/.cargo-vendor/utf-8/src/lib.rs b/.cargo-vendor/utf-8/src/lib.rs new file mode 100644 index 0000000000..ec223f2096 --- /dev/null +++ b/.cargo-vendor/utf-8/src/lib.rs @@ -0,0 +1,186 @@ +mod lossy; +mod read; + +pub use lossy::LossyDecoder; +pub use read::{BufReadDecoder, BufReadDecoderError}; + +use std::cmp; +use std::error::Error; +use std::fmt; +use std::str; + +/// The replacement character, U+FFFD. In lossy decoding, insert it for every decoding error. 
+pub const REPLACEMENT_CHARACTER: &'static str = "\u{FFFD}"; + +#[derive(Debug, Copy, Clone)] +pub enum DecodeError<'a> { + /// In lossy decoding insert `valid_prefix`, then `"\u{FFFD}"`, + /// then call `decode()` again with `remaining_input`. + Invalid { + valid_prefix: &'a str, + invalid_sequence: &'a [u8], + remaining_input: &'a [u8], + }, + + /// Call the `incomplete_suffix.try_complete` method with more input when available. + /// If no more input is available, this is an invalid byte sequence. + Incomplete { + valid_prefix: &'a str, + incomplete_suffix: Incomplete, + }, +} + +impl<'a> fmt::Display for DecodeError<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + DecodeError::Invalid { + valid_prefix, + invalid_sequence, + remaining_input, + } => write!( + f, + "found invalid byte sequence {invalid_sequence:02x?} after \ + {valid_byte_count} valid bytes, followed by {unprocessed_byte_count} more \ + unprocessed bytes", + invalid_sequence = invalid_sequence, + valid_byte_count = valid_prefix.len(), + unprocessed_byte_count = remaining_input.len() + ), + DecodeError::Incomplete { + valid_prefix, + incomplete_suffix, + } => write!( + f, + "found incomplete byte sequence {incomplete_suffix:02x?} after \ + {valid_byte_count} bytes", + incomplete_suffix = incomplete_suffix, + valid_byte_count = valid_prefix.len() + ), + } + } +} + +impl<'a> Error for DecodeError<'a> {} + +#[derive(Debug, Copy, Clone)] +pub struct Incomplete { + pub buffer: [u8; 4], + pub buffer_len: u8, +} + +pub fn decode(input: &[u8]) -> Result<&str, DecodeError> { + let error = match str::from_utf8(input) { + Ok(valid) => return Ok(valid), + Err(error) => error, + }; + + // FIXME: separate function from here to guide inlining? 
+ let (valid, after_valid) = input.split_at(error.valid_up_to()); + let valid = unsafe { + str::from_utf8_unchecked(valid) + }; + + match error.error_len() { + Some(invalid_sequence_length) => { + let (invalid, rest) = after_valid.split_at(invalid_sequence_length); + Err(DecodeError::Invalid { + valid_prefix: valid, + invalid_sequence: invalid, + remaining_input: rest + }) + } + None => { + Err(DecodeError::Incomplete { + valid_prefix: valid, + incomplete_suffix: Incomplete::new(after_valid), + }) + } + } +} + +impl Incomplete { + pub fn empty() -> Self { + Incomplete { + buffer: [0, 0, 0, 0], + buffer_len: 0, + } + } + + pub fn is_empty(&self) -> bool { + self.buffer_len == 0 + } + + pub fn new(bytes: &[u8]) -> Self { + let mut buffer = [0, 0, 0, 0]; + let len = bytes.len(); + buffer[..len].copy_from_slice(bytes); + Incomplete { + buffer: buffer, + buffer_len: len as u8, + } + } + + /// * `None`: still incomplete, call `try_complete` again with more input. + /// If no more input is available, this is invalid byte sequence. + /// * `Some((result, remaining_input))`: We’re done with this `Incomplete`. + /// To keep decoding, pass `remaining_input` to `decode()`. 
+ pub fn try_complete<'input>(&mut self, input: &'input [u8]) + -> Option<(Result<&str, &[u8]>, &'input [u8])> { + let (consumed, opt_result) = self.try_complete_offsets(input); + let result = opt_result?; + let remaining_input = &input[consumed..]; + let result_bytes = self.take_buffer(); + let result = match result { + Ok(()) => Ok(unsafe { str::from_utf8_unchecked(result_bytes) }), + Err(()) => Err(result_bytes), + }; + Some((result, remaining_input)) + } + + fn take_buffer(&mut self) -> &[u8] { + let len = self.buffer_len as usize; + self.buffer_len = 0; + &self.buffer[..len as usize] + } + + /// (consumed_from_input, None): not enough input + /// (consumed_from_input, Some(Err(()))): error bytes in buffer + /// (consumed_from_input, Some(Ok(()))): UTF-8 string in buffer + fn try_complete_offsets(&mut self, input: &[u8]) -> (usize, Option>) { + let initial_buffer_len = self.buffer_len as usize; + let copied_from_input; + { + let unwritten = &mut self.buffer[initial_buffer_len..]; + copied_from_input = cmp::min(unwritten.len(), input.len()); + unwritten[..copied_from_input].copy_from_slice(&input[..copied_from_input]); + } + let spliced = &self.buffer[..initial_buffer_len + copied_from_input]; + match str::from_utf8(spliced) { + Ok(_) => { + self.buffer_len = spliced.len() as u8; + (copied_from_input, Some(Ok(()))) + } + Err(error) => { + let valid_up_to = error.valid_up_to(); + if valid_up_to > 0 { + let consumed = valid_up_to.checked_sub(initial_buffer_len).unwrap(); + self.buffer_len = valid_up_to as u8; + (consumed, Some(Ok(()))) + } else { + match error.error_len() { + Some(invalid_sequence_length) => { + let consumed = invalid_sequence_length + .checked_sub(initial_buffer_len).unwrap(); + self.buffer_len = invalid_sequence_length as u8; + (consumed, Some(Err(()))) + } + None => { + self.buffer_len = spliced.len() as u8; + (copied_from_input, None) + } + } + } + } + } + } +} diff --git a/.cargo-vendor/utf-8/src/lossy.rs b/.cargo-vendor/utf-8/src/lossy.rs 
new file mode 100644 index 0000000000..00bcdecf0a --- /dev/null +++ b/.cargo-vendor/utf-8/src/lossy.rs @@ -0,0 +1,92 @@ +use super::*; + +/// A push-based, lossy decoder for UTF-8. +/// Errors are replaced with the U+FFFD replacement character. +/// +/// Users “push” bytes into the decoder, which in turn “pushes” `&str` slices into a callback. +/// +/// For example, `String::from_utf8_lossy` (but returning `String` instead of `Cow`) +/// can be rewritten as: +/// +/// ```rust +/// fn string_from_utf8_lossy(input: &[u8]) -> String { +/// let mut string = String::new(); +/// utf8::LossyDecoder::new(|s| string.push_str(s)).feed(input); +/// string +/// } +/// ``` +/// +/// **Note:** Dropping the decoder signals the end of the input: +/// If the last input chunk ended with an incomplete byte sequence for a code point, +/// this is an error and a replacement character is emitted. +/// Use `std::mem::forget` to inhibit this behavior. +pub struct LossyDecoder { + push_str: F, + incomplete: Incomplete, +} + +impl LossyDecoder { + /// Create a new decoder from a callback. + #[inline] + pub fn new(push_str: F) -> Self { + LossyDecoder { + push_str: push_str, + incomplete: Incomplete { + buffer: [0, 0, 0, 0], + buffer_len: 0, + }, + } + } + + /// Feed one chunk of input into the decoder. + /// + /// The input is decoded lossily + /// and the callback called once or more with `&str` string slices. + /// + /// If the UTF-8 byte sequence for one code point was split into this bytes chunk + /// and previous bytes chunks, it will be correctly pieced back together. 
+ pub fn feed(&mut self, mut input: &[u8]) { + if self.incomplete.buffer_len > 0 { + match self.incomplete.try_complete(input) { + Some((Ok(s), remaining)) => { + (self.push_str)(s); + input = remaining + } + Some((Err(_), remaining)) => { + (self.push_str)(REPLACEMENT_CHARACTER); + input = remaining + } + None => { + return + } + } + } + loop { + match decode(input) { + Ok(s) => { + (self.push_str)(s); + return + } + Err(DecodeError::Incomplete { valid_prefix, incomplete_suffix }) => { + (self.push_str)(valid_prefix); + self.incomplete = incomplete_suffix; + return + } + Err(DecodeError::Invalid { valid_prefix, remaining_input, .. }) => { + (self.push_str)(valid_prefix); + (self.push_str)(REPLACEMENT_CHARACTER); + input = remaining_input + } + } + } + } +} + +impl Drop for LossyDecoder { + #[inline] + fn drop(&mut self) { + if self.incomplete.buffer_len > 0 { + (self.push_str)(REPLACEMENT_CHARACTER) + } + } +} diff --git a/.cargo-vendor/utf-8/src/read.rs b/.cargo-vendor/utf-8/src/read.rs new file mode 100644 index 0000000000..5e38f54a17 --- /dev/null +++ b/.cargo-vendor/utf-8/src/read.rs @@ -0,0 +1,167 @@ +use std::io::{self, BufRead}; +use std::error::Error; +use std::fmt; +use std::str; +use super::*; + +/// Wraps a `std::io::BufRead` buffered byte stream and decode it as UTF-8. +pub struct BufReadDecoder { + buf_read: B, + bytes_consumed: usize, + incomplete: Incomplete, +} + +#[derive(Debug)] +pub enum BufReadDecoderError<'a> { + /// Represents one UTF-8 error in the byte stream. + /// + /// In lossy decoding, each such error should be replaced with U+FFFD. + /// (See `BufReadDecoder::next_lossy` and `BufReadDecoderError::lossy`.) 
+ InvalidByteSequence(&'a [u8]), + + /// An I/O error from the underlying byte stream + Io(io::Error), +} + +impl<'a> BufReadDecoderError<'a> { + /// Replace UTF-8 errors with U+FFFD + pub fn lossy(self) -> Result<&'static str, io::Error> { + match self { + BufReadDecoderError::Io(error) => Err(error), + BufReadDecoderError::InvalidByteSequence(_) => Ok(REPLACEMENT_CHARACTER), + } + } +} + +impl<'a> fmt::Display for BufReadDecoderError<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + BufReadDecoderError::InvalidByteSequence(bytes) => { + write!(f, "invalid byte sequence: {:02x?}", bytes) + } + BufReadDecoderError::Io(ref err) => write!(f, "underlying bytestream error: {}", err), + } + } +} + +impl<'a> Error for BufReadDecoderError<'a> { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match *self { + BufReadDecoderError::InvalidByteSequence(_) => None, + BufReadDecoderError::Io(ref err) => Some(err), + } + } +} + +impl BufReadDecoder { + /// This is to `Read::read_to_string` what `String::from_utf8_lossy` is to `String::from_utf8`. + pub fn read_to_string_lossy(buf_read: B) -> io::Result { + let mut decoder = Self::new(buf_read); + let mut string = String::new(); + while let Some(result) = decoder.next_lossy() { + string.push_str(result?) + } + Ok(string) + } + + pub fn new(buf_read: B) -> Self { + Self { + buf_read, + bytes_consumed: 0, + incomplete: Incomplete::empty(), + } + } + + /// Same as `BufReadDecoder::next_strict`, but replace UTF-8 errors with U+FFFD. + pub fn next_lossy(&mut self) -> Option> { + self.next_strict().map(|result| result.or_else(|e| e.lossy())) + } + + /// Decode and consume the next chunk of UTF-8 input. + /// + /// This method is intended to be called repeatedly until it returns `None`, + /// which represents EOF from the underlying byte stream. 
+ /// This is similar to `Iterator::next`, + /// except that decoded chunks borrow the decoder (~iterator) + /// so they need to be handled or copied before the next chunk can start decoding. + pub fn next_strict(&mut self) -> Option> { + enum BytesSource { + BufRead(usize), + Incomplete, + } + macro_rules! try_io { + ($io_result: expr) => { + match $io_result { + Ok(value) => value, + Err(error) => return Some(Err(BufReadDecoderError::Io(error))) + } + } + } + let (source, result) = loop { + if self.bytes_consumed > 0 { + self.buf_read.consume(self.bytes_consumed); + self.bytes_consumed = 0; + } + let buf = try_io!(self.buf_read.fill_buf()); + + // Force loop iteration to go through an explicit `continue` + enum Unreachable {} + let _: Unreachable = if self.incomplete.is_empty() { + if buf.is_empty() { + return None // EOF + } + match str::from_utf8(buf) { + Ok(_) => { + break (BytesSource::BufRead(buf.len()), Ok(())) + } + Err(error) => { + let valid_up_to = error.valid_up_to(); + if valid_up_to > 0 { + break (BytesSource::BufRead(valid_up_to), Ok(())) + } + match error.error_len() { + Some(invalid_sequence_length) => { + break (BytesSource::BufRead(invalid_sequence_length), Err(())) + } + None => { + self.bytes_consumed = buf.len(); + self.incomplete = Incomplete::new(buf); + // need more input bytes + continue + } + } + } + } + } else { + if buf.is_empty() { + break (BytesSource::Incomplete, Err(())) // EOF with incomplete code point + } + let (consumed, opt_result) = self.incomplete.try_complete_offsets(buf); + self.bytes_consumed = consumed; + match opt_result { + None => { + // need more input bytes + continue + } + Some(result) => { + break (BytesSource::Incomplete, result) + } + } + }; + }; + let bytes = match source { + BytesSource::BufRead(byte_count) => { + self.bytes_consumed = byte_count; + let buf = try_io!(self.buf_read.fill_buf()); + &buf[..byte_count] + } + BytesSource::Incomplete => { + self.incomplete.take_buffer() + } + }; + match result { + 
Ok(()) => Some(Ok(unsafe { str::from_utf8_unchecked(bytes) })), + Err(()) => Some(Err(BufReadDecoderError::InvalidByteSequence(bytes))), + } + } +} diff --git a/.cargo-vendor/utf-8/tests/unit.rs b/.cargo-vendor/utf-8/tests/unit.rs new file mode 100644 index 0000000000..6839e84f20 --- /dev/null +++ b/.cargo-vendor/utf-8/tests/unit.rs @@ -0,0 +1,197 @@ +extern crate utf8; + +use std::borrow::Cow; +use std::collections::VecDeque; +use std::io; +use utf8::*; + +/// A re-implementation of std::str::from_utf8 +pub fn str_from_utf8(input: &[u8]) -> Result<&str, usize> { + match decode(input) { + Ok(s) => return Ok(s), + Err(DecodeError::Invalid { valid_prefix, .. }) | + Err(DecodeError::Incomplete { valid_prefix, .. }) => Err(valid_prefix.len()), + } +} + +#[test] +fn test_str_from_utf8() { + let xs = b"hello"; + assert_eq!(str_from_utf8(xs), Ok("hello")); + + let xs = "ศไทย中华Việt Nam".as_bytes(); + assert_eq!(str_from_utf8(xs), Ok("ศไทย中华Việt Nam")); + + let xs = b"hello\xFF"; + assert!(str_from_utf8(xs).is_err()); +} + +#[test] +fn test_is_utf8() { + // Chars of 1, 2, 3, and 4 bytes + assert!(str_from_utf8("eé€\u{10000}".as_bytes()).is_ok()); + // invalid prefix + assert!(str_from_utf8(&[0x80]).is_err()); + // invalid 2 byte prefix + assert!(str_from_utf8(&[0xc0]).is_err()); + assert!(str_from_utf8(&[0xc0, 0x10]).is_err()); + // invalid 3 byte prefix + assert!(str_from_utf8(&[0xe0]).is_err()); + assert!(str_from_utf8(&[0xe0, 0x10]).is_err()); + assert!(str_from_utf8(&[0xe0, 0xff, 0x10]).is_err()); + // invalid 4 byte prefix + assert!(str_from_utf8(&[0xf0]).is_err()); + assert!(str_from_utf8(&[0xf0, 0x10]).is_err()); + assert!(str_from_utf8(&[0xf0, 0xff, 0x10]).is_err()); + assert!(str_from_utf8(&[0xf0, 0xff, 0xff, 0x10]).is_err()); + + // deny overlong encodings + assert!(str_from_utf8(&[0xc0, 0x80]).is_err()); + assert!(str_from_utf8(&[0xc0, 0xae]).is_err()); + assert!(str_from_utf8(&[0xe0, 0x80, 0x80]).is_err()); + assert!(str_from_utf8(&[0xe0, 0x80, 0xaf]).is_err()); 
+ assert!(str_from_utf8(&[0xe0, 0x81, 0x81]).is_err()); + assert!(str_from_utf8(&[0xf0, 0x82, 0x82, 0xac]).is_err()); + assert!(str_from_utf8(&[0xf4, 0x90, 0x80, 0x80]).is_err()); + + // deny surrogates + assert!(str_from_utf8(&[0xED, 0xA0, 0x80]).is_err()); + assert!(str_from_utf8(&[0xED, 0xBF, 0xBF]).is_err()); + + assert!(str_from_utf8(&[0xC2, 0x80]).is_ok()); + assert!(str_from_utf8(&[0xDF, 0xBF]).is_ok()); + assert!(str_from_utf8(&[0xE0, 0xA0, 0x80]).is_ok()); + assert!(str_from_utf8(&[0xED, 0x9F, 0xBF]).is_ok()); + assert!(str_from_utf8(&[0xEE, 0x80, 0x80]).is_ok()); + assert!(str_from_utf8(&[0xEF, 0xBF, 0xBF]).is_ok()); + assert!(str_from_utf8(&[0xF0, 0x90, 0x80, 0x80]).is_ok()); + assert!(str_from_utf8(&[0xF4, 0x8F, 0xBF, 0xBF]).is_ok()); +} + +/// A re-implementation of String::from_utf8_lossy +pub fn string_from_utf8_lossy(input: &[u8]) -> Cow { + let mut result = decode(input); + if let Ok(s) = result { + return s.into() + } + let mut string = String::with_capacity(input.len() + REPLACEMENT_CHARACTER.len()); + loop { + match result { + Ok(s) => { + string.push_str(s); + return string.into() + } + Err(DecodeError::Incomplete { valid_prefix, .. }) => { + string.push_str(valid_prefix); + string.push_str(REPLACEMENT_CHARACTER); + return string.into() + } + Err(DecodeError::Invalid { valid_prefix, remaining_input, .. 
}) => { + string.push_str(valid_prefix); + string.push_str(REPLACEMENT_CHARACTER); + result = decode(remaining_input); + } + } + } +} + +pub const DECODED_LOSSY: &'static [(&'static [u8], &'static str)] = &[ + (b"hello", "hello"), + (b"\xe0\xb8\xa8\xe0\xb9\x84\xe0\xb8\x97\xe0\xb8\xa2\xe4\xb8\xad\xe5\x8d\x8e", "ศไทย中华"), + (b"Vi\xe1\xbb\x87t Nam", "Việt Nam"), + (b"Hello\xC2 There\xFF ", "Hello\u{FFFD} There\u{FFFD} "), + (b"Hello\xC0\x80 There", "Hello\u{FFFD}\u{FFFD} There"), + (b"\xE6\x83 Goodbye", "\u{FFFD} Goodbye"), + (b"\xF5foo\xF5\x80bar", "\u{FFFD}foo\u{FFFD}\u{FFFD}bar"), + (b"\xF5foo\xF5\xC2", "\u{FFFD}foo\u{FFFD}\u{FFFD}"), + (b"\xF1foo\xF1\x80bar\xF1\x80\x80baz", "\u{FFFD}foo\u{FFFD}bar\u{FFFD}baz"), + (b"\xF4foo\xF4\x80bar\xF4\xBFbaz", "\u{FFFD}foo\u{FFFD}bar\u{FFFD}\u{FFFD}baz"), + (b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar", "\u{FFFD}\u{FFFD}\u{FFFD}\u{FFFD}foo\u{10000}bar"), + (b"\xF0\x90\x80foo", "\u{FFFD}foo"), + // surrogates + (b"\xED\xA0\x80foo\xED\xBF\xBFbar", "\u{FFFD}\u{FFFD}\u{FFFD}foo\u{FFFD}\u{FFFD}\u{FFFD}bar"), +]; + +#[test] +fn test_string_from_utf8_lossy() { + for &(input, expected) in DECODED_LOSSY { + assert_eq!(string_from_utf8_lossy(input), expected); + } +} + +pub fn all_partitions<'a, F>(input: &'a [u8], f: F) + where F: Fn(&[&[u8]]) +{ + + fn all_partitions_inner<'a, F>(chunks: &mut Vec<&'a [u8]>, input: &'a [u8], f: &F) + where F: Fn(&[&[u8]]) + { + if input.is_empty() { + f(chunks) + } + for i in 1..(input.len() + 1) { + chunks.push(&input[..i]); + all_partitions_inner(chunks, &input[i..], f); + chunks.pop(); + } + } + + let mut chunks = Vec::new(); + all_partitions_inner(&mut chunks, input, &f); + assert_eq!(chunks.len(), 0); +} + +#[test] +fn test_incremental_decoder() { + for &(input, expected) in DECODED_LOSSY { + all_partitions(input, |chunks| { + let mut string = String::new(); + { + let mut decoder = LossyDecoder::new(|s| string.push_str(s)); + for &chunk in &*chunks { + decoder.feed(chunk); + } + } + 
assert_eq!(string, expected); + }); + } +} + +#[test] +fn test_bufread_decoder() { + for &(input, expected) in DECODED_LOSSY { + all_partitions(input, |chunks| { + let chunks = Chunks(chunks.to_vec().into()); + let string = BufReadDecoder::read_to_string_lossy(chunks).unwrap(); + assert_eq!(string, expected) + }); + } +} + +struct Chunks<'a>(VecDeque<&'a [u8]>); + +impl<'a> io::Read for Chunks<'a> { + fn read(&mut self, _: &mut [u8]) -> io::Result { + unimplemented!() + } +} + +impl<'a> io::BufRead for Chunks<'a> { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + Ok(*self.0.front().unwrap()) + } + + fn consume(&mut self, bytes: usize) { + { + let front = self.0.front_mut().unwrap(); + *front = &front[bytes..]; + if !front.is_empty() { + return + } + } + if self.0.len() > 1 { + self.0.pop_front(); + } + } + +} diff --git a/.github/typos.toml b/.github/typos.toml index b84d436c27..1ae92b3de3 100644 --- a/.github/typos.toml +++ b/.github/typos.toml @@ -8,3 +8,8 @@ extend-ignore-re = [ extend-ignore-re = [ "ws", ] + +[files] +extend-exclude = [ + "go.mod" +] diff --git a/Cargo.lock b/Cargo.lock index 2e65f2a820..ca8292aabc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,6 +90,18 @@ version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95" +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -136,13 +148,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.3.4", "bitflags 1.3.2", "bytes", 
"futures-util", - "http", - "http-body", - "hyper", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 0.1.2", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +dependencies = [ + "async-trait", + "axum-core 0.4.3", + "base64", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -151,10 +193,17 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sha1", + "sync_wrapper 1.0.1", + "tokio", + "tokio-tungstenite", "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -166,12 +215,33 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 0.2.12", + "http-body 0.4.6", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -351,6 +421,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = 
"conmon-common" version = "0.6.6" @@ -364,6 +443,8 @@ name = "conmonrs" version = "0.6.6" dependencies = [ "anyhow", + "async-channel", + "axum 0.7.5", "capnp", "capnp-rpc", "clap", @@ -399,6 +480,7 @@ dependencies = [ "tokio-eventfd", "tokio-seqpacket", "tokio-util", + "tower-http", "tracing", "tracing-opentelemetry", "tracing-subscriber", @@ -500,6 +582,12 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + [[package]] name = "digest" version = "0.10.7" @@ -545,6 +633,27 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "1.9.0" @@ -784,7 +893,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.12", "indexmap 2.2.6", "slab", "tokio", @@ -842,6 +951,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -849,7 +969,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" 
dependencies = [ "bytes", - "http", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -876,8 +1019,8 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", - "http-body", + "http 0.2.12", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -889,18 +1032,52 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", +] + [[package]] name = "hyper-timeout" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper", + "hyper 0.14.30", "pin-project-lite", "tokio", "tokio-io-timeout", ] +[[package]] +name = "hyper-util" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "hyper 1.4.1", + "pin-project-lite", + "tokio", +] + [[package]] name = "iana-time-zone" version = "0.1.60" @@ -1353,7 +1530,7 @@ checksum = 
"a94c69209c05319cdf7460c6d4c055ed102be242a0a6245835d7bc42c6ec7f54" dependencies = [ "async-trait", "futures-core", - "http", + "http 0.2.12", "opentelemetry", "opentelemetry-proto", "opentelemetry_sdk", @@ -1768,6 +1945,39 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha2" version = "0.10.8" @@ -1896,6 +2106,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "tempfile" version = "3.13.0" @@ -2068,6 +2284,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite", +] + [[package]] name = "tokio-util" version = "0.7.11" @@ -2090,13 +2318,13 @@ checksum = 
"76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" dependencies = [ "async-stream", "async-trait", - "axum", + "axum 0.6.20", "base64", "bytes", "h2", - "http", - "http-body", - "hyper", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", "hyper-timeout", "percent-encoding", "pin-project", @@ -2129,6 +2357,23 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -2147,6 +2392,7 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2222,6 +2468,25 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log", + "rand", + "sha1", + "thiserror", + "url", + "utf-8", +] + [[package]] name = "typenum" version = "1.17.0" @@ -2310,6 +2575,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.2" diff --git 
a/Makefile b/Makefile index 5cd7e0afd0..79ed76342e 100644 --- a/Makefile +++ b/Makefile @@ -72,7 +72,7 @@ integration-static: .install.ginkgo ## Run the integration tests using the stati $(MAKE) release-static; \ fi && \ export RUNTIME_BINARY="$(RUNTIME_PATH)" && \ - export MAX_RSS_KB=9500 && \ + export MAX_RSS_KB=12000 && \ "$(GOTOOLS_BINDIR)/ginkgo" $(TEST_FLAGS) $(GINKGO_FLAGS) ##@ Verify targets: diff --git a/conmon-rs/common/proto/conmon.capnp b/conmon-rs/common/proto/conmon.capnp index 506e0b1608..8f3a06bacd 100644 --- a/conmon-rs/common/proto/conmon.capnp +++ b/conmon-rs/common/proto/conmon.capnp @@ -196,4 +196,52 @@ interface Conmon { key @0 :Text; value @1 :Text; } + + ############################################### + # ServeExecContainer + struct ServeExecContainerRequest { + metadata @0 :Metadata; # Standard metadata to carry. + id @1 :Text; + command @2 :List(Text); + tty @3 :Bool; + stdin @4 :Bool; + stdout @5 :Bool; + stderr @6 :Bool; + cgroupManager @7 :CgroupManager; + } + + struct ServeExecContainerResponse { + url @0 :Text; + } + + serveExecContainer @8 (request: ServeExecContainerRequest) -> (response: ServeExecContainerResponse); + + ############################################### + # ServeAttachContainer + struct ServeAttachContainerRequest { + metadata @0 :Metadata; # Standard metadata to carry. + id @1 :Text; + stdin @2 :Bool; + stdout @3 :Bool; + stderr @4 :Bool; + } + + struct ServeAttachContainerResponse { + url @0 :Text; + } + + serveAttachContainer @9 (request: ServeAttachContainerRequest) -> (response: ServeAttachContainerResponse); + + ############################################### + # ServePortForwardContainer + struct ServePortForwardContainerRequest { + metadata @0 :Metadata; # Standard metadata to carry. 
+ netNsPath @1 :Text; + } + + struct ServePortForwardContainerResponse { + url @0 :Text; + } + + servePortForwardContainer @10 (request: ServePortForwardContainerRequest) -> (response: ServePortForwardContainerResponse); } diff --git a/conmon-rs/server/Cargo.toml b/conmon-rs/server/Cargo.toml index 55610fd27a..fdec177764 100644 --- a/conmon-rs/server/Cargo.toml +++ b/conmon-rs/server/Cargo.toml @@ -9,7 +9,9 @@ path = "src/main.rs" [dependencies] anyhow = "1.0.90" +async-channel = "2.3.1" capnp = "0.19.8" +axum = { version = "0.7.5", features = ["ws"]} capnp-rpc = "0.19.5" clap = { version = "4.3.24", features = ["color", "cargo", "deprecated", "derive", "deprecated", "env", "string", "unicode", "wrap_help"] } command-fds = { version = "0.3.0", features = ["tokio"] } @@ -41,6 +43,7 @@ tokio = { version = "1.38.1", features = ["fs", "io-std", "io-util", "macros", " tokio-eventfd = "0.2.1" tokio-seqpacket = "0.7.1" tokio-util = { version = "0.7.11", features = ["compat"] } +tower-http = { version = "0.5.2", features = ["trace"] } tracing = "0.1.40" tracing-opentelemetry = "0.24.0" tracing-subscriber = "0.3.18" diff --git a/conmon-rs/server/src/attach.rs b/conmon-rs/server/src/attach.rs index 72295c8944..0612b6d84d 100644 --- a/conmon-rs/server/src/attach.rs +++ b/conmon-rs/server/src/attach.rs @@ -104,6 +104,11 @@ impl SharedContainerAttach { } Ok(()) } + + /// Retrieve the stdin sender. 
+ pub fn stdin(&self) -> &Sender> { + &self.read_half_tx + } } #[derive(Clone, Debug)] diff --git a/conmon-rs/server/src/bounded_hashmap.rs b/conmon-rs/server/src/bounded_hashmap.rs new file mode 100644 index 0000000000..84bfe5cdc5 --- /dev/null +++ b/conmon-rs/server/src/bounded_hashmap.rs @@ -0,0 +1,133 @@ +use std::{ + collections::HashMap, + fmt::Debug, + hash::Hash, + time::{Duration, Instant}, +}; +use tracing::warn; + +#[derive(Debug)] +/// A HashMap bounded by element age and maximum amount of items +pub struct BoundedHashMap { + map: HashMap, + max_duration: Duration, + max_items: usize, +} + +impl BoundedHashMap +where + K: Eq + Hash + Clone + Debug, + V: Debug, +{ + /// Insert an element into the hashmap by: + /// - removing timed-out elements + /// - removing the oldest element if no space left + pub fn insert(&mut self, k: K, v: V) -> Option { + let now = Instant::now(); + + // Remove timed-out items + let old_len = self.map.len(); + self.map + .retain(|_, (inserted, _)| now - *inserted <= self.max_duration); + if old_len < self.map.len() { + warn!("Removed {} timed out elements", self.map.len() - old_len) + } + + // Remove the oldest entry if still not enough space left + if self.map.len() >= self.max_items { + let mut key_to_remove = k.clone(); + + let mut oldest = now; + for (key, (inserted, _)) in self.map.iter() { + if *inserted < oldest { + oldest = *inserted; + key_to_remove = key.clone(); + } + } + + warn!("Removing oldest key: {:?}", key_to_remove); + self.map.remove(&key_to_remove); + } + + self.map.insert(k, (Instant::now(), v)).map(|v| v.1) + } + + /// Remove an element from the hashmap and return it if the element has not expired. 
+ pub fn remove(&mut self, k: &K) -> Option { + let now = Instant::now(); + + if let Some((key, (inserted, value))) = self.map.remove_entry(k) { + if now - inserted > self.max_duration { + warn!("Max duration expired for key: {:?}", key); + None + } else { + Some(value) + } + } else { + None + } + } +} + +impl Default for BoundedHashMap { + fn default() -> Self { + Self { + map: HashMap::with_capacity(0), + max_duration: Duration::new(60 * 60, 0), // 1 hour + max_items: 1000, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::thread::sleep; + + #[test] + fn bounded_hashmap_test() { + let mut sut = BoundedHashMap { + max_items: 2, + ..Default::default() + }; + + assert_eq!(sut.map.len(), 0); + + // Insert first item should be fine + assert!(sut.insert(0, 0).is_none()); + assert_eq!(sut.map.len(), 1); + + // Insert second item should be fine, removal should work as well + assert!(sut.insert(1, 0).is_none()); + assert_eq!(sut.map.len(), 2); + assert!(sut.remove(&1).is_some()); + assert_eq!(sut.map.len(), 1); + assert!(sut.insert(1, 0).is_none()); + + // Insert third item should be fine, but remove oldest + assert!(sut.insert(2, 0).is_none()); + assert_eq!(sut.map.len(), 2); + assert!(!sut.map.contains_key(&0)); + assert!(sut.map.contains_key(&1)); + assert!(sut.map.contains_key(&2)); + + // Insert another item should be fine, but remove oldest + assert!(sut.insert(3, 0).is_none()); + assert_eq!(sut.map.len(), 2); + assert!(!sut.map.contains_key(&1)); + assert!(sut.map.contains_key(&2)); + assert!(sut.map.contains_key(&3)); + + // Change the max age of the elements, all should be timed out + sut.max_duration = Duration::from_millis(100); + sleep(Duration::from_millis(200)); + assert!(sut.insert(0, 0).is_none()); + assert!(!sut.map.contains_key(&1)); + assert!(!sut.map.contains_key(&2)); + assert!(!sut.map.contains_key(&3)); + + // The last element should be also timed out if we wait + sleep(Duration::from_millis(200)); + 
assert!(sut.remove(&0).is_none()); + } +} diff --git a/conmon-rs/server/src/config.rs b/conmon-rs/server/src/config.rs index d20ac572be..f941a55e13 100644 --- a/conmon-rs/server/src/config.rs +++ b/conmon-rs/server/src/config.rs @@ -12,7 +12,9 @@ macro_rules! prefix { }; } -#[derive(CopyGetters, Debug, Deserialize, Eq, Getters, Parser, PartialEq, Serialize, Setters)] +#[derive( + Clone, CopyGetters, Debug, Deserialize, Eq, Getters, Parser, PartialEq, Serialize, Setters, +)] #[serde(rename_all = "kebab-case")] #[command( after_help("More info at: https://github.com/containers/conmon-rs"), diff --git a/conmon-rs/server/src/container_io.rs b/conmon-rs/server/src/container_io.rs index 8118d6e220..36eb27cf3d 100644 --- a/conmon-rs/server/src/container_io.rs +++ b/conmon-rs/server/src/container_io.rs @@ -3,6 +3,7 @@ use crate::{ terminal::Terminal, }; use anyhow::{bail, Context, Result}; +use async_channel::{Receiver, Sender}; use getset::{Getters, MutGetters}; use nix::errno::Errno; use std::{ @@ -15,10 +16,7 @@ use tempfile::Builder; use tokio::{ io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, select, - sync::{ - mpsc::{UnboundedReceiver, UnboundedSender}, - RwLock, - }, + sync::RwLock, time::{self, Instant}, }; use tokio_util::sync::CancellationToken; @@ -59,6 +57,11 @@ impl SharedContainerIO { pub async fn attach(&self) -> SharedContainerAttach { self.0.read().await.attach().clone() } + + /// Retrieve the underlying stdout and stderr channels. + pub async fn stdio(&self) -> Result<(Receiver, Receiver)> { + self.0.read().await.stdio() + } } #[derive(Debug, Getters, MutGetters)] @@ -155,6 +158,23 @@ impl ContainerIO { Ok(path) } + /// Retrieve clones of the stdout and stderr channels. 
+ pub fn stdio(&self) -> Result<(Receiver, Receiver)> { + match self.typ() { + ContainerIOType::Terminal(t) => { + if let Some(message_rx) = t.message_rx() { + let (_, fake_rx) = async_channel::unbounded(); + Ok((message_rx.clone(), fake_rx)) + } else { + bail!("called before message receiver was registered") + } + } + ContainerIOType::Streams(s) => { + Ok((s.message_rx_stdout.clone(), s.message_rx_stderr.clone())) + } + } + } + pub async fn read_all_with_timeout( &mut self, time_to_timeout: Option, @@ -184,7 +204,7 @@ impl ContainerIO { async fn read_stream_with_timeout( time_to_timeout: Option, - receiver: &mut UnboundedReceiver, + receiver: &mut Receiver, ) -> (Vec, bool) { let mut stdio = vec![]; let mut timed_out = false; @@ -192,19 +212,18 @@ impl ContainerIO { let msg = if let Some(time_to_timeout) = time_to_timeout { { match time::timeout_at(time_to_timeout, receiver.recv()).await { - Ok(Some(msg)) => msg, - Err(_) => { + Ok(Ok(msg)) => msg, + _ => { timed_out = true; Message::Done } - Ok(None) => unreachable!(), } } } else { { match receiver.recv().await { - Some(msg) => msg, - None => Message::Done, + Ok(msg) => msg, + _ => Message::Done, } } }; @@ -230,7 +249,7 @@ impl ContainerIO { mut reader: T, pipe: Pipe, logger: SharedContainerLog, - message_tx: UnboundedSender, + message_tx: Sender, mut attach: SharedContainerAttach, ) -> Result<()> where @@ -251,6 +270,7 @@ impl ContainerIO { if !message_tx.is_closed() { message_tx .send(Message::Done) + .await .context("send done message")?; } @@ -275,6 +295,7 @@ impl ContainerIO { if !message_tx.is_closed() { message_tx .send(Message::Data(data.into(), pipe)) + .await .context("send data message")?; } } @@ -290,6 +311,7 @@ impl ContainerIO { if !message_tx.is_closed() { message_tx .send(Message::Done) + .await .context("send done message")?; } return Ok(()); diff --git a/conmon-rs/server/src/lib.rs b/conmon-rs/server/src/lib.rs index bfdaa2638d..126852be21 100644 --- a/conmon-rs/server/src/lib.rs +++ 
b/conmon-rs/server/src/lib.rs @@ -9,6 +9,7 @@ pub use version::Version; mod macros; mod attach; +mod bounded_hashmap; mod capnp_util; mod child; mod child_reaper; @@ -23,6 +24,7 @@ mod oom_watcher; mod pause; mod rpc; mod server; +mod streaming_server; mod streams; mod telemetry; mod terminal; diff --git a/conmon-rs/server/src/rpc.rs b/conmon-rs/server/src/rpc.rs index 19c17c61fe..fa80c9623e 100644 --- a/conmon-rs/server/src/rpc.rs +++ b/conmon-rs/server/src/rpc.rs @@ -462,4 +462,153 @@ impl conmon::Server for Server { .instrument(debug_span!("promise")), ) } + + fn serve_exec_container( + &mut self, + params: conmon::ServeExecContainerParams, + mut results: conmon::ServeExecContainerResults, + ) -> Promise<(), capnp::Error> { + debug!("Got a serve exec container request"); + let req = pry!(pry!(params.get()).get_request()); + + let span = debug_span!( + "serve_exec_container", + uuid = Uuid::new_v4().to_string().as_str() + ); + let _enter = span.enter(); + pry_err!(Telemetry::set_parent_context(pry!(req.get_metadata()))); + + let id = pry_err!(pry_err!(req.get_id()).to_string()); + + // Validate that the container actually exists + pry_err!(self.reaper().get(&id)); + + let command = capnp_vec_str!(req.get_command()); + let (tty, stdin, stdout, stderr) = ( + req.get_tty(), + req.get_stdin(), + req.get_stdout(), + req.get_stderr(), + ); + + let streaming_server = self.streaming_server().clone(); + let child_reaper = self.reaper().clone(); + let container_io = pry_err!(ContainerIO::new(tty, ContainerLog::new())); + let config = self.config().clone(); + let cgroup_manager = pry!(req.get_cgroup_manager()); + + Promise::from_future( + async move { + capnp_err!(streaming_server + .write() + .await + .start_if_required() + .await + .context("start streaming server if required"))?; + + let url = streaming_server + .read() + .await + .exec_url( + child_reaper, + container_io, + config, + cgroup_manager, + id, + command, + stdin, + stdout, + stderr, + ) + .await; + + 
results.get().init_response().set_url(&url); + Ok(()) + } + .instrument(debug_span!("promise")), + ) + } + + fn serve_attach_container( + &mut self, + params: conmon::ServeAttachContainerParams, + mut results: conmon::ServeAttachContainerResults, + ) -> Promise<(), capnp::Error> { + debug!("Got a serve attach container request"); + let req = pry!(pry!(params.get()).get_request()); + + let span = debug_span!( + "serve_attach_container", + uuid = Uuid::new_v4().to_string().as_str() + ); + let _enter = span.enter(); + pry_err!(Telemetry::set_parent_context(pry!(req.get_metadata()))); + + let id = pry_err!(pry_err!(req.get_id()).to_str()); + let (stdin, stdout, stderr) = (req.get_stdin(), req.get_stdout(), req.get_stderr()); + + let streaming_server = self.streaming_server().clone(); + let child = pry_err!(self.reaper().get(id)); + + Promise::from_future( + async move { + capnp_err!(streaming_server + .write() + .await + .start_if_required() + .await + .context("start streaming server"))?; + + let url = streaming_server + .read() + .await + .attach_url(child, stdin, stdout, stderr) + .await; + + results.get().init_response().set_url(&url); + Ok(()) + } + .instrument(debug_span!("promise")), + ) + } + + fn serve_port_forward_container( + &mut self, + params: conmon::ServePortForwardContainerParams, + mut results: conmon::ServePortForwardContainerResults, + ) -> Promise<(), capnp::Error> { + debug!("Got a serve port forward container request"); + let req = pry!(pry!(params.get()).get_request()); + + let span = debug_span!( + "serve_port_forward_container", + uuid = Uuid::new_v4().to_string().as_str() + ); + let _enter = span.enter(); + pry_err!(Telemetry::set_parent_context(pry!(req.get_metadata()))); + + let net_ns_path = pry_err!(pry_err!(req.get_net_ns_path()).to_string()); + let streaming_server = self.streaming_server().clone(); + + Promise::from_future( + async move { + capnp_err!(streaming_server + .write() + .await + .start_if_required() + .await + 
.context("start streaming server if required"))?; + + let url = streaming_server + .read() + .await + .port_forward_url(net_ns_path) + .await; + + results.get().init_response().set_url(&url); + Ok(()) + } + .instrument(debug_span!("promise")), + ) + } } diff --git a/conmon-rs/server/src/server.rs b/conmon-rs/server/src/server.rs index 7dd4cf9644..8a916fbe03 100644 --- a/conmon-rs/server/src/server.rs +++ b/conmon-rs/server/src/server.rs @@ -9,6 +9,7 @@ use crate::{ journal::Journal, listener::{DefaultListener, Listener}, pause::Pause, + streaming_server::StreamingServer, telemetry::Telemetry, version::Version, }; @@ -30,7 +31,7 @@ use tokio::{ fs, runtime::{Builder, Handle}, signal::unix::{signal, SignalKind}, - sync::oneshot, + sync::{oneshot, RwLock}, task::{self, LocalSet}, }; use tokio_util::compat::TokioAsyncReadCompatExt; @@ -53,6 +54,10 @@ pub struct Server { /// Fd socket instance. #[getset(get = "pub(crate)")] fd_socket: Arc, + + /// Streaming server instance. + #[getset(get = "pub(crate)")] + streaming_server: Arc>, } impl Server { @@ -62,6 +67,7 @@ impl Server { config: Default::default(), reaper: Default::default(), fd_socket: Default::default(), + streaming_server: Default::default(), }; if let Some(v) = server.config().version() { @@ -359,8 +365,29 @@ impl GenerateRuntimeArgs<'_> { /// Generate the OCI runtime CLI arguments from the provided parameters. 
pub(crate) fn exec_sync_args(&self, command: Reader) -> Result> { + let mut args = self + .exec_sync_args_without_command() + .context("exec sync args without command")?; + + for arg in command { + args.push(arg?.to_string()?); + } + + debug!("Exec args {:?}", args.join(" ")); + Ok(args) + } + + pub(crate) fn exec_sync_args_without_command(&self) -> Result> { let mut args = self.default_args().context("build default runtime args")?; + if let Some(rr) = self.config.runtime_root() { + args.push(format!("--root={}", rr.display())); + } + + if self.cgroup_manager == CgroupManager::Systemd { + args.push(Self::SYSTEMD_CGROUP_ARG.into()); + } + args.push("exec".to_string()); args.push("-d".to_string()); @@ -372,11 +399,6 @@ impl GenerateRuntimeArgs<'_> { args.push(format!("--pid-file={}", self.pidfile.display())); args.push(self.id.into()); - for arg in command { - args.push(arg?.to_string()?); - } - - debug!("Exec args {:?}", args.join(" ")); Ok(args) } diff --git a/conmon-rs/server/src/streaming_server.rs b/conmon-rs/server/src/streaming_server.rs new file mode 100644 index 0000000000..c3dcd5b68d --- /dev/null +++ b/conmon-rs/server/src/streaming_server.rs @@ -0,0 +1,577 @@ +use crate::{ + bounded_hashmap::BoundedHashMap, + child::Child, + child_reaper::{ChildReaper, ReapableChild}, + config::Config, + container_io::{ContainerIO, Message as IOMessage, SharedContainerIO}, + server::GenerateRuntimeArgs, +}; +use anyhow::{Context, Result}; +use axum::{ + extract::{ + ws::{close_code, CloseFrame, Message, WebSocket, WebSocketUpgrade}, + Path, State as AxumState, + }, + http::StatusCode, + response::IntoResponse, + routing::get, + Router, +}; +use conmon_common::conmon_capnp::conmon::CgroupManager; +use futures::{ + sink::SinkExt, + stream::{SplitSink, SplitStream, StreamExt}, +}; +use serde::{Deserialize, Serialize}; +use std::{fmt::Debug, ops::ControlFlow, sync::Arc}; +use tokio::{ + net::TcpListener, + sync::{ + mpsc::{unbounded_channel, UnboundedReceiver, 
UnboundedSender}, + RwLock, + }, + task::{self, JoinHandle}, +}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{debug, debug_span, error, info, trace, warn}; +use uuid::Uuid; + +const ADDR: &str = "127.0.0.1"; + +const PROTOCOL_V5: &str = "v5.channel.k8s.io"; +const PROTOCOL_PORT_FORWARD: &str = "SPDY/3.1+portforward.k8s.io"; + +const EXEC_PATH: &str = "exec"; +const ATTACH_PATH: &str = "attach"; +const PORT_FORWARD_PATH: &str = "port-forward"; + +const STDIN_BYTE: u8 = 0; +const STDOUT_BYTE: u8 = 1; +const STDERR_BYTE: u8 = 2; +const STREAM_ERR_BYTE: u8 = 3; +const RESIZE_BYTE: u8 = 4; +const CLOSE_BYTE: u8 = 255; + +#[derive(Debug, Default)] +/// The main streaming server structure of this module. +pub struct StreamingServer { + running: bool, + port: u16, + state: Arc>, +} + +/// State handled by the streaming server. +type State = BoundedHashMap; + +#[derive(Debug)] +/// A dedicated session for each provided functionality. +enum Session { + Exec(ExecSession), + Attach(AttachSession), + PortForward(PortForwardSession), +} + +#[derive(Debug)] +/// Required exec session data. +struct ExecSession { + child_reaper: Arc, + container_io: ContainerIO, + server_config: Config, + cgroup_manager: CgroupManager, + container_id: String, + command: Vec, + stdin: bool, + stdout: bool, + stderr: bool, +} + +#[derive(Debug)] +/// Required attach session data. +struct AttachSession { + child: ReapableChild, + stdin: bool, + stdout: bool, + stderr: bool, +} + +#[derive(Debug)] +/// Required port forward session data. +struct PortForwardSession { + #[allow(dead_code)] + net_ns_path: String, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +/// Terminal resize event for exec and attach. +struct ResizeEvent { + width: u16, + height: u16, +} + +#[derive(Debug, Serialize)] +/// Error message type used for exec in case that the command fails. 
+struct ErrorMessage { + status: &'static str, + reason: &'static str, + details: ErrorDetails, + message: &'static str, +} + +impl ErrorMessage { + fn new(exit_code: T) -> Self + where + T: ToString, + { + Self { + status: "Failure", + reason: "NonZeroExitCode", + details: ErrorDetails { + causes: vec![ErrorCause { + reason: "ExitCode", + message: exit_code.to_string(), + }], + }, + message: "command terminated with non-zero exit code", + } + } +} + +#[derive(Debug, Serialize)] +/// Error details for the ErrorMessage. +struct ErrorDetails { + causes: Vec, +} + +#[derive(Debug, Serialize)] +/// Error cause for the ErrorDetails. +struct ErrorCause { + reason: &'static str, + message: String, +} + +impl StreamingServer { + /// Start the streaming server if not already running. + pub async fn start_if_required(&mut self) -> Result<()> { + if self.running { + return Ok(()); + } + + let listener = TcpListener::bind(ADDR.to_string() + ":0") + .await + .context("bind streaming server")?; + + let local_addr = listener + .local_addr() + .context("get listeners local address")?; + + self.port = local_addr.port(); + + info!("Starting streaming server on {local_addr}"); + task::spawn_local(Self::serve(listener, self.state.clone())); + self.running = true; + + Ok(()) + } + + /// Serve the main streaming server. + async fn serve(listener: TcpListener, state: Arc>) -> Result<()> { + let router = Router::new() + .route(&Self::path_for(EXEC_PATH), get(Self::handle)) + .route(&Self::path_for(ATTACH_PATH), get(Self::handle)) + .route(&Self::path_for(PORT_FORWARD_PATH), get(Self::handle)) + .fallback(Self::fallback) + .with_state(state) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::default().include_headers(true)), + ); + axum::serve(listener, router) + .await + .context("start streaming server") + } + + /// Token parse path for the web server. 
+ fn path_for(p: &str) -> String { + format!("/{}/:token", p) + } + + /// Return the URL for a specific path and Uuid. + fn url_for(&self, p: &str, uuid: &Uuid) -> String { + format!("http://{}:{}/{}/{}", ADDR, self.port, p, uuid) + } + + /// Fallback response. + async fn fallback() -> impl IntoResponse { + StatusCode::NOT_FOUND + } + + #[allow(clippy::too_many_arguments)] + /// Returns the URL used for the provided exec parameters. + pub async fn exec_url( + &self, + child_reaper: Arc, + container_io: ContainerIO, + server_config: Config, + cgroup_manager: CgroupManager, + container_id: String, + command: Vec, + stdin: bool, + stdout: bool, + stderr: bool, + ) -> String { + let mut state_lock = self.state.write().await; + let uuid = Uuid::new_v4(); + state_lock.insert( + uuid, + Session::Exec(ExecSession { + child_reaper, + container_io, + server_config, + cgroup_manager, + container_id, + command, + stdin, + stdout, + stderr, + }), + ); + self.url_for(EXEC_PATH, &uuid) + } + + /// Returns the URL used for the provided attach parameters. + pub async fn attach_url( + &self, + child: ReapableChild, + stdin: bool, + stdout: bool, + stderr: bool, + ) -> String { + let mut state_lock = self.state.write().await; + let uuid = Uuid::new_v4(); + state_lock.insert( + uuid, + Session::Attach(AttachSession { + child, + stdin, + stdout, + stderr, + }), + ); + self.url_for(ATTACH_PATH, &uuid) + } + + /// Returns the URL used for the provided port forward parameters. + pub async fn port_forward_url(&self, net_ns_path: String) -> String { + let mut state_lock = self.state.write().await; + let uuid = Uuid::new_v4(); + state_lock.insert( + uuid, + Session::PortForward(PortForwardSession { net_ns_path }), + ); + self.url_for(PORT_FORWARD_PATH, &uuid) + } + + /// Handle a webserver connection which should be upgraded to become a websocket one. 
+ async fn handle( + ws: WebSocketUpgrade, + Path(token): Path, + AxumState(state): AxumState>>, + ) -> impl IntoResponse { + let span = debug_span!("handle_common", token = token.to_string().as_str()); + let _enter = span.enter(); + + info!("Got request for token: {token}"); + let mut state_lock = state.write().await; + + match state_lock.remove(&token) { + Some(session) => { + info!("Got valid session for token {token}: {session:?}"); + ws.protocols([PROTOCOL_V5, PROTOCOL_PORT_FORWARD]) + .on_upgrade(move |socket| Self::handle_websocket(socket, session)) + } + None => { + error!("Unable to find session for token: {token}"); + StatusCode::NOT_FOUND.into_response() + } + } + } + + /// Handle a single websocket connection. + async fn handle_websocket(socket: WebSocket, session: Session) { + let (sender, receiver) = socket.split(); + let (stdin_tx, stdin_rx) = unbounded_channel(); + + let mut send_task = Self::write_task(sender, stdin_rx, session).await; + let mut recv_task = Self::read_task(receiver, stdin_tx).await; + + tokio::select! { + rv_a = (&mut send_task) => { + match rv_a { + Ok(_) => info!("All messages sent"), + Err(a) => error!("Error sending messages: {a:?}") + } + recv_task.abort(); + }, + rv_b = (&mut recv_task) => { + match rv_b { + Ok(_) => info!("All messages received"), + Err(b) => error!("Error receiving messages: {b:?}") + } + send_task.abort(); + } + } + + info!("Closing websocket connection"); + } + + /// Build a common write task based on the session type. 
+ async fn write_task( + mut sender: SplitSink, + stdin_rx: UnboundedReceiver>, + session: Session, + ) -> JoinHandle<()> { + tokio::spawn(async move { + if let (Err(e), typ) = match session { + Session::Exec(s) => (Self::exec_loop(s, &mut sender, stdin_rx).await, "exec"), + Session::Attach(s) => (Self::attach_loop(s, &mut sender, stdin_rx).await, "attach"), + Session::PortForward(s) => ( + Self::port_forward_loop(s, &mut sender, stdin_rx).await, + "port forward", + ), + } { + error!("Unable to run {typ} for container: {e}"); + } + + if let Err(e) = sender + .send(Message::Close( + CloseFrame { + code: close_code::NORMAL, + reason: "done".into(), + } + .into(), + )) + .await + { + error!("Unable to send close message: {e}") + } + }) + } + + /// Build a common read task. + async fn read_task( + mut receiver: SplitStream, + mut stdin_tx: UnboundedSender>, + ) -> JoinHandle<()> { + tokio::spawn(async move { + while let Some(Ok(msg)) = receiver.next().await { + if Self::read_message(msg, &mut stdin_tx).is_break() { + break; + } + } + }) + } + + /// Read a single message and return the control flow decision. + fn read_message(msg: Message, stdin_tx: &mut UnboundedSender>) -> ControlFlow<(), ()> { + match msg { + Message::Binary(data) if !data.is_empty() => { + debug!("Got {} binary bytes", data.len()); + if let Err(e) = stdin_tx.send(data) { + error!("Unable to send stdin data: {e}"); + } + } + Message::Close(c) => { + if let Some(cf) = c { + info!( + "Got websocket close with code {} and reason `{}`", + cf.code, cf.reason + ); + } else { + warn!("Got close message without close frame"); + } + return ControlFlow::Break(()); + } + Message::Text(t) => trace!("Got text message: {t:?}"), + Message::Pong(_) => trace!("Got pong"), + Message::Ping(_) => trace!("Got ping"), + Message::Binary(_) => trace!("Got unknown binary data"), + } + + ControlFlow::Continue(()) + } + + /// The exec specific read/write loop. 
+ async fn exec_loop( + mut session: ExecSession, + sender: &mut SplitSink, + mut stdin_rx: UnboundedReceiver>, + ) -> Result<()> { + let pidfile = ContainerIO::temp_file_name( + Some(session.server_config.runtime_dir()), + "exec_streaming", + "pid", + ) + .context("build pid file path")?; + + let args = GenerateRuntimeArgs { + config: &session.server_config, + id: &session.container_id, + container_io: &session.container_io, + pidfile: &pidfile, + cgroup_manager: session.cgroup_manager, + }; + let mut args = args + .exec_sync_args_without_command() + .context("exec sync args without command")?; + args.extend(session.command); + + let (grandchild_pid, token) = session + .child_reaper + .create_child( + session.server_config.runtime(), + &args, + session.stdin, + &mut session.container_io, + &pidfile, + vec![], + vec![], + ) + .await + .context("create new child process")?; + + let io = SharedContainerIO::new(session.container_io); + let child = Child::new( + session.container_id.clone(), + grandchild_pid, + vec![], + vec![], + None, + io.clone(), + vec![], + token.clone(), + ); + + let mut exit_rx = session + .child_reaper + .watch_grandchild(child, vec![]) + .context("watch grandchild for pid")?; + + let (stdout_rx, stderr_rx) = io + .stdio() + .await + .context("retrieve stdout and stderr channels")?; + + loop { + tokio::select! 
{ + Some(mut data) = stdin_rx.recv() => if session.stdin { + // First element is the message type indicator + match data.remove(0) { + STDIN_BYTE => { + trace!("Got stdin message of len {}", data.len()); + io.attach().await.stdin().send(data).context("send to attach session")?; + }, + RESIZE_BYTE => { + let e = serde_json::from_slice::(&data).context("unmarshal resize event")?; + trace!("Got resize message: {e:?}"); + io.resize(e.width, e.height).await.context("resize terminal")?; + }, + CLOSE_BYTE => { + info!("Got close message"); + break + }, + x => warn!("Unknown start byte for stdin: {x}"), + }; + }, + + Ok(IOMessage::Data(mut data, _)) = stdout_rx.recv() => if session.stdout { + data.insert(0, STDOUT_BYTE); + sender.send(Message::Binary(data)).await.context("send to stdout")?; + }, + + Ok(IOMessage::Data(mut data, _)) = stderr_rx.recv() => if session.stderr { + data.insert(0, STDERR_BYTE); + sender.send(Message::Binary(data)).await.context("send to stderr")?; + }, + + Ok(exit_data) = exit_rx.recv() => { + if exit_data.exit_code != 0 { + let mut err = vec![STREAM_ERR_BYTE]; + let msg = ErrorMessage::new(exit_data.exit_code); + err.extend(serde_json::to_vec(&msg).context("serialize error message")?); + sender.send(Message::Binary(err)).await.context("send exit failure message")?; + } + break + }, + } + } + + Ok(()) + } + + /// The attach specific read/write loop. + async fn attach_loop( + session: AttachSession, + sender: &mut SplitSink, + mut stdin_rx: UnboundedReceiver>, + ) -> Result<()> { + let io = session.child.io(); + + let (stdout_rx, stderr_rx) = io + .stdio() + .await + .context("retrieve stdout and stderr channels")?; + + loop { + tokio::select! 
{ + Some(mut data) = stdin_rx.recv() => if session.stdin { + // First element is the message type indicator + match data.remove(0) { + STDIN_BYTE => { + trace!("Got stdin message of len {}", data.len()); + io.attach().await.stdin().send(data).context("send to attach session")?; + }, + RESIZE_BYTE => { + let e = serde_json::from_slice::(&data).context("unmarshal resize event")?; + trace!("Got resize message: {e:?}"); + io.resize(e.width, e.height).await.context("resize terminal")?; + }, + CLOSE_BYTE => { + info!("Got close message"); + break + }, + x => warn!("Unknown start byte for stdin: {x}"), + }; + }, + + Ok(IOMessage::Data(mut data, _)) = stdout_rx.recv() => if session.stdout { + data.insert(0, STDOUT_BYTE); + sender.send(Message::Binary(data)).await.context("send to stdout")?; + }, + + Ok(IOMessage::Data(mut data, _)) = stderr_rx.recv() => if session.stderr { + data.insert(0, STDERR_BYTE); + sender.send(Message::Binary(data)).await.context("send to stderr")?; + }, + + _ = session.child.token().cancelled() => { + debug!("Exiting streaming attach because token cancelled"); + break + } + } + } + + Ok(()) + } + + /// The port forward specific read/write loop. 
+ async fn port_forward_loop( + _session: PortForwardSession, + _sender: &mut SplitSink, + mut _in_rx: UnboundedReceiver>, + ) -> Result<()> { + unimplemented!( + "Requires SPDY protocol implementation from https://github.com/moby/spdystream" + ) + } +} diff --git a/conmon-rs/server/src/streams.rs b/conmon-rs/server/src/streams.rs index f6f8be5100..fadcc3544a 100644 --- a/conmon-rs/server/src/streams.rs +++ b/conmon-rs/server/src/streams.rs @@ -6,10 +6,10 @@ use crate::{ container_log::SharedContainerLog, }; use anyhow::Result; +use async_channel::{Receiver, Sender}; use getset::Getters; use tokio::{ process::{ChildStderr, ChildStdin, ChildStdout}, - sync::mpsc::{self, UnboundedReceiver, UnboundedSender}, task, }; use tokio_util::sync::CancellationToken; @@ -23,15 +23,15 @@ pub struct Streams { #[getset(get = "pub")] attach: SharedContainerAttach, - pub message_rx_stdout: UnboundedReceiver, + pub message_rx_stdout: Receiver, #[getset(get = "pub")] - message_tx_stdout: UnboundedSender, + message_tx_stdout: Sender, - pub message_rx_stderr: UnboundedReceiver, + pub message_rx_stderr: Receiver, #[getset(get = "pub")] - message_tx_stderr: UnboundedSender, + message_tx_stderr: Sender, } impl Streams { @@ -39,8 +39,8 @@ impl Streams { pub fn new(logger: SharedContainerLog, attach: SharedContainerAttach) -> Result { debug!("Creating new IO streams"); - let (message_tx_stdout, message_rx_stdout) = mpsc::unbounded_channel(); - let (message_tx_stderr, message_rx_stderr) = mpsc::unbounded_channel(); + let (message_tx_stdout, message_rx_stdout) = async_channel::unbounded(); + let (message_tx_stderr, message_rx_stderr) = async_channel::unbounded(); Ok(Self { logger, @@ -130,7 +130,7 @@ mod tests { let attach = SharedContainerAttach::default(); let token = CancellationToken::new(); - let mut sut = Streams::new(logger, attach)?; + let sut = Streams::new(logger, attach)?; let expected = "hello world"; let mut child = Command::new("echo") diff --git 
a/conmon-rs/server/src/terminal.rs b/conmon-rs/server/src/terminal.rs index 3b7e99dc0e..2068dd29e4 100644 --- a/conmon-rs/server/src/terminal.rs +++ b/conmon-rs/server/src/terminal.rs @@ -7,6 +7,7 @@ use crate::{ listener::{DefaultListener, Listener}, }; use anyhow::{format_err, Context as _, Result}; +use async_channel::Receiver as UnboundedReceiver; use getset::{Getters, MutGetters, Setters}; use libc::{winsize, TIOCSWINSZ}; use nix::{ @@ -29,7 +30,7 @@ use tokio::{ fs, io::{unix::AsyncFd, AsyncRead, AsyncWrite, AsyncWriteExt, Interest, ReadBuf}, net::UnixStream, - sync::mpsc::{self, Receiver, Sender, UnboundedReceiver}, + sync::mpsc::{self, Receiver, Sender}, task, }; use tokio_util::sync::CancellationToken; @@ -118,7 +119,7 @@ impl Terminal { let attach_clone = self.attach.clone(); let logger_clone = self.logger.clone(); - let (message_tx, message_rx) = mpsc::unbounded_channel(); + let (message_tx, message_rx) = async_channel::unbounded(); self.message_rx = Some(message_rx); task::spawn({ diff --git a/go.mod b/go.mod index b7dbdc0d1b..f16be204a8 100644 --- a/go.mod +++ b/go.mod @@ -15,30 +15,54 @@ require ( github.com/sirupsen/logrus v1.9.3 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/trace v1.31.0 + k8s.io/client-go v0.31.1 ) require ( github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.3.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // 
indirect github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/user v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect go.opentelemetry.io/otel/metric v1.31.0 // indirect golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect + golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.24.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.31.1 // indirect + k8s.io/apimachinery v0.31.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 9d990e2b42..1c61488ec4 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ capnproto.org/go/capnp/v3 v3.0.1-alpha.2 h1:W/cf+XEArUSwcBBE/9wS2NpWDkM5NLQOjmzEiHZpYi0= capnproto.org/go/capnp/v3 v3.0.1-alpha.2/go.mod 
h1:2vT5D2dtG8sJGEoEKU17e+j7shdaYp1Myl8X03B3hmc= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381 h1:d5EKgQfRQvO97jnISfR89AiCCCJMwMFoSxUiU0OGCRU= @@ -18,30 +20,75 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= 
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 
h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
+github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/user v0.2.0 h1:OnpapJsRp25vkhw8TFG6OLJODNh/3rEwRWtJ3kakwRM= github.com/moby/sys/user v0.2.0/go.mod h1:RYstrcWOJpVh+6qzUqp2bU3eaRpdiQeKGlKitaH0PM8= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= @@ -59,11 +106,16 @@ github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2 github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -73,33 +125,92 @@ github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod 
h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd 
h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/proto/conmon.capnp.go b/internal/proto/conmon.capnp.go index 77ed9824ee..848bc2f928 100644 --- a/internal/proto/conmon.capnp.go +++ b/internal/proto/conmon.capnp.go @@ -176,6 +176,66 @@ func (c Conmon) StartFdSocket(ctx context.Context, params func(Conmon_startFdSoc } +func (c Conmon) ServeExecContainer(ctx context.Context, params func(Conmon_serveExecContainer_Params) error) (Conmon_serveExecContainer_Results_Future, capnp.ReleaseFunc) { + + s := capnp.Send{ + Method: capnp.Method{ + InterfaceID: 0xb737e899dd6633f1, + MethodID: 8, + InterfaceName: "internal/proto/conmon.capnp:Conmon", + MethodName: "serveExecContainer", + }, + } + if params != nil { + s.ArgsSize = capnp.ObjectSize{DataSize: 0, PointerCount: 1} + s.PlaceArgs = func(s capnp.Struct) error { return params(Conmon_serveExecContainer_Params(s)) } + } + + ans, release := capnp.Client(c).SendCall(ctx, s) + return Conmon_serveExecContainer_Results_Future{Future: ans.Future()}, release + +} + +func (c Conmon) ServeAttachContainer(ctx context.Context, params func(Conmon_serveAttachContainer_Params) error) (Conmon_serveAttachContainer_Results_Future, capnp.ReleaseFunc) { + + s := capnp.Send{ + Method: capnp.Method{ + InterfaceID: 0xb737e899dd6633f1, + MethodID: 9, + InterfaceName: "internal/proto/conmon.capnp:Conmon", + MethodName: "serveAttachContainer", + }, + } + if params != nil { + s.ArgsSize = capnp.ObjectSize{DataSize: 0, PointerCount: 1} + s.PlaceArgs = func(s capnp.Struct) error { return 
params(Conmon_serveAttachContainer_Params(s)) } + } + + ans, release := capnp.Client(c).SendCall(ctx, s) + return Conmon_serveAttachContainer_Results_Future{Future: ans.Future()}, release + +} + +func (c Conmon) ServePortForwardContainer(ctx context.Context, params func(Conmon_servePortForwardContainer_Params) error) (Conmon_servePortForwardContainer_Results_Future, capnp.ReleaseFunc) { + + s := capnp.Send{ + Method: capnp.Method{ + InterfaceID: 0xb737e899dd6633f1, + MethodID: 10, + InterfaceName: "internal/proto/conmon.capnp:Conmon", + MethodName: "servePortForwardContainer", + }, + } + if params != nil { + s.ArgsSize = capnp.ObjectSize{DataSize: 0, PointerCount: 1} + s.PlaceArgs = func(s capnp.Struct) error { return params(Conmon_servePortForwardContainer_Params(s)) } + } + + ans, release := capnp.Client(c).SendCall(ctx, s) + return Conmon_servePortForwardContainer_Results_Future{Future: ans.Future()}, release + +} + func (c Conmon) WaitStreaming() error { return capnp.Client(c).WaitStreaming() } @@ -264,6 +324,12 @@ type Conmon_Server interface { CreateNamespaces(context.Context, Conmon_createNamespaces) error StartFdSocket(context.Context, Conmon_startFdSocket) error + + ServeExecContainer(context.Context, Conmon_serveExecContainer) error + + ServeAttachContainer(context.Context, Conmon_serveAttachContainer) error + + ServePortForwardContainer(context.Context, Conmon_servePortForwardContainer) error } // Conmon_NewServer creates a new Server from an implementation of Conmon_Server. @@ -282,7 +348,7 @@ func Conmon_ServerToClient(s Conmon_Server) Conmon { // This can be used to create a more complicated Server. 
func Conmon_Methods(methods []server.Method, s Conmon_Server) []server.Method { if cap(methods) == 0 { - methods = make([]server.Method, 0, 8) + methods = make([]server.Method, 0, 11) } methods = append(methods, server.Method{ @@ -381,6 +447,42 @@ func Conmon_Methods(methods []server.Method, s Conmon_Server) []server.Method { }, }) + methods = append(methods, server.Method{ + Method: capnp.Method{ + InterfaceID: 0xb737e899dd6633f1, + MethodID: 8, + InterfaceName: "internal/proto/conmon.capnp:Conmon", + MethodName: "serveExecContainer", + }, + Impl: func(ctx context.Context, call *server.Call) error { + return s.ServeExecContainer(ctx, Conmon_serveExecContainer{call}) + }, + }) + + methods = append(methods, server.Method{ + Method: capnp.Method{ + InterfaceID: 0xb737e899dd6633f1, + MethodID: 9, + InterfaceName: "internal/proto/conmon.capnp:Conmon", + MethodName: "serveAttachContainer", + }, + Impl: func(ctx context.Context, call *server.Call) error { + return s.ServeAttachContainer(ctx, Conmon_serveAttachContainer{call}) + }, + }) + + methods = append(methods, server.Method{ + Method: capnp.Method{ + InterfaceID: 0xb737e899dd6633f1, + MethodID: 10, + InterfaceName: "internal/proto/conmon.capnp:Conmon", + MethodName: "servePortForwardContainer", + }, + Impl: func(ctx context.Context, call *server.Call) error { + return s.ServePortForwardContainer(ctx, Conmon_servePortForwardContainer{call}) + }, + }) + return methods } @@ -520,6 +622,57 @@ func (c Conmon_startFdSocket) AllocResults() (Conmon_startFdSocket_Results, erro return Conmon_startFdSocket_Results(r), err } +// Conmon_serveExecContainer holds the state for a server call to Conmon.serveExecContainer. +// See server.Call for documentation. +type Conmon_serveExecContainer struct { + *server.Call +} + +// Args returns the call's arguments. 
+func (c Conmon_serveExecContainer) Args() Conmon_serveExecContainer_Params { + return Conmon_serveExecContainer_Params(c.Call.Args()) +} + +// AllocResults allocates the results struct. +func (c Conmon_serveExecContainer) AllocResults() (Conmon_serveExecContainer_Results, error) { + r, err := c.Call.AllocResults(capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_serveExecContainer_Results(r), err +} + +// Conmon_serveAttachContainer holds the state for a server call to Conmon.serveAttachContainer. +// See server.Call for documentation. +type Conmon_serveAttachContainer struct { + *server.Call +} + +// Args returns the call's arguments. +func (c Conmon_serveAttachContainer) Args() Conmon_serveAttachContainer_Params { + return Conmon_serveAttachContainer_Params(c.Call.Args()) +} + +// AllocResults allocates the results struct. +func (c Conmon_serveAttachContainer) AllocResults() (Conmon_serveAttachContainer_Results, error) { + r, err := c.Call.AllocResults(capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_serveAttachContainer_Results(r), err +} + +// Conmon_servePortForwardContainer holds the state for a server call to Conmon.servePortForwardContainer. +// See server.Call for documentation. +type Conmon_servePortForwardContainer struct { + *server.Call +} + +// Args returns the call's arguments. +func (c Conmon_servePortForwardContainer) Args() Conmon_servePortForwardContainer_Params { + return Conmon_servePortForwardContainer_Params(c.Call.Args()) +} + +// AllocResults allocates the results struct. +func (c Conmon_servePortForwardContainer) AllocResults() (Conmon_servePortForwardContainer_Results, error) { + r, err := c.Call.AllocResults(capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_servePortForwardContainer_Results(r), err +} + // Conmon_List is a list of Conmon. 
type Conmon_List = capnp.CapList[Conmon] @@ -3117,1680 +3270,2920 @@ func (f Conmon_TextTextMapEntry_Future) Struct() (Conmon_TextTextMapEntry, error return Conmon_TextTextMapEntry(p.Struct()), err } -type Conmon_version_Params capnp.Struct +type Conmon_ServeExecContainerRequest capnp.Struct -// Conmon_version_Params_TypeID is the unique identifier for the type Conmon_version_Params. -const Conmon_version_Params_TypeID = 0xcc2f70676afee4e7 +// Conmon_ServeExecContainerRequest_TypeID is the unique identifier for the type Conmon_ServeExecContainerRequest. +const Conmon_ServeExecContainerRequest_TypeID = 0xd01c697281e61c21 -func NewConmon_version_Params(s *capnp.Segment) (Conmon_version_Params, error) { - st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_version_Params(st), err +func NewConmon_ServeExecContainerRequest(s *capnp.Segment) (Conmon_ServeExecContainerRequest, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 3}) + return Conmon_ServeExecContainerRequest(st), err } -func NewRootConmon_version_Params(s *capnp.Segment) (Conmon_version_Params, error) { - st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_version_Params(st), err +func NewRootConmon_ServeExecContainerRequest(s *capnp.Segment) (Conmon_ServeExecContainerRequest, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 3}) + return Conmon_ServeExecContainerRequest(st), err } -func ReadRootConmon_version_Params(msg *capnp.Message) (Conmon_version_Params, error) { +func ReadRootConmon_ServeExecContainerRequest(msg *capnp.Message) (Conmon_ServeExecContainerRequest, error) { root, err := msg.Root() - return Conmon_version_Params(root.Struct()), err + return Conmon_ServeExecContainerRequest(root.Struct()), err } -func (s Conmon_version_Params) String() string { - str, _ := text.Marshal(0xcc2f70676afee4e7, capnp.Struct(s)) +func (s 
Conmon_ServeExecContainerRequest) String() string { + str, _ := text.Marshal(0xd01c697281e61c21, capnp.Struct(s)) return str } -func (s Conmon_version_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_ServeExecContainerRequest) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_version_Params) DecodeFromPtr(p capnp.Ptr) Conmon_version_Params { - return Conmon_version_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_ServeExecContainerRequest) DecodeFromPtr(p capnp.Ptr) Conmon_ServeExecContainerRequest { + return Conmon_ServeExecContainerRequest(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_version_Params) ToPtr() capnp.Ptr { +func (s Conmon_ServeExecContainerRequest) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_version_Params) IsValid() bool { +func (s Conmon_ServeExecContainerRequest) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_version_Params) Message() *capnp.Message { +func (s Conmon_ServeExecContainerRequest) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_version_Params) Segment() *capnp.Segment { +func (s Conmon_ServeExecContainerRequest) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_version_Params) Request() (Conmon_VersionRequest, error) { +func (s Conmon_ServeExecContainerRequest) Metadata() (Conmon_TextTextMapEntry_List, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_VersionRequest(p.Struct()), err + return Conmon_TextTextMapEntry_List(p.List()), err } -func (s Conmon_version_Params) HasRequest() bool { +func (s Conmon_ServeExecContainerRequest) HasMetadata() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_version_Params) SetRequest(v Conmon_VersionRequest) error { - return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +func (s Conmon_ServeExecContainerRequest) SetMetadata(v Conmon_TextTextMapEntry_List) error { + return capnp.Struct(s).SetPtr(0, 
v.ToPtr()) } -// NewRequest sets the request field to a newly -// allocated Conmon_VersionRequest struct, preferring placement in s's segment. -func (s Conmon_version_Params) NewRequest() (Conmon_VersionRequest, error) { - ss, err := NewConmon_VersionRequest(capnp.Struct(s).Segment()) +// NewMetadata sets the metadata field to a newly +// allocated Conmon_TextTextMapEntry_List, preferring placement in s's segment. +func (s Conmon_ServeExecContainerRequest) NewMetadata(n int32) (Conmon_TextTextMapEntry_List, error) { + l, err := NewConmon_TextTextMapEntry_List(capnp.Struct(s).Segment(), n) if err != nil { - return Conmon_VersionRequest{}, err + return Conmon_TextTextMapEntry_List{}, err } - err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) - return ss, err + err = capnp.Struct(s).SetPtr(0, l.ToPtr()) + return l, err +} +func (s Conmon_ServeExecContainerRequest) Id() (string, error) { + p, err := capnp.Struct(s).Ptr(1) + return p.Text(), err } -// Conmon_version_Params_List is a list of Conmon_version_Params. -type Conmon_version_Params_List = capnp.StructList[Conmon_version_Params] +func (s Conmon_ServeExecContainerRequest) HasId() bool { + return capnp.Struct(s).HasPtr(1) +} -// NewConmon_version_Params creates a new list of Conmon_version_Params. -func NewConmon_version_Params_List(s *capnp.Segment, sz int32) (Conmon_version_Params_List, error) { - l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_version_Params](l), err +func (s Conmon_ServeExecContainerRequest) IdBytes() ([]byte, error) { + p, err := capnp.Struct(s).Ptr(1) + return p.TextBytes(), err } -// Conmon_version_Params_Future is a wrapper for a Conmon_version_Params promised by a client call. 
-type Conmon_version_Params_Future struct{ *capnp.Future } +func (s Conmon_ServeExecContainerRequest) SetId(v string) error { + return capnp.Struct(s).SetText(1, v) +} -func (f Conmon_version_Params_Future) Struct() (Conmon_version_Params, error) { - p, err := f.Future.Ptr() - return Conmon_version_Params(p.Struct()), err +func (s Conmon_ServeExecContainerRequest) Command() (capnp.TextList, error) { + p, err := capnp.Struct(s).Ptr(2) + return capnp.TextList(p.List()), err } -func (p Conmon_version_Params_Future) Request() Conmon_VersionRequest_Future { - return Conmon_VersionRequest_Future{Future: p.Future.Field(0, nil)} + +func (s Conmon_ServeExecContainerRequest) HasCommand() bool { + return capnp.Struct(s).HasPtr(2) } -type Conmon_version_Results capnp.Struct +func (s Conmon_ServeExecContainerRequest) SetCommand(v capnp.TextList) error { + return capnp.Struct(s).SetPtr(2, v.ToPtr()) +} -// Conmon_version_Results_TypeID is the unique identifier for the type Conmon_version_Results. -const Conmon_version_Results_TypeID = 0xe313695ea9477b30 +// NewCommand sets the command field to a newly +// allocated capnp.TextList, preferring placement in s's segment. 
+func (s Conmon_ServeExecContainerRequest) NewCommand(n int32) (capnp.TextList, error) { + l, err := capnp.NewTextList(capnp.Struct(s).Segment(), n) + if err != nil { + return capnp.TextList{}, err + } + err = capnp.Struct(s).SetPtr(2, l.ToPtr()) + return l, err +} +func (s Conmon_ServeExecContainerRequest) Tty() bool { + return capnp.Struct(s).Bit(0) +} -func NewConmon_version_Results(s *capnp.Segment) (Conmon_version_Results, error) { +func (s Conmon_ServeExecContainerRequest) SetTty(v bool) { + capnp.Struct(s).SetBit(0, v) +} + +func (s Conmon_ServeExecContainerRequest) Stdin() bool { + return capnp.Struct(s).Bit(1) +} + +func (s Conmon_ServeExecContainerRequest) SetStdin(v bool) { + capnp.Struct(s).SetBit(1, v) +} + +func (s Conmon_ServeExecContainerRequest) Stdout() bool { + return capnp.Struct(s).Bit(2) +} + +func (s Conmon_ServeExecContainerRequest) SetStdout(v bool) { + capnp.Struct(s).SetBit(2, v) +} + +func (s Conmon_ServeExecContainerRequest) Stderr() bool { + return capnp.Struct(s).Bit(3) +} + +func (s Conmon_ServeExecContainerRequest) SetStderr(v bool) { + capnp.Struct(s).SetBit(3, v) +} + +func (s Conmon_ServeExecContainerRequest) CgroupManager() Conmon_CgroupManager { + return Conmon_CgroupManager(capnp.Struct(s).Uint16(2)) +} + +func (s Conmon_ServeExecContainerRequest) SetCgroupManager(v Conmon_CgroupManager) { + capnp.Struct(s).SetUint16(2, uint16(v)) +} + +// Conmon_ServeExecContainerRequest_List is a list of Conmon_ServeExecContainerRequest. +type Conmon_ServeExecContainerRequest_List = capnp.StructList[Conmon_ServeExecContainerRequest] + +// NewConmon_ServeExecContainerRequest creates a new list of Conmon_ServeExecContainerRequest. 
+func NewConmon_ServeExecContainerRequest_List(s *capnp.Segment, sz int32) (Conmon_ServeExecContainerRequest_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 3}, sz) + return capnp.StructList[Conmon_ServeExecContainerRequest](l), err +} + +// Conmon_ServeExecContainerRequest_Future is a wrapper for a Conmon_ServeExecContainerRequest promised by a client call. +type Conmon_ServeExecContainerRequest_Future struct{ *capnp.Future } + +func (f Conmon_ServeExecContainerRequest_Future) Struct() (Conmon_ServeExecContainerRequest, error) { + p, err := f.Future.Ptr() + return Conmon_ServeExecContainerRequest(p.Struct()), err +} + +type Conmon_ServeExecContainerResponse capnp.Struct + +// Conmon_ServeExecContainerResponse_TypeID is the unique identifier for the type Conmon_ServeExecContainerResponse. +const Conmon_ServeExecContainerResponse_TypeID = 0xa9e93cf268b17735 + +func NewConmon_ServeExecContainerResponse(s *capnp.Segment) (Conmon_ServeExecContainerResponse, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_version_Results(st), err + return Conmon_ServeExecContainerResponse(st), err } -func NewRootConmon_version_Results(s *capnp.Segment) (Conmon_version_Results, error) { +func NewRootConmon_ServeExecContainerResponse(s *capnp.Segment) (Conmon_ServeExecContainerResponse, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_version_Results(st), err + return Conmon_ServeExecContainerResponse(st), err } -func ReadRootConmon_version_Results(msg *capnp.Message) (Conmon_version_Results, error) { +func ReadRootConmon_ServeExecContainerResponse(msg *capnp.Message) (Conmon_ServeExecContainerResponse, error) { root, err := msg.Root() - return Conmon_version_Results(root.Struct()), err + return Conmon_ServeExecContainerResponse(root.Struct()), err } -func (s Conmon_version_Results) String() string { - str, _ := 
text.Marshal(0xe313695ea9477b30, capnp.Struct(s)) +func (s Conmon_ServeExecContainerResponse) String() string { + str, _ := text.Marshal(0xa9e93cf268b17735, capnp.Struct(s)) return str } -func (s Conmon_version_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_ServeExecContainerResponse) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_version_Results) DecodeFromPtr(p capnp.Ptr) Conmon_version_Results { - return Conmon_version_Results(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_ServeExecContainerResponse) DecodeFromPtr(p capnp.Ptr) Conmon_ServeExecContainerResponse { + return Conmon_ServeExecContainerResponse(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_version_Results) ToPtr() capnp.Ptr { +func (s Conmon_ServeExecContainerResponse) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_version_Results) IsValid() bool { +func (s Conmon_ServeExecContainerResponse) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_version_Results) Message() *capnp.Message { +func (s Conmon_ServeExecContainerResponse) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_version_Results) Segment() *capnp.Segment { +func (s Conmon_ServeExecContainerResponse) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_version_Results) Response() (Conmon_VersionResponse, error) { +func (s Conmon_ServeExecContainerResponse) Url() (string, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_VersionResponse(p.Struct()), err + return p.Text(), err } -func (s Conmon_version_Results) HasResponse() bool { +func (s Conmon_ServeExecContainerResponse) HasUrl() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_version_Results) SetResponse(v Conmon_VersionResponse) error { - return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +func (s Conmon_ServeExecContainerResponse) UrlBytes() ([]byte, error) { + p, err := 
capnp.Struct(s).Ptr(0) + return p.TextBytes(), err } -// NewResponse sets the response field to a newly -// allocated Conmon_VersionResponse struct, preferring placement in s's segment. -func (s Conmon_version_Results) NewResponse() (Conmon_VersionResponse, error) { - ss, err := NewConmon_VersionResponse(capnp.Struct(s).Segment()) - if err != nil { - return Conmon_VersionResponse{}, err - } - err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) - return ss, err +func (s Conmon_ServeExecContainerResponse) SetUrl(v string) error { + return capnp.Struct(s).SetText(0, v) } -// Conmon_version_Results_List is a list of Conmon_version_Results. -type Conmon_version_Results_List = capnp.StructList[Conmon_version_Results] +// Conmon_ServeExecContainerResponse_List is a list of Conmon_ServeExecContainerResponse. +type Conmon_ServeExecContainerResponse_List = capnp.StructList[Conmon_ServeExecContainerResponse] -// NewConmon_version_Results creates a new list of Conmon_version_Results. -func NewConmon_version_Results_List(s *capnp.Segment, sz int32) (Conmon_version_Results_List, error) { +// NewConmon_ServeExecContainerResponse creates a new list of Conmon_ServeExecContainerResponse. +func NewConmon_ServeExecContainerResponse_List(s *capnp.Segment, sz int32) (Conmon_ServeExecContainerResponse_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_version_Results](l), err + return capnp.StructList[Conmon_ServeExecContainerResponse](l), err } -// Conmon_version_Results_Future is a wrapper for a Conmon_version_Results promised by a client call. -type Conmon_version_Results_Future struct{ *capnp.Future } +// Conmon_ServeExecContainerResponse_Future is a wrapper for a Conmon_ServeExecContainerResponse promised by a client call. 
+type Conmon_ServeExecContainerResponse_Future struct{ *capnp.Future } -func (f Conmon_version_Results_Future) Struct() (Conmon_version_Results, error) { +func (f Conmon_ServeExecContainerResponse_Future) Struct() (Conmon_ServeExecContainerResponse, error) { p, err := f.Future.Ptr() - return Conmon_version_Results(p.Struct()), err -} -func (p Conmon_version_Results_Future) Response() Conmon_VersionResponse_Future { - return Conmon_VersionResponse_Future{Future: p.Future.Field(0, nil)} + return Conmon_ServeExecContainerResponse(p.Struct()), err } -type Conmon_createContainer_Params capnp.Struct +type Conmon_ServeAttachContainerRequest capnp.Struct -// Conmon_createContainer_Params_TypeID is the unique identifier for the type Conmon_createContainer_Params. -const Conmon_createContainer_Params_TypeID = 0xf44732c48f949ab8 +// Conmon_ServeAttachContainerRequest_TypeID is the unique identifier for the type Conmon_ServeAttachContainerRequest. +const Conmon_ServeAttachContainerRequest_TypeID = 0xca8c8e0d7826ae86 -func NewConmon_createContainer_Params(s *capnp.Segment) (Conmon_createContainer_Params, error) { - st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createContainer_Params(st), err +func NewConmon_ServeAttachContainerRequest(s *capnp.Segment) (Conmon_ServeAttachContainerRequest, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Conmon_ServeAttachContainerRequest(st), err } -func NewRootConmon_createContainer_Params(s *capnp.Segment) (Conmon_createContainer_Params, error) { - st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createContainer_Params(st), err +func NewRootConmon_ServeAttachContainerRequest(s *capnp.Segment) (Conmon_ServeAttachContainerRequest, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return Conmon_ServeAttachContainerRequest(st), err } -func 
ReadRootConmon_createContainer_Params(msg *capnp.Message) (Conmon_createContainer_Params, error) { +func ReadRootConmon_ServeAttachContainerRequest(msg *capnp.Message) (Conmon_ServeAttachContainerRequest, error) { root, err := msg.Root() - return Conmon_createContainer_Params(root.Struct()), err + return Conmon_ServeAttachContainerRequest(root.Struct()), err } -func (s Conmon_createContainer_Params) String() string { - str, _ := text.Marshal(0xf44732c48f949ab8, capnp.Struct(s)) +func (s Conmon_ServeAttachContainerRequest) String() string { + str, _ := text.Marshal(0xca8c8e0d7826ae86, capnp.Struct(s)) return str } -func (s Conmon_createContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_ServeAttachContainerRequest) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_createContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_createContainer_Params { - return Conmon_createContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_ServeAttachContainerRequest) DecodeFromPtr(p capnp.Ptr) Conmon_ServeAttachContainerRequest { + return Conmon_ServeAttachContainerRequest(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_createContainer_Params) ToPtr() capnp.Ptr { +func (s Conmon_ServeAttachContainerRequest) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_createContainer_Params) IsValid() bool { +func (s Conmon_ServeAttachContainerRequest) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_createContainer_Params) Message() *capnp.Message { +func (s Conmon_ServeAttachContainerRequest) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_createContainer_Params) Segment() *capnp.Segment { +func (s Conmon_ServeAttachContainerRequest) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_createContainer_Params) Request() (Conmon_CreateContainerRequest, error) { +func (s Conmon_ServeAttachContainerRequest) Metadata() 
(Conmon_TextTextMapEntry_List, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_CreateContainerRequest(p.Struct()), err + return Conmon_TextTextMapEntry_List(p.List()), err } -func (s Conmon_createContainer_Params) HasRequest() bool { +func (s Conmon_ServeAttachContainerRequest) HasMetadata() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_createContainer_Params) SetRequest(v Conmon_CreateContainerRequest) error { - return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +func (s Conmon_ServeAttachContainerRequest) SetMetadata(v Conmon_TextTextMapEntry_List) error { + return capnp.Struct(s).SetPtr(0, v.ToPtr()) } -// NewRequest sets the request field to a newly -// allocated Conmon_CreateContainerRequest struct, preferring placement in s's segment. -func (s Conmon_createContainer_Params) NewRequest() (Conmon_CreateContainerRequest, error) { - ss, err := NewConmon_CreateContainerRequest(capnp.Struct(s).Segment()) +// NewMetadata sets the metadata field to a newly +// allocated Conmon_TextTextMapEntry_List, preferring placement in s's segment. +func (s Conmon_ServeAttachContainerRequest) NewMetadata(n int32) (Conmon_TextTextMapEntry_List, error) { + l, err := NewConmon_TextTextMapEntry_List(capnp.Struct(s).Segment(), n) if err != nil { - return Conmon_CreateContainerRequest{}, err + return Conmon_TextTextMapEntry_List{}, err } - err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) - return ss, err + err = capnp.Struct(s).SetPtr(0, l.ToPtr()) + return l, err +} +func (s Conmon_ServeAttachContainerRequest) Id() (string, error) { + p, err := capnp.Struct(s).Ptr(1) + return p.Text(), err } -// Conmon_createContainer_Params_List is a list of Conmon_createContainer_Params. -type Conmon_createContainer_Params_List = capnp.StructList[Conmon_createContainer_Params] +func (s Conmon_ServeAttachContainerRequest) HasId() bool { + return capnp.Struct(s).HasPtr(1) +} -// NewConmon_createContainer_Params creates a new list of Conmon_createContainer_Params. 
-func NewConmon_createContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_createContainer_Params_List, error) { - l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_createContainer_Params](l), err +func (s Conmon_ServeAttachContainerRequest) IdBytes() ([]byte, error) { + p, err := capnp.Struct(s).Ptr(1) + return p.TextBytes(), err } -// Conmon_createContainer_Params_Future is a wrapper for a Conmon_createContainer_Params promised by a client call. -type Conmon_createContainer_Params_Future struct{ *capnp.Future } +func (s Conmon_ServeAttachContainerRequest) SetId(v string) error { + return capnp.Struct(s).SetText(1, v) +} -func (f Conmon_createContainer_Params_Future) Struct() (Conmon_createContainer_Params, error) { - p, err := f.Future.Ptr() - return Conmon_createContainer_Params(p.Struct()), err +func (s Conmon_ServeAttachContainerRequest) Stdin() bool { + return capnp.Struct(s).Bit(0) } -func (p Conmon_createContainer_Params_Future) Request() Conmon_CreateContainerRequest_Future { - return Conmon_CreateContainerRequest_Future{Future: p.Future.Field(0, nil)} + +func (s Conmon_ServeAttachContainerRequest) SetStdin(v bool) { + capnp.Struct(s).SetBit(0, v) } -type Conmon_createContainer_Results capnp.Struct +func (s Conmon_ServeAttachContainerRequest) Stdout() bool { + return capnp.Struct(s).Bit(1) +} -// Conmon_createContainer_Results_TypeID is the unique identifier for the type Conmon_createContainer_Results. 
-const Conmon_createContainer_Results_TypeID = 0xceba3c1a97be15f8 +func (s Conmon_ServeAttachContainerRequest) SetStdout(v bool) { + capnp.Struct(s).SetBit(1, v) +} -func NewConmon_createContainer_Results(s *capnp.Segment) (Conmon_createContainer_Results, error) { +func (s Conmon_ServeAttachContainerRequest) Stderr() bool { + return capnp.Struct(s).Bit(2) +} + +func (s Conmon_ServeAttachContainerRequest) SetStderr(v bool) { + capnp.Struct(s).SetBit(2, v) +} + +// Conmon_ServeAttachContainerRequest_List is a list of Conmon_ServeAttachContainerRequest. +type Conmon_ServeAttachContainerRequest_List = capnp.StructList[Conmon_ServeAttachContainerRequest] + +// NewConmon_ServeAttachContainerRequest creates a new list of Conmon_ServeAttachContainerRequest. +func NewConmon_ServeAttachContainerRequest_List(s *capnp.Segment, sz int32) (Conmon_ServeAttachContainerRequest_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return capnp.StructList[Conmon_ServeAttachContainerRequest](l), err +} + +// Conmon_ServeAttachContainerRequest_Future is a wrapper for a Conmon_ServeAttachContainerRequest promised by a client call. +type Conmon_ServeAttachContainerRequest_Future struct{ *capnp.Future } + +func (f Conmon_ServeAttachContainerRequest_Future) Struct() (Conmon_ServeAttachContainerRequest, error) { + p, err := f.Future.Ptr() + return Conmon_ServeAttachContainerRequest(p.Struct()), err +} + +type Conmon_ServeAttachContainerResponse capnp.Struct + +// Conmon_ServeAttachContainerResponse_TypeID is the unique identifier for the type Conmon_ServeAttachContainerResponse. 
+const Conmon_ServeAttachContainerResponse_TypeID = 0x94a72d9a2ccb9a30 + +func NewConmon_ServeAttachContainerResponse(s *capnp.Segment) (Conmon_ServeAttachContainerResponse, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createContainer_Results(st), err + return Conmon_ServeAttachContainerResponse(st), err } -func NewRootConmon_createContainer_Results(s *capnp.Segment) (Conmon_createContainer_Results, error) { +func NewRootConmon_ServeAttachContainerResponse(s *capnp.Segment) (Conmon_ServeAttachContainerResponse, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createContainer_Results(st), err + return Conmon_ServeAttachContainerResponse(st), err } -func ReadRootConmon_createContainer_Results(msg *capnp.Message) (Conmon_createContainer_Results, error) { +func ReadRootConmon_ServeAttachContainerResponse(msg *capnp.Message) (Conmon_ServeAttachContainerResponse, error) { root, err := msg.Root() - return Conmon_createContainer_Results(root.Struct()), err + return Conmon_ServeAttachContainerResponse(root.Struct()), err } -func (s Conmon_createContainer_Results) String() string { - str, _ := text.Marshal(0xceba3c1a97be15f8, capnp.Struct(s)) +func (s Conmon_ServeAttachContainerResponse) String() string { + str, _ := text.Marshal(0x94a72d9a2ccb9a30, capnp.Struct(s)) return str } -func (s Conmon_createContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_ServeAttachContainerResponse) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_createContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_createContainer_Results { - return Conmon_createContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_ServeAttachContainerResponse) DecodeFromPtr(p capnp.Ptr) Conmon_ServeAttachContainerResponse { + return Conmon_ServeAttachContainerResponse(capnp.Struct{}.DecodeFromPtr(p)) } -func (s 
Conmon_createContainer_Results) ToPtr() capnp.Ptr { +func (s Conmon_ServeAttachContainerResponse) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_createContainer_Results) IsValid() bool { +func (s Conmon_ServeAttachContainerResponse) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_createContainer_Results) Message() *capnp.Message { +func (s Conmon_ServeAttachContainerResponse) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_createContainer_Results) Segment() *capnp.Segment { +func (s Conmon_ServeAttachContainerResponse) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_createContainer_Results) Response() (Conmon_CreateContainerResponse, error) { +func (s Conmon_ServeAttachContainerResponse) Url() (string, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_CreateContainerResponse(p.Struct()), err + return p.Text(), err } -func (s Conmon_createContainer_Results) HasResponse() bool { +func (s Conmon_ServeAttachContainerResponse) HasUrl() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_createContainer_Results) SetResponse(v Conmon_CreateContainerResponse) error { - return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +func (s Conmon_ServeAttachContainerResponse) UrlBytes() ([]byte, error) { + p, err := capnp.Struct(s).Ptr(0) + return p.TextBytes(), err } -// NewResponse sets the response field to a newly -// allocated Conmon_CreateContainerResponse struct, preferring placement in s's segment. 
-func (s Conmon_createContainer_Results) NewResponse() (Conmon_CreateContainerResponse, error) { - ss, err := NewConmon_CreateContainerResponse(capnp.Struct(s).Segment()) - if err != nil { - return Conmon_CreateContainerResponse{}, err - } - err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) - return ss, err +func (s Conmon_ServeAttachContainerResponse) SetUrl(v string) error { + return capnp.Struct(s).SetText(0, v) } -// Conmon_createContainer_Results_List is a list of Conmon_createContainer_Results. -type Conmon_createContainer_Results_List = capnp.StructList[Conmon_createContainer_Results] +// Conmon_ServeAttachContainerResponse_List is a list of Conmon_ServeAttachContainerResponse. +type Conmon_ServeAttachContainerResponse_List = capnp.StructList[Conmon_ServeAttachContainerResponse] -// NewConmon_createContainer_Results creates a new list of Conmon_createContainer_Results. -func NewConmon_createContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_createContainer_Results_List, error) { +// NewConmon_ServeAttachContainerResponse creates a new list of Conmon_ServeAttachContainerResponse. +func NewConmon_ServeAttachContainerResponse_List(s *capnp.Segment, sz int32) (Conmon_ServeAttachContainerResponse_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_createContainer_Results](l), err + return capnp.StructList[Conmon_ServeAttachContainerResponse](l), err } -// Conmon_createContainer_Results_Future is a wrapper for a Conmon_createContainer_Results promised by a client call. -type Conmon_createContainer_Results_Future struct{ *capnp.Future } +// Conmon_ServeAttachContainerResponse_Future is a wrapper for a Conmon_ServeAttachContainerResponse promised by a client call. 
+type Conmon_ServeAttachContainerResponse_Future struct{ *capnp.Future } -func (f Conmon_createContainer_Results_Future) Struct() (Conmon_createContainer_Results, error) { +func (f Conmon_ServeAttachContainerResponse_Future) Struct() (Conmon_ServeAttachContainerResponse, error) { p, err := f.Future.Ptr() - return Conmon_createContainer_Results(p.Struct()), err -} -func (p Conmon_createContainer_Results_Future) Response() Conmon_CreateContainerResponse_Future { - return Conmon_CreateContainerResponse_Future{Future: p.Future.Field(0, nil)} + return Conmon_ServeAttachContainerResponse(p.Struct()), err } -type Conmon_execSyncContainer_Params capnp.Struct +type Conmon_ServePortForwardContainerRequest capnp.Struct -// Conmon_execSyncContainer_Params_TypeID is the unique identifier for the type Conmon_execSyncContainer_Params. -const Conmon_execSyncContainer_Params_TypeID = 0x83479da67279e173 +// Conmon_ServePortForwardContainerRequest_TypeID is the unique identifier for the type Conmon_ServePortForwardContainerRequest. 
+const Conmon_ServePortForwardContainerRequest_TypeID = 0xc865d8a1122038c5 -func NewConmon_execSyncContainer_Params(s *capnp.Segment) (Conmon_execSyncContainer_Params, error) { - st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_execSyncContainer_Params(st), err +func NewConmon_ServePortForwardContainerRequest(s *capnp.Segment) (Conmon_ServePortForwardContainerRequest, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return Conmon_ServePortForwardContainerRequest(st), err } -func NewRootConmon_execSyncContainer_Params(s *capnp.Segment) (Conmon_execSyncContainer_Params, error) { - st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_execSyncContainer_Params(st), err +func NewRootConmon_ServePortForwardContainerRequest(s *capnp.Segment) (Conmon_ServePortForwardContainerRequest, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return Conmon_ServePortForwardContainerRequest(st), err } -func ReadRootConmon_execSyncContainer_Params(msg *capnp.Message) (Conmon_execSyncContainer_Params, error) { +func ReadRootConmon_ServePortForwardContainerRequest(msg *capnp.Message) (Conmon_ServePortForwardContainerRequest, error) { root, err := msg.Root() - return Conmon_execSyncContainer_Params(root.Struct()), err + return Conmon_ServePortForwardContainerRequest(root.Struct()), err } -func (s Conmon_execSyncContainer_Params) String() string { - str, _ := text.Marshal(0x83479da67279e173, capnp.Struct(s)) +func (s Conmon_ServePortForwardContainerRequest) String() string { + str, _ := text.Marshal(0xc865d8a1122038c5, capnp.Struct(s)) return str } -func (s Conmon_execSyncContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_ServePortForwardContainerRequest) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_execSyncContainer_Params) 
DecodeFromPtr(p capnp.Ptr) Conmon_execSyncContainer_Params { - return Conmon_execSyncContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_ServePortForwardContainerRequest) DecodeFromPtr(p capnp.Ptr) Conmon_ServePortForwardContainerRequest { + return Conmon_ServePortForwardContainerRequest(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_execSyncContainer_Params) ToPtr() capnp.Ptr { +func (s Conmon_ServePortForwardContainerRequest) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_execSyncContainer_Params) IsValid() bool { +func (s Conmon_ServePortForwardContainerRequest) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_execSyncContainer_Params) Message() *capnp.Message { +func (s Conmon_ServePortForwardContainerRequest) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_execSyncContainer_Params) Segment() *capnp.Segment { +func (s Conmon_ServePortForwardContainerRequest) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_execSyncContainer_Params) Request() (Conmon_ExecSyncContainerRequest, error) { +func (s Conmon_ServePortForwardContainerRequest) Metadata() (Conmon_TextTextMapEntry_List, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_ExecSyncContainerRequest(p.Struct()), err + return Conmon_TextTextMapEntry_List(p.List()), err } -func (s Conmon_execSyncContainer_Params) HasRequest() bool { +func (s Conmon_ServePortForwardContainerRequest) HasMetadata() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_execSyncContainer_Params) SetRequest(v Conmon_ExecSyncContainerRequest) error { - return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +func (s Conmon_ServePortForwardContainerRequest) SetMetadata(v Conmon_TextTextMapEntry_List) error { + return capnp.Struct(s).SetPtr(0, v.ToPtr()) } -// NewRequest sets the request field to a newly -// allocated Conmon_ExecSyncContainerRequest struct, preferring placement in s's segment. 
-func (s Conmon_execSyncContainer_Params) NewRequest() (Conmon_ExecSyncContainerRequest, error) { - ss, err := NewConmon_ExecSyncContainerRequest(capnp.Struct(s).Segment()) +// NewMetadata sets the metadata field to a newly +// allocated Conmon_TextTextMapEntry_List, preferring placement in s's segment. +func (s Conmon_ServePortForwardContainerRequest) NewMetadata(n int32) (Conmon_TextTextMapEntry_List, error) { + l, err := NewConmon_TextTextMapEntry_List(capnp.Struct(s).Segment(), n) if err != nil { - return Conmon_ExecSyncContainerRequest{}, err + return Conmon_TextTextMapEntry_List{}, err } - err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) - return ss, err + err = capnp.Struct(s).SetPtr(0, l.ToPtr()) + return l, err +} +func (s Conmon_ServePortForwardContainerRequest) NetNsPath() (string, error) { + p, err := capnp.Struct(s).Ptr(1) + return p.Text(), err } -// Conmon_execSyncContainer_Params_List is a list of Conmon_execSyncContainer_Params. -type Conmon_execSyncContainer_Params_List = capnp.StructList[Conmon_execSyncContainer_Params] +func (s Conmon_ServePortForwardContainerRequest) HasNetNsPath() bool { + return capnp.Struct(s).HasPtr(1) +} -// NewConmon_execSyncContainer_Params creates a new list of Conmon_execSyncContainer_Params. -func NewConmon_execSyncContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_execSyncContainer_Params_List, error) { - l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_execSyncContainer_Params](l), err +func (s Conmon_ServePortForwardContainerRequest) NetNsPathBytes() ([]byte, error) { + p, err := capnp.Struct(s).Ptr(1) + return p.TextBytes(), err } -// Conmon_execSyncContainer_Params_Future is a wrapper for a Conmon_execSyncContainer_Params promised by a client call. 
-type Conmon_execSyncContainer_Params_Future struct{ *capnp.Future } +func (s Conmon_ServePortForwardContainerRequest) SetNetNsPath(v string) error { + return capnp.Struct(s).SetText(1, v) +} -func (f Conmon_execSyncContainer_Params_Future) Struct() (Conmon_execSyncContainer_Params, error) { - p, err := f.Future.Ptr() - return Conmon_execSyncContainer_Params(p.Struct()), err +// Conmon_ServePortForwardContainerRequest_List is a list of Conmon_ServePortForwardContainerRequest. +type Conmon_ServePortForwardContainerRequest_List = capnp.StructList[Conmon_ServePortForwardContainerRequest] + +// NewConmon_ServePortForwardContainerRequest creates a new list of Conmon_ServePortForwardContainerRequest. +func NewConmon_ServePortForwardContainerRequest_List(s *capnp.Segment, sz int32) (Conmon_ServePortForwardContainerRequest_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz) + return capnp.StructList[Conmon_ServePortForwardContainerRequest](l), err } -func (p Conmon_execSyncContainer_Params_Future) Request() Conmon_ExecSyncContainerRequest_Future { - return Conmon_ExecSyncContainerRequest_Future{Future: p.Future.Field(0, nil)} + +// Conmon_ServePortForwardContainerRequest_Future is a wrapper for a Conmon_ServePortForwardContainerRequest promised by a client call. +type Conmon_ServePortForwardContainerRequest_Future struct{ *capnp.Future } + +func (f Conmon_ServePortForwardContainerRequest_Future) Struct() (Conmon_ServePortForwardContainerRequest, error) { + p, err := f.Future.Ptr() + return Conmon_ServePortForwardContainerRequest(p.Struct()), err } -type Conmon_execSyncContainer_Results capnp.Struct +type Conmon_ServePortForwardContainerResponse capnp.Struct -// Conmon_execSyncContainer_Results_TypeID is the unique identifier for the type Conmon_execSyncContainer_Results. 
-const Conmon_execSyncContainer_Results_TypeID = 0xf8e86a5c0baa01bc +// Conmon_ServePortForwardContainerResponse_TypeID is the unique identifier for the type Conmon_ServePortForwardContainerResponse. +const Conmon_ServePortForwardContainerResponse_TypeID = 0xf7507d1843e734e4 -func NewConmon_execSyncContainer_Results(s *capnp.Segment) (Conmon_execSyncContainer_Results, error) { +func NewConmon_ServePortForwardContainerResponse(s *capnp.Segment) (Conmon_ServePortForwardContainerResponse, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_execSyncContainer_Results(st), err + return Conmon_ServePortForwardContainerResponse(st), err } -func NewRootConmon_execSyncContainer_Results(s *capnp.Segment) (Conmon_execSyncContainer_Results, error) { +func NewRootConmon_ServePortForwardContainerResponse(s *capnp.Segment) (Conmon_ServePortForwardContainerResponse, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_execSyncContainer_Results(st), err + return Conmon_ServePortForwardContainerResponse(st), err } -func ReadRootConmon_execSyncContainer_Results(msg *capnp.Message) (Conmon_execSyncContainer_Results, error) { +func ReadRootConmon_ServePortForwardContainerResponse(msg *capnp.Message) (Conmon_ServePortForwardContainerResponse, error) { root, err := msg.Root() - return Conmon_execSyncContainer_Results(root.Struct()), err + return Conmon_ServePortForwardContainerResponse(root.Struct()), err } -func (s Conmon_execSyncContainer_Results) String() string { - str, _ := text.Marshal(0xf8e86a5c0baa01bc, capnp.Struct(s)) +func (s Conmon_ServePortForwardContainerResponse) String() string { + str, _ := text.Marshal(0xf7507d1843e734e4, capnp.Struct(s)) return str } -func (s Conmon_execSyncContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_ServePortForwardContainerResponse) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return 
capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_execSyncContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_execSyncContainer_Results { - return Conmon_execSyncContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_ServePortForwardContainerResponse) DecodeFromPtr(p capnp.Ptr) Conmon_ServePortForwardContainerResponse { + return Conmon_ServePortForwardContainerResponse(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_execSyncContainer_Results) ToPtr() capnp.Ptr { +func (s Conmon_ServePortForwardContainerResponse) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_execSyncContainer_Results) IsValid() bool { +func (s Conmon_ServePortForwardContainerResponse) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_execSyncContainer_Results) Message() *capnp.Message { +func (s Conmon_ServePortForwardContainerResponse) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_execSyncContainer_Results) Segment() *capnp.Segment { +func (s Conmon_ServePortForwardContainerResponse) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_execSyncContainer_Results) Response() (Conmon_ExecSyncContainerResponse, error) { +func (s Conmon_ServePortForwardContainerResponse) Url() (string, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_ExecSyncContainerResponse(p.Struct()), err + return p.Text(), err } -func (s Conmon_execSyncContainer_Results) HasResponse() bool { +func (s Conmon_ServePortForwardContainerResponse) HasUrl() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_execSyncContainer_Results) SetResponse(v Conmon_ExecSyncContainerResponse) error { - return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +func (s Conmon_ServePortForwardContainerResponse) UrlBytes() ([]byte, error) { + p, err := capnp.Struct(s).Ptr(0) + return p.TextBytes(), err } -// NewResponse sets the response field to a newly -// allocated Conmon_ExecSyncContainerResponse struct, preferring placement 
in s's segment. -func (s Conmon_execSyncContainer_Results) NewResponse() (Conmon_ExecSyncContainerResponse, error) { - ss, err := NewConmon_ExecSyncContainerResponse(capnp.Struct(s).Segment()) - if err != nil { - return Conmon_ExecSyncContainerResponse{}, err - } - err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) - return ss, err +func (s Conmon_ServePortForwardContainerResponse) SetUrl(v string) error { + return capnp.Struct(s).SetText(0, v) } -// Conmon_execSyncContainer_Results_List is a list of Conmon_execSyncContainer_Results. -type Conmon_execSyncContainer_Results_List = capnp.StructList[Conmon_execSyncContainer_Results] +// Conmon_ServePortForwardContainerResponse_List is a list of Conmon_ServePortForwardContainerResponse. +type Conmon_ServePortForwardContainerResponse_List = capnp.StructList[Conmon_ServePortForwardContainerResponse] -// NewConmon_execSyncContainer_Results creates a new list of Conmon_execSyncContainer_Results. -func NewConmon_execSyncContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_execSyncContainer_Results_List, error) { +// NewConmon_ServePortForwardContainerResponse creates a new list of Conmon_ServePortForwardContainerResponse. +func NewConmon_ServePortForwardContainerResponse_List(s *capnp.Segment, sz int32) (Conmon_ServePortForwardContainerResponse_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_ServePortForwardContainerResponse](l), err +} + +// Conmon_ServePortForwardContainerResponse_Future is a wrapper for a Conmon_ServePortForwardContainerResponse promised by a client call. 
+type Conmon_ServePortForwardContainerResponse_Future struct{ *capnp.Future } + +func (f Conmon_ServePortForwardContainerResponse_Future) Struct() (Conmon_ServePortForwardContainerResponse, error) { + p, err := f.Future.Ptr() + return Conmon_ServePortForwardContainerResponse(p.Struct()), err +} + +type Conmon_version_Params capnp.Struct + +// Conmon_version_Params_TypeID is the unique identifier for the type Conmon_version_Params. +const Conmon_version_Params_TypeID = 0xcc2f70676afee4e7 + +func NewConmon_version_Params(s *capnp.Segment) (Conmon_version_Params, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_version_Params(st), err +} + +func NewRootConmon_version_Params(s *capnp.Segment) (Conmon_version_Params, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_version_Params(st), err +} + +func ReadRootConmon_version_Params(msg *capnp.Message) (Conmon_version_Params, error) { + root, err := msg.Root() + return Conmon_version_Params(root.Struct()), err +} + +func (s Conmon_version_Params) String() string { + str, _ := text.Marshal(0xcc2f70676afee4e7, capnp.Struct(s)) + return str +} + +func (s Conmon_version_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_version_Params) DecodeFromPtr(p capnp.Ptr) Conmon_version_Params { + return Conmon_version_Params(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_version_Params) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_version_Params) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_version_Params) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_version_Params) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_version_Params) Request() (Conmon_VersionRequest, error) { + p, err := capnp.Struct(s).Ptr(0) + return 
Conmon_VersionRequest(p.Struct()), err +} + +func (s Conmon_version_Params) HasRequest() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_version_Params) SetRequest(v Conmon_VersionRequest) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewRequest sets the request field to a newly +// allocated Conmon_VersionRequest struct, preferring placement in s's segment. +func (s Conmon_version_Params) NewRequest() (Conmon_VersionRequest, error) { + ss, err := NewConmon_VersionRequest(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_VersionRequest{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_version_Params_List is a list of Conmon_version_Params. +type Conmon_version_Params_List = capnp.StructList[Conmon_version_Params] + +// NewConmon_version_Params creates a new list of Conmon_version_Params. +func NewConmon_version_Params_List(s *capnp.Segment, sz int32) (Conmon_version_Params_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_version_Params](l), err +} + +// Conmon_version_Params_Future is a wrapper for a Conmon_version_Params promised by a client call. +type Conmon_version_Params_Future struct{ *capnp.Future } + +func (f Conmon_version_Params_Future) Struct() (Conmon_version_Params, error) { + p, err := f.Future.Ptr() + return Conmon_version_Params(p.Struct()), err +} +func (p Conmon_version_Params_Future) Request() Conmon_VersionRequest_Future { + return Conmon_VersionRequest_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_version_Results capnp.Struct + +// Conmon_version_Results_TypeID is the unique identifier for the type Conmon_version_Results. 
+const Conmon_version_Results_TypeID = 0xe313695ea9477b30 + +func NewConmon_version_Results(s *capnp.Segment) (Conmon_version_Results, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_version_Results(st), err +} + +func NewRootConmon_version_Results(s *capnp.Segment) (Conmon_version_Results, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_version_Results(st), err +} + +func ReadRootConmon_version_Results(msg *capnp.Message) (Conmon_version_Results, error) { + root, err := msg.Root() + return Conmon_version_Results(root.Struct()), err +} + +func (s Conmon_version_Results) String() string { + str, _ := text.Marshal(0xe313695ea9477b30, capnp.Struct(s)) + return str +} + +func (s Conmon_version_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_version_Results) DecodeFromPtr(p capnp.Ptr) Conmon_version_Results { + return Conmon_version_Results(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_version_Results) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_version_Results) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_version_Results) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_version_Results) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_version_Results) Response() (Conmon_VersionResponse, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_VersionResponse(p.Struct()), err +} + +func (s Conmon_version_Results) HasResponse() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_version_Results) SetResponse(v Conmon_VersionResponse) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewResponse sets the response field to a newly +// allocated Conmon_VersionResponse struct, preferring placement in s's segment. 
+func (s Conmon_version_Results) NewResponse() (Conmon_VersionResponse, error) { + ss, err := NewConmon_VersionResponse(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_VersionResponse{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_version_Results_List is a list of Conmon_version_Results. +type Conmon_version_Results_List = capnp.StructList[Conmon_version_Results] + +// NewConmon_version_Results creates a new list of Conmon_version_Results. +func NewConmon_version_Results_List(s *capnp.Segment, sz int32) (Conmon_version_Results_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_version_Results](l), err +} + +// Conmon_version_Results_Future is a wrapper for a Conmon_version_Results promised by a client call. +type Conmon_version_Results_Future struct{ *capnp.Future } + +func (f Conmon_version_Results_Future) Struct() (Conmon_version_Results, error) { + p, err := f.Future.Ptr() + return Conmon_version_Results(p.Struct()), err +} +func (p Conmon_version_Results_Future) Response() Conmon_VersionResponse_Future { + return Conmon_VersionResponse_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_createContainer_Params capnp.Struct + +// Conmon_createContainer_Params_TypeID is the unique identifier for the type Conmon_createContainer_Params. 
+const Conmon_createContainer_Params_TypeID = 0xf44732c48f949ab8 + +func NewConmon_createContainer_Params(s *capnp.Segment) (Conmon_createContainer_Params, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_createContainer_Params(st), err +} + +func NewRootConmon_createContainer_Params(s *capnp.Segment) (Conmon_createContainer_Params, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_createContainer_Params(st), err +} + +func ReadRootConmon_createContainer_Params(msg *capnp.Message) (Conmon_createContainer_Params, error) { + root, err := msg.Root() + return Conmon_createContainer_Params(root.Struct()), err +} + +func (s Conmon_createContainer_Params) String() string { + str, _ := text.Marshal(0xf44732c48f949ab8, capnp.Struct(s)) + return str +} + +func (s Conmon_createContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_createContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_createContainer_Params { + return Conmon_createContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_createContainer_Params) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_createContainer_Params) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_createContainer_Params) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_createContainer_Params) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_createContainer_Params) Request() (Conmon_CreateContainerRequest, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_CreateContainerRequest(p.Struct()), err +} + +func (s Conmon_createContainer_Params) HasRequest() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_createContainer_Params) SetRequest(v Conmon_CreateContainerRequest) error { + return capnp.Struct(s).SetPtr(0, 
capnp.Struct(v).ToPtr()) +} + +// NewRequest sets the request field to a newly +// allocated Conmon_CreateContainerRequest struct, preferring placement in s's segment. +func (s Conmon_createContainer_Params) NewRequest() (Conmon_CreateContainerRequest, error) { + ss, err := NewConmon_CreateContainerRequest(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_CreateContainerRequest{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_createContainer_Params_List is a list of Conmon_createContainer_Params. +type Conmon_createContainer_Params_List = capnp.StructList[Conmon_createContainer_Params] + +// NewConmon_createContainer_Params creates a new list of Conmon_createContainer_Params. +func NewConmon_createContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_createContainer_Params_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_createContainer_Params](l), err +} + +// Conmon_createContainer_Params_Future is a wrapper for a Conmon_createContainer_Params promised by a client call. +type Conmon_createContainer_Params_Future struct{ *capnp.Future } + +func (f Conmon_createContainer_Params_Future) Struct() (Conmon_createContainer_Params, error) { + p, err := f.Future.Ptr() + return Conmon_createContainer_Params(p.Struct()), err +} +func (p Conmon_createContainer_Params_Future) Request() Conmon_CreateContainerRequest_Future { + return Conmon_CreateContainerRequest_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_createContainer_Results capnp.Struct + +// Conmon_createContainer_Results_TypeID is the unique identifier for the type Conmon_createContainer_Results. 
+const Conmon_createContainer_Results_TypeID = 0xceba3c1a97be15f8 + +func NewConmon_createContainer_Results(s *capnp.Segment) (Conmon_createContainer_Results, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_createContainer_Results(st), err +} + +func NewRootConmon_createContainer_Results(s *capnp.Segment) (Conmon_createContainer_Results, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_createContainer_Results(st), err +} + +func ReadRootConmon_createContainer_Results(msg *capnp.Message) (Conmon_createContainer_Results, error) { + root, err := msg.Root() + return Conmon_createContainer_Results(root.Struct()), err +} + +func (s Conmon_createContainer_Results) String() string { + str, _ := text.Marshal(0xceba3c1a97be15f8, capnp.Struct(s)) + return str +} + +func (s Conmon_createContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_createContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_createContainer_Results { + return Conmon_createContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_createContainer_Results) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_createContainer_Results) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_createContainer_Results) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_createContainer_Results) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_createContainer_Results) Response() (Conmon_CreateContainerResponse, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_CreateContainerResponse(p.Struct()), err +} + +func (s Conmon_createContainer_Results) HasResponse() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_createContainer_Results) SetResponse(v Conmon_CreateContainerResponse) error { + return 
capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewResponse sets the response field to a newly +// allocated Conmon_CreateContainerResponse struct, preferring placement in s's segment. +func (s Conmon_createContainer_Results) NewResponse() (Conmon_CreateContainerResponse, error) { + ss, err := NewConmon_CreateContainerResponse(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_CreateContainerResponse{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_createContainer_Results_List is a list of Conmon_createContainer_Results. +type Conmon_createContainer_Results_List = capnp.StructList[Conmon_createContainer_Results] + +// NewConmon_createContainer_Results creates a new list of Conmon_createContainer_Results. +func NewConmon_createContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_createContainer_Results_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_createContainer_Results](l), err +} + +// Conmon_createContainer_Results_Future is a wrapper for a Conmon_createContainer_Results promised by a client call. +type Conmon_createContainer_Results_Future struct{ *capnp.Future } + +func (f Conmon_createContainer_Results_Future) Struct() (Conmon_createContainer_Results, error) { + p, err := f.Future.Ptr() + return Conmon_createContainer_Results(p.Struct()), err +} +func (p Conmon_createContainer_Results_Future) Response() Conmon_CreateContainerResponse_Future { + return Conmon_CreateContainerResponse_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_execSyncContainer_Params capnp.Struct + +// Conmon_execSyncContainer_Params_TypeID is the unique identifier for the type Conmon_execSyncContainer_Params. 
+const Conmon_execSyncContainer_Params_TypeID = 0x83479da67279e173 + +func NewConmon_execSyncContainer_Params(s *capnp.Segment) (Conmon_execSyncContainer_Params, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_execSyncContainer_Params(st), err +} + +func NewRootConmon_execSyncContainer_Params(s *capnp.Segment) (Conmon_execSyncContainer_Params, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_execSyncContainer_Params(st), err +} + +func ReadRootConmon_execSyncContainer_Params(msg *capnp.Message) (Conmon_execSyncContainer_Params, error) { + root, err := msg.Root() + return Conmon_execSyncContainer_Params(root.Struct()), err +} + +func (s Conmon_execSyncContainer_Params) String() string { + str, _ := text.Marshal(0x83479da67279e173, capnp.Struct(s)) + return str +} + +func (s Conmon_execSyncContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_execSyncContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_execSyncContainer_Params { + return Conmon_execSyncContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_execSyncContainer_Params) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_execSyncContainer_Params) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_execSyncContainer_Params) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_execSyncContainer_Params) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_execSyncContainer_Params) Request() (Conmon_ExecSyncContainerRequest, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_ExecSyncContainerRequest(p.Struct()), err +} + +func (s Conmon_execSyncContainer_Params) HasRequest() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_execSyncContainer_Params) SetRequest(v 
Conmon_ExecSyncContainerRequest) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewRequest sets the request field to a newly +// allocated Conmon_ExecSyncContainerRequest struct, preferring placement in s's segment. +func (s Conmon_execSyncContainer_Params) NewRequest() (Conmon_ExecSyncContainerRequest, error) { + ss, err := NewConmon_ExecSyncContainerRequest(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_ExecSyncContainerRequest{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_execSyncContainer_Params_List is a list of Conmon_execSyncContainer_Params. +type Conmon_execSyncContainer_Params_List = capnp.StructList[Conmon_execSyncContainer_Params] + +// NewConmon_execSyncContainer_Params creates a new list of Conmon_execSyncContainer_Params. +func NewConmon_execSyncContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_execSyncContainer_Params_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_execSyncContainer_Params](l), err +} + +// Conmon_execSyncContainer_Params_Future is a wrapper for a Conmon_execSyncContainer_Params promised by a client call. +type Conmon_execSyncContainer_Params_Future struct{ *capnp.Future } + +func (f Conmon_execSyncContainer_Params_Future) Struct() (Conmon_execSyncContainer_Params, error) { + p, err := f.Future.Ptr() + return Conmon_execSyncContainer_Params(p.Struct()), err +} +func (p Conmon_execSyncContainer_Params_Future) Request() Conmon_ExecSyncContainerRequest_Future { + return Conmon_ExecSyncContainerRequest_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_execSyncContainer_Results capnp.Struct + +// Conmon_execSyncContainer_Results_TypeID is the unique identifier for the type Conmon_execSyncContainer_Results. 
+const Conmon_execSyncContainer_Results_TypeID = 0xf8e86a5c0baa01bc + +func NewConmon_execSyncContainer_Results(s *capnp.Segment) (Conmon_execSyncContainer_Results, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_execSyncContainer_Results(st), err +} + +func NewRootConmon_execSyncContainer_Results(s *capnp.Segment) (Conmon_execSyncContainer_Results, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_execSyncContainer_Results(st), err +} + +func ReadRootConmon_execSyncContainer_Results(msg *capnp.Message) (Conmon_execSyncContainer_Results, error) { + root, err := msg.Root() + return Conmon_execSyncContainer_Results(root.Struct()), err +} + +func (s Conmon_execSyncContainer_Results) String() string { + str, _ := text.Marshal(0xf8e86a5c0baa01bc, capnp.Struct(s)) + return str +} + +func (s Conmon_execSyncContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_execSyncContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_execSyncContainer_Results { + return Conmon_execSyncContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_execSyncContainer_Results) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_execSyncContainer_Results) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_execSyncContainer_Results) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_execSyncContainer_Results) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_execSyncContainer_Results) Response() (Conmon_ExecSyncContainerResponse, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_ExecSyncContainerResponse(p.Struct()), err +} + +func (s Conmon_execSyncContainer_Results) HasResponse() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_execSyncContainer_Results) SetResponse(v 
Conmon_ExecSyncContainerResponse) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewResponse sets the response field to a newly +// allocated Conmon_ExecSyncContainerResponse struct, preferring placement in s's segment. +func (s Conmon_execSyncContainer_Results) NewResponse() (Conmon_ExecSyncContainerResponse, error) { + ss, err := NewConmon_ExecSyncContainerResponse(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_ExecSyncContainerResponse{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_execSyncContainer_Results_List is a list of Conmon_execSyncContainer_Results. +type Conmon_execSyncContainer_Results_List = capnp.StructList[Conmon_execSyncContainer_Results] + +// NewConmon_execSyncContainer_Results creates a new list of Conmon_execSyncContainer_Results. +func NewConmon_execSyncContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_execSyncContainer_Results_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) return capnp.StructList[Conmon_execSyncContainer_Results](l), err } -// Conmon_execSyncContainer_Results_Future is a wrapper for a Conmon_execSyncContainer_Results promised by a client call. -type Conmon_execSyncContainer_Results_Future struct{ *capnp.Future } +// Conmon_execSyncContainer_Results_Future is a wrapper for a Conmon_execSyncContainer_Results promised by a client call. 
+type Conmon_execSyncContainer_Results_Future struct{ *capnp.Future } + +func (f Conmon_execSyncContainer_Results_Future) Struct() (Conmon_execSyncContainer_Results, error) { + p, err := f.Future.Ptr() + return Conmon_execSyncContainer_Results(p.Struct()), err +} +func (p Conmon_execSyncContainer_Results_Future) Response() Conmon_ExecSyncContainerResponse_Future { + return Conmon_ExecSyncContainerResponse_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_attachContainer_Params capnp.Struct + +// Conmon_attachContainer_Params_TypeID is the unique identifier for the type Conmon_attachContainer_Params. +const Conmon_attachContainer_Params_TypeID = 0xaa2f3c8ad1c3af24 + +func NewConmon_attachContainer_Params(s *capnp.Segment) (Conmon_attachContainer_Params, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_attachContainer_Params(st), err +} + +func NewRootConmon_attachContainer_Params(s *capnp.Segment) (Conmon_attachContainer_Params, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_attachContainer_Params(st), err +} + +func ReadRootConmon_attachContainer_Params(msg *capnp.Message) (Conmon_attachContainer_Params, error) { + root, err := msg.Root() + return Conmon_attachContainer_Params(root.Struct()), err +} + +func (s Conmon_attachContainer_Params) String() string { + str, _ := text.Marshal(0xaa2f3c8ad1c3af24, capnp.Struct(s)) + return str +} + +func (s Conmon_attachContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_attachContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_attachContainer_Params { + return Conmon_attachContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_attachContainer_Params) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_attachContainer_Params) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s 
Conmon_attachContainer_Params) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_attachContainer_Params) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_attachContainer_Params) Request() (Conmon_AttachRequest, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_AttachRequest(p.Struct()), err +} + +func (s Conmon_attachContainer_Params) HasRequest() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_attachContainer_Params) SetRequest(v Conmon_AttachRequest) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewRequest sets the request field to a newly +// allocated Conmon_AttachRequest struct, preferring placement in s's segment. +func (s Conmon_attachContainer_Params) NewRequest() (Conmon_AttachRequest, error) { + ss, err := NewConmon_AttachRequest(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_AttachRequest{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_attachContainer_Params_List is a list of Conmon_attachContainer_Params. +type Conmon_attachContainer_Params_List = capnp.StructList[Conmon_attachContainer_Params] + +// NewConmon_attachContainer_Params creates a new list of Conmon_attachContainer_Params. +func NewConmon_attachContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_attachContainer_Params_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_attachContainer_Params](l), err +} + +// Conmon_attachContainer_Params_Future is a wrapper for a Conmon_attachContainer_Params promised by a client call. 
+type Conmon_attachContainer_Params_Future struct{ *capnp.Future } + +func (f Conmon_attachContainer_Params_Future) Struct() (Conmon_attachContainer_Params, error) { + p, err := f.Future.Ptr() + return Conmon_attachContainer_Params(p.Struct()), err +} +func (p Conmon_attachContainer_Params_Future) Request() Conmon_AttachRequest_Future { + return Conmon_AttachRequest_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_attachContainer_Results capnp.Struct + +// Conmon_attachContainer_Results_TypeID is the unique identifier for the type Conmon_attachContainer_Results. +const Conmon_attachContainer_Results_TypeID = 0xc5e65eec3dcf5b10 + +func NewConmon_attachContainer_Results(s *capnp.Segment) (Conmon_attachContainer_Results, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_attachContainer_Results(st), err +} + +func NewRootConmon_attachContainer_Results(s *capnp.Segment) (Conmon_attachContainer_Results, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_attachContainer_Results(st), err +} + +func ReadRootConmon_attachContainer_Results(msg *capnp.Message) (Conmon_attachContainer_Results, error) { + root, err := msg.Root() + return Conmon_attachContainer_Results(root.Struct()), err +} + +func (s Conmon_attachContainer_Results) String() string { + str, _ := text.Marshal(0xc5e65eec3dcf5b10, capnp.Struct(s)) + return str +} + +func (s Conmon_attachContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_attachContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_attachContainer_Results { + return Conmon_attachContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_attachContainer_Results) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_attachContainer_Results) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s 
Conmon_attachContainer_Results) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_attachContainer_Results) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_attachContainer_Results) Response() (Conmon_AttachResponse, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_AttachResponse(p.Struct()), err +} + +func (s Conmon_attachContainer_Results) HasResponse() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_attachContainer_Results) SetResponse(v Conmon_AttachResponse) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewResponse sets the response field to a newly +// allocated Conmon_AttachResponse struct, preferring placement in s's segment. +func (s Conmon_attachContainer_Results) NewResponse() (Conmon_AttachResponse, error) { + ss, err := NewConmon_AttachResponse(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_AttachResponse{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_attachContainer_Results_List is a list of Conmon_attachContainer_Results. +type Conmon_attachContainer_Results_List = capnp.StructList[Conmon_attachContainer_Results] + +// NewConmon_attachContainer_Results creates a new list of Conmon_attachContainer_Results. +func NewConmon_attachContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_attachContainer_Results_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_attachContainer_Results](l), err +} + +// Conmon_attachContainer_Results_Future is a wrapper for a Conmon_attachContainer_Results promised by a client call. 
+type Conmon_attachContainer_Results_Future struct{ *capnp.Future } + +func (f Conmon_attachContainer_Results_Future) Struct() (Conmon_attachContainer_Results, error) { + p, err := f.Future.Ptr() + return Conmon_attachContainer_Results(p.Struct()), err +} +func (p Conmon_attachContainer_Results_Future) Response() Conmon_AttachResponse_Future { + return Conmon_AttachResponse_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_reopenLogContainer_Params capnp.Struct + +// Conmon_reopenLogContainer_Params_TypeID is the unique identifier for the type Conmon_reopenLogContainer_Params. +const Conmon_reopenLogContainer_Params_TypeID = 0xe5ea916eb0c31336 + +func NewConmon_reopenLogContainer_Params(s *capnp.Segment) (Conmon_reopenLogContainer_Params, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_reopenLogContainer_Params(st), err +} + +func NewRootConmon_reopenLogContainer_Params(s *capnp.Segment) (Conmon_reopenLogContainer_Params, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_reopenLogContainer_Params(st), err +} + +func ReadRootConmon_reopenLogContainer_Params(msg *capnp.Message) (Conmon_reopenLogContainer_Params, error) { + root, err := msg.Root() + return Conmon_reopenLogContainer_Params(root.Struct()), err +} + +func (s Conmon_reopenLogContainer_Params) String() string { + str, _ := text.Marshal(0xe5ea916eb0c31336, capnp.Struct(s)) + return str +} + +func (s Conmon_reopenLogContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_reopenLogContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_reopenLogContainer_Params { + return Conmon_reopenLogContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_reopenLogContainer_Params) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_reopenLogContainer_Params) IsValid() bool { + return 
capnp.Struct(s).IsValid() +} + +func (s Conmon_reopenLogContainer_Params) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_reopenLogContainer_Params) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_reopenLogContainer_Params) Request() (Conmon_ReopenLogRequest, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_ReopenLogRequest(p.Struct()), err +} + +func (s Conmon_reopenLogContainer_Params) HasRequest() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_reopenLogContainer_Params) SetRequest(v Conmon_ReopenLogRequest) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewRequest sets the request field to a newly +// allocated Conmon_ReopenLogRequest struct, preferring placement in s's segment. +func (s Conmon_reopenLogContainer_Params) NewRequest() (Conmon_ReopenLogRequest, error) { + ss, err := NewConmon_ReopenLogRequest(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_ReopenLogRequest{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_reopenLogContainer_Params_List is a list of Conmon_reopenLogContainer_Params. +type Conmon_reopenLogContainer_Params_List = capnp.StructList[Conmon_reopenLogContainer_Params] + +// NewConmon_reopenLogContainer_Params creates a new list of Conmon_reopenLogContainer_Params. +func NewConmon_reopenLogContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_reopenLogContainer_Params_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_reopenLogContainer_Params](l), err +} + +// Conmon_reopenLogContainer_Params_Future is a wrapper for a Conmon_reopenLogContainer_Params promised by a client call. 
+type Conmon_reopenLogContainer_Params_Future struct{ *capnp.Future } + +func (f Conmon_reopenLogContainer_Params_Future) Struct() (Conmon_reopenLogContainer_Params, error) { + p, err := f.Future.Ptr() + return Conmon_reopenLogContainer_Params(p.Struct()), err +} +func (p Conmon_reopenLogContainer_Params_Future) Request() Conmon_ReopenLogRequest_Future { + return Conmon_ReopenLogRequest_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_reopenLogContainer_Results capnp.Struct + +// Conmon_reopenLogContainer_Results_TypeID is the unique identifier for the type Conmon_reopenLogContainer_Results. +const Conmon_reopenLogContainer_Results_TypeID = 0xa0ef8355b64ee985 + +func NewConmon_reopenLogContainer_Results(s *capnp.Segment) (Conmon_reopenLogContainer_Results, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_reopenLogContainer_Results(st), err +} + +func NewRootConmon_reopenLogContainer_Results(s *capnp.Segment) (Conmon_reopenLogContainer_Results, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_reopenLogContainer_Results(st), err +} + +func ReadRootConmon_reopenLogContainer_Results(msg *capnp.Message) (Conmon_reopenLogContainer_Results, error) { + root, err := msg.Root() + return Conmon_reopenLogContainer_Results(root.Struct()), err +} + +func (s Conmon_reopenLogContainer_Results) String() string { + str, _ := text.Marshal(0xa0ef8355b64ee985, capnp.Struct(s)) + return str +} + +func (s Conmon_reopenLogContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_reopenLogContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_reopenLogContainer_Results { + return Conmon_reopenLogContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +} -func (f Conmon_execSyncContainer_Results_Future) Struct() (Conmon_execSyncContainer_Results, error) { +func (s Conmon_reopenLogContainer_Results) ToPtr() 
capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_reopenLogContainer_Results) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_reopenLogContainer_Results) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_reopenLogContainer_Results) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_reopenLogContainer_Results) Response() (Conmon_ReopenLogResponse, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_ReopenLogResponse(p.Struct()), err +} + +func (s Conmon_reopenLogContainer_Results) HasResponse() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_reopenLogContainer_Results) SetResponse(v Conmon_ReopenLogResponse) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewResponse sets the response field to a newly +// allocated Conmon_ReopenLogResponse struct, preferring placement in s's segment. +func (s Conmon_reopenLogContainer_Results) NewResponse() (Conmon_ReopenLogResponse, error) { + ss, err := NewConmon_ReopenLogResponse(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_ReopenLogResponse{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_reopenLogContainer_Results_List is a list of Conmon_reopenLogContainer_Results. +type Conmon_reopenLogContainer_Results_List = capnp.StructList[Conmon_reopenLogContainer_Results] + +// NewConmon_reopenLogContainer_Results creates a new list of Conmon_reopenLogContainer_Results. +func NewConmon_reopenLogContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_reopenLogContainer_Results_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_reopenLogContainer_Results](l), err +} + +// Conmon_reopenLogContainer_Results_Future is a wrapper for a Conmon_reopenLogContainer_Results promised by a client call. 
+type Conmon_reopenLogContainer_Results_Future struct{ *capnp.Future } + +func (f Conmon_reopenLogContainer_Results_Future) Struct() (Conmon_reopenLogContainer_Results, error) { p, err := f.Future.Ptr() - return Conmon_execSyncContainer_Results(p.Struct()), err + return Conmon_reopenLogContainer_Results(p.Struct()), err } -func (p Conmon_execSyncContainer_Results_Future) Response() Conmon_ExecSyncContainerResponse_Future { - return Conmon_ExecSyncContainerResponse_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_reopenLogContainer_Results_Future) Response() Conmon_ReopenLogResponse_Future { + return Conmon_ReopenLogResponse_Future{Future: p.Future.Field(0, nil)} } -type Conmon_attachContainer_Params capnp.Struct +type Conmon_setWindowSizeContainer_Params capnp.Struct -// Conmon_attachContainer_Params_TypeID is the unique identifier for the type Conmon_attachContainer_Params. -const Conmon_attachContainer_Params_TypeID = 0xaa2f3c8ad1c3af24 +// Conmon_setWindowSizeContainer_Params_TypeID is the unique identifier for the type Conmon_setWindowSizeContainer_Params. 
+const Conmon_setWindowSizeContainer_Params_TypeID = 0xc76ccd4502bb61e7 -func NewConmon_attachContainer_Params(s *capnp.Segment) (Conmon_attachContainer_Params, error) { +func NewConmon_setWindowSizeContainer_Params(s *capnp.Segment) (Conmon_setWindowSizeContainer_Params, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_attachContainer_Params(st), err + return Conmon_setWindowSizeContainer_Params(st), err +} + +func NewRootConmon_setWindowSizeContainer_Params(s *capnp.Segment) (Conmon_setWindowSizeContainer_Params, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_setWindowSizeContainer_Params(st), err +} + +func ReadRootConmon_setWindowSizeContainer_Params(msg *capnp.Message) (Conmon_setWindowSizeContainer_Params, error) { + root, err := msg.Root() + return Conmon_setWindowSizeContainer_Params(root.Struct()), err +} + +func (s Conmon_setWindowSizeContainer_Params) String() string { + str, _ := text.Marshal(0xc76ccd4502bb61e7, capnp.Struct(s)) + return str +} + +func (s Conmon_setWindowSizeContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_setWindowSizeContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_setWindowSizeContainer_Params { + return Conmon_setWindowSizeContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_setWindowSizeContainer_Params) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_setWindowSizeContainer_Params) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_setWindowSizeContainer_Params) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_setWindowSizeContainer_Params) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_setWindowSizeContainer_Params) Request() (Conmon_SetWindowSizeRequest, error) { + p, err := capnp.Struct(s).Ptr(0) + return 
Conmon_SetWindowSizeRequest(p.Struct()), err +} + +func (s Conmon_setWindowSizeContainer_Params) HasRequest() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_setWindowSizeContainer_Params) SetRequest(v Conmon_SetWindowSizeRequest) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewRequest sets the request field to a newly +// allocated Conmon_SetWindowSizeRequest struct, preferring placement in s's segment. +func (s Conmon_setWindowSizeContainer_Params) NewRequest() (Conmon_SetWindowSizeRequest, error) { + ss, err := NewConmon_SetWindowSizeRequest(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_SetWindowSizeRequest{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_setWindowSizeContainer_Params_List is a list of Conmon_setWindowSizeContainer_Params. +type Conmon_setWindowSizeContainer_Params_List = capnp.StructList[Conmon_setWindowSizeContainer_Params] + +// NewConmon_setWindowSizeContainer_Params creates a new list of Conmon_setWindowSizeContainer_Params. +func NewConmon_setWindowSizeContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_setWindowSizeContainer_Params_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_setWindowSizeContainer_Params](l), err +} + +// Conmon_setWindowSizeContainer_Params_Future is a wrapper for a Conmon_setWindowSizeContainer_Params promised by a client call. 
+type Conmon_setWindowSizeContainer_Params_Future struct{ *capnp.Future } + +func (f Conmon_setWindowSizeContainer_Params_Future) Struct() (Conmon_setWindowSizeContainer_Params, error) { + p, err := f.Future.Ptr() + return Conmon_setWindowSizeContainer_Params(p.Struct()), err +} +func (p Conmon_setWindowSizeContainer_Params_Future) Request() Conmon_SetWindowSizeRequest_Future { + return Conmon_SetWindowSizeRequest_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_setWindowSizeContainer_Results capnp.Struct + +// Conmon_setWindowSizeContainer_Results_TypeID is the unique identifier for the type Conmon_setWindowSizeContainer_Results. +const Conmon_setWindowSizeContainer_Results_TypeID = 0xe00e522611477055 + +func NewConmon_setWindowSizeContainer_Results(s *capnp.Segment) (Conmon_setWindowSizeContainer_Results, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_setWindowSizeContainer_Results(st), err +} + +func NewRootConmon_setWindowSizeContainer_Results(s *capnp.Segment) (Conmon_setWindowSizeContainer_Results, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_setWindowSizeContainer_Results(st), err +} + +func ReadRootConmon_setWindowSizeContainer_Results(msg *capnp.Message) (Conmon_setWindowSizeContainer_Results, error) { + root, err := msg.Root() + return Conmon_setWindowSizeContainer_Results(root.Struct()), err +} + +func (s Conmon_setWindowSizeContainer_Results) String() string { + str, _ := text.Marshal(0xe00e522611477055, capnp.Struct(s)) + return str +} + +func (s Conmon_setWindowSizeContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Conmon_setWindowSizeContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_setWindowSizeContainer_Results { + return Conmon_setWindowSizeContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Conmon_setWindowSizeContainer_Results) ToPtr() 
capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Conmon_setWindowSizeContainer_Results) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Conmon_setWindowSizeContainer_Results) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Conmon_setWindowSizeContainer_Results) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Conmon_setWindowSizeContainer_Results) Response() (Conmon_SetWindowSizeResponse, error) { + p, err := capnp.Struct(s).Ptr(0) + return Conmon_SetWindowSizeResponse(p.Struct()), err +} + +func (s Conmon_setWindowSizeContainer_Results) HasResponse() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Conmon_setWindowSizeContainer_Results) SetResponse(v Conmon_SetWindowSizeResponse) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewResponse sets the response field to a newly +// allocated Conmon_SetWindowSizeResponse struct, preferring placement in s's segment. +func (s Conmon_setWindowSizeContainer_Results) NewResponse() (Conmon_SetWindowSizeResponse, error) { + ss, err := NewConmon_SetWindowSizeResponse(capnp.Struct(s).Segment()) + if err != nil { + return Conmon_SetWindowSizeResponse{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Conmon_setWindowSizeContainer_Results_List is a list of Conmon_setWindowSizeContainer_Results. +type Conmon_setWindowSizeContainer_Results_List = capnp.StructList[Conmon_setWindowSizeContainer_Results] + +// NewConmon_setWindowSizeContainer_Results creates a new list of Conmon_setWindowSizeContainer_Results. 
+func NewConmon_setWindowSizeContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_setWindowSizeContainer_Results_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Conmon_setWindowSizeContainer_Results](l), err +} + +// Conmon_setWindowSizeContainer_Results_Future is a wrapper for a Conmon_setWindowSizeContainer_Results promised by a client call. +type Conmon_setWindowSizeContainer_Results_Future struct{ *capnp.Future } + +func (f Conmon_setWindowSizeContainer_Results_Future) Struct() (Conmon_setWindowSizeContainer_Results, error) { + p, err := f.Future.Ptr() + return Conmon_setWindowSizeContainer_Results(p.Struct()), err +} +func (p Conmon_setWindowSizeContainer_Results_Future) Response() Conmon_SetWindowSizeResponse_Future { + return Conmon_SetWindowSizeResponse_Future{Future: p.Future.Field(0, nil)} +} + +type Conmon_createNamespaces_Params capnp.Struct + +// Conmon_createNamespaces_Params_TypeID is the unique identifier for the type Conmon_createNamespaces_Params. 
+const Conmon_createNamespaces_Params_TypeID = 0x8b4c03a0662a38dc + +func NewConmon_createNamespaces_Params(s *capnp.Segment) (Conmon_createNamespaces_Params, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Conmon_createNamespaces_Params(st), err } -func NewRootConmon_attachContainer_Params(s *capnp.Segment) (Conmon_attachContainer_Params, error) { +func NewRootConmon_createNamespaces_Params(s *capnp.Segment) (Conmon_createNamespaces_Params, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_attachContainer_Params(st), err + return Conmon_createNamespaces_Params(st), err } -func ReadRootConmon_attachContainer_Params(msg *capnp.Message) (Conmon_attachContainer_Params, error) { +func ReadRootConmon_createNamespaces_Params(msg *capnp.Message) (Conmon_createNamespaces_Params, error) { root, err := msg.Root() - return Conmon_attachContainer_Params(root.Struct()), err + return Conmon_createNamespaces_Params(root.Struct()), err } -func (s Conmon_attachContainer_Params) String() string { - str, _ := text.Marshal(0xaa2f3c8ad1c3af24, capnp.Struct(s)) +func (s Conmon_createNamespaces_Params) String() string { + str, _ := text.Marshal(0x8b4c03a0662a38dc, capnp.Struct(s)) return str } -func (s Conmon_attachContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_createNamespaces_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_attachContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_attachContainer_Params { - return Conmon_attachContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_createNamespaces_Params) DecodeFromPtr(p capnp.Ptr) Conmon_createNamespaces_Params { + return Conmon_createNamespaces_Params(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_attachContainer_Params) ToPtr() capnp.Ptr { +func (s Conmon_createNamespaces_Params) ToPtr() capnp.Ptr { return 
capnp.Struct(s).ToPtr() } -func (s Conmon_attachContainer_Params) IsValid() bool { +func (s Conmon_createNamespaces_Params) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_attachContainer_Params) Message() *capnp.Message { +func (s Conmon_createNamespaces_Params) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_attachContainer_Params) Segment() *capnp.Segment { +func (s Conmon_createNamespaces_Params) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_attachContainer_Params) Request() (Conmon_AttachRequest, error) { +func (s Conmon_createNamespaces_Params) Request() (Conmon_CreateNamespacesRequest, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_AttachRequest(p.Struct()), err + return Conmon_CreateNamespacesRequest(p.Struct()), err } -func (s Conmon_attachContainer_Params) HasRequest() bool { +func (s Conmon_createNamespaces_Params) HasRequest() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_attachContainer_Params) SetRequest(v Conmon_AttachRequest) error { +func (s Conmon_createNamespaces_Params) SetRequest(v Conmon_CreateNamespacesRequest) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewRequest sets the request field to a newly -// allocated Conmon_AttachRequest struct, preferring placement in s's segment. -func (s Conmon_attachContainer_Params) NewRequest() (Conmon_AttachRequest, error) { - ss, err := NewConmon_AttachRequest(capnp.Struct(s).Segment()) +// allocated Conmon_CreateNamespacesRequest struct, preferring placement in s's segment. 
+func (s Conmon_createNamespaces_Params) NewRequest() (Conmon_CreateNamespacesRequest, error) { + ss, err := NewConmon_CreateNamespacesRequest(capnp.Struct(s).Segment()) if err != nil { - return Conmon_AttachRequest{}, err + return Conmon_CreateNamespacesRequest{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_attachContainer_Params_List is a list of Conmon_attachContainer_Params. -type Conmon_attachContainer_Params_List = capnp.StructList[Conmon_attachContainer_Params] +// Conmon_createNamespaces_Params_List is a list of Conmon_createNamespaces_Params. +type Conmon_createNamespaces_Params_List = capnp.StructList[Conmon_createNamespaces_Params] -// NewConmon_attachContainer_Params creates a new list of Conmon_attachContainer_Params. -func NewConmon_attachContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_attachContainer_Params_List, error) { +// NewConmon_createNamespaces_Params creates a new list of Conmon_createNamespaces_Params. +func NewConmon_createNamespaces_Params_List(s *capnp.Segment, sz int32) (Conmon_createNamespaces_Params_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_attachContainer_Params](l), err + return capnp.StructList[Conmon_createNamespaces_Params](l), err } -// Conmon_attachContainer_Params_Future is a wrapper for a Conmon_attachContainer_Params promised by a client call. -type Conmon_attachContainer_Params_Future struct{ *capnp.Future } +// Conmon_createNamespaces_Params_Future is a wrapper for a Conmon_createNamespaces_Params promised by a client call. 
+type Conmon_createNamespaces_Params_Future struct{ *capnp.Future } -func (f Conmon_attachContainer_Params_Future) Struct() (Conmon_attachContainer_Params, error) { +func (f Conmon_createNamespaces_Params_Future) Struct() (Conmon_createNamespaces_Params, error) { p, err := f.Future.Ptr() - return Conmon_attachContainer_Params(p.Struct()), err + return Conmon_createNamespaces_Params(p.Struct()), err } -func (p Conmon_attachContainer_Params_Future) Request() Conmon_AttachRequest_Future { - return Conmon_AttachRequest_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_createNamespaces_Params_Future) Request() Conmon_CreateNamespacesRequest_Future { + return Conmon_CreateNamespacesRequest_Future{Future: p.Future.Field(0, nil)} } -type Conmon_attachContainer_Results capnp.Struct +type Conmon_createNamespaces_Results capnp.Struct -// Conmon_attachContainer_Results_TypeID is the unique identifier for the type Conmon_attachContainer_Results. -const Conmon_attachContainer_Results_TypeID = 0xc5e65eec3dcf5b10 +// Conmon_createNamespaces_Results_TypeID is the unique identifier for the type Conmon_createNamespaces_Results. 
+const Conmon_createNamespaces_Results_TypeID = 0x8aef91973dc8a4f5 -func NewConmon_attachContainer_Results(s *capnp.Segment) (Conmon_attachContainer_Results, error) { +func NewConmon_createNamespaces_Results(s *capnp.Segment) (Conmon_createNamespaces_Results, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_attachContainer_Results(st), err + return Conmon_createNamespaces_Results(st), err } -func NewRootConmon_attachContainer_Results(s *capnp.Segment) (Conmon_attachContainer_Results, error) { +func NewRootConmon_createNamespaces_Results(s *capnp.Segment) (Conmon_createNamespaces_Results, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_attachContainer_Results(st), err + return Conmon_createNamespaces_Results(st), err } -func ReadRootConmon_attachContainer_Results(msg *capnp.Message) (Conmon_attachContainer_Results, error) { +func ReadRootConmon_createNamespaces_Results(msg *capnp.Message) (Conmon_createNamespaces_Results, error) { root, err := msg.Root() - return Conmon_attachContainer_Results(root.Struct()), err + return Conmon_createNamespaces_Results(root.Struct()), err } -func (s Conmon_attachContainer_Results) String() string { - str, _ := text.Marshal(0xc5e65eec3dcf5b10, capnp.Struct(s)) +func (s Conmon_createNamespaces_Results) String() string { + str, _ := text.Marshal(0x8aef91973dc8a4f5, capnp.Struct(s)) return str } -func (s Conmon_attachContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_createNamespaces_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_attachContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_attachContainer_Results { - return Conmon_attachContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_createNamespaces_Results) DecodeFromPtr(p capnp.Ptr) Conmon_createNamespaces_Results { + return 
Conmon_createNamespaces_Results(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_attachContainer_Results) ToPtr() capnp.Ptr { +func (s Conmon_createNamespaces_Results) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_attachContainer_Results) IsValid() bool { +func (s Conmon_createNamespaces_Results) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_attachContainer_Results) Message() *capnp.Message { +func (s Conmon_createNamespaces_Results) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_attachContainer_Results) Segment() *capnp.Segment { +func (s Conmon_createNamespaces_Results) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_attachContainer_Results) Response() (Conmon_AttachResponse, error) { +func (s Conmon_createNamespaces_Results) Response() (Conmon_CreateNamespacesResponse, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_AttachResponse(p.Struct()), err + return Conmon_CreateNamespacesResponse(p.Struct()), err } -func (s Conmon_attachContainer_Results) HasResponse() bool { +func (s Conmon_createNamespaces_Results) HasResponse() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_attachContainer_Results) SetResponse(v Conmon_AttachResponse) error { +func (s Conmon_createNamespaces_Results) SetResponse(v Conmon_CreateNamespacesResponse) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewResponse sets the response field to a newly -// allocated Conmon_AttachResponse struct, preferring placement in s's segment. -func (s Conmon_attachContainer_Results) NewResponse() (Conmon_AttachResponse, error) { - ss, err := NewConmon_AttachResponse(capnp.Struct(s).Segment()) +// allocated Conmon_CreateNamespacesResponse struct, preferring placement in s's segment. 
+func (s Conmon_createNamespaces_Results) NewResponse() (Conmon_CreateNamespacesResponse, error) { + ss, err := NewConmon_CreateNamespacesResponse(capnp.Struct(s).Segment()) if err != nil { - return Conmon_AttachResponse{}, err + return Conmon_CreateNamespacesResponse{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_attachContainer_Results_List is a list of Conmon_attachContainer_Results. -type Conmon_attachContainer_Results_List = capnp.StructList[Conmon_attachContainer_Results] +// Conmon_createNamespaces_Results_List is a list of Conmon_createNamespaces_Results. +type Conmon_createNamespaces_Results_List = capnp.StructList[Conmon_createNamespaces_Results] -// NewConmon_attachContainer_Results creates a new list of Conmon_attachContainer_Results. -func NewConmon_attachContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_attachContainer_Results_List, error) { +// NewConmon_createNamespaces_Results creates a new list of Conmon_createNamespaces_Results. +func NewConmon_createNamespaces_Results_List(s *capnp.Segment, sz int32) (Conmon_createNamespaces_Results_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_attachContainer_Results](l), err + return capnp.StructList[Conmon_createNamespaces_Results](l), err } -// Conmon_attachContainer_Results_Future is a wrapper for a Conmon_attachContainer_Results promised by a client call. -type Conmon_attachContainer_Results_Future struct{ *capnp.Future } +// Conmon_createNamespaces_Results_Future is a wrapper for a Conmon_createNamespaces_Results promised by a client call. 
+type Conmon_createNamespaces_Results_Future struct{ *capnp.Future } -func (f Conmon_attachContainer_Results_Future) Struct() (Conmon_attachContainer_Results, error) { +func (f Conmon_createNamespaces_Results_Future) Struct() (Conmon_createNamespaces_Results, error) { p, err := f.Future.Ptr() - return Conmon_attachContainer_Results(p.Struct()), err + return Conmon_createNamespaces_Results(p.Struct()), err } -func (p Conmon_attachContainer_Results_Future) Response() Conmon_AttachResponse_Future { - return Conmon_AttachResponse_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_createNamespaces_Results_Future) Response() Conmon_CreateNamespacesResponse_Future { + return Conmon_CreateNamespacesResponse_Future{Future: p.Future.Field(0, nil)} } -type Conmon_reopenLogContainer_Params capnp.Struct +type Conmon_startFdSocket_Params capnp.Struct -// Conmon_reopenLogContainer_Params_TypeID is the unique identifier for the type Conmon_reopenLogContainer_Params. -const Conmon_reopenLogContainer_Params_TypeID = 0xe5ea916eb0c31336 +// Conmon_startFdSocket_Params_TypeID is the unique identifier for the type Conmon_startFdSocket_Params. 
+const Conmon_startFdSocket_Params_TypeID = 0xce733f0914c80b6b -func NewConmon_reopenLogContainer_Params(s *capnp.Segment) (Conmon_reopenLogContainer_Params, error) { +func NewConmon_startFdSocket_Params(s *capnp.Segment) (Conmon_startFdSocket_Params, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_reopenLogContainer_Params(st), err + return Conmon_startFdSocket_Params(st), err } -func NewRootConmon_reopenLogContainer_Params(s *capnp.Segment) (Conmon_reopenLogContainer_Params, error) { +func NewRootConmon_startFdSocket_Params(s *capnp.Segment) (Conmon_startFdSocket_Params, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_reopenLogContainer_Params(st), err + return Conmon_startFdSocket_Params(st), err } -func ReadRootConmon_reopenLogContainer_Params(msg *capnp.Message) (Conmon_reopenLogContainer_Params, error) { +func ReadRootConmon_startFdSocket_Params(msg *capnp.Message) (Conmon_startFdSocket_Params, error) { root, err := msg.Root() - return Conmon_reopenLogContainer_Params(root.Struct()), err + return Conmon_startFdSocket_Params(root.Struct()), err } -func (s Conmon_reopenLogContainer_Params) String() string { - str, _ := text.Marshal(0xe5ea916eb0c31336, capnp.Struct(s)) +func (s Conmon_startFdSocket_Params) String() string { + str, _ := text.Marshal(0xce733f0914c80b6b, capnp.Struct(s)) return str } -func (s Conmon_reopenLogContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_startFdSocket_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_reopenLogContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_reopenLogContainer_Params { - return Conmon_reopenLogContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_startFdSocket_Params) DecodeFromPtr(p capnp.Ptr) Conmon_startFdSocket_Params { + return Conmon_startFdSocket_Params(capnp.Struct{}.DecodeFromPtr(p)) } -func (s 
Conmon_reopenLogContainer_Params) ToPtr() capnp.Ptr { +func (s Conmon_startFdSocket_Params) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_reopenLogContainer_Params) IsValid() bool { +func (s Conmon_startFdSocket_Params) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_reopenLogContainer_Params) Message() *capnp.Message { +func (s Conmon_startFdSocket_Params) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_reopenLogContainer_Params) Segment() *capnp.Segment { +func (s Conmon_startFdSocket_Params) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_reopenLogContainer_Params) Request() (Conmon_ReopenLogRequest, error) { +func (s Conmon_startFdSocket_Params) Request() (Conmon_StartFdSocketRequest, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_ReopenLogRequest(p.Struct()), err + return Conmon_StartFdSocketRequest(p.Struct()), err } -func (s Conmon_reopenLogContainer_Params) HasRequest() bool { +func (s Conmon_startFdSocket_Params) HasRequest() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_reopenLogContainer_Params) SetRequest(v Conmon_ReopenLogRequest) error { +func (s Conmon_startFdSocket_Params) SetRequest(v Conmon_StartFdSocketRequest) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewRequest sets the request field to a newly -// allocated Conmon_ReopenLogRequest struct, preferring placement in s's segment. -func (s Conmon_reopenLogContainer_Params) NewRequest() (Conmon_ReopenLogRequest, error) { - ss, err := NewConmon_ReopenLogRequest(capnp.Struct(s).Segment()) +// allocated Conmon_StartFdSocketRequest struct, preferring placement in s's segment. 
+func (s Conmon_startFdSocket_Params) NewRequest() (Conmon_StartFdSocketRequest, error) { + ss, err := NewConmon_StartFdSocketRequest(capnp.Struct(s).Segment()) if err != nil { - return Conmon_ReopenLogRequest{}, err + return Conmon_StartFdSocketRequest{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_reopenLogContainer_Params_List is a list of Conmon_reopenLogContainer_Params. -type Conmon_reopenLogContainer_Params_List = capnp.StructList[Conmon_reopenLogContainer_Params] +// Conmon_startFdSocket_Params_List is a list of Conmon_startFdSocket_Params. +type Conmon_startFdSocket_Params_List = capnp.StructList[Conmon_startFdSocket_Params] -// NewConmon_reopenLogContainer_Params creates a new list of Conmon_reopenLogContainer_Params. -func NewConmon_reopenLogContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_reopenLogContainer_Params_List, error) { +// NewConmon_startFdSocket_Params creates a new list of Conmon_startFdSocket_Params. +func NewConmon_startFdSocket_Params_List(s *capnp.Segment, sz int32) (Conmon_startFdSocket_Params_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_reopenLogContainer_Params](l), err + return capnp.StructList[Conmon_startFdSocket_Params](l), err } -// Conmon_reopenLogContainer_Params_Future is a wrapper for a Conmon_reopenLogContainer_Params promised by a client call. -type Conmon_reopenLogContainer_Params_Future struct{ *capnp.Future } +// Conmon_startFdSocket_Params_Future is a wrapper for a Conmon_startFdSocket_Params promised by a client call. 
+type Conmon_startFdSocket_Params_Future struct{ *capnp.Future } -func (f Conmon_reopenLogContainer_Params_Future) Struct() (Conmon_reopenLogContainer_Params, error) { +func (f Conmon_startFdSocket_Params_Future) Struct() (Conmon_startFdSocket_Params, error) { p, err := f.Future.Ptr() - return Conmon_reopenLogContainer_Params(p.Struct()), err + return Conmon_startFdSocket_Params(p.Struct()), err } -func (p Conmon_reopenLogContainer_Params_Future) Request() Conmon_ReopenLogRequest_Future { - return Conmon_ReopenLogRequest_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_startFdSocket_Params_Future) Request() Conmon_StartFdSocketRequest_Future { + return Conmon_StartFdSocketRequest_Future{Future: p.Future.Field(0, nil)} } -type Conmon_reopenLogContainer_Results capnp.Struct +type Conmon_startFdSocket_Results capnp.Struct -// Conmon_reopenLogContainer_Results_TypeID is the unique identifier for the type Conmon_reopenLogContainer_Results. -const Conmon_reopenLogContainer_Results_TypeID = 0xa0ef8355b64ee985 +// Conmon_startFdSocket_Results_TypeID is the unique identifier for the type Conmon_startFdSocket_Results. 
+const Conmon_startFdSocket_Results_TypeID = 0xf4e3e92ae0815f15 -func NewConmon_reopenLogContainer_Results(s *capnp.Segment) (Conmon_reopenLogContainer_Results, error) { +func NewConmon_startFdSocket_Results(s *capnp.Segment) (Conmon_startFdSocket_Results, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_reopenLogContainer_Results(st), err + return Conmon_startFdSocket_Results(st), err } -func NewRootConmon_reopenLogContainer_Results(s *capnp.Segment) (Conmon_reopenLogContainer_Results, error) { +func NewRootConmon_startFdSocket_Results(s *capnp.Segment) (Conmon_startFdSocket_Results, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_reopenLogContainer_Results(st), err + return Conmon_startFdSocket_Results(st), err } -func ReadRootConmon_reopenLogContainer_Results(msg *capnp.Message) (Conmon_reopenLogContainer_Results, error) { +func ReadRootConmon_startFdSocket_Results(msg *capnp.Message) (Conmon_startFdSocket_Results, error) { root, err := msg.Root() - return Conmon_reopenLogContainer_Results(root.Struct()), err + return Conmon_startFdSocket_Results(root.Struct()), err } -func (s Conmon_reopenLogContainer_Results) String() string { - str, _ := text.Marshal(0xa0ef8355b64ee985, capnp.Struct(s)) +func (s Conmon_startFdSocket_Results) String() string { + str, _ := text.Marshal(0xf4e3e92ae0815f15, capnp.Struct(s)) return str } -func (s Conmon_reopenLogContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_startFdSocket_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_reopenLogContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_reopenLogContainer_Results { - return Conmon_reopenLogContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_startFdSocket_Results) DecodeFromPtr(p capnp.Ptr) Conmon_startFdSocket_Results { + return 
Conmon_startFdSocket_Results(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_reopenLogContainer_Results) ToPtr() capnp.Ptr { +func (s Conmon_startFdSocket_Results) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_reopenLogContainer_Results) IsValid() bool { +func (s Conmon_startFdSocket_Results) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_reopenLogContainer_Results) Message() *capnp.Message { +func (s Conmon_startFdSocket_Results) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_reopenLogContainer_Results) Segment() *capnp.Segment { +func (s Conmon_startFdSocket_Results) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_reopenLogContainer_Results) Response() (Conmon_ReopenLogResponse, error) { +func (s Conmon_startFdSocket_Results) Response() (Conmon_StartFdSocketResponse, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_ReopenLogResponse(p.Struct()), err + return Conmon_StartFdSocketResponse(p.Struct()), err } -func (s Conmon_reopenLogContainer_Results) HasResponse() bool { +func (s Conmon_startFdSocket_Results) HasResponse() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_reopenLogContainer_Results) SetResponse(v Conmon_ReopenLogResponse) error { +func (s Conmon_startFdSocket_Results) SetResponse(v Conmon_StartFdSocketResponse) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewResponse sets the response field to a newly -// allocated Conmon_ReopenLogResponse struct, preferring placement in s's segment. -func (s Conmon_reopenLogContainer_Results) NewResponse() (Conmon_ReopenLogResponse, error) { - ss, err := NewConmon_ReopenLogResponse(capnp.Struct(s).Segment()) +// allocated Conmon_StartFdSocketResponse struct, preferring placement in s's segment. 
+func (s Conmon_startFdSocket_Results) NewResponse() (Conmon_StartFdSocketResponse, error) { + ss, err := NewConmon_StartFdSocketResponse(capnp.Struct(s).Segment()) if err != nil { - return Conmon_ReopenLogResponse{}, err + return Conmon_StartFdSocketResponse{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_reopenLogContainer_Results_List is a list of Conmon_reopenLogContainer_Results. -type Conmon_reopenLogContainer_Results_List = capnp.StructList[Conmon_reopenLogContainer_Results] +// Conmon_startFdSocket_Results_List is a list of Conmon_startFdSocket_Results. +type Conmon_startFdSocket_Results_List = capnp.StructList[Conmon_startFdSocket_Results] -// NewConmon_reopenLogContainer_Results creates a new list of Conmon_reopenLogContainer_Results. -func NewConmon_reopenLogContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_reopenLogContainer_Results_List, error) { +// NewConmon_startFdSocket_Results creates a new list of Conmon_startFdSocket_Results. +func NewConmon_startFdSocket_Results_List(s *capnp.Segment, sz int32) (Conmon_startFdSocket_Results_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_reopenLogContainer_Results](l), err + return capnp.StructList[Conmon_startFdSocket_Results](l), err } -// Conmon_reopenLogContainer_Results_Future is a wrapper for a Conmon_reopenLogContainer_Results promised by a client call. -type Conmon_reopenLogContainer_Results_Future struct{ *capnp.Future } +// Conmon_startFdSocket_Results_Future is a wrapper for a Conmon_startFdSocket_Results promised by a client call. 
+type Conmon_startFdSocket_Results_Future struct{ *capnp.Future } -func (f Conmon_reopenLogContainer_Results_Future) Struct() (Conmon_reopenLogContainer_Results, error) { +func (f Conmon_startFdSocket_Results_Future) Struct() (Conmon_startFdSocket_Results, error) { p, err := f.Future.Ptr() - return Conmon_reopenLogContainer_Results(p.Struct()), err + return Conmon_startFdSocket_Results(p.Struct()), err } -func (p Conmon_reopenLogContainer_Results_Future) Response() Conmon_ReopenLogResponse_Future { - return Conmon_ReopenLogResponse_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_startFdSocket_Results_Future) Response() Conmon_StartFdSocketResponse_Future { + return Conmon_StartFdSocketResponse_Future{Future: p.Future.Field(0, nil)} } -type Conmon_setWindowSizeContainer_Params capnp.Struct +type Conmon_serveExecContainer_Params capnp.Struct -// Conmon_setWindowSizeContainer_Params_TypeID is the unique identifier for the type Conmon_setWindowSizeContainer_Params. -const Conmon_setWindowSizeContainer_Params_TypeID = 0xc76ccd4502bb61e7 +// Conmon_serveExecContainer_Params_TypeID is the unique identifier for the type Conmon_serveExecContainer_Params. 
+const Conmon_serveExecContainer_Params_TypeID = 0x90a3950a51412b8b -func NewConmon_setWindowSizeContainer_Params(s *capnp.Segment) (Conmon_setWindowSizeContainer_Params, error) { +func NewConmon_serveExecContainer_Params(s *capnp.Segment) (Conmon_serveExecContainer_Params, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_setWindowSizeContainer_Params(st), err + return Conmon_serveExecContainer_Params(st), err } -func NewRootConmon_setWindowSizeContainer_Params(s *capnp.Segment) (Conmon_setWindowSizeContainer_Params, error) { +func NewRootConmon_serveExecContainer_Params(s *capnp.Segment) (Conmon_serveExecContainer_Params, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_setWindowSizeContainer_Params(st), err + return Conmon_serveExecContainer_Params(st), err } -func ReadRootConmon_setWindowSizeContainer_Params(msg *capnp.Message) (Conmon_setWindowSizeContainer_Params, error) { +func ReadRootConmon_serveExecContainer_Params(msg *capnp.Message) (Conmon_serveExecContainer_Params, error) { root, err := msg.Root() - return Conmon_setWindowSizeContainer_Params(root.Struct()), err + return Conmon_serveExecContainer_Params(root.Struct()), err } -func (s Conmon_setWindowSizeContainer_Params) String() string { - str, _ := text.Marshal(0xc76ccd4502bb61e7, capnp.Struct(s)) +func (s Conmon_serveExecContainer_Params) String() string { + str, _ := text.Marshal(0x90a3950a51412b8b, capnp.Struct(s)) return str } -func (s Conmon_setWindowSizeContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_serveExecContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_setWindowSizeContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_setWindowSizeContainer_Params { - return Conmon_setWindowSizeContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_serveExecContainer_Params) DecodeFromPtr(p 
capnp.Ptr) Conmon_serveExecContainer_Params { + return Conmon_serveExecContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_setWindowSizeContainer_Params) ToPtr() capnp.Ptr { +func (s Conmon_serveExecContainer_Params) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_setWindowSizeContainer_Params) IsValid() bool { +func (s Conmon_serveExecContainer_Params) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_setWindowSizeContainer_Params) Message() *capnp.Message { +func (s Conmon_serveExecContainer_Params) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_setWindowSizeContainer_Params) Segment() *capnp.Segment { +func (s Conmon_serveExecContainer_Params) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_setWindowSizeContainer_Params) Request() (Conmon_SetWindowSizeRequest, error) { +func (s Conmon_serveExecContainer_Params) Request() (Conmon_ServeExecContainerRequest, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_SetWindowSizeRequest(p.Struct()), err + return Conmon_ServeExecContainerRequest(p.Struct()), err } -func (s Conmon_setWindowSizeContainer_Params) HasRequest() bool { +func (s Conmon_serveExecContainer_Params) HasRequest() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_setWindowSizeContainer_Params) SetRequest(v Conmon_SetWindowSizeRequest) error { +func (s Conmon_serveExecContainer_Params) SetRequest(v Conmon_ServeExecContainerRequest) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewRequest sets the request field to a newly -// allocated Conmon_SetWindowSizeRequest struct, preferring placement in s's segment. -func (s Conmon_setWindowSizeContainer_Params) NewRequest() (Conmon_SetWindowSizeRequest, error) { - ss, err := NewConmon_SetWindowSizeRequest(capnp.Struct(s).Segment()) +// allocated Conmon_ServeExecContainerRequest struct, preferring placement in s's segment. 
+func (s Conmon_serveExecContainer_Params) NewRequest() (Conmon_ServeExecContainerRequest, error) { + ss, err := NewConmon_ServeExecContainerRequest(capnp.Struct(s).Segment()) if err != nil { - return Conmon_SetWindowSizeRequest{}, err + return Conmon_ServeExecContainerRequest{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_setWindowSizeContainer_Params_List is a list of Conmon_setWindowSizeContainer_Params. -type Conmon_setWindowSizeContainer_Params_List = capnp.StructList[Conmon_setWindowSizeContainer_Params] +// Conmon_serveExecContainer_Params_List is a list of Conmon_serveExecContainer_Params. +type Conmon_serveExecContainer_Params_List = capnp.StructList[Conmon_serveExecContainer_Params] -// NewConmon_setWindowSizeContainer_Params creates a new list of Conmon_setWindowSizeContainer_Params. -func NewConmon_setWindowSizeContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_setWindowSizeContainer_Params_List, error) { +// NewConmon_serveExecContainer_Params creates a new list of Conmon_serveExecContainer_Params. +func NewConmon_serveExecContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_serveExecContainer_Params_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_setWindowSizeContainer_Params](l), err + return capnp.StructList[Conmon_serveExecContainer_Params](l), err } -// Conmon_setWindowSizeContainer_Params_Future is a wrapper for a Conmon_setWindowSizeContainer_Params promised by a client call. -type Conmon_setWindowSizeContainer_Params_Future struct{ *capnp.Future } +// Conmon_serveExecContainer_Params_Future is a wrapper for a Conmon_serveExecContainer_Params promised by a client call. 
+type Conmon_serveExecContainer_Params_Future struct{ *capnp.Future } -func (f Conmon_setWindowSizeContainer_Params_Future) Struct() (Conmon_setWindowSizeContainer_Params, error) { +func (f Conmon_serveExecContainer_Params_Future) Struct() (Conmon_serveExecContainer_Params, error) { p, err := f.Future.Ptr() - return Conmon_setWindowSizeContainer_Params(p.Struct()), err + return Conmon_serveExecContainer_Params(p.Struct()), err } -func (p Conmon_setWindowSizeContainer_Params_Future) Request() Conmon_SetWindowSizeRequest_Future { - return Conmon_SetWindowSizeRequest_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_serveExecContainer_Params_Future) Request() Conmon_ServeExecContainerRequest_Future { + return Conmon_ServeExecContainerRequest_Future{Future: p.Future.Field(0, nil)} } -type Conmon_setWindowSizeContainer_Results capnp.Struct +type Conmon_serveExecContainer_Results capnp.Struct -// Conmon_setWindowSizeContainer_Results_TypeID is the unique identifier for the type Conmon_setWindowSizeContainer_Results. -const Conmon_setWindowSizeContainer_Results_TypeID = 0xe00e522611477055 +// Conmon_serveExecContainer_Results_TypeID is the unique identifier for the type Conmon_serveExecContainer_Results. 
+const Conmon_serveExecContainer_Results_TypeID = 0xdebaeed2a782ac80 -func NewConmon_setWindowSizeContainer_Results(s *capnp.Segment) (Conmon_setWindowSizeContainer_Results, error) { +func NewConmon_serveExecContainer_Results(s *capnp.Segment) (Conmon_serveExecContainer_Results, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_setWindowSizeContainer_Results(st), err + return Conmon_serveExecContainer_Results(st), err } -func NewRootConmon_setWindowSizeContainer_Results(s *capnp.Segment) (Conmon_setWindowSizeContainer_Results, error) { +func NewRootConmon_serveExecContainer_Results(s *capnp.Segment) (Conmon_serveExecContainer_Results, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_setWindowSizeContainer_Results(st), err + return Conmon_serveExecContainer_Results(st), err } -func ReadRootConmon_setWindowSizeContainer_Results(msg *capnp.Message) (Conmon_setWindowSizeContainer_Results, error) { +func ReadRootConmon_serveExecContainer_Results(msg *capnp.Message) (Conmon_serveExecContainer_Results, error) { root, err := msg.Root() - return Conmon_setWindowSizeContainer_Results(root.Struct()), err + return Conmon_serveExecContainer_Results(root.Struct()), err } -func (s Conmon_setWindowSizeContainer_Results) String() string { - str, _ := text.Marshal(0xe00e522611477055, capnp.Struct(s)) +func (s Conmon_serveExecContainer_Results) String() string { + str, _ := text.Marshal(0xdebaeed2a782ac80, capnp.Struct(s)) return str } -func (s Conmon_setWindowSizeContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_serveExecContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_setWindowSizeContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_setWindowSizeContainer_Results { - return Conmon_setWindowSizeContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) +func 
(Conmon_serveExecContainer_Results) DecodeFromPtr(p capnp.Ptr) Conmon_serveExecContainer_Results { + return Conmon_serveExecContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_setWindowSizeContainer_Results) ToPtr() capnp.Ptr { +func (s Conmon_serveExecContainer_Results) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_setWindowSizeContainer_Results) IsValid() bool { +func (s Conmon_serveExecContainer_Results) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_setWindowSizeContainer_Results) Message() *capnp.Message { +func (s Conmon_serveExecContainer_Results) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_setWindowSizeContainer_Results) Segment() *capnp.Segment { +func (s Conmon_serveExecContainer_Results) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_setWindowSizeContainer_Results) Response() (Conmon_SetWindowSizeResponse, error) { +func (s Conmon_serveExecContainer_Results) Response() (Conmon_ServeExecContainerResponse, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_SetWindowSizeResponse(p.Struct()), err + return Conmon_ServeExecContainerResponse(p.Struct()), err } -func (s Conmon_setWindowSizeContainer_Results) HasResponse() bool { +func (s Conmon_serveExecContainer_Results) HasResponse() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_setWindowSizeContainer_Results) SetResponse(v Conmon_SetWindowSizeResponse) error { +func (s Conmon_serveExecContainer_Results) SetResponse(v Conmon_ServeExecContainerResponse) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewResponse sets the response field to a newly -// allocated Conmon_SetWindowSizeResponse struct, preferring placement in s's segment. 
-func (s Conmon_setWindowSizeContainer_Results) NewResponse() (Conmon_SetWindowSizeResponse, error) { - ss, err := NewConmon_SetWindowSizeResponse(capnp.Struct(s).Segment()) +// allocated Conmon_ServeExecContainerResponse struct, preferring placement in s's segment. +func (s Conmon_serveExecContainer_Results) NewResponse() (Conmon_ServeExecContainerResponse, error) { + ss, err := NewConmon_ServeExecContainerResponse(capnp.Struct(s).Segment()) if err != nil { - return Conmon_SetWindowSizeResponse{}, err + return Conmon_ServeExecContainerResponse{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_setWindowSizeContainer_Results_List is a list of Conmon_setWindowSizeContainer_Results. -type Conmon_setWindowSizeContainer_Results_List = capnp.StructList[Conmon_setWindowSizeContainer_Results] +// Conmon_serveExecContainer_Results_List is a list of Conmon_serveExecContainer_Results. +type Conmon_serveExecContainer_Results_List = capnp.StructList[Conmon_serveExecContainer_Results] -// NewConmon_setWindowSizeContainer_Results creates a new list of Conmon_setWindowSizeContainer_Results. -func NewConmon_setWindowSizeContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_setWindowSizeContainer_Results_List, error) { +// NewConmon_serveExecContainer_Results creates a new list of Conmon_serveExecContainer_Results. +func NewConmon_serveExecContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_serveExecContainer_Results_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_setWindowSizeContainer_Results](l), err + return capnp.StructList[Conmon_serveExecContainer_Results](l), err } -// Conmon_setWindowSizeContainer_Results_Future is a wrapper for a Conmon_setWindowSizeContainer_Results promised by a client call. 
-type Conmon_setWindowSizeContainer_Results_Future struct{ *capnp.Future } +// Conmon_serveExecContainer_Results_Future is a wrapper for a Conmon_serveExecContainer_Results promised by a client call. +type Conmon_serveExecContainer_Results_Future struct{ *capnp.Future } -func (f Conmon_setWindowSizeContainer_Results_Future) Struct() (Conmon_setWindowSizeContainer_Results, error) { +func (f Conmon_serveExecContainer_Results_Future) Struct() (Conmon_serveExecContainer_Results, error) { p, err := f.Future.Ptr() - return Conmon_setWindowSizeContainer_Results(p.Struct()), err + return Conmon_serveExecContainer_Results(p.Struct()), err } -func (p Conmon_setWindowSizeContainer_Results_Future) Response() Conmon_SetWindowSizeResponse_Future { - return Conmon_SetWindowSizeResponse_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_serveExecContainer_Results_Future) Response() Conmon_ServeExecContainerResponse_Future { + return Conmon_ServeExecContainerResponse_Future{Future: p.Future.Field(0, nil)} } -type Conmon_createNamespaces_Params capnp.Struct +type Conmon_serveAttachContainer_Params capnp.Struct -// Conmon_createNamespaces_Params_TypeID is the unique identifier for the type Conmon_createNamespaces_Params. -const Conmon_createNamespaces_Params_TypeID = 0x8b4c03a0662a38dc +// Conmon_serveAttachContainer_Params_TypeID is the unique identifier for the type Conmon_serveAttachContainer_Params. 
+const Conmon_serveAttachContainer_Params_TypeID = 0xa3cb406c522dcab1 -func NewConmon_createNamespaces_Params(s *capnp.Segment) (Conmon_createNamespaces_Params, error) { +func NewConmon_serveAttachContainer_Params(s *capnp.Segment) (Conmon_serveAttachContainer_Params, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createNamespaces_Params(st), err + return Conmon_serveAttachContainer_Params(st), err } -func NewRootConmon_createNamespaces_Params(s *capnp.Segment) (Conmon_createNamespaces_Params, error) { +func NewRootConmon_serveAttachContainer_Params(s *capnp.Segment) (Conmon_serveAttachContainer_Params, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createNamespaces_Params(st), err + return Conmon_serveAttachContainer_Params(st), err } -func ReadRootConmon_createNamespaces_Params(msg *capnp.Message) (Conmon_createNamespaces_Params, error) { +func ReadRootConmon_serveAttachContainer_Params(msg *capnp.Message) (Conmon_serveAttachContainer_Params, error) { root, err := msg.Root() - return Conmon_createNamespaces_Params(root.Struct()), err + return Conmon_serveAttachContainer_Params(root.Struct()), err } -func (s Conmon_createNamespaces_Params) String() string { - str, _ := text.Marshal(0x8b4c03a0662a38dc, capnp.Struct(s)) +func (s Conmon_serveAttachContainer_Params) String() string { + str, _ := text.Marshal(0xa3cb406c522dcab1, capnp.Struct(s)) return str } -func (s Conmon_createNamespaces_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_serveAttachContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_createNamespaces_Params) DecodeFromPtr(p capnp.Ptr) Conmon_createNamespaces_Params { - return Conmon_createNamespaces_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_serveAttachContainer_Params) DecodeFromPtr(p capnp.Ptr) Conmon_serveAttachContainer_Params { + return 
Conmon_serveAttachContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_createNamespaces_Params) ToPtr() capnp.Ptr { +func (s Conmon_serveAttachContainer_Params) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_createNamespaces_Params) IsValid() bool { +func (s Conmon_serveAttachContainer_Params) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_createNamespaces_Params) Message() *capnp.Message { +func (s Conmon_serveAttachContainer_Params) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_createNamespaces_Params) Segment() *capnp.Segment { +func (s Conmon_serveAttachContainer_Params) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_createNamespaces_Params) Request() (Conmon_CreateNamespacesRequest, error) { +func (s Conmon_serveAttachContainer_Params) Request() (Conmon_ServeAttachContainerRequest, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_CreateNamespacesRequest(p.Struct()), err + return Conmon_ServeAttachContainerRequest(p.Struct()), err } -func (s Conmon_createNamespaces_Params) HasRequest() bool { +func (s Conmon_serveAttachContainer_Params) HasRequest() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_createNamespaces_Params) SetRequest(v Conmon_CreateNamespacesRequest) error { +func (s Conmon_serveAttachContainer_Params) SetRequest(v Conmon_ServeAttachContainerRequest) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewRequest sets the request field to a newly -// allocated Conmon_CreateNamespacesRequest struct, preferring placement in s's segment. -func (s Conmon_createNamespaces_Params) NewRequest() (Conmon_CreateNamespacesRequest, error) { - ss, err := NewConmon_CreateNamespacesRequest(capnp.Struct(s).Segment()) +// allocated Conmon_ServeAttachContainerRequest struct, preferring placement in s's segment. 
+func (s Conmon_serveAttachContainer_Params) NewRequest() (Conmon_ServeAttachContainerRequest, error) { + ss, err := NewConmon_ServeAttachContainerRequest(capnp.Struct(s).Segment()) if err != nil { - return Conmon_CreateNamespacesRequest{}, err + return Conmon_ServeAttachContainerRequest{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_createNamespaces_Params_List is a list of Conmon_createNamespaces_Params. -type Conmon_createNamespaces_Params_List = capnp.StructList[Conmon_createNamespaces_Params] +// Conmon_serveAttachContainer_Params_List is a list of Conmon_serveAttachContainer_Params. +type Conmon_serveAttachContainer_Params_List = capnp.StructList[Conmon_serveAttachContainer_Params] -// NewConmon_createNamespaces_Params creates a new list of Conmon_createNamespaces_Params. -func NewConmon_createNamespaces_Params_List(s *capnp.Segment, sz int32) (Conmon_createNamespaces_Params_List, error) { +// NewConmon_serveAttachContainer_Params creates a new list of Conmon_serveAttachContainer_Params. +func NewConmon_serveAttachContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_serveAttachContainer_Params_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_createNamespaces_Params](l), err + return capnp.StructList[Conmon_serveAttachContainer_Params](l), err } -// Conmon_createNamespaces_Params_Future is a wrapper for a Conmon_createNamespaces_Params promised by a client call. -type Conmon_createNamespaces_Params_Future struct{ *capnp.Future } +// Conmon_serveAttachContainer_Params_Future is a wrapper for a Conmon_serveAttachContainer_Params promised by a client call. 
+type Conmon_serveAttachContainer_Params_Future struct{ *capnp.Future } -func (f Conmon_createNamespaces_Params_Future) Struct() (Conmon_createNamespaces_Params, error) { +func (f Conmon_serveAttachContainer_Params_Future) Struct() (Conmon_serveAttachContainer_Params, error) { p, err := f.Future.Ptr() - return Conmon_createNamespaces_Params(p.Struct()), err + return Conmon_serveAttachContainer_Params(p.Struct()), err } -func (p Conmon_createNamespaces_Params_Future) Request() Conmon_CreateNamespacesRequest_Future { - return Conmon_CreateNamespacesRequest_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_serveAttachContainer_Params_Future) Request() Conmon_ServeAttachContainerRequest_Future { + return Conmon_ServeAttachContainerRequest_Future{Future: p.Future.Field(0, nil)} } -type Conmon_createNamespaces_Results capnp.Struct +type Conmon_serveAttachContainer_Results capnp.Struct -// Conmon_createNamespaces_Results_TypeID is the unique identifier for the type Conmon_createNamespaces_Results. -const Conmon_createNamespaces_Results_TypeID = 0x8aef91973dc8a4f5 +// Conmon_serveAttachContainer_Results_TypeID is the unique identifier for the type Conmon_serveAttachContainer_Results. 
+const Conmon_serveAttachContainer_Results_TypeID = 0xedd2e5b018f17bbb -func NewConmon_createNamespaces_Results(s *capnp.Segment) (Conmon_createNamespaces_Results, error) { +func NewConmon_serveAttachContainer_Results(s *capnp.Segment) (Conmon_serveAttachContainer_Results, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createNamespaces_Results(st), err + return Conmon_serveAttachContainer_Results(st), err } -func NewRootConmon_createNamespaces_Results(s *capnp.Segment) (Conmon_createNamespaces_Results, error) { +func NewRootConmon_serveAttachContainer_Results(s *capnp.Segment) (Conmon_serveAttachContainer_Results, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_createNamespaces_Results(st), err + return Conmon_serveAttachContainer_Results(st), err } -func ReadRootConmon_createNamespaces_Results(msg *capnp.Message) (Conmon_createNamespaces_Results, error) { +func ReadRootConmon_serveAttachContainer_Results(msg *capnp.Message) (Conmon_serveAttachContainer_Results, error) { root, err := msg.Root() - return Conmon_createNamespaces_Results(root.Struct()), err + return Conmon_serveAttachContainer_Results(root.Struct()), err } -func (s Conmon_createNamespaces_Results) String() string { - str, _ := text.Marshal(0x8aef91973dc8a4f5, capnp.Struct(s)) +func (s Conmon_serveAttachContainer_Results) String() string { + str, _ := text.Marshal(0xedd2e5b018f17bbb, capnp.Struct(s)) return str } -func (s Conmon_createNamespaces_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_serveAttachContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_createNamespaces_Results) DecodeFromPtr(p capnp.Ptr) Conmon_createNamespaces_Results { - return Conmon_createNamespaces_Results(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_serveAttachContainer_Results) DecodeFromPtr(p capnp.Ptr) 
Conmon_serveAttachContainer_Results { + return Conmon_serveAttachContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_createNamespaces_Results) ToPtr() capnp.Ptr { +func (s Conmon_serveAttachContainer_Results) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_createNamespaces_Results) IsValid() bool { +func (s Conmon_serveAttachContainer_Results) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_createNamespaces_Results) Message() *capnp.Message { +func (s Conmon_serveAttachContainer_Results) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_createNamespaces_Results) Segment() *capnp.Segment { +func (s Conmon_serveAttachContainer_Results) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_createNamespaces_Results) Response() (Conmon_CreateNamespacesResponse, error) { +func (s Conmon_serveAttachContainer_Results) Response() (Conmon_ServeAttachContainerResponse, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_CreateNamespacesResponse(p.Struct()), err + return Conmon_ServeAttachContainerResponse(p.Struct()), err } -func (s Conmon_createNamespaces_Results) HasResponse() bool { +func (s Conmon_serveAttachContainer_Results) HasResponse() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_createNamespaces_Results) SetResponse(v Conmon_CreateNamespacesResponse) error { +func (s Conmon_serveAttachContainer_Results) SetResponse(v Conmon_ServeAttachContainerResponse) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewResponse sets the response field to a newly -// allocated Conmon_CreateNamespacesResponse struct, preferring placement in s's segment. -func (s Conmon_createNamespaces_Results) NewResponse() (Conmon_CreateNamespacesResponse, error) { - ss, err := NewConmon_CreateNamespacesResponse(capnp.Struct(s).Segment()) +// allocated Conmon_ServeAttachContainerResponse struct, preferring placement in s's segment. 
+func (s Conmon_serveAttachContainer_Results) NewResponse() (Conmon_ServeAttachContainerResponse, error) { + ss, err := NewConmon_ServeAttachContainerResponse(capnp.Struct(s).Segment()) if err != nil { - return Conmon_CreateNamespacesResponse{}, err + return Conmon_ServeAttachContainerResponse{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_createNamespaces_Results_List is a list of Conmon_createNamespaces_Results. -type Conmon_createNamespaces_Results_List = capnp.StructList[Conmon_createNamespaces_Results] +// Conmon_serveAttachContainer_Results_List is a list of Conmon_serveAttachContainer_Results. +type Conmon_serveAttachContainer_Results_List = capnp.StructList[Conmon_serveAttachContainer_Results] -// NewConmon_createNamespaces_Results creates a new list of Conmon_createNamespaces_Results. -func NewConmon_createNamespaces_Results_List(s *capnp.Segment, sz int32) (Conmon_createNamespaces_Results_List, error) { +// NewConmon_serveAttachContainer_Results creates a new list of Conmon_serveAttachContainer_Results. +func NewConmon_serveAttachContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_serveAttachContainer_Results_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_createNamespaces_Results](l), err + return capnp.StructList[Conmon_serveAttachContainer_Results](l), err } -// Conmon_createNamespaces_Results_Future is a wrapper for a Conmon_createNamespaces_Results promised by a client call. -type Conmon_createNamespaces_Results_Future struct{ *capnp.Future } +// Conmon_serveAttachContainer_Results_Future is a wrapper for a Conmon_serveAttachContainer_Results promised by a client call. 
+type Conmon_serveAttachContainer_Results_Future struct{ *capnp.Future } -func (f Conmon_createNamespaces_Results_Future) Struct() (Conmon_createNamespaces_Results, error) { +func (f Conmon_serveAttachContainer_Results_Future) Struct() (Conmon_serveAttachContainer_Results, error) { p, err := f.Future.Ptr() - return Conmon_createNamespaces_Results(p.Struct()), err + return Conmon_serveAttachContainer_Results(p.Struct()), err } -func (p Conmon_createNamespaces_Results_Future) Response() Conmon_CreateNamespacesResponse_Future { - return Conmon_CreateNamespacesResponse_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_serveAttachContainer_Results_Future) Response() Conmon_ServeAttachContainerResponse_Future { + return Conmon_ServeAttachContainerResponse_Future{Future: p.Future.Field(0, nil)} } -type Conmon_startFdSocket_Params capnp.Struct +type Conmon_servePortForwardContainer_Params capnp.Struct -// Conmon_startFdSocket_Params_TypeID is the unique identifier for the type Conmon_startFdSocket_Params. -const Conmon_startFdSocket_Params_TypeID = 0xce733f0914c80b6b +// Conmon_servePortForwardContainer_Params_TypeID is the unique identifier for the type Conmon_servePortForwardContainer_Params. 
+const Conmon_servePortForwardContainer_Params_TypeID = 0x9d82529754851252 -func NewConmon_startFdSocket_Params(s *capnp.Segment) (Conmon_startFdSocket_Params, error) { +func NewConmon_servePortForwardContainer_Params(s *capnp.Segment) (Conmon_servePortForwardContainer_Params, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_startFdSocket_Params(st), err + return Conmon_servePortForwardContainer_Params(st), err } -func NewRootConmon_startFdSocket_Params(s *capnp.Segment) (Conmon_startFdSocket_Params, error) { +func NewRootConmon_servePortForwardContainer_Params(s *capnp.Segment) (Conmon_servePortForwardContainer_Params, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_startFdSocket_Params(st), err + return Conmon_servePortForwardContainer_Params(st), err } -func ReadRootConmon_startFdSocket_Params(msg *capnp.Message) (Conmon_startFdSocket_Params, error) { +func ReadRootConmon_servePortForwardContainer_Params(msg *capnp.Message) (Conmon_servePortForwardContainer_Params, error) { root, err := msg.Root() - return Conmon_startFdSocket_Params(root.Struct()), err + return Conmon_servePortForwardContainer_Params(root.Struct()), err } -func (s Conmon_startFdSocket_Params) String() string { - str, _ := text.Marshal(0xce733f0914c80b6b, capnp.Struct(s)) +func (s Conmon_servePortForwardContainer_Params) String() string { + str, _ := text.Marshal(0x9d82529754851252, capnp.Struct(s)) return str } -func (s Conmon_startFdSocket_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_servePortForwardContainer_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_startFdSocket_Params) DecodeFromPtr(p capnp.Ptr) Conmon_startFdSocket_Params { - return Conmon_startFdSocket_Params(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_servePortForwardContainer_Params) DecodeFromPtr(p capnp.Ptr) 
Conmon_servePortForwardContainer_Params { + return Conmon_servePortForwardContainer_Params(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_startFdSocket_Params) ToPtr() capnp.Ptr { +func (s Conmon_servePortForwardContainer_Params) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_startFdSocket_Params) IsValid() bool { +func (s Conmon_servePortForwardContainer_Params) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_startFdSocket_Params) Message() *capnp.Message { +func (s Conmon_servePortForwardContainer_Params) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_startFdSocket_Params) Segment() *capnp.Segment { +func (s Conmon_servePortForwardContainer_Params) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_startFdSocket_Params) Request() (Conmon_StartFdSocketRequest, error) { +func (s Conmon_servePortForwardContainer_Params) Request() (Conmon_ServePortForwardContainerRequest, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_StartFdSocketRequest(p.Struct()), err + return Conmon_ServePortForwardContainerRequest(p.Struct()), err } -func (s Conmon_startFdSocket_Params) HasRequest() bool { +func (s Conmon_servePortForwardContainer_Params) HasRequest() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_startFdSocket_Params) SetRequest(v Conmon_StartFdSocketRequest) error { +func (s Conmon_servePortForwardContainer_Params) SetRequest(v Conmon_ServePortForwardContainerRequest) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewRequest sets the request field to a newly -// allocated Conmon_StartFdSocketRequest struct, preferring placement in s's segment. -func (s Conmon_startFdSocket_Params) NewRequest() (Conmon_StartFdSocketRequest, error) { - ss, err := NewConmon_StartFdSocketRequest(capnp.Struct(s).Segment()) +// allocated Conmon_ServePortForwardContainerRequest struct, preferring placement in s's segment. 
+func (s Conmon_servePortForwardContainer_Params) NewRequest() (Conmon_ServePortForwardContainerRequest, error) { + ss, err := NewConmon_ServePortForwardContainerRequest(capnp.Struct(s).Segment()) if err != nil { - return Conmon_StartFdSocketRequest{}, err + return Conmon_ServePortForwardContainerRequest{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_startFdSocket_Params_List is a list of Conmon_startFdSocket_Params. -type Conmon_startFdSocket_Params_List = capnp.StructList[Conmon_startFdSocket_Params] +// Conmon_servePortForwardContainer_Params_List is a list of Conmon_servePortForwardContainer_Params. +type Conmon_servePortForwardContainer_Params_List = capnp.StructList[Conmon_servePortForwardContainer_Params] -// NewConmon_startFdSocket_Params creates a new list of Conmon_startFdSocket_Params. -func NewConmon_startFdSocket_Params_List(s *capnp.Segment, sz int32) (Conmon_startFdSocket_Params_List, error) { +// NewConmon_servePortForwardContainer_Params creates a new list of Conmon_servePortForwardContainer_Params. +func NewConmon_servePortForwardContainer_Params_List(s *capnp.Segment, sz int32) (Conmon_servePortForwardContainer_Params_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_startFdSocket_Params](l), err + return capnp.StructList[Conmon_servePortForwardContainer_Params](l), err } -// Conmon_startFdSocket_Params_Future is a wrapper for a Conmon_startFdSocket_Params promised by a client call. -type Conmon_startFdSocket_Params_Future struct{ *capnp.Future } +// Conmon_servePortForwardContainer_Params_Future is a wrapper for a Conmon_servePortForwardContainer_Params promised by a client call. 
+type Conmon_servePortForwardContainer_Params_Future struct{ *capnp.Future } -func (f Conmon_startFdSocket_Params_Future) Struct() (Conmon_startFdSocket_Params, error) { +func (f Conmon_servePortForwardContainer_Params_Future) Struct() (Conmon_servePortForwardContainer_Params, error) { p, err := f.Future.Ptr() - return Conmon_startFdSocket_Params(p.Struct()), err + return Conmon_servePortForwardContainer_Params(p.Struct()), err } -func (p Conmon_startFdSocket_Params_Future) Request() Conmon_StartFdSocketRequest_Future { - return Conmon_StartFdSocketRequest_Future{Future: p.Future.Field(0, nil)} +func (p Conmon_servePortForwardContainer_Params_Future) Request() Conmon_ServePortForwardContainerRequest_Future { + return Conmon_ServePortForwardContainerRequest_Future{Future: p.Future.Field(0, nil)} } -type Conmon_startFdSocket_Results capnp.Struct +type Conmon_servePortForwardContainer_Results capnp.Struct -// Conmon_startFdSocket_Results_TypeID is the unique identifier for the type Conmon_startFdSocket_Results. -const Conmon_startFdSocket_Results_TypeID = 0xf4e3e92ae0815f15 +// Conmon_servePortForwardContainer_Results_TypeID is the unique identifier for the type Conmon_servePortForwardContainer_Results. 
+const Conmon_servePortForwardContainer_Results_TypeID = 0xae5e0ae5001ebdfe -func NewConmon_startFdSocket_Results(s *capnp.Segment) (Conmon_startFdSocket_Results, error) { +func NewConmon_servePortForwardContainer_Results(s *capnp.Segment) (Conmon_servePortForwardContainer_Results, error) { st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_startFdSocket_Results(st), err + return Conmon_servePortForwardContainer_Results(st), err } -func NewRootConmon_startFdSocket_Results(s *capnp.Segment) (Conmon_startFdSocket_Results, error) { +func NewRootConmon_servePortForwardContainer_Results(s *capnp.Segment) (Conmon_servePortForwardContainer_Results, error) { st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return Conmon_startFdSocket_Results(st), err + return Conmon_servePortForwardContainer_Results(st), err } -func ReadRootConmon_startFdSocket_Results(msg *capnp.Message) (Conmon_startFdSocket_Results, error) { +func ReadRootConmon_servePortForwardContainer_Results(msg *capnp.Message) (Conmon_servePortForwardContainer_Results, error) { root, err := msg.Root() - return Conmon_startFdSocket_Results(root.Struct()), err + return Conmon_servePortForwardContainer_Results(root.Struct()), err } -func (s Conmon_startFdSocket_Results) String() string { - str, _ := text.Marshal(0xf4e3e92ae0815f15, capnp.Struct(s)) +func (s Conmon_servePortForwardContainer_Results) String() string { + str, _ := text.Marshal(0xae5e0ae5001ebdfe, capnp.Struct(s)) return str } -func (s Conmon_startFdSocket_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { +func (s Conmon_servePortForwardContainer_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { return capnp.Struct(s).EncodeAsPtr(seg) } -func (Conmon_startFdSocket_Results) DecodeFromPtr(p capnp.Ptr) Conmon_startFdSocket_Results { - return Conmon_startFdSocket_Results(capnp.Struct{}.DecodeFromPtr(p)) +func (Conmon_servePortForwardContainer_Results) DecodeFromPtr(p 
capnp.Ptr) Conmon_servePortForwardContainer_Results { + return Conmon_servePortForwardContainer_Results(capnp.Struct{}.DecodeFromPtr(p)) } -func (s Conmon_startFdSocket_Results) ToPtr() capnp.Ptr { +func (s Conmon_servePortForwardContainer_Results) ToPtr() capnp.Ptr { return capnp.Struct(s).ToPtr() } -func (s Conmon_startFdSocket_Results) IsValid() bool { +func (s Conmon_servePortForwardContainer_Results) IsValid() bool { return capnp.Struct(s).IsValid() } -func (s Conmon_startFdSocket_Results) Message() *capnp.Message { +func (s Conmon_servePortForwardContainer_Results) Message() *capnp.Message { return capnp.Struct(s).Message() } -func (s Conmon_startFdSocket_Results) Segment() *capnp.Segment { +func (s Conmon_servePortForwardContainer_Results) Segment() *capnp.Segment { return capnp.Struct(s).Segment() } -func (s Conmon_startFdSocket_Results) Response() (Conmon_StartFdSocketResponse, error) { +func (s Conmon_servePortForwardContainer_Results) Response() (Conmon_ServePortForwardContainerResponse, error) { p, err := capnp.Struct(s).Ptr(0) - return Conmon_StartFdSocketResponse(p.Struct()), err + return Conmon_ServePortForwardContainerResponse(p.Struct()), err } -func (s Conmon_startFdSocket_Results) HasResponse() bool { +func (s Conmon_servePortForwardContainer_Results) HasResponse() bool { return capnp.Struct(s).HasPtr(0) } -func (s Conmon_startFdSocket_Results) SetResponse(v Conmon_StartFdSocketResponse) error { +func (s Conmon_servePortForwardContainer_Results) SetResponse(v Conmon_ServePortForwardContainerResponse) error { return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) } // NewResponse sets the response field to a newly -// allocated Conmon_StartFdSocketResponse struct, preferring placement in s's segment. 
-func (s Conmon_startFdSocket_Results) NewResponse() (Conmon_StartFdSocketResponse, error) { - ss, err := NewConmon_StartFdSocketResponse(capnp.Struct(s).Segment()) +// allocated Conmon_ServePortForwardContainerResponse struct, preferring placement in s's segment. +func (s Conmon_servePortForwardContainer_Results) NewResponse() (Conmon_ServePortForwardContainerResponse, error) { + ss, err := NewConmon_ServePortForwardContainerResponse(capnp.Struct(s).Segment()) if err != nil { - return Conmon_StartFdSocketResponse{}, err + return Conmon_ServePortForwardContainerResponse{}, err } err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) return ss, err } -// Conmon_startFdSocket_Results_List is a list of Conmon_startFdSocket_Results. -type Conmon_startFdSocket_Results_List = capnp.StructList[Conmon_startFdSocket_Results] +// Conmon_servePortForwardContainer_Results_List is a list of Conmon_servePortForwardContainer_Results. +type Conmon_servePortForwardContainer_Results_List = capnp.StructList[Conmon_servePortForwardContainer_Results] -// NewConmon_startFdSocket_Results creates a new list of Conmon_startFdSocket_Results. -func NewConmon_startFdSocket_Results_List(s *capnp.Segment, sz int32) (Conmon_startFdSocket_Results_List, error) { +// NewConmon_servePortForwardContainer_Results creates a new list of Conmon_servePortForwardContainer_Results. +func NewConmon_servePortForwardContainer_Results_List(s *capnp.Segment, sz int32) (Conmon_servePortForwardContainer_Results_List, error) { l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return capnp.StructList[Conmon_startFdSocket_Results](l), err + return capnp.StructList[Conmon_servePortForwardContainer_Results](l), err } -// Conmon_startFdSocket_Results_Future is a wrapper for a Conmon_startFdSocket_Results promised by a client call. 
-type Conmon_startFdSocket_Results_Future struct{ *capnp.Future } +// Conmon_servePortForwardContainer_Results_Future is a wrapper for a Conmon_servePortForwardContainer_Results promised by a client call. +type Conmon_servePortForwardContainer_Results_Future struct{ *capnp.Future } -func (f Conmon_startFdSocket_Results_Future) Struct() (Conmon_startFdSocket_Results, error) { +func (f Conmon_servePortForwardContainer_Results_Future) Struct() (Conmon_servePortForwardContainer_Results, error) { p, err := f.Future.Ptr() - return Conmon_startFdSocket_Results(p.Struct()), err -} -func (p Conmon_startFdSocket_Results_Future) Response() Conmon_StartFdSocketResponse_Future { - return Conmon_StartFdSocketResponse_Future{Future: p.Future.Field(0, nil)} -} - -const schema_ffaaf7385bc4adad = "x\xda\xc4Y\x7fp\x14\xf7u\x7fo\xf7N\xab\xd3\x0f" + - "N\xdb=\x09$P\xa5\x12HA\x0d\xe1\x87p\x8d\x19" + - "2\x92\x00\x85B\xc0\xd6\xea\xb0i!qY\xee\x16\xe9" + - "\xc4i\xf7\xd8\xdd\x03D\xec\xca$\xf5\xd8Q\x82cT" + - "\x98\xc4L\x99A\x8e\xa1@\xa0\xc6I\xa0\x86\xe0\x0c\x10" + - "\x98\x001M\xa1\x85\x06OH\xc1\x98\xda0\xc66\xb5" + - ";\xc5\x1e\xe8v\xde\xf7n\x7f\xe8$\xe3\x93\xeaN\xff" + - "\xd0\xe8\xee\xedg\xdf\xfb~\xbf\xef}\xdf{\x9fwS" + - "\xee\x84\x1a\x03SK\x7f3\x028yo\xb0\xc06\xaf" + - "v\x19;\xb7\xcd\xfb6\x88_@\x80 \x0a\x00\xf5\x8b" + - "\x847\x11PR\x84\x06@\xfb?_:\xf5\x95\x1fl" + - "z\xbf\xc7\x0f\xd8\x90\x01la\x80\xdf\xcd\xa8[\xb9\x9d" + - "_\xf8]?\xe0\x90\xf0\x06\x01\xce2\xc0_-\x0fo" + - "\xfe\x9b\x8ae\x0c`\xdf\xae_y\xf9\x85w\x1e\xfc\x07" + - "\x08\x0a\x04\xbc%\xbc\x81R\xa8\x90>\x06\x0b\xbf\x8f\x80" + - "\xf6\xaf\xffp\xfd\x92\xf0\xceg~\x98\x83fj\xaf\x87" + - "\xdeD\x09\x8b\x04\x00\xe9^\x88T?}\xe3\xe1\x83\x8f" + - "~\xfb\xfd\xed~\xdbS\x8b\xfe\x83l7\x17\x11\xe0\x85" + - "e\xef\xacj\x9e\x1f~\xb1\xbf\xb6\x00\xe1\x12E{P" + - "\xdaP$\x00oW|r\xed\xa5\x8b\xd1\x19\xbbA\xfe" + - "\x02\x0e0\xfa\x0d\xc2\xa5\x99\xd1\xd5Ek\x01\xedq/" + - "\xff\xf2\\\xcf\xac\xc9{\xfcF\xcf\x16\x9d'\xa3W\x99" + - 
"\xd1\x9e\xbfS\xff\xf8\xd8\xe1\xaf\x11\x80\xf3\xb4\x01\xd6c" + - "q\x0fJ\x95\xc5\xa4\xaa\xbc\xf8A@{\xed\xf2S/" + - "\xaf\x97\xaf\xef\x1ddy\xd5\xc5\xbd(=PL\xcb\x93" + - "\xe6N9t\xb1\xben_\xee\xf28\xc2\x89\x84\x9b\xc8" + - "t\x8e/~\x19\xd0\x9e\xde\xf7\xd3\x83\xcf\xbd\xb7\xee\xef" + - "\x09\xcd\xe5n\xe6Bq\x07J7\x8aG\x02H\xb7\x19" + - "\xfa\x9b\xe7n\xeez\xee\xbbM\x07ru\xf3\x84\xfeN" + - "\xc9q\x94\xfaJ\xe8\xe3\xb6\x92\x1a\xf2\x0e\x7f\xe5\xad\xa2" + - "\xe7\x9a&\x1f\x1c\xcc;'K\xcf\xa0t\xb5\x94Vr" + - "\xb9\x94\xce\xc1}.\x8e\xe1\xed}\xfbN,\x9b\xf1_" + - "{l:\x87{\xa5UX_:b\x09\x92\xe2\xb0\xc0" + - "I!Q\x00\xb0'\xbe\xfb\xec\xceg~\x1c=<\x98" + - "\xf2\xdbe\xc71\x03\x93\x82\")?sp\xf7\xccO" + - "\xae\xad=\x9c\xbbp\xb2_\xff\x90x\x1e\xa5G\x09]" + - "/\x8b\xcf\xf0\x80v\xd9\xb2\xdf|\xe5\xdd\xc7\xff\xfd\xa4" + - "\xdfi'\xcbY\x94^*'}o+?\xe7\x9a\xcf" + - "&\x7f\xe5\x07\xdc+/\xe2\x00\xa5\xf2\x0a\x06x\xeb\xbf" + - ";\xdaR\x93_\xf7\x03\x1e\xa8\xe8e\xb1\xc6\x00\xab\x8a" + - "OEB\x0d\xe6?\xfa\x01j\xc5q\x02\xa4\x19\xe0N" + - "\xf9/~P5\xebp?\xc0\x96\x0a\xb6\x86\xdd\x0cP" + - "\xd5tnzX\x9b\xf7O9\x07\xc0|q\xb6\xe2E" + - "\x94\xaeW\xd0\x01\\\xad \xcf\xfd\xe8\xc3]\xcb\x0fl" + - "\x8a\\\x1c\x10e\x9bFv\xa0\xb4c$!\xfbFv" + - "\x03\xdaw\x9f\x9e\xf5Tu\xf5\xc5K\x83\xc6\xcf\x85\x91" + - "7Q\xba\xcd\xd0\xb7F\xbe\x0dho\xfd\x93\xb5\xa9\xc7" + - "W\xcc\xfc}\x0e\x9a\x05\xe5\xe9Qo\xa0t}\x14[" + - "\xc4(Z\xf1\xdd\x99w\x7f\xb1}V\xea\xdfrU\x07" + - "\xd9\x8d\xae\xecA\xa9\xba\x92>VV.\xa1\xf0y4" + - "5O\xfcb\xeb\x88+\xfe\x13\xd8X\xf5\x07t\xc8;" + - "\xaaH\xdf\x94o\xce\xdb\xfdxB\xba\xe6\x07\x9c\xae\xda" + - "\xca\xdc\xc4\x00\x7f*\xfdr\xbf\xb6\xe9\xe6\xf5~n\xaa" + - "\xbaI\x00q4\x01\x8e-\xabo\xf9\xd7k_\xfc\x00" + - "\xc4I\x9cw\x17\x00\xeb\xa7\x8e\xeeEi\xfehZ{" + - "\xf3\xe8G\x00\xeds\xef\xd5\xec\xfd\xf5\xf5\xaf}\x98\xbb" + - "\xf6\x10\xcb\x0e\xa3\xb7\xa2\xb4\x81\xd0\xf5O\x8e~\x90\x03" + - "\xb4w\xae\xfe\xd1\xf3w\xc6\x8a\x1f\xe5\xde+\xb6\xd5s" + - "\xd5o\xa2t\xab\x9a>\xde\xa8\xfe\x15m\xf5\xd5\xad\x9b" + - 
"\xbf\x7fb\xda\xbc\x8f\xfc\x0b\xbd]\xc3\xb2D\xb0\x96\x16" + - "Z\xfe\x97\x1b\xae\xd4\xdd\xb8\xd6\x0f0\xbe\xf6\x0c\x01\x1e" + - "b\x80#\xb8\xa7\xf8\xeb\x1d\xef\xdc\xf1\x03\xbeQ\xcb\xb6" + - "\xba\x9a\x01\xee\xf4\xfd\xb8\xfe\xa9\xb3?\xfdx\x90\xec\xb1" + - "\xa5\xf6\x0cJ\xaf\xd4R\xf6\xe8\xfd\xe7\x05\x9d\xbf\xbf\xf7" + - "\xf3Or\x82\x8a9\x7fc\xed\x8b(\xed\xa8e\xa1R" + - "\xbb\x16&\xd9\x09\xcdR\x0dMI\x16LN\x19\xba\xa5" + - "O\x8e\xe9Z\xa7\xae}9\xa6\xa4\xb4\xd4\xcc9\x99/" + - "\xea:5\x16\xed\xd2bst\xcdR\x12\x9aj\x8ck" + - "Q\x0cA\xe94\xe5\x00\x1f\x00\x08 \x80X:\x1b@" + - ".\xe4Q\x8ep\xd8m\xa8\xab\xd3\xaaia\x99w\x86" + - "\x80X\x06\x98\x97\xb9\x98\xa1*\x96\xfa\xb0\xd2\xa9\x9a)" + - "%\xa6\x9a\xe3ZU3-$\xad~\xe6\x16\x00\xc8%" + - "<\xca\xa38\xb4\x0d\xd5L\xe9\x9a\xa9\x02\x00\x96y\xf5" + - "\xe4\x7fc\xb2E1\x14>\x9f\x0d\xba\xb5n\x08\xd6\xe6" + - "\xe4Xk%m\xbci\xb5 \xcac\\\x83\x07V\x00" + - "\xc8?\xe3Q>\xca\xa1\x88\x18A\x12\xbe\xb6\x14@>" + - "\xc2\xa3\xfc[\x0eE\x8e\x8b \x07 ^ \xe4\xbf\xf0" + - "(\x7f\xc0\xa1\xc8\xf3\x11\xe4\x01\xc4[$|\x97\xc7h" + - "!r(\x06\x02\x11\x0cP*\xc5\x05\x00\xd1\x00\xf2\x18" + - "-#y0\x18\xc1 \x80T\x8a\xd3\x00\xa2\x85$\x8f" + - "\x90\xbc\xa0 \x82\x05\x00\x92\xc8\xf0e$\xff\x12rh" + - "w\xaa\x96\x12W,\x05\x84G\x92q,\x05\x0eK\x01" + - "m-\xbb\x15\xe0U\x13G\x00\xb6\xf0\x88a/_\x01" + - "\x92\xd0N'\xe2\x8b\x94T*\x01\x82\xd6\xe6\xc2J\x80" + - "c\x0f\xdb\xee\xf7p\x85b\xaa-\x8a\xd5N\x0e&Y" + - "\x09`MJ\x8f\xcf\x8f;\xdf\xbcu\x018/\x97y" + - "\x17!\xbb\x80\xe1\xf9\xc6L\xe9\x82f\xaa\xe4\x1c_4" + - ",\xcd\xc6\xdf\x04n\xf0\xed\x97y=\xc6\x10\xac\x1b\xaa" + - "\x9eR\xb5\x85z\x9bw\xd5Z\xd5\x1a3\x9dw\xf0\xbb" + - "\xedON8\x06\xefc\xb4\xd51J{\x0d\xeb\x99\xbd" + - "\xe6\xf5\xa6{L\xfe7\xe5Bw\xa1\x13\xeb\x00\xe4q" + - "<\xcaS8t\"x\x12\xc9&\xf0(O\xe70l" + - "u\xa5\xd4\x9cH\x09\x03\x86S\x8a\xd5\xee\xba6\x9fs" + - "S,K\x89\xb5\xf7\xcbOJ'\xe6q}\xddr6" + - "\x84\xf3\x9a\xd3f\xe8\xe9\xd4\"ES\xdaT\x03\x80m" + - "\x99\xddCq6\xa9\x11C\x0b\x00\xba\xcd.\xd3R;" + - 
"\xe3v\x8c\x81W\x9a\x00\x90\x97\xf2&\xb6\x93\xd6\x8cS" + - "1oO<\xa6\x1afB\xd7X&1\x91e\x92\x12" + - "w\xef\xcd\xb4\xf7F\x1e\xe5\x85\x9e\x1b\xe6Sz\xf83" + - "\x1e\xe5\xc5\x94H0\x93Hd\x0a\xac\x16\x1e\xe5$\x87" + - "\xddkTc\x85n\xaa\x88\xc0!\xc2\xa7]\xfd!]" + - "\xbc\xc0}v\xb0Po\x9bk\x84\x13kTC\x0e\xa0" + - "\xbf\xaac]xqWJ\xf5\xef\xa7n\x90\xfd\x90l" + - ".\x8fr\x8bo?\x8bf{\x9btb\xcdU\x8a\xb6\xc3" + - "\xf0:^q\xc7q\xafE\x11w\x9f\xf1:g\xf1\x95" + - "\xf3^r\x10\x0f\x19>\xe6vh\xbd\xafo?\xd4\xe3" + - "\xa3\x9c\xaf\xf5z\xf4J<\xb6\xc7\xd7\xaf\x9d\xfc\x89\x8f" + - "\x15\x9f>\xee\xeb\xe5\xcf\xb6\xfa\x18\xf0\xd93^\xd9\x10" + - "/\xf4\xfa\x08\xd2\xa5=>*v\xf9'\xbe&\xefj" + - "\x8f\xed\xdc~h\xc8\x84\x9f+\xe0\x1d'd\xca\x9a\x9b" + - " [\x1d \xbbu\x895*\xa0a;y\x0djX" + - "f\xb3\x9dw\x82\xceK\x8e\xb2\xe6\xdc~\xd0\x89z\xb0" + - "\x9dG\x9c\xefY6\x95\xd9Nj\x83\x9a\x8cm\xf7{" + - "CF\xaf\xed\x14\"l\xf3\x14\xfae\x8e\"\xe7\xc6\xa1" + - "s\xe5\xc2L_\xae\xd8\xac\xc9\xa8uJ:\xdf\xaf\xdf" + - "2-p\xca\x17z\x18\xae_\xddg\xe1k{0\xdf" + - "\x12\xb2\xa1\x8e\xd9Xw\x96\x90#v\x96\xb0X]g" + - "\xd1\x1f.RR\xcd\x9aet\x01\xc8\xb5|\x10\xc0%" + - "\x99\xe8\x10!\xf1\xf6l\xe0\xc4\xeb\x02z\x8c\x02\x1d\x1e" + - ")^\xfa\x16p\xe29\x019w\x8c\x83\x0ei\x10O" + - "\xf6\x02'\x1e\x13\x90w\xe7\x15\xe8p`\xf1\x00\xbd\xb7" + - "O\xc0\x80K\xa7\xd0\x99\xa4\x88}[\x81\x13\xb7\x09\x18" + - "t\x191:\xacM\xdct\x188q\xa3\x80\x05\xee\xd0" + - "\x07\x9d\xf1\x90\xb8\xa1\x078\xf1I\x01\x05\x97\x07\xa3\xc3" + - "p\xc4\xd5\x06pbB\xa0\x1aBq\xd8\x88v,\x1b" + - "L\x98\x0d\x0bhD\xdb\xe1\x15\xe8\x04\x0b\x1a\x8dh;" + - "\xb5\xdc\x8f4\xdc(\xc8By\x95\xa0f?\x8f\xcf\xd1" + - "\xb5\x86\xcc+\xae\xbd\x87\x15t\x1c\x0a\xa4\xc7\xcc\xfa\x07" + - "j\x98\x83\x1a\xd1__\x87\x90\xda\xbc$?HW6" + - "\x81\xfb\xdc\xfb\xd1\x9c\x9b\x96)\xf0\x8d\x8ei\xe9\x15\xac" + - "\x02\x88\xee\xa5.\xfdU\xf4\xd8\x82t\x00\x97\x02D\x7f" + - "F\xf2\xa3\xc8!f\xf8\x82\xf4\x1ak\xea\x8f\x90\xf8\x14" + - "z\x85F:\xc9H\xc0Q\x92\xbf\x8e^\xad\x91Nc" + - 
"+@\xf4\x14\xc9\xdfb\xa4\x81\xcf\x90\x86\xab\xd8\x01\x10" + - "\xbdB\xf2\xbb\x8c4\x042\xa4\xe1cf\xf6\x0e#\x13" + - "\x1c\x87\xa2\x10\x8c\x10\x97\x95D\x8e\xe4e\x1c\x91\x09\x92" + - "\x17\x16D\xb0\x10@\x9a\xc8\xe4\x13H>\x97\xe4!!" + - "\x82!\x00\xa9\x89[\x01\x10m$\xf9\xd7I^T\x18" + - "\xc1\"\x00\xe9/\x98\xfc\xcfI\x1e'yq(\x82\xc5" + - "\x00\x92\xc2\xd1\xbe\x96\x93\xfc\x09\x92\x97\x14E\xb0\x04@" + - "\xea\xe2f\x03D-\x92?O\xf2R\x8c`)\x80\xb4" + - "\x913\x00\xa2\xdf#\xf9\x0fI>\xa28\x82#\x00\xa4" + - "-L\xbe\x99\xe4\xfbI\x1e.\x89`\x18@\xda\xc7\xf4" + - "\xec\"\xf9\x09\xae_\xd9\xb5W\xa4\xb5xRmQ\x80" + - "\xf7\x154K5:\x13\x9a\x92\xa4 \xc8vQ5\xa6" + - "\x15OhnO\xa5\xaeKX\x8c\xd8\xe0\x00\xce\xa3\xeb" + - "\x9d\xcd\xf4\x14\xc2\x8a\xd5>\xe0i\xd2\xc9\xdb\xbc\xe1\xa3" + - "\x1c\xbe\xe9\x07C\xc5\x92\xaa\xa2\xa5Ss\x80\xef\x8c\x0f" + - " \\I}\x85\x92l2\x80\x1f\xc8\xb7bzg\xa7" + - "\xa2\xc5\x9b@0\x06>\x1c~\x13\xd1\xadjk\x1eS" + - "\xfc\x0b\xce\xbd\x11\xb1\xfe%\x08\xc3^\xdd\xce\xb4k\xb6" + - "\x12\x8f'\xac\x84\xaeA\x8d\x92\xfcj\xdcU\x15\xca," + - "\xae;\xa9*\xab\x06\x8a\x87\xc5!ZU3\x9d\xe4\xf3" + - "%^n{\x90C$\x84\xfbX4\xfd\x9dc\x0ey" + - "1\x01>\x9b\xbd\xb8\xcd\xc6\x10\xd8K6%\xe7O\x91" + - "\xdcnk\x08\x13\x0e\xd3\x9f.\x9d\x0d}\xb6)\xb7\xdd" + - "\x19\xf2\xe8f\xb8ns;\xc1\xe1\xf1\xe5\xd5iA5" + - "s\xd9V\x95\xc7N\xc4\xc1\xe9\x167\x90n\xf9\x93\xc9" + - "\xff1\xd3b\xfdL\x98\x8a\"c)l)\x0f\x8ce" + - "\xd4u\x12\xfd\xe3\xc4\xf1\xf4\x8f\x17\xab\xeb\x000 \x96" + - "\x8f\x05\x10\x12\xa9\x98\xa0\xa9\x96\x90J\xc4\xc3iS5" + - "\x84\xb4e\xe6\xe5\x9fA\x9aE\xdf\xac\xa0\xcc=6\x85" + - "\x0ecy\xe60\x9cSK\x10\x1d\x8a\xf3(\xa7|\x1c" + - "\xa9\x93\x84\xed<\xca\x16\x95\xae\xda\x0cGZMo\xa7" + - "x\x94\x9f\xe02Yu\x8e\x1eg.\x0e\x00\x87\x01\xc0" + - "\x06\xd3\x8a\xebi\xcb9L\xfa\xaa\x1a\x86{\xb6V\xa2" + - "S\x8d?\x92\xb6|\x99zx\xd5\x99b\x8b\x1f0," + - "\xea\xf0\xc5_,\x0b\x86\xb0\xd1\x92\x88c!pX\x98" + - "g\xe09\x0ds\xb67&#\xa3\\#/P\xd8m" + - "\xe6Q\xde\xee\x0b\xbbmK\x01\xe4\xbf\xe5Q\xde\xe5\x0b" + - 
"\xbb\x1d\x06\x80\xfc\x12\x8f\xf2~\x0e1;-\xdc\xd7\x0b" + - " \xef\xe7Q>Bu\x9f\xcfp\xccC\x14\xb4\xaf\xf2" + - "(\x9f\xa0\xa2\x1f`E_\x97\xe4\xa1\xc2lg" + - "\xcc:\xf8F\x92/D\x0e\xed\x94\xa1\xc7T\xd3\x9c\x0f" + - "\xe8f\x14\x87{9wN\xb0\x946\xe7s\x035x" + - "\x09\xcb\xd7\xbd&\x92\xf1\xb9\x8a\x05\xa8\xba\x10K1\xda" + - "T\x0fb\xa4M\x8b\x8e\x1a\x04\x9fN;\xa6\x18m\xfa" + - "c\xaa\x01as\x80x\xb1\xa1\xfa\xf4\xf5\xbb\xc0\xce\xa5" + - "\x1ef\xfd\xf0\xaan\xad\x1b\xce\xe7(\xfd\xbd\x9e\xf9a" + - "\xc4\xc9~\x17\x96f\x7f\x17\xb9\xe2+\x1f\x97)\xee\x7f" + - "\xcb\xa3\xfc\x11\xf9\xb71\x93\xfen\xd3\x05\xfc\x80G\xf9" + - "\xaeo\xc4\xf61\xa5\xbf;\x03\xfbgL\x8a~=mE\x81" + - "Wc\xce\xc4\xb1;\xdbz\xe76\xdd\x83P\x89\xff\xe7" + - "F|8\xbdY\xde=\xa7;\xc7\x1bv\xcf\x99\xc9\xe7" + - "9]\xf4\xa7\xa7Yw\xb66\x04\x83\x03\x7f\x16mU" + - "\xcdp\xfe\xbf\xd5\xb8\xb3\xc6!\xd8\xcc\x196\xfbF\xac" + - "y%Kg\x0e\xc5\xc6P\x82et\xe5\xfc^3\xd6" + - "\xfb\xbd\xc6\xed!&M\xf3~\xb0\x11V\xa9]\xee\xa8" + - "y\x8d\x92L\xbb\xf7\xfb\x7f\x02\x00\x00\xff\xffz\xdfF" + - "\xb7" + return Conmon_servePortForwardContainer_Results(p.Struct()), err +} +func (p Conmon_servePortForwardContainer_Results_Future) Response() Conmon_ServePortForwardContainerResponse_Future { + return Conmon_ServePortForwardContainerResponse_Future{Future: p.Future.Field(0, nil)} +} + +const schema_ffaaf7385bc4adad = "x\xda\xc4z}t\x14e\x96\xf7\xbdUI*\x09\xe9" + + "t\x8a\xea\xcc\x8b11\x04\x02o\x92!\xc3G`\x05" + + "\x0e\x1c\x12 \xb20\xa0\xa94\xea\x0a3.Ew\x11" + + "\x1a:\xddMU5\x10\xd4E\x9deu\xf0c\x84\xc5" + + "\xa3p\x86s\xc4o\x18P\x9c\x19\x1cE\x99#\x8c\x1e" + + "\x11u\xd7\xb0\x8b\xab\x1eqQ`\x14\x8e\xa88z\x04" + + "\x0eX{\xee\xd3]\x1f]i\xa1;rv\xff\xc8I" + + "\xf7\xad[\xf7>\xcf}\xee\xe7\xef\xe9Q{\xcaZ\x0b" + + "F\xfb\xbe\x1c\x08\x9c\xfcaa\x91\xa9\x7f\xdc\xa3=\xb9" + + "y\xc6\xaf@\x1c\x8a\x00\x85(\x00\xb4\xfc\xb2\xe4\x13\x04" + + "\x94\x96\x95L\x014\xbf}|\xff\xe4\x87\xd6}\xb9\xd6" + + "\xcd\xb0.\xc5\xf0\x04c\xf8p|\xd3\xa2G\xf8\xd9\xf7" + + 
"\xb8\x19\xde(\xf9\x80\x18\x0e3\x86\x7fZ\xe0\xdf\xf0\xaf" + + "?\x99\xcf\x18\xcc\xd3-\x8b\x0eo\xfc\xec\xea?A\xa1" + + "@\x8c\x17J>@\xe9\x8aR\xfaXY\xfa\x1b\x044" + + "\xef\xf9i\x9b\\\xfa\xe0c\x0f\xb8\xc5};\xe0$\x89" + + "+)#q\xa36\xbd9bS\xf3S\x1b<\xe2\x18" + + "cc\x19\xc7I\xede\x02\x80\xd4\xc6\x98\xdf\xbcj\xd5" + + "\x8d\xfe'\xefz8\x1b\xb3R\xf6\x09J\xb71\xe6\x1e" + + "\xc6\xdc9p\xcd\xdc\x87:\xef\xdc\xecV\xbd\xa3l\x08" + + "\x07(\xbd\xc6\x18\xd6\x9c\xb8\xf6\xf9\xeb\x7f\xf5\xe5#n" + + "\x86\xe3e_\xd3\xda\xce2\x86\x8d\xf3?[\xda>\xd3" + + "\xffh\xa6\xba\x02\xe2\xab\xf1mCi\x9cO\x00\xde|" + + "\xee@sg\xb4\xf5\xcd\xc7\xdcbD\xdf9\x123\xcc" + + "Gb~r\xee\xe8\xe3\xef\x06\xc7o\x05y(\xf6Y" + + "v;\x09\xfa\xa5\x8f\x96}\x93o\x05\xa09n\xc5s" + + "\x8b\xbf\x9etbk\xb6=\xee\xf2}\x8dR/c~" + + "\x9b\x89\xae\x7f\xf6/\xbdk'\x8d\xdc\xe6\xd6}\xcaw" + + "\x90tc91\xac}J\xfd\xff{w\xff\x9c\x188" + + "G\x1a`K]\xf9Z\x94&\x94\x93\xa8q\xe5W\x03" + + "\x9a+\x16\xec\x7fv\x95||{\x96\xcdN._\x8f" + + "\xd2\xf5\xe5\xb4Yi\xfa\xa8\x17\xdfmi\xda\xe1\xdd\x0b" + + "G|\xa3\x89o&\x93\xd9^\xfe,\xe0w\xdf\xef\xb9" + + "\xeax\xe9\xcd\xcf\xb8\xcf\xbe\xbc\x89\x0e\xc0\xe7\xa7\xc5\x8d" + + "\xdd\xf2\x87\xe7\xef\xffb\xe53$\x8c\xf3\xeeu\xb4\x7f" + + "\x09J\xed\xfe\xff\x07 \xcd\xf1?\x0bh\xde\xd2{\xf2" + + "\xe9\xfb\xefi\xdb\xe5U\xcd\xb3s\xf3\xefC\xe9\x82\x9f" + + ">\x9e\xf5\xd7\x92\xe7\xf1G\x8e\x95\xde\xdf6\xf2\xf9l" + + "v\xac\x13\x0f\xa04Y\xa4\x85N\x10i%\xf6s\xb1" + + "\x9a7w\xecxu\xfe\xf8\xef\xb6\x99d\xa6\x9b\xc4*" + + "l\x89\x88\x7fB\x8a\x13\xe9uNZW)\x00\x98\x8d" + + "\x9f\xdf\xfd\xe4]\xbf\x0b\xee\xce&\xbc\xa7r\x1f\xa6\xd8" + + "\xa4\xfb*I\xf8\x81\xe7\xb7NU^\xe2\xda\xdf\x8e\xbe\xeefX" + + "SUJv\xdd\xcc\x18^\x1b?x\xe0\x96\xf7\xd4\xfd" + + "\x9e\xd5\xb13\xdaS5\x84\x93\x0eW\xd1\xea\xde\xaf\"" + + "\x7f\xfb\x97g\x86\xaf\xf4\xdd\x7f\xef\x81\xac':\xe7\xca" + + "s(E\xae\xa4\x8f\xea\x95\xcc\xac\x9f\x1e\xfb~IW" + + "b\xe4[n\xe5\xbf\xae^O\xab\xdbXM\xca\x97\x0e" + + "\xd8\x1f(\x99\xa2\xff\x9b\x9b\xe1\xc5\xea}\xc4\xf0\x06c" + + 
"8S\xf9\xe7\x87\xaa&\xed\xce`8Q\xcd\xf6w\x81" + + "1\xd4U\xff\xf5\x0e-R\xfdN\xd6\x83\xae\xab9\x89" + + "\xd2\xe4\x1a\xfa8\xa1\xe6uZQU[\xefX\x7fl" + + "\xc6;\x9e\xdd2\xeea\xb5\x8f\xa24\xb9\x96\x1dt-" + + "9\xd1c\x7f{z\xc1\xaeu\x81w\xfb\xc4\xc3\x89\xda" + + "%(]`\x9cgkW\x03\x9a\xe7\xd7L\xba\xbd\xa6" + + "\xe6\xdd\xf7\xb3\xda\xa5y\xf0I\x94f\x0ef\x9e>\xf8" + + "S@s\xd3OW$n^8\xf1#\x0f7\x0b\x9f" + + "\xba\xba\x0fP\x9a\\\xc7\x16QG\x1b\xbc}\xfb\x9dO" + + "\x1d\xfcb\xf7G\x19Y\xba\x8e%\x9ee\x8c\xe1\xfc\xc4" + + "\xf3\x7f~dR\xe2\xbf\xbd\xba\x0b\x89\xf3\xc1\xba\xb5(" + + "\xed q-[\xebn$\x0b\\\x9f\x98!\x0e\xef," + + "?\xe2\x96W3t 9\xc4\xb8\xa1,\xc9\xde2c" + + "\xeb\xcd\x11\xe9\xa8\x9b\xe1\xa6\xa1\x9bHa\x841\xfc\x9d" + + "\xf4\x97\x9d\xb1u'\x8f\xbb\x19\xee\x1b\xca\xd2\xf4\x16\xc6" + + "\xf0\xd2-\xa7\x07\xed<~\xf0\x94\x9ba\xefP\x8eT" + + "\x1cb\x0c{\xe7\xb7t\xfc\xd7\xd1\xe1_\x81\xd8\xcc9" + + "\x81\x0d\xd8rv\xe8z\x94\xc4z\xda\xbd\xaf\xfe:@" + + "\xb3\xf7\x8b\xda\xedo\x1e\xff\xf9\xdf\xbc\x9b+aQ_" + + "\xbf\x09\xa5\x99\xf5,3\xd6_\xcd\x01\x9aO.{\xec" + + "\x813C\xc4o\xbcI\x82\xd9b\xe3\xf0OP\xda5" + + "\x9c>>7\x9cy\xc3\x0b\x9b6\xfc\xe6\xd513\xbe" + + "\xc9p\xbf\x06\x96\x11\xdfn\xa0\x85V\xfe\xe3\x1dG\x9a" + + "N\x1c\xcd`8\xd5p\x80\xa5\xccFb86\xf6\xd3" + + "i\x83n\xeb\xf8.[l\x0fkl\xe2\xa4\xf6FV" + + "\x91\x18\xf3\xcb\xb8m\xc0/\x96|v\xc6-Mid" + + "\x86K2\x863[~\xd7r\xfb\xdb\x7f8\x9b%\xad" + + "nl<\x80\xd2\xaeFJ\xab\xeb\xffcV\xf7G\x17" + + "^:\x97-b\xd75>\x8a\xd2V\xa6\xf3\x89\xc6\x15" + + "\xd0lFb\x86\xaa\xc5\x94h\xd1\xc8\x84\x167\xe2#" + + "C\xf1Xw<\xf6\xb3\x90\x92\x88%&NK}Q" + + "W\xaa\xa1`O,4-\x1e3\x94HL\xd5\xea;" + + "\x14MP\xbau\xb9\x80/\x00(@\x00\xd17\x15@" + + ".\xe6Q\x0ep\xb8ZS\x97%U\xdd\xc0\x0a\xc7\xe0" + + "\x80X\x01\x98\x93\xba\x90\xa6*\x86z\xad\xd2\xad\xea\x09" + + "%\xa4\xea\xf5\x9d\xaa\x9e\x14\xa2F\x86\xbaY\x00r\x19" + + "\x8f\xf2 \x0eMM\xd5\x13\xf1\x98\xae\x02\x00V8u" + + "\xfd\xc7\xa8\xecP4\x85\xcfe\x83v\x07\x93\x87\xb6i" + + 
"\x1em\x9d$\x8d\xd7\x8d\x0eD\xb9\xdaV\xb8k!\x80" + + "\xfcG\x1e\xe5W8\x14\x11\x03H\xc4=\xf3\x00\xe4\x97" + + "y\x94\xdf\xe3P\xe4\xb8\x00r\x00\xe2!\xe2\xfcO\x1e" + + "\xe5\xaf8\x14y>\x80<\x80x\x8a\x88\x9f\xf3\x18," + + "F\x0e\xc5\x82\x82\x00\x16\x00H\x858\x0b X\x80<" + + "\x06+\x88^X\x18\xc0B\x8a&\x1c\x03\x10,&z" + + "\x80\xe8EE\x01,\x02\x90D\xc6_A\xf4\x11\xc8\xa1" + + "\xd9\xad\x1aJX1\x14\x10\xae\x8b\x86\xd1\x07\x1c\xfa\x00" + + "\xcdXz+\xc0\xab:\x96\x03v\xf0\x88~'=\x02" + + "\x12\xd1LF\xc2s\x94D\"\x02B\xac\xcbf+\x03" + + "\x8e=\xec\xba\xd8\xc3\x85\x8a\xaev(\xc6b:`\xa2" + + "\x95\x01\xd6&\xe2\xe1\x99a\xeb\x9b\xb3.\x00\xeb\xe5\x0a" + + "'\x10\xd2\x0b\xc8\xe5ltU[\xae\xb6\xafT3\x9c" + + "\xdd\xaf\xe5\xe4\xecv\xad\xf1\xf8\x82p\x11}A\xd2\xd7" + + "f\x18Jh\xb1\xad\xb1\xd3\xf2gr\x08\x97\xd6!\x8e" + + "V!\xa9E\xed\xcd\xf7\xcf\xe7\xf4D\\\x88\xe9\xaaG" + + "\xc7\xbct\\5p\xd9\x8f\xb5\xc2\xe9@=V\x15." + + "e\xd5\x8e\xb8f\\\x13\xd7V(Z\xb8\x1f\x99\xc4\xee" + + "C\xf2\x084M\x8d'\xd4\xd8\xecx\x97\xa3\xafS\xad" + + "\xd5\x939\xe7\x12\xbbi\xf7(-\xbc\x88\xd2NK)" + + "\x99\xd8\x1fO\x998w\xdf\xf3\xf8B\xfd\x14JE\xb9" + + "\x18\xc8\xee\xbd\xf2X\xab\xed\x0f\xee\xb5\xca\xc5\xb6\xaa\xc6" + + "&\x00\xb9\x9eGy\x14\x87V\x0aj&Z\x03\x8f\xf2" + + "X\x0e\xfdFOB\xf5\x84\xba\x1f\xd0\x9fP\x8c\xc5y" + + "\xb9g\xb0O\xd8u\xaazm\"\xde\xd7A\x7fL\x10" + + "(\x1e\xdb2\xd3b\x0e\xb6\xb5{\xa8" + + "\xd5\xc3\xb1\xf9\xc5x\xd2\x06\x90G\xf0\x85\x006z\x85" + + "\x16\"\"\xc98\x158\xa9\x1d\x05t\xb0\x03\xb4 *" + + "i\x02\xde\x09\x9c4\x1a\x05\xe4ll\x1d\xad\x99_\x1a" + + "\x86\xeb\x81\x93\xeaP@\xdeFb\xd1\x82\xef\xa4J\xf6" + + "\xae\x0f\x05,\xb0\xf1\x15\xb40g\x09q\x13p\xe2\x05" + + "\x01\x0bm<\x0f-\x1cG<\xbd\x1b8\xf1\x94\x80E" + + "6\x1c\x8f\x16p/~\xbc\x168\xf1\xb0\x80\x82\x8d\xb4" + + "\xa1\x05i\x88\xbd\x1ap\xe2\x1b\x02\x16\xdb\xb8;ZX" + + "\x93\xb8\x87\xf4\xbd(`\x89\x0dX\xa3\x05\xea\x88;\xb6" + + "\x01'n\x15\xb0\xd4\x06\xcd\xf1\xfb=W\x01\xc3n7" + + "\x1f\x04N\xdc,P\xd3@!\xda\x8af(\x1dg\x98" + + 
"62\xb4\xa2i\xe1\x0dh\x99\x1e\xb5V4\xad\xe6\xcd" + + "\xcd\xa9\xd9\x01\x92f\xe5Ub\xd53\x82aZ<6" + + "%\xf5\x8a\xad\xefZ\x05-_\x07\x92\xa3\xa7]\x17j" + + "\x99\xef2\x11)'\xc4\x90Gr\xca\xc1\xd0r0\x7f" + + "Z\xae\xd5\x97p^\xb7\xa1\xb5\xe7:\x00x\xaa\x93S" + + "\xa7\xb3\xb44\x0d\xdce,\x8b\xde\x0c\xe9\xea\x0f[-" + + "\xd5\xd2sX\x05\x10\xdc\x8e<\x06_@\x07\x93\x90v" + + "\xe1<\x80\xe0\x1f\x89\xfe\x0ar\x88)TB\xda\xc3\xa0" + + "\x83\x97\x89\xbc\x1f\x9d^Az\x8dA\x0d\xaf\x10\xfd-" + + "t\xda\x05\xe9\x0d\xec\x04\x08\xee'\xfa1\x06M\xf0)" + + "h\xe2c\\\x02\x10\x9bz\x88\x84\x16\x0f\xa9\xba>\x13\xd0\xce\x9e\x16" + + "\x0ed\x85\xba`(]\xd6\xe7)\xd4CD\x0c\xd7\xec" + + "\x1b\x89\x86\xa7So\xaf\xda,\x86\xa2u\xa9\x0e\x8b\x96" + + "\xd4\x0d25\x08.\x99fH\xd1\xba\xe27\xa8\x1a\xf8" + + "\xf5>\xe4\xb9\x9a\xea\x92\x97\x917\xac\\\xd2\xcfZ\xe9" + + "t\x18\xae\x96\x8f\xb2\xee[\xa9\x1foXI\xf7\xd0\xbc" + + "\xf4o7\x8e\xb8J\xe5a\x0a\xb7\xf7x\x94\xbf\xa1\xf3" + + "mMe\xdd\xd3\x14\x10_\xf1(\x9fw\x0d\x0fg)" + + "\xeb\x9eI\xf7\x866d\x82d\xfaN\xdbC,\xc4\xe4" + + "\x0a\xe6Q\xccCF\xa1\x0b1if\xdd\xde\x08\xa2\x8f" + + "\xc7\xccDM\xde\x1fO\x1aA\xe0\xd5\x90u\xe5\xf4\x03" + + "\xdd]6 \xe2\xffx\x8c\xef\xcf\x84\x90\xf3\xe4c_" + + "\xe4\xf4{\xf2\xb1\xb2^\x8ei\xcf\xbeZ\xb9&2"}, + Tty: terminal, + Stdout: true, + Stderr: true, + }) + + Expect(err).To(Succeed()) + Expect(result.URL).NotTo(BeEmpty()) + + config := &rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}} + executor, err := remotecommand.NewWebSocketExecutor(config, "GET", result.URL) + Expect(err).NotTo(HaveOccurred()) + + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + + streamOptions := remotecommand.StreamOptions{ + Stdout: stdout, + Stderr: stderr, + Tty: terminal, + } + err = executor.StreamWithContext(context.Background(), streamOptions) + Expect(err).NotTo(HaveOccurred()) + + if terminal { + Expect(stdout.String()).To(Equal("stdoutstderr")) + Expect(stderr.Len()).To(BeZero()) + } else { + 
Expect(stdout.String()).To(Equal("stdout")) + Expect(stderr.String()).To(Equal("stderr")) + } + }) + + It(testName("should fail if container does not exist", terminal), func() { + tr = newTestRunner() + tr.createRuntimeConfigWithProcessArgs(terminal, []string{"/busybox", "sleep", "10"}, nil) + sut = tr.configGivenEnv() + + result, err := sut.ServeExecContainer(context.Background(), &client.ServeExecContainerConfig{ + ID: "wrong", + Command: []string{"/busybox", "sh", "-c", "echo -n stdout && echo -n stderr >&2"}, + Tty: terminal, + Stdout: true, + Stderr: true, + }) + + Expect(err).To(HaveOccurred()) + Expect(result).To(BeNil()) + }) + } + }) + + Describe("ServeAttachContainer", func() { + const terminal = true + + It(testName("should succeed", terminal), func() { + tr = newTestRunner() + tr.createRuntimeConfigWithProcessArgs(terminal, []string{"/busybox", "watch", "-n0.5", "echo", "test"}, nil) + sut = tr.configGivenEnv() + tr.createContainer(sut, terminal) + tr.startContainer(sut) + + result, err := sut.ServeAttachContainer(context.Background(), &client.ServeAttachContainerConfig{ + ID: tr.ctrID, + Stdin: true, + Stdout: true, + Stderr: true, + }) + + Expect(err).To(Succeed()) + Expect(result.URL).NotTo(BeEmpty()) + + config := &rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}} + executor, err := remotecommand.NewWebSocketExecutor(config, "GET", result.URL) + Expect(err).NotTo(HaveOccurred()) + + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + + streamOptions := remotecommand.StreamOptions{ + Stdout: stdout, + Stderr: stderr, + Tty: terminal, + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + err = executor.StreamWithContext(ctx, streamOptions) + Expect(err).To(HaveOccurred()) + Expect(tr.rr.RunCommand("delete", "-f", tr.ctrID)).To(Succeed()) + Expect(stdout.String()).To(ContainSubstring("echo test")) + Expect(stderr.String()).To(BeEmpty()) + }) + + It(testName("should fail if container does 
not exist", terminal), func() { + tr = newTestRunner() + tr.createRuntimeConfigWithProcessArgs(terminal, []string{"/busybox", "sleep", "10"}, nil) + sut = tr.configGivenEnv() + + result, err := sut.ServeAttachContainer(context.Background(), &client.ServeAttachContainerConfig{ + ID: "wrong", + }) + + Expect(err).To(HaveOccurred()) + Expect(result).To(BeNil()) + }) + }) +}) diff --git a/pkg/client/suite_test.go b/pkg/client/suite_test.go index 5e35a46dfd..729bf294a4 100644 --- a/pkg/client/suite_test.go +++ b/pkg/client/suite_test.go @@ -38,7 +38,7 @@ var ( busyboxDest = filepath.Join(busyboxDestDir, "busybox") runtimePath = os.Getenv("RUNTIME_BINARY") conmonPath = os.Getenv(conmonBinaryKey) - maxRSSKB = 9500 + maxRSSKB = 12000 ) // TestConmonClient runs the created specs.