From 4911334a403aac2182e2298c579d724757602b3e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 15:01:19 +0200 Subject: [PATCH 01/74] Small comment fix --- lib/src/libp2p/connection/yamux.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index c344c1dc1c..9dbeee8e85 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -118,9 +118,6 @@ pub struct Yamux { /// List of substream IDs that have been reset locally. For each entry, a RST header should /// be sent to the remote and the entry removed. - /// - /// The FNV hasher is used because the substream IDs are allocated locally, and as such there - /// is no risk of HashDoS attack. rsts_to_send: VecDeque, /// Source of randomness used for various purposes. From a65dd8f472ec14871c1eabf1009647614a056726 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 15:05:54 +0200 Subject: [PATCH 02/74] Wrap all fields in a `Box` to reduce movements --- lib/src/libp2p/connection/yamux.rs | 278 ++++++++++++++++------------- 1 file changed, 150 insertions(+), 128 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 9dbeee8e85..d8a75c1949 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -84,6 +84,12 @@ pub struct Config { } pub struct Yamux { + /// The actual fields are wrapped in a `Box` because the `Yamux` object is moved around pretty + /// often. + inner: Box>, +} + +struct YamuxInner { /// List of substreams currently open in the Yamux state machine. /// /// A `SipHasher` is used in order to avoid hash collision attacks on substream IDs. @@ -268,8 +274,8 @@ enum OutgoingSubstreamData { /// Buffer of buffers to be written out to the socket. write_buffers: Vec>, - /// Number of bytes in `self.write_buffers[0]` has have already been written out to the - /// socket. 
+ /// Number of bytes in `self.inner.write_buffers[0]` has have already been written out to + /// the socket. first_write_buffer_offset: usize, }, } @@ -280,25 +286,27 @@ impl Yamux { let mut randomness = ChaCha20Rng::from_seed(config.randomness_seed); Yamux { - substreams: hashbrown::HashMap::with_capacity_and_hasher( - config.capacity, - SipHasherBuild::new(randomness.gen()), - ), - num_inbound: 0, - received_goaway: None, - incoming: Incoming::Header(arrayvec::ArrayVec::new()), - outgoing: Outgoing::Idle, - outgoing_goaway: OutgoingGoAway::NotRequired, - next_outbound_substream: if config.is_initiator { - NonZeroU32::new(1).unwrap() - } else { - NonZeroU32::new(2).unwrap() - }, - pings_to_send: 0, - // We leave the initial capacity at 0, as it is likely that no ping is sent at all. - pings_waiting_reply: VecDeque::new(), - rsts_to_send: VecDeque::with_capacity(config.capacity), - randomness, + inner: Box::new(YamuxInner { + substreams: hashbrown::HashMap::with_capacity_and_hasher( + config.capacity, + SipHasherBuild::new(randomness.gen()), + ), + num_inbound: 0, + received_goaway: None, + incoming: Incoming::Header(arrayvec::ArrayVec::new()), + outgoing: Outgoing::Idle, + outgoing_goaway: OutgoingGoAway::NotRequired, + next_outbound_substream: if config.is_initiator { + NonZeroU32::new(1).unwrap() + } else { + NonZeroU32::new(2).unwrap() + }, + pings_to_send: 0, + // We leave the initial capacity at 0, as it is likely that no ping is sent at all. + pings_waiting_reply: VecDeque::new(), + rsts_to_send: VecDeque::with_capacity(config.capacity), + randomness, + }), } } @@ -307,24 +315,24 @@ impl Yamux { /// > **Note**: After a substream has been closed or reset, it must be removed using /// > [`Yamux::remove_dead_substream`] before this function can return `true`. pub fn is_empty(&self) -> bool { - self.substreams.is_empty() + self.inner.substreams.is_empty() } /// Returns the number of substreams in the Yamux state machine. 
Includes substreams that are /// dead but haven't been removed yet. pub fn len(&self) -> usize { - self.substreams.len() + self.inner.substreams.len() } /// Returns the number of inbound substreams in the Yamux state machine. Includes substreams /// that are dead but haven't been removed yet. pub fn num_inbound(&self) -> usize { debug_assert_eq!( - self.num_inbound, - self.substreams.values().filter(|s| s.inbound).count() + self.inner.num_inbound, + self.inner.substreams.values().filter(|s| s.inbound).count() ); - self.num_inbound + self.inner.num_inbound } /// Opens a new substream. @@ -351,13 +359,18 @@ impl Yamux { /// pub fn open_substream(&mut self, user_data: T) -> SubstreamMut { // It is forbidden to open new substreams if a `GoAway` frame has been received. - assert!(self.received_goaway.is_none()); + assert!(self.inner.received_goaway.is_none()); // Make sure that the `loop` below can finish. - assert!(usize::try_from(u32::max_value() / 2 - 1) - .map_or(true, |full_len| self.substreams.len() < full_len)); + assert!( + usize::try_from(u32::max_value() / 2 - 1).map_or(true, |full_len| self + .inner + .substreams + .len() + < full_len) + ); - // Grab a `VacantEntry` in `self.substreams`. + // Grab a `VacantEntry` in `self.inner.substreams`. let entry = loop { // Allocating a substream ID is surprisingly difficult because overflows in the // identifier are possible if the software runs for a very long time. @@ -365,9 +378,9 @@ impl Yamux { // this ID exists, the code below properly handles wrapping around and ignores IDs // already in use . 
// TODO: simply skill whole connection if overflow - let id_attempt = self.next_outbound_substream; - self.next_outbound_substream = { - let mut id = self.next_outbound_substream.get(); + let id_attempt = self.inner.next_outbound_substream; + self.inner.next_outbound_substream = { + let mut id = self.inner.next_outbound_substream.get(); loop { // Odd ids are reserved for the initiator and even ids are reserved for the // listener. Assuming that the current id is valid, incrementing by 2 will @@ -380,7 +393,7 @@ impl Yamux { } } }; - if let Entry::Vacant(e) = self.substreams.entry(id_attempt) { + if let Entry::Vacant(e) = self.inner.substreams.entry(id_attempt) { break e; } }; @@ -404,11 +417,11 @@ impl Yamux { user_data, }); - match self.substreams.entry(substream_id.0) { + match self.inner.substreams.entry(substream_id.0) { Entry::Occupied(e) => SubstreamMut { substream: e, - outgoing: &mut self.outgoing, - rsts_to_send: &mut self.rsts_to_send, + outgoing: &mut self.inner.outgoing, + rsts_to_send: &mut self.inner.rsts_to_send, }, _ => unreachable!(), } @@ -419,19 +432,21 @@ impl Yamux { /// /// If `Some` is returned, it is forbidden to open new outbound substreams. pub fn received_goaway(&self) -> Option { - self.received_goaway + self.inner.received_goaway } /// Returns an iterator to the list of all substream user datas. pub fn user_datas(&self) -> impl ExactSizeIterator { - self.substreams + self.inner + .substreams .iter() .map(|(id, s)| (SubstreamId(*id), &s.user_data)) } /// Returns an iterator to the list of all substream user datas. 
pub fn user_datas_mut(&mut self) -> impl ExactSizeIterator { - self.substreams + self.inner + .substreams .iter_mut() .map(|(id, s)| (SubstreamId(*id), &mut s.user_data)) } @@ -441,18 +456,18 @@ impl Yamux { pub fn substream_by_id(&self, id: SubstreamId) -> Option> { Some(SubstreamRef { id, - substream: self.substreams.get(&id.0)?, + substream: self.inner.substreams.get(&id.0)?, }) } /// Returns a reference to a substream by its ID. Returns `None` if no substream with this ID /// is open. pub fn substream_by_id_mut(&mut self, id: SubstreamId) -> Option> { - if let Entry::Occupied(e) = self.substreams.entry(id.0) { + if let Entry::Occupied(e) = self.inner.substreams.entry(id.0) { Some(SubstreamMut { substream: e, - outgoing: &mut self.outgoing, - rsts_to_send: &mut self.rsts_to_send, + outgoing: &mut self.inner.outgoing, + rsts_to_send: &mut self.inner.rsts_to_send, }) } else { None @@ -461,7 +476,7 @@ impl Yamux { /// Queues sending out a ping to the remote. pub fn queue_ping(&mut self) { - self.pings_to_send += 1; + self.inner.pings_to_send += 1; } /// Returns `true` if [`Yamux::send_goaway`] has been called in the past. @@ -469,13 +484,13 @@ impl Yamux { /// In other words, returns `true` if a `GoAway` frame has been either queued for sending /// (and is available through [`Yamux::extract_out`]) or has already been sent out. pub fn goaway_queued_or_sent(&self) -> bool { - !matches!(self.outgoing_goaway, OutgoingGoAway::NotRequired) + !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) } /// Returns `true` if [`Yamux::send_goaway`] has been called in the past and that this /// `GoAway` frame has been extracted through [`Yamux::extract_out`]. pub fn goaway_sent(&self) -> bool { - matches!(self.outgoing_goaway, OutgoingGoAway::Sent) + matches!(self.inner.outgoing_goaway, OutgoingGoAway::Sent) } /// Queues a `GoAway` frame, requesting the remote to no longer open any substream. @@ -494,8 +509,10 @@ impl Yamux { /// the same instance of [`Yamux`]. 
/// pub fn send_goaway(&mut self, code: GoAwayErrorCode) { - match self.outgoing_goaway { - OutgoingGoAway::NotRequired => self.outgoing_goaway = OutgoingGoAway::Required(code), + match self.inner.outgoing_goaway { + OutgoingGoAway::NotRequired => { + self.inner.outgoing_goaway = OutgoingGoAway::Required(code) + } _ => panic!("send_goaway called multiple times"), } @@ -505,9 +522,9 @@ impl Yamux { data_frame_size, fin, .. - } = self.incoming + } = self.inner.incoming { - self.incoming = if data_frame_size == 0 { + self.inner.incoming = if data_frame_size == 0 { Incoming::Header(arrayvec::ArrayVec::new()) } else { Incoming::DataFrame { @@ -528,7 +545,8 @@ impl Yamux { &'_ self, ) -> impl Iterator + '_ { // TODO: O(n) - self.substreams + self.inner + .substreams .iter() .filter_map(|(id, substream)| { match &substream.state { @@ -563,7 +581,7 @@ impl Yamux { } }) .inspect(|(dead_id, _, _)| { - debug_assert!(!matches!(self.outgoing, + debug_assert!(!matches!(self.inner.outgoing, Outgoing::Header { substream_data_frame: Some((OutgoingSubstreamData::Healthy(id), _)), .. @@ -582,11 +600,11 @@ impl Yamux { /// Panics if the substream with that id doesn't exist or isn't dead. /// pub fn remove_dead_substream(&mut self, id: SubstreamId) -> T { - let substream = self.substreams.remove(&id.0).unwrap(); + let substream = self.inner.substreams.remove(&id.0).unwrap(); // TODO: check whether substream is dead using the same criteria as in dead_substreams() if substream.inbound { - self.num_inbound -= 1; + self.inner.num_inbound -= 1; } substream.user_data @@ -619,7 +637,7 @@ impl Yamux { let mut total_read: usize = 0; loop { - match self.incoming { + match self.inner.incoming { Incoming::PendingIncomingSubstream { .. 
} => break, Incoming::DataFrame { @@ -627,7 +645,7 @@ impl Yamux { remaining_bytes: 0, fin: true, } => { - self.incoming = Incoming::Header(arrayvec::ArrayVec::new()); + self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); if let Some(Substream { state: @@ -636,7 +654,7 @@ impl Yamux { .. }, .. - }) = self.substreams.get_mut(&substream_id.0) + }) = self.inner.substreams.get_mut(&substream_id.0) { *remote_write_closed = true; @@ -653,7 +671,7 @@ impl Yamux { fin: false, .. } => { - self.incoming = Incoming::Header(arrayvec::ArrayVec::new()); + self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); } Incoming::DataFrame { @@ -691,7 +709,7 @@ impl Yamux { .. }, .. - }) = self.substreams.get_mut(&substream_id.0) + }) = self.inner.substreams.get_mut(&substream_id.0) { debug_assert!(!*remote_write_closed); return Ok(IncomingDataOutcome { @@ -704,9 +722,9 @@ impl Yamux { }); } - // Also note that we don't switch back `self.incoming` to `Header`. Instead, - // the next iteration will pick up `DataFrame` again and transition again. - // This is necessary to handle the `fin` flag elegantly. + // Also note that we don't switch back `self.inner.incoming` to `Header`. + // Instead, the next iteration will pick up `DataFrame` again and transition + // again. This is necessary to handle the `fin` flag elegantly. } Incoming::DataFrame { @@ -744,11 +762,11 @@ impl Yamux { // Ping. In order to queue the pong message, the outgoing queue must // be empty. If it is not the case, we simply leave the ping header // there and prevent any further data from being read. 
- if !matches!(self.outgoing, Outgoing::Idle) { + if !matches!(self.inner.outgoing, Outgoing::Idle) { break; } - self.outgoing = Outgoing::Header { + self.inner.outgoing = Outgoing::Header { header: { let mut header = arrayvec::ArrayVec::new(); header @@ -775,10 +793,11 @@ impl Yamux { is_goaway: false, }; - self.incoming = Incoming::Header(arrayvec::ArrayVec::new()); + self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); } header::DecodedYamuxHeader::PingResponse { opaque_value } => { let pos = match self + .inner .pings_waiting_reply .iter() .position(|v| *v == opaque_value) @@ -787,8 +806,8 @@ impl Yamux { None => return Err(Error::PingResponseNotMatching), }; - self.pings_waiting_reply.remove(pos); - self.incoming = Incoming::Header(arrayvec::ArrayVec::new()); + self.inner.pings_waiting_reply.remove(pos); + self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); return Ok(IncomingDataOutcome { yamux: self, bytes_read: total_read, @@ -796,12 +815,13 @@ impl Yamux { }); } header::DecodedYamuxHeader::GoAway { error_code } => { - self.incoming = Incoming::Header(arrayvec::ArrayVec::new()); + self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); // TODO: error if we have received one in the past before? - self.received_goaway = Some(error_code); + self.inner.received_goaway = Some(error_code); - let mut reset_substreams = Vec::with_capacity(self.substreams.len()); - for (substream_id, substream) in self.substreams.iter_mut() { + let mut reset_substreams = + Vec::with_capacity(self.inner.substreams.len()); + for (substream_id, substream) in self.inner.substreams.iter_mut() { if !matches!( substream.state, SubstreamState::Healthy { @@ -818,7 +838,7 @@ impl Yamux { // being reset. If that happens, we need to update some internal // state regarding this frame of data. 
match ( - &mut self.outgoing, + &mut self.inner.outgoing, mem::replace(&mut substream.state, SubstreamState::Reset), ) { ( @@ -878,18 +898,18 @@ impl Yamux { return Err(Error::DataWithRst); } - self.incoming = Incoming::Header(arrayvec::ArrayVec::new()); + self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); // The remote might have sent a RST frame concerning a substream for // which we have sent a RST frame earlier. Considering that we don't // always keep traces of old substreams, we have no way to know whether // this is the case or not. - if let Some(s) = self.substreams.get_mut(&stream_id) { + if let Some(s) = self.inner.substreams.get_mut(&stream_id) { // We might be currently writing a frame of data of the substream // being reset. If that happens, we need to update some internal // state regarding this frame of data. match ( - &mut self.outgoing, + &mut self.inner.outgoing, mem::replace(&mut s.state, SubstreamState::Reset), ) { ( @@ -947,7 +967,7 @@ impl Yamux { length, .. } => { - match self.substreams.get(&stream_id) { + match self.inner.substreams.get(&stream_id) { Some(Substream { state: SubstreamState::Healthy { .. }, .. @@ -972,8 +992,8 @@ impl Yamux { // If we have queued or sent a GoAway frame, then the substream is // automatically rejected. - if !matches!(self.outgoing_goaway, OutgoingGoAway::NotRequired) { - self.incoming = if !is_data || length == 0 { + if !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) { + self.inner.incoming = if !is_data || length == 0 { Incoming::Header(arrayvec::ArrayVec::new()) } else { Incoming::DataFrame { @@ -990,11 +1010,11 @@ impl Yamux { // order to potentially queue the substream rejection message later. // If it is not the case, we simply leave the header there and prevent // any further data from being read. 
- if !matches!(self.outgoing, Outgoing::Idle) { + if !matches!(self.inner.outgoing, Outgoing::Idle) { break; } - self.incoming = Incoming::PendingIncomingSubstream { + self.inner.incoming = Incoming::PendingIncomingSubstream { substream_id: SubstreamId(stream_id), extra_window: if !is_data { length } else { 0 }, data_frame_size: if is_data { length } else { 0 }, @@ -1031,7 +1051,7 @@ impl Yamux { .. }, .. - }) = self.substreams.get_mut(&stream_id) + }) = self.inner.substreams.get_mut(&stream_id) { if *remote_write_closed { return Err(Error::WriteAfterFin); @@ -1048,7 +1068,7 @@ impl Yamux { *remote_window_pending_increase += 256 * 1024; } - self.incoming = Incoming::DataFrame { + self.inner.incoming = Incoming::DataFrame { substream_id: SubstreamId(stream_id), remaining_bytes: length, fin, @@ -1072,7 +1092,7 @@ impl Yamux { if let Some(Substream { state: SubstreamState::Healthy { allowed_window, .. }, .. - }) = self.substreams.get_mut(&stream_id) + }) = self.inner.substreams.get_mut(&stream_id) { *allowed_window = allowed_window .checked_add(u64::from(length)) @@ -1085,7 +1105,7 @@ impl Yamux { // We transition to `DataFrame` to make the handling a bit more // elegant. - self.incoming = Incoming::DataFrame { + self.inner.incoming = Incoming::DataFrame { substream_id: SubstreamId(stream_id), remaining_bytes: 0, fin, @@ -1141,14 +1161,14 @@ impl Yamux { /// Panics if no incoming substream is currently pending. 
/// pub fn accept_pending_substream(&mut self, user_data: T) -> SubstreamMut { - match self.incoming { + match self.inner.incoming { Incoming::PendingIncomingSubstream { substream_id, extra_window, data_frame_size, fin, } => { - let _was_before = self.substreams.insert( + let _was_before = self.inner.substreams.insert( substream_id.0, Substream { state: SubstreamState::Healthy { @@ -1168,9 +1188,9 @@ impl Yamux { ); debug_assert!(_was_before.is_none()); - self.num_inbound += 1; + self.inner.num_inbound += 1; - self.incoming = if data_frame_size == 0 { + self.inner.incoming = if data_frame_size == 0 { Incoming::Header(arrayvec::ArrayVec::new()) } else { Incoming::DataFrame { @@ -1181,12 +1201,12 @@ impl Yamux { }; SubstreamMut { - substream: match self.substreams.entry(substream_id.0) { + substream: match self.inner.substreams.entry(substream_id.0) { Entry::Occupied(e) => e, _ => unreachable!(), }, - outgoing: &mut self.outgoing, - rsts_to_send: &mut self.rsts_to_send, + outgoing: &mut self.inner.outgoing, + rsts_to_send: &mut self.inner.rsts_to_send, } } _ => panic!(), @@ -1209,18 +1229,18 @@ impl Yamux { /// pub fn reject_pending_substream(&mut self) { // Implementation note: the rejection mechanism could alternatively be implemented by - // queuing the substream rejection, rather than immediately putting it in `self.outgoing`. + // queuing the substream rejection, rather than immediately putting it in `self.inner.outgoing`. // However, this could open a DoS attack vector, as the remote could send a huge number // of substream open request which would inevitably increase the memory consumption of the // local node. - match self.incoming { + match self.inner.incoming { Incoming::PendingIncomingSubstream { substream_id, data_frame_size, fin, .. 
} => { - self.incoming = if data_frame_size == 0 { + self.inner.incoming = if data_frame_size == 0 { Incoming::Header(arrayvec::ArrayVec::new()) } else { Incoming::DataFrame { @@ -1240,8 +1260,8 @@ impl Yamux { header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); debug_assert_eq!(header.len(), 12); - debug_assert!(matches!(self.outgoing, Outgoing::Idle)); - self.outgoing = Outgoing::Header { + debug_assert!(matches!(self.inner.outgoing, Outgoing::Idle)); + self.inner.outgoing = Outgoing::Header { header, substream_data_frame: None, is_goaway: false, @@ -1251,11 +1271,11 @@ impl Yamux { } } - /// Writes a data frame header in `self.outgoing`. + /// Writes a data frame header in `self.inner.outgoing`. /// /// # Panic /// - /// Panics if `self.outgoing` is not `Idle`. + /// Panics if `self.inner.outgoing` is not `Idle`. /// fn queue_data_frame_header( &mut self, @@ -1264,11 +1284,11 @@ impl Yamux { substream_id: NonZeroU32, data_length: u32, ) { - assert!(matches!(self.outgoing, Outgoing::Idle)); + assert!(matches!(self.inner.outgoing, Outgoing::Idle)); let mut flags: u16 = 0; if syn_ack_flag { - if (substream_id.get() % 2) == (self.next_outbound_substream.get() % 2) { + if (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2) { // SYN flags |= 0x1; } else { @@ -1292,7 +1312,7 @@ impl Yamux { .unwrap(); debug_assert_eq!(header.len(), 12); - self.outgoing = Outgoing::Header { + self.inner.outgoing = Outgoing::Header { header, is_goaway: false, substream_data_frame: NonZeroUsize::new(usize::try_from(data_length).unwrap()).map( @@ -1306,11 +1326,11 @@ impl Yamux { }; } - /// Writes a window size update frame header in `self.outgoing`. + /// Writes a window size update frame header in `self.inner.outgoing`. /// /// # Panic /// - /// Panics if `self.outgoing` is not `Idle`. + /// Panics if `self.inner.outgoing` is not `Idle`. 
/// fn queue_window_size_frame_header( &mut self, @@ -1318,11 +1338,11 @@ impl Yamux { substream_id: NonZeroU32, window_size: u32, ) { - assert!(matches!(self.outgoing, Outgoing::Idle)); + assert!(matches!(self.inner.outgoing, Outgoing::Idle)); let mut flags: u16 = 0; if syn_ack_flag { - if (substream_id.get() % 2) == (self.next_outbound_substream.get() % 2) { + if (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2) { // SYN flags |= 0x1; } else { @@ -1343,21 +1363,21 @@ impl Yamux { .unwrap(); debug_assert_eq!(header.len(), 12); - self.outgoing = Outgoing::Header { + self.inner.outgoing = Outgoing::Header { header, substream_data_frame: None, is_goaway: false, }; } - /// Writes a ping frame header in `self.outgoing`. + /// Writes a ping frame header in `self.inner.outgoing`. /// /// # Panic /// - /// Panics if `self.outgoing` is not `Idle`. + /// Panics if `self.inner.outgoing` is not `Idle`. /// fn queue_ping_request_header(&mut self, opaque_value: u32) { - assert!(matches!(self.outgoing, Outgoing::Idle)); + assert!(matches!(self.inner.outgoing, Outgoing::Idle)); let mut header = arrayvec::ArrayVec::new(); header.push(0); @@ -1369,7 +1389,7 @@ impl Yamux { .unwrap(); debug_assert_eq!(header.len(), 12); - self.outgoing = Outgoing::Header { + self.inner.outgoing = Outgoing::Header { header, substream_data_frame: None, is_goaway: false, @@ -1389,7 +1409,7 @@ where { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() - .entries(self.0.substreams.values().map(|v| &v.user_data)) + .entries(self.0.inner.substreams.values().map(|v| &v.user_data)) .finish() } } @@ -1675,7 +1695,7 @@ impl<'a, T> ExtractOut<'a, T> { /// Builds the next buffer to send out and returns it. 
pub fn extract_next(&'_ mut self) -> Option + '_> { while self.size_bytes != 0 { - match self.yamux.outgoing { + match self.yamux.inner.outgoing { Outgoing::Header { ref mut header, ref mut substream_data_frame, @@ -1688,12 +1708,12 @@ impl<'a, T> ExtractOut<'a, T> { let out = mem::take(header); if *is_goaway { debug_assert!(matches!( - self.yamux.outgoing_goaway, + self.yamux.inner.outgoing_goaway, OutgoingGoAway::Queued )); - self.yamux.outgoing_goaway = OutgoingGoAway::Sent; + self.yamux.inner.outgoing_goaway = OutgoingGoAway::Sent; } - self.yamux.outgoing = + self.yamux.inner.outgoing = if let Some((data, remaining_bytes)) = substream_data_frame.take() { Outgoing::SubstreamData { data, @@ -1718,7 +1738,7 @@ impl<'a, T> ExtractOut<'a, T> { } => { let (write_buffers, first_write_buffer_offset) = match data { OutgoingSubstreamData::Healthy(id) => { - let substream = self.yamux.substreams.get_mut(&id.0).unwrap(); + let substream = self.yamux.inner.substreams.get_mut(&id.0).unwrap(); if let SubstreamState::Healthy { ref mut write_buffers, ref mut first_write_buffer_offset, @@ -1746,7 +1766,7 @@ impl<'a, T> ExtractOut<'a, T> { *first_write_buffer_offset = 0; match NonZeroUsize::new(remain.get() - first_buf_avail) { Some(r) => *remain = r, - None => self.yamux.outgoing = Outgoing::Idle, + None => self.yamux.inner.outgoing = Outgoing::Idle, }; either::Right(out) } else if remain.get() <= self.size_bytes { @@ -1756,7 +1776,7 @@ impl<'a, T> ExtractOut<'a, T> { 0, ); *first_write_buffer_offset += remain.get(); - self.yamux.outgoing = Outgoing::Idle; + self.yamux.inner.outgoing = Outgoing::Idle; either::Right(out) } else { let out = VecWithOffset( @@ -1775,7 +1795,7 @@ impl<'a, T> ExtractOut<'a, T> { Outgoing::Idle => { // Send a `GoAway` frame if demanded. 
- if let OutgoingGoAway::Required(code) = self.yamux.outgoing_goaway { + if let OutgoingGoAway::Required(code) = self.yamux.inner.outgoing_goaway { let mut header = arrayvec::ArrayVec::new(); header.push(0); header.push(3); @@ -1793,17 +1813,17 @@ impl<'a, T> ExtractOut<'a, T> { .unwrap(); debug_assert_eq!(header.len(), 12); - self.yamux.outgoing = Outgoing::Header { + self.yamux.inner.outgoing = Outgoing::Header { header, substream_data_frame: None, is_goaway: true, }; - self.yamux.outgoing_goaway = OutgoingGoAway::Queued; + self.yamux.inner.outgoing_goaway = OutgoingGoAway::Queued; continue; } // Send RST frames. - if let Some(substream_id) = self.yamux.rsts_to_send.pop_front() { + if let Some(substream_id) = self.yamux.inner.rsts_to_send.pop_front() { let mut header = arrayvec::ArrayVec::new(); header.push(0); header.push(1); @@ -1814,7 +1834,7 @@ impl<'a, T> ExtractOut<'a, T> { header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); debug_assert_eq!(header.len(), 12); - self.yamux.outgoing = Outgoing::Header { + self.yamux.inner.outgoing = Outgoing::Header { header, substream_data_frame: None, is_goaway: false, @@ -1823,11 +1843,11 @@ impl<'a, T> ExtractOut<'a, T> { } // Send outgoing pings. 
- if self.yamux.pings_to_send > 0 { - self.yamux.pings_to_send -= 1; - let opaque_value: u32 = self.yamux.randomness.gen(); + if self.yamux.inner.pings_to_send > 0 { + self.yamux.inner.pings_to_send -= 1; + let opaque_value: u32 = self.yamux.inner.randomness.gen(); self.yamux.queue_ping_request_header(opaque_value); - self.yamux.pings_waiting_reply.push_back(opaque_value); + self.yamux.inner.pings_waiting_reply.push_back(opaque_value); continue; } @@ -1835,6 +1855,7 @@ impl<'a, T> ExtractOut<'a, T> { // TODO: O(n) if let Some((id, sub)) = self .yamux + .inner .substreams .iter_mut() .find(|(_, s)| { @@ -1873,6 +1894,7 @@ impl<'a, T> ExtractOut<'a, T> { // TODO: choose substreams in some sort of round-robin way if let Some((id, sub)) = self .yamux + .inner .substreams .iter_mut() .find(|(_, s)| match &s.state { From 916f0ea82d531bdfc431d6046dc1a89b1b08ea8e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 15:11:25 +0200 Subject: [PATCH 03/74] Rename and comment field --- lib/src/libp2p/connection/yamux.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index d8a75c1949..e88cef8be1 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -154,7 +154,8 @@ enum SubstreamState { remote_window_pending_increase: u64, /// Amount of data the local node is allowed to transmit to the remote. allowed_window: u64, - local_write: SubstreamStateLocalWrite, + /// State of the local writing side of this substream. + local_write_close: SubstreamStateLocalWrite, /// True if the writing side of the remote node is closed for this substream. remote_write_closed: bool, /// Buffer of buffers to be written out to the socket. 
@@ -408,7 +409,7 @@ impl Yamux { remote_allowed_window: DEFAULT_FRAME_SIZE, remote_window_pending_increase: 0, allowed_window: DEFAULT_FRAME_SIZE, - local_write: SubstreamStateLocalWrite::Open, + local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: false, write_buffers: Vec::with_capacity(16), first_write_buffer_offset: 0, @@ -556,7 +557,7 @@ impl Yamux { &substream.user_data, )), SubstreamState::Healthy { - local_write, + local_write_close: local_write, remote_write_closed, write_buffers, first_write_buffer_offset, @@ -1177,7 +1178,7 @@ impl Yamux { remote_allowed_window: DEFAULT_FRAME_SIZE, remote_window_pending_increase: 0, allowed_window: DEFAULT_FRAME_SIZE + u64::from(extra_window), - local_write: SubstreamStateLocalWrite::Open, + local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: data_frame_size == 0 && fin, write_buffers: Vec::new(), first_write_buffer_offset: 0, @@ -1475,7 +1476,7 @@ impl<'a, T> SubstreamRef<'a, T> { matches!( self.substream.state, SubstreamState::Healthy { - local_write: SubstreamStateLocalWrite::Open, + local_write_close: SubstreamStateLocalWrite::Open, .. } ) @@ -1531,7 +1532,7 @@ impl<'a, T> SubstreamMut<'a, T> { match &mut substream.state { SubstreamState::Reset => {} SubstreamState::Healthy { - local_write, + local_write_close: local_write, write_buffers, first_write_buffer_offset, .. @@ -1602,7 +1603,7 @@ impl<'a, T> SubstreamMut<'a, T> { matches!( self.substream.get().state, SubstreamState::Healthy { - local_write: SubstreamStateLocalWrite::Open, + local_write_close: SubstreamStateLocalWrite::Open, .. } ) @@ -1620,7 +1621,7 @@ impl<'a, T> SubstreamMut<'a, T> { pub fn close(&mut self) { let substream = self.substream.get_mut(); if let SubstreamState::Healthy { - local_write: ref mut local_write @ SubstreamStateLocalWrite::Open, + local_write_close: ref mut local_write @ SubstreamStateLocalWrite::Open, .. 
} = substream.state { @@ -1900,7 +1901,7 @@ impl<'a, T> ExtractOut<'a, T> { .find(|(_, s)| match &s.state { SubstreamState::Healthy { write_buffers, - local_write, + local_write_close: local_write, .. } => { !write_buffers.is_empty() @@ -1913,7 +1914,7 @@ impl<'a, T> ExtractOut<'a, T> { if let SubstreamState::Healthy { first_message_queued, allowed_window, - local_write, + local_write_close: local_write, write_buffers, .. } = &mut sub.state From 681a40c27f905eb1469b2594a2489b5fa78a6b28 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 15:50:48 +0200 Subject: [PATCH 04/74] Cache the list of dead substreams --- lib/src/libp2p/connection/yamux.rs | 60 +++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 10 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index e88cef8be1..918324a107 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -95,6 +95,10 @@ struct YamuxInner { /// A `SipHasher` is used in order to avoid hash collision attacks on substream IDs. substreams: hashbrown::HashMap, SipHasherBuild>, + /// Subset of the content of [`YamuxInner::substreams`] that is considered "dead", meaning + /// that it is returned by [`Yamux::dead_substreams`]. + dead_substreams: hashbrown::HashSet, + /// Number of substreams within [`Yamux::substreams`] whose [`Substream::inbound`] is `true`. 
num_inbound: usize, @@ -292,6 +296,10 @@ impl Yamux { config.capacity, SipHasherBuild::new(randomness.gen()), ), + dead_substreams: hashbrown::HashSet::with_capacity_and_hasher( + config.capacity, + SipHasherBuild::new(randomness.gen()), + ), num_inbound: 0, received_goaway: None, incoming: Incoming::Header(arrayvec::ArrayVec::new()), @@ -423,6 +431,7 @@ impl Yamux { substream: e, outgoing: &mut self.inner.outgoing, rsts_to_send: &mut self.inner.rsts_to_send, + dead_substreams: &mut self.inner.dead_substreams, }, _ => unreachable!(), } @@ -469,6 +478,7 @@ impl Yamux { substream: e, outgoing: &mut self.inner.outgoing, rsts_to_send: &mut self.inner.rsts_to_send, + dead_substreams: &mut self.inner.dead_substreams, }) } else { None @@ -547,36 +557,38 @@ impl Yamux { ) -> impl Iterator + '_ { // TODO: O(n) self.inner - .substreams + .dead_substreams .iter() - .filter_map(|(id, substream)| { + .map(|id| { + let substream = self.inner.substreams.get(id).unwrap(); match &substream.state { - SubstreamState::Reset => Some(( + SubstreamState::Reset => ( SubstreamId(*id), DeadSubstreamTy::Reset, &substream.user_data, - )), + ), SubstreamState::Healthy { - local_write_close: local_write, + local_write_close, remote_write_closed, write_buffers, first_write_buffer_offset, .. } => { - if matches!(local_write, SubstreamStateLocalWrite::FinQueued) + if matches!(local_write_close, SubstreamStateLocalWrite::FinQueued) && *remote_write_closed && (write_buffers.is_empty() // TODO: cumbersome || (write_buffers.len() == 1 && write_buffers[0].len() <= *first_write_buffer_offset)) { - Some(( + ( SubstreamId(*id), DeadSubstreamTy::ClosedGracefully, &substream.user_data, - )) + ) } else { - None + // Substream shouldn't have been put in `dead_substreams`. + unreachable!() } } } @@ -601,8 +613,12 @@ impl Yamux { /// Panics if the substream with that id doesn't exist or isn't dead. 
/// pub fn remove_dead_substream(&mut self, id: SubstreamId) -> T { + let was_in = self.inner.dead_substreams.remove(&id.0); + if !was_in { + panic!() + } + let substream = self.inner.substreams.remove(&id.0).unwrap(); - // TODO: check whether substream is dead using the same criteria as in dead_substreams() if substream.inbound { self.inner.num_inbound -= 1; @@ -652,6 +668,7 @@ impl Yamux { state: SubstreamState::Healthy { remote_write_closed: remote_write_closed @ false, + local_write_close, .. }, .. @@ -659,6 +676,11 @@ impl Yamux { { *remote_write_closed = true; + if matches!(*local_write_close, SubstreamStateLocalWrite::FinQueued) { + let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); + debug_assert!(_was_inserted); + } + return Ok(IncomingDataOutcome { yamux: self, bytes_read: total_read, @@ -835,6 +857,10 @@ impl Yamux { reset_substreams.push(SubstreamId(*substream_id)); + let _was_inserted = + self.inner.dead_substreams.insert(*substream_id); + debug_assert!(_was_inserted); + // We might be currently writing a frame of data of the substream // being reset. If that happens, we need to update some internal // state regarding this frame of data. @@ -906,6 +932,9 @@ impl Yamux { // always keep traces of old substreams, we have no way to know whether // this is the case or not. if let Some(s) = self.inner.substreams.get_mut(&stream_id) { + let _was_inserted = self.inner.dead_substreams.insert(stream_id); + debug_assert!(_was_inserted); + // We might be currently writing a frame of data of the substream // being reset. If that happens, we need to update some internal // state regarding this frame of data. 
@@ -1208,6 +1237,7 @@ impl Yamux { }, outgoing: &mut self.inner.outgoing, rsts_to_send: &mut self.inner.rsts_to_send, + dead_substreams: &mut self.inner.dead_substreams, } } _ => panic!(), @@ -1497,6 +1527,7 @@ pub struct SubstreamMut<'a, T> { substream: OccupiedEntry<'a, NonZeroU32, Substream, SipHasherBuild>, outgoing: &'a mut Outgoing, rsts_to_send: &'a mut VecDeque, + dead_substreams: &'a mut hashbrown::HashSet, } impl<'a, T> SubstreamMut<'a, T> { @@ -1646,6 +1677,10 @@ impl<'a, T> SubstreamMut<'a, T> { if let SubstreamState::Healthy { .. } = self.substream.get().state { self.rsts_to_send.push_back(substream_id.0); } + // TODO: else { panic!() } ?! + + let _was_inserted = self.dead_substreams.insert(substream_id.0); + debug_assert!(_was_inserted); // We might be currently writing a frame of data of the substream being reset. // If that happens, we need to update some internal state regarding this frame of data. @@ -1913,6 +1948,7 @@ impl<'a, T> ExtractOut<'a, T> { { if let SubstreamState::Healthy { first_message_queued, + remote_write_closed, allowed_window, local_write_close: local_write, write_buffers, @@ -1932,6 +1968,10 @@ impl<'a, T> ExtractOut<'a, T> { && len_out_usize == pending_len; if fin_flag { *local_write = SubstreamStateLocalWrite::FinQueued; + if *remote_write_closed { + let _was_inserted = self.yamux.inner.dead_substreams.insert(id); + debug_assert!(!_was_inserted); + } } self.yamux .queue_data_frame_header(syn_ack_flag, fin_flag, id, len_out); From 18a3a3aa2c607e7ea983ac6fe7fa3f56671bde68 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 15:57:44 +0200 Subject: [PATCH 05/74] Fix missing `Box` import --- lib/src/libp2p/connection/yamux.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 918324a107..cc17991c0f 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -52,7 +52,7 @@ use 
crate::util::SipHasherBuild; -use alloc::{collections::VecDeque, vec::Vec}; +use alloc::{boxed::Box, collections::VecDeque, vec::Vec}; use core::{ cmp, fmt, mem, num::{NonZeroU32, NonZeroUsize}, From 7eb874d54a9b1bcbc9e0357c22970bb41f7f6fd2 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 15:58:23 +0200 Subject: [PATCH 06/74] Remove TODO --- lib/src/libp2p/connection/yamux.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index cc17991c0f..66e3868048 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -555,7 +555,6 @@ impl Yamux { pub fn dead_substreams( &'_ self, ) -> impl Iterator + '_ { - // TODO: O(n) self.inner .dead_substreams .iter() From 04d6666287507db8429c7dec44401513c584bc42 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 16:03:45 +0200 Subject: [PATCH 07/74] Use a `VecDeque` for `write_buffers` --- lib/src/libp2p/connection/yamux.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 66e3868048..b49f50a242 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -163,9 +163,9 @@ enum SubstreamState { /// True if the writing side of the remote node is closed for this substream. remote_write_closed: bool, /// Buffer of buffers to be written out to the socket. - // TODO: is it a good idea to have an unbounded Vec? + // TODO: is it a good idea to have an unbounded VecDeque? // TODO: call shrink_to_fit from time to time? - write_buffers: Vec>, + write_buffers: VecDeque>, /// Number of bytes in `self.write_buffers[0]` has have already been written out to the /// socket. first_write_buffer_offset: usize, @@ -277,7 +277,7 @@ enum OutgoingSubstreamData { /// Data is coming from a substream in a reset state. 
Obsolete { /// Buffer of buffers to be written out to the socket. - write_buffers: Vec>, + write_buffers: VecDeque>, /// Number of bytes in `self.inner.write_buffers[0]` has have already been written out to /// the socket. @@ -419,7 +419,7 @@ impl Yamux { allowed_window: DEFAULT_FRAME_SIZE, local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: false, - write_buffers: Vec::with_capacity(16), + write_buffers: VecDeque::with_capacity(16), first_write_buffer_offset: 0, }, inbound: false, @@ -1208,7 +1208,7 @@ impl Yamux { allowed_window: DEFAULT_FRAME_SIZE + u64::from(extra_window), local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: data_frame_size == 0 && fin, - write_buffers: Vec::new(), + write_buffers: VecDeque::new(), first_write_buffer_offset: 0, }, inbound: true, @@ -1570,7 +1570,7 @@ impl<'a, T> SubstreamMut<'a, T> { debug_assert!(!write_buffers.is_empty() || *first_write_buffer_offset == 0); if matches!(local_write, SubstreamStateLocalWrite::Open) { - write_buffers.push(data); + write_buffers.push_back(data); } } } @@ -1795,8 +1795,10 @@ impl<'a, T> ExtractOut<'a, T> { let out = if first_buf_avail <= remain.get() && first_buf_avail <= self.size_bytes { - let out = - VecWithOffset(write_buffers.remove(0), *first_write_buffer_offset); + let out = VecWithOffset( + write_buffers.pop_front().unwrap(), + *first_write_buffer_offset, + ); self.size_bytes -= first_buf_avail; *first_write_buffer_offset = 0; match NonZeroUsize::new(remain.get() - first_buf_avail) { From ad92626d13852c286a61e942b2d7ff4b2867df4c Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 16:14:13 +0200 Subject: [PATCH 08/74] Add TODO --- lib/src/libp2p/connection/yamux.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index b49f50a242..51f81a01dc 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -165,6 +165,7 @@ enum 
SubstreamState { /// Buffer of buffers to be written out to the socket. // TODO: is it a good idea to have an unbounded VecDeque? // TODO: call shrink_to_fit from time to time? + // TODO: instead of storing `Vec`s, consider storing a generic `B` and let the user manually write a `B` to the output buffer write_buffers: VecDeque>, /// Number of bytes in `self.write_buffers[0]` has have already been written out to the /// socket. From 415c0aadba5e662b442ef871134504771e5bddf6 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 16:42:20 +0200 Subject: [PATCH 09/74] Remove automatic window size increase on incoming data --- .../connection/established/single_stream.rs | 8 +++++ lib/src/libp2p/connection/yamux.rs | 36 +++++++++---------- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 897b1e9f25..a2b68a3666 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -256,6 +256,14 @@ where // Discard the data from the decrypted data buffer. self.encryption.consume_inbound_data(num_read); + // Give the possibility for the remote to send more data. + // TODO: only do that for notification substreams? because for requests we already set the value to the maximum when the substream is created + self.inner + .yamux + .substream_by_id_mut(substream_id) + .unwrap() + .add_remote_window(u64::try_from(num_read).unwrap()); + if let Some(event) = event { return Ok((self, Some(event))); } else if num_read == 0 { diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 51f81a01dc..bd542a062d 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1077,7 +1077,6 @@ impl Yamux { SubstreamState::Healthy { remote_write_closed, remote_allowed_window, - remote_window_pending_increase, .. }, .. 
@@ -1093,9 +1092,6 @@ impl Yamux { *remote_allowed_window = remote_allowed_window .checked_sub(u64::from(length)) .ok_or(Error::CreditsExceeded)?; - - // TODO: make this behavior tweakable by the user! - *remote_window_pending_increase += 256 * 1024; } self.inner.incoming = Incoming::DataFrame { @@ -1577,22 +1573,22 @@ impl<'a, T> SubstreamMut<'a, T> { } } - /// Allow the remote to send up to `bytes` bytes at once in the next packet. - /// - /// This method sets the number of allowed bytes to at least this value. In other words, - /// if this method was to be twice with the same parameter, the second call would have no - /// effect. - /// - /// # Context - /// - /// In order to properly handle back-pressure, the Yamux protocol only allows the remote to - /// send a certain number of bytes before the local node grants the authorization to send more - /// data. - /// This method grants the authorization to the remote to send up to `bytes` bytes. - /// - /// Call this when you expect a large payload with the maximum size this payload is allowed - /// to be. - /// + /// Adds `bytes` to the number of bytes the remote is allowed to send at once in the next + /// packet. + // TODO: properly define behavior in case of overflow? + pub fn add_remote_window(&mut self, bytes: u64) { + if let SubstreamState::Healthy { + remote_window_pending_increase, + .. + } = &mut self.substream.get_mut().state + { + *remote_window_pending_increase = remote_window_pending_increase.saturating_add(bytes); + } + } + + /// Similar to [`SubstreamMut::add_remote_window`], but sets the number of allowed bytes to + /// be at least this value. In other words, if this method was to be twice with the same + /// parameter, the second call would have no effect. 
pub fn reserve_window(&mut self, bytes: u64) { if let SubstreamState::Healthy { remote_window_pending_increase, From 8832e623607b27f2f2982dbfc3b32eaeea432d18 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 17:35:13 +0200 Subject: [PATCH 10/74] Remove SubstreamRef and SubstreamMut in favor of functions on Yamux --- .../connection/established/single_stream.rs | 117 ++-- lib/src/libp2p/connection/yamux.rs | 577 ++++++++---------- 2 files changed, 316 insertions(+), 378 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index a2b68a3666..08a42026c7 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -185,17 +185,14 @@ where // It might be that the remote has reset the ping substream, in which case the out ping // substream no longer exists and we immediately consider the ping as failed. - if let Some(substream) = self - .inner - .yamux - .substream_by_id_mut(self.inner.outgoing_pings) - { + if self.inner.yamux.has_substream(self.inner.outgoing_pings) { let payload = self .inner .ping_payload_randomness .sample(rand::distributions::Standard); - substream - .into_user_data() + self.inner + .yamux + .user_data_mut(self.inner.outgoing_pings) .as_mut() .unwrap() .queue_ping(&payload, read_write.now.clone() + self.inner.ping_timeout); @@ -236,7 +233,7 @@ where // but this time update the state machine specific to that substream. if let Some((substream_id, bytes_remaining)) = self.inner.current_data_frame { // It might be that the substream has been closed in `process_substream`. - if self.inner.yamux.substream_by_id_mut(substream_id).is_none() { + if !self.inner.yamux.has_substream(substream_id) { self.encryption.consume_inbound_data(bytes_remaining.get()); self.inner.current_data_frame = None; continue; @@ -260,9 +257,7 @@ where // TODO: only do that for notification substreams? 
because for requests we already set the value to the maximum when the substream is created self.inner .yamux - .substream_by_id_mut(substream_id) - .unwrap() - .add_remote_window(u64::try_from(num_read).unwrap()); + .add_remote_window(substream_id, u64::try_from(num_read).unwrap()); if let Some(event) = event { return Ok((self, Some(event))); @@ -438,12 +433,8 @@ where // Mutable reference to the substream state machine within the yamux // state machine. - let state_machine_refmut = self - .inner - .yamux - .substream_by_id_mut(dead_substream_id) - .unwrap() - .into_user_data(); + let state_machine_refmut = + self.inner.yamux.user_data_mut(dead_substream_id); // Extract the substream state machine, maybe putting it back later. let state_machine_extracted = match state_machine_refmut.take() { @@ -574,15 +565,13 @@ where let mut total_read = 0; loop { - let mut substream = inner.yamux.substream_by_id_mut(substream_id).unwrap(); - - let state_machine = match substream.user_data_mut().take() { + let state_machine = match inner.yamux.user_data_mut(substream_id).take() { Some(s) => s, None => break (total_read, None), }; - let read_is_closed = !substream.can_receive(); - let write_is_closed = !substream.can_send(); + let read_is_closed = !inner.yamux.can_receive(substream_id); + let write_is_closed = !inner.yamux.can_send(substream_id); let mut substream_read_write = ReadWrite { now: outer_read_write.now.clone(), @@ -613,23 +602,22 @@ where let written_bytes = substream_read_write.written_bytes; if written_bytes != 0 { debug_assert!(!write_is_closed); - substream.write(inner.intermediary_buffer[..written_bytes].to_vec()); + inner.yamux.write( + substream_id, + inner.intermediary_buffer[..written_bytes].to_vec(), + ); } if !write_is_closed && closed_after { debug_assert_eq!(written_bytes, 0); - substream.close(); + inner.yamux.close(substream_id); } match substream_update { - Some(s) => *substream.user_data_mut() = Some(s), + Some(s) => 
*inner.yamux.user_data_mut(substream_id) = Some(s), None => { if !closed_after || !read_is_closed { // TODO: what we do here is definitely correct, but the docs of `reset()` seem sketchy, investigate - inner - .yamux - .substream_by_id_mut(substream_id) - .unwrap() - .reset(); + inner.yamux.reset(substream_id); } } }; @@ -637,13 +625,7 @@ where let event_to_yield = match event { None => None, Some(substream::Event::InboundNegotiated(protocol)) => { - let substream = inner - .yamux - .substream_by_id_mut(substream_id) - .unwrap() - .into_user_data() - .as_mut() - .unwrap(); + let substream = inner.yamux.user_data_mut(substream_id).as_mut().unwrap(); if protocol == inner.ping_protocol { substream.set_inbound_ty(substream::InboundTy::Ping); @@ -818,7 +800,7 @@ where } }; - let mut substream = + let substream_id = self.inner .yamux .open_substream(Some(substream::Substream::request_out( @@ -834,13 +816,14 @@ where ))); // TODO: we add some bytes due to the length prefix, this is a bit hacky as we should ask this information from the substream - substream.reserve_window( + self.inner.yamux.reserve_window( + substream_id, u64::try_from(self.inner.request_protocols[protocol_index].max_response_size) .unwrap_or(u64::max_value()) .saturating_add(64), ); - Ok(SubstreamId(SubstreamIdInner::SingleStream(substream.id()))) + Ok(SubstreamId(SubstreamIdInner::SingleStream(substream_id))) } /// Returns the user data associated to a notifications substream. @@ -855,10 +838,13 @@ where _ => return None, }; + if !self.inner.yamux.has_substream(id) { + return None; + } + self.inner .yamux - .substream_by_id_mut(id)? - .into_user_data() + .user_data_mut(id) .as_mut() .unwrap() .notifications_substream_user_data_mut() @@ -908,7 +894,7 @@ where user_data, ))); - SubstreamId(SubstreamIdInner::SingleStream(substream.id())) + SubstreamId(SubstreamIdInner::SingleStream(substream)) } /// Accepts an inbound notifications protocol. 
Must be called in response to a @@ -933,9 +919,7 @@ where // TODO: self.inner.notifications_protocols[protocol_index].max_notification_size; self.inner .yamux - .substream_by_id_mut(substream_id) - .unwrap() - .into_user_data() + .user_data_mut(substream_id) .as_mut() .unwrap() .accept_in_notifications_substream(handshake, max_notification_size, user_data); @@ -956,9 +940,7 @@ where self.inner .yamux - .substream_by_id_mut(substream_id) - .unwrap() - .into_user_data() + .user_data_mut(substream_id) .as_mut() .unwrap() .reject_in_notifications_substream(); @@ -993,9 +975,7 @@ where self.inner .yamux - .substream_by_id_mut(substream_id) - .unwrap() - .into_user_data() + .user_data_mut(substream_id) .as_mut() .unwrap() .write_notification_unbounded(notification); @@ -1016,10 +996,11 @@ where _ => panic!(), }; - let substream = self.inner.yamux.substream_by_id(substream_id).unwrap(); - let already_queued = substream.queued_bytes(); - let from_substream = substream - .into_user_data() + let already_queued = self.inner.yamux.queued_bytes(substream_id); + let from_substream = self + .inner + .yamux + .user_data(substream_id) .as_ref() .unwrap() .notification_substream_queued_bytes(); @@ -1044,11 +1025,13 @@ where _ => panic!(), }; + if !self.inner.yamux.has_substream(substream_id) { + panic!() + } + self.inner .yamux - .substream_by_id_mut(substream_id) - .unwrap() - .into_user_data() + .user_data_mut(substream_id) .as_mut() .unwrap() .close_notifications_substream(); @@ -1070,11 +1053,13 @@ where _ => return Err(RespondInRequestError::SubstreamClosed), }; + if !self.inner.yamux.has_substream(substream_id) { + return Err(RespondInRequestError::SubstreamClosed); + } + self.inner .yamux - .substream_by_id_mut(substream_id) - .ok_or(RespondInRequestError::SubstreamClosed)? 
- .into_user_data() + .user_data_mut(substream_id) .as_mut() .unwrap() .respond_in_request(response) @@ -1137,11 +1122,9 @@ impl ConnectionPrototype { randomness_seed: randomness.sample(rand::distributions::Standard), }); - let outgoing_pings = yamux - .open_substream(Some(substream::Substream::ping_out( - config.ping_protocol.clone(), - ))) - .id(); + let outgoing_pings = yamux.open_substream(Some(substream::Substream::ping_out( + config.ping_protocol.clone(), + ))); SingleStream { encryption: self.encryption, diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index bd542a062d..f1e686636d 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -57,7 +57,7 @@ use core::{ cmp, fmt, mem, num::{NonZeroU32, NonZeroUsize}, }; -use hashbrown::hash_map::{Entry, OccupiedEntry}; +use hashbrown::hash_map::Entry; use rand::Rng as _; use rand_chacha::{rand_core::SeedableRng as _, ChaCha20Rng}; @@ -367,7 +367,7 @@ impl Yamux { /// Panics if a [`IncomingDataDetail::GoAway`] event has been generated. This can also be /// checked by calling [`Yamux::received_goaway`]. /// - pub fn open_substream(&mut self, user_data: T) -> SubstreamMut { + pub fn open_substream(&mut self, user_data: T) -> SubstreamId { // It is forbidden to open new substreams if a `GoAway` frame has been received. assert!(self.inner.received_goaway.is_none()); @@ -427,15 +427,7 @@ impl Yamux { user_data, }); - match self.inner.substreams.entry(substream_id.0) { - Entry::Occupied(e) => SubstreamMut { - substream: e, - outgoing: &mut self.inner.outgoing, - rsts_to_send: &mut self.inner.rsts_to_send, - dead_substreams: &mut self.inner.dead_substreams, - }, - _ => unreachable!(), - } + substream_id } /// Returns `Some` if a [`IncomingDataDetail::GoAway`] event has been generated in the past, @@ -462,27 +454,269 @@ impl Yamux { .map(|(id, s)| (SubstreamId(*id), &mut s.user_data)) } - /// Returns a reference to a substream by its ID. 
Returns `None` if no substream with this ID - /// is open. - pub fn substream_by_id(&self, id: SubstreamId) -> Option> { - Some(SubstreamRef { - id, - substream: self.inner.substreams.get(&id.0)?, - }) + /// Returns `true` if the given [`SubstreamId`] exists. + /// + /// Also returns `true` if the substream is in a dead state. + pub fn has_substream(&self, substream_id: SubstreamId) -> bool { + self.inner.substreams.contains_key(&substream_id.0) } - /// Returns a reference to a substream by its ID. Returns `None` if no substream with this ID - /// is open. - pub fn substream_by_id_mut(&mut self, id: SubstreamId) -> Option> { - if let Entry::Occupied(e) = self.inner.substreams.entry(id.0) { - Some(SubstreamMut { - substream: e, - outgoing: &mut self.inner.outgoing, - rsts_to_send: &mut self.inner.rsts_to_send, - dead_substreams: &mut self.inner.dead_substreams, - }) - } else { - None + /// Returns the user data associated to a substream. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// + pub fn user_data(&self, substream_id: SubstreamId) -> &T { + &self + .inner + .substreams + .get(&substream_id.0) + .unwrap() + .user_data + } + + /// Returns the user data associated to a substream. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// + pub fn user_data_mut(&mut self, substream_id: SubstreamId) -> &mut T { + &mut self + .inner + .substreams + .get_mut(&substream_id.0) + .unwrap_or_else(|| panic!()) + .user_data + } + + /// Appends data to the buffer of data to send out on this substream. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// Panics if [`Yamux::close`] has already been called on this substream. 
+ /// + // TODO: doc obsolete + pub fn write(&mut self, substream_id: SubstreamId, data: Vec) { + let substream = self + .inner + .substreams + .get_mut(&substream_id.0) + .unwrap_or_else(|| panic!()); + match &mut substream.state { + SubstreamState::Reset => {} + SubstreamState::Healthy { + local_write_close: local_write, + write_buffers, + first_write_buffer_offset, + .. + } => { + debug_assert!(!write_buffers.is_empty() || *first_write_buffer_offset == 0); + + if matches!(local_write, SubstreamStateLocalWrite::Open) { + write_buffers.push_back(data); + } + } + } + } + + /// Adds `bytes` to the number of bytes the remote is allowed to send at once in the next + /// packet. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// + // TODO: properly define behavior in case of overflow? + pub fn add_remote_window(&mut self, substream_id: SubstreamId, bytes: u64) { + if let SubstreamState::Healthy { + remote_window_pending_increase, + .. + } = &mut self + .inner + .substreams + .get_mut(&substream_id.0) + .unwrap_or_else(|| panic!()) + .state + { + *remote_window_pending_increase = remote_window_pending_increase.saturating_add(bytes); + } + } + + /// Similar to [`SubstreamMut::add_remote_window`], but sets the number of allowed bytes to + /// be at least this value. In other words, if this method was to be twice with the same + /// parameter, the second call would have no effect. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// + pub fn reserve_window(&mut self, substream_id: SubstreamId, bytes: u64) { + if let SubstreamState::Healthy { + remote_window_pending_increase, + .. + } = &mut self + .inner + .substreams + .get_mut(&substream_id.0) + .unwrap_or_else(|| panic!()) + .state + { + *remote_window_pending_increase = cmp::max(*remote_window_pending_increase, bytes); + } + } + + /// Returns the number of bytes queued for writing on this substream. + /// + /// Returns 0 if the substream is in a reset state. 
+ /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// + pub fn queued_bytes(&self, substream_id: SubstreamId) -> usize { + match &self + .inner + .substreams + .get(&substream_id.0) + .unwrap_or_else(|| panic!()) + .state + { + SubstreamState::Healthy { + write_buffers, + first_write_buffer_offset, + .. + } => write_buffers.iter().fold(0, |n, buf| n + buf.len()) - first_write_buffer_offset, + SubstreamState::Reset => 0, + } + } + + /// Returns `false` if the remote has closed their writing side of this substream, or if + /// [`SubstreamMut::reset`] has been called on this substream, or if the substream has been + /// reset by the remote. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// + pub fn can_receive(&self, substream_id: SubstreamId) -> bool { + matches!(self.inner.substreams.get(&substream_id.0).unwrap_or_else(|| panic!()).state, + SubstreamState::Healthy { + remote_write_closed, + .. + } if !remote_write_closed) + } + + /// Returns `false` if [`SubstreamMut::close`] or [`SubstreamMut::reset`] has been called on + /// this substream, or if the remote has . + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// + pub fn can_send(&self, substream_id: SubstreamId) -> bool { + matches!( + self.inner + .substreams + .get(&substream_id.0) + .unwrap_or_else(|| panic!()) + .state, + SubstreamState::Healthy { + local_write_close: SubstreamStateLocalWrite::Open, + .. + } + ) + } + + /// Marks the substream as closed. It is no longer possible to write data on it. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// Panics if the local writing side is already closed, which can happen if + /// [`SubstreamMut::close`] has already been called on this substream or if the remote has + /// reset the substream in the past. 
+ /// + // TODO: doc obsolete + pub fn close(&mut self, substream_id: SubstreamId) { + let substream = self + .inner + .substreams + .get_mut(&substream_id.0) + .unwrap_or_else(|| panic!()); + if let SubstreamState::Healthy { + local_write_close: ref mut local_write @ SubstreamStateLocalWrite::Open, + .. + } = substream.state + { + *local_write = SubstreamStateLocalWrite::FinDesired; + } + } + + /// Abruptly shuts down the substream. Sends a frame with the `RST` flag to the remote. + /// + /// Use this method when a protocol error happens on a substream. + /// + /// # Panic + /// + /// Panics if the [`SubstreamId`] is invalid. + /// Panics if the local writing side is already closed, which can happen if + /// [`SubstreamMut::close`] has already been called on this substream or if the remote has + /// reset the substream in the past. + /// + pub fn reset(&mut self, substream_id: SubstreamId) { + // Add an entry to the list of RST headers to send to the remote. + if let SubstreamState::Healthy { .. } = self + .inner + .substreams + .get(&substream_id.0) + .unwrap_or_else(|| panic!()) + .state + { + self.inner.rsts_to_send.push_back(substream_id.0); + } + // TODO: else { panic!() } ?! + + let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); + debug_assert!(_was_inserted); + + // We might be currently writing a frame of data of the substream being reset. + // If that happens, we need to update some internal state regarding this frame of data. + match ( + &mut self.inner.outgoing, + mem::replace( + &mut self + .inner + .substreams + .get_mut(&substream_id.0) + .unwrap_or_else(|| panic!()) + .state, + SubstreamState::Reset, + ), + ) { + ( + Outgoing::Header { + substream_data_frame: Some((data @ OutgoingSubstreamData::Healthy(_), _)), + .. + } + | Outgoing::SubstreamData { + data: data @ OutgoingSubstreamData::Healthy(_), + .. + }, + SubstreamState::Healthy { + write_buffers, + first_write_buffer_offset, + .. 
+ }, + ) if *data == OutgoingSubstreamData::Healthy(substream_id) => { + *data = OutgoingSubstreamData::Obsolete { + write_buffers, + first_write_buffer_offset, + }; + } + _ => {} } } @@ -1186,7 +1420,7 @@ impl Yamux { /// /// Panics if no incoming substream is currently pending. /// - pub fn accept_pending_substream(&mut self, user_data: T) -> SubstreamMut { + pub fn accept_pending_substream(&mut self, user_data: T) -> SubstreamId { match self.inner.incoming { Incoming::PendingIncomingSubstream { substream_id, @@ -1226,15 +1460,7 @@ impl Yamux { } }; - SubstreamMut { - substream: match self.inner.substreams.entry(substream_id.0) { - Entry::Occupied(e) => e, - _ => unreachable!(), - }, - outgoing: &mut self.inner.outgoing, - rsts_to_send: &mut self.inner.rsts_to_send, - dead_substreams: &mut self.inner.dead_substreams, - } + substream_id } _ => panic!(), } @@ -1447,277 +1673,6 @@ where } } -/// Reference to a substream within the [`Yamux`]. -pub struct SubstreamRef<'a, T> { - id: SubstreamId, - substream: &'a Substream, -} - -impl<'a, T> SubstreamRef<'a, T> { - /// Identifier of the substream. - pub fn id(&self) -> SubstreamId { - self.id - } - - /// Returns the user data associated to this substream. - pub fn user_data(&self) -> &T { - &self.substream.user_data - } - - /// Returns the user data associated to this substream. - pub fn into_user_data(self) -> &'a T { - &self.substream.user_data - } - - /// Returns the number of bytes queued for writing on this substream. - /// - /// Returns 0 if the substream is in a reset state. - pub fn queued_bytes(&self) -> usize { - match &self.substream.state { - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. 
- } => write_buffers.iter().fold(0, |n, buf| n + buf.len()) - first_write_buffer_offset, - SubstreamState::Reset => 0, - } - } - - /// Returns `false` if the remote has closed their writing side of this substream, or if - /// [`SubstreamMut::reset`] has been called on this substream, or if the substream has been - /// reset by the remote. - pub fn can_receive(&self) -> bool { - match self.substream.state { - SubstreamState::Healthy { - remote_write_closed, - .. - } => !remote_write_closed, - SubstreamState::Reset => false, - } - } - - /// Returns `false` if [`SubstreamMut::close`] or [`SubstreamMut::reset`] has been called on - /// this substream, or if the remote has reset it. - pub fn can_send(&self) -> bool { - matches!( - self.substream.state, - SubstreamState::Healthy { - local_write_close: SubstreamStateLocalWrite::Open, - .. - } - ) - } -} - -impl<'a, T> fmt::Debug for SubstreamRef<'a, T> -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Substream").field(self.user_data()).finish() - } -} - -/// Reference to a substream within the [`Yamux`]. -pub struct SubstreamMut<'a, T> { - substream: OccupiedEntry<'a, NonZeroU32, Substream, SipHasherBuild>, - outgoing: &'a mut Outgoing, - rsts_to_send: &'a mut VecDeque, - dead_substreams: &'a mut hashbrown::HashSet, -} - -impl<'a, T> SubstreamMut<'a, T> { - /// Identifier of the substream. - pub fn id(&self) -> SubstreamId { - SubstreamId(*self.substream.key()) - } - - /// Returns the user data associated to this substream. - pub fn user_data(&self) -> &T { - &self.substream.get().user_data - } - - /// Returns the user data associated to this substream. - pub fn user_data_mut(&mut self) -> &mut T { - &mut self.substream.get_mut().user_data - } - - /// Returns the user data associated to this substream. - pub fn into_user_data(self) -> &'a mut T { - &mut self.substream.into_mut().user_data - } - - /// Appends data to the buffer of data to send out on this substream. 
- /// - /// # Panic - /// - /// Panics if [`SubstreamMut::close`] has already been called on this substream. - /// - // TODO: doc obsolete - pub fn write(&mut self, data: Vec) { - let substream = self.substream.get_mut(); - match &mut substream.state { - SubstreamState::Reset => {} - SubstreamState::Healthy { - local_write_close: local_write, - write_buffers, - first_write_buffer_offset, - .. - } => { - debug_assert!(!write_buffers.is_empty() || *first_write_buffer_offset == 0); - - if matches!(local_write, SubstreamStateLocalWrite::Open) { - write_buffers.push_back(data); - } - } - } - } - - /// Adds `bytes` to the number of bytes the remote is allowed to send at once in the next - /// packet. - // TODO: properly define behavior in case of overflow? - pub fn add_remote_window(&mut self, bytes: u64) { - if let SubstreamState::Healthy { - remote_window_pending_increase, - .. - } = &mut self.substream.get_mut().state - { - *remote_window_pending_increase = remote_window_pending_increase.saturating_add(bytes); - } - } - - /// Similar to [`SubstreamMut::add_remote_window`], but sets the number of allowed bytes to - /// be at least this value. In other words, if this method was to be twice with the same - /// parameter, the second call would have no effect. - pub fn reserve_window(&mut self, bytes: u64) { - if let SubstreamState::Healthy { - remote_window_pending_increase, - .. - } = &mut self.substream.get_mut().state - { - *remote_window_pending_increase = cmp::max(*remote_window_pending_increase, bytes); - } - } - - /// Returns the number of bytes queued for writing on this substream. - /// - /// Returns 0 if the substream is in a reset state. - pub fn queued_bytes(&self) -> usize { - match &self.substream.get().state { - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. 
- } => write_buffers.iter().fold(0, |n, buf| n + buf.len()) - first_write_buffer_offset, - SubstreamState::Reset => 0, - } - } - - /// Returns `false` if the remote has closed their writing side of this substream, or if - /// [`SubstreamMut::reset`] has been called on this substream, or if the substream has been - /// reset by the remote. - pub fn can_receive(&self) -> bool { - matches!(self.substream.get().state, - SubstreamState::Healthy { - remote_write_closed, - .. - } if !remote_write_closed) - } - - /// Returns `false` if [`SubstreamMut::close`] or [`SubstreamMut::reset`] has been called on - /// this substream, or if the remote has . - pub fn can_send(&self) -> bool { - matches!( - self.substream.get().state, - SubstreamState::Healthy { - local_write_close: SubstreamStateLocalWrite::Open, - .. - } - ) - } - - /// Marks the substream as closed. It is no longer possible to write data on it. - /// - /// # Panic - /// - /// Panics if the local writing side is already closed, which can happen if - /// [`SubstreamMut::close`] has already been called on this substream or if the remote has - /// reset the substream in the past. - /// - // TODO: doc obsolete - pub fn close(&mut self) { - let substream = self.substream.get_mut(); - if let SubstreamState::Healthy { - local_write_close: ref mut local_write @ SubstreamStateLocalWrite::Open, - .. - } = substream.state - { - *local_write = SubstreamStateLocalWrite::FinDesired; - } - } - - /// Abruptly shuts down the substream. Sends a frame with the `RST` flag to the remote. - /// - /// Use this method when a protocol error happens on a substream. - /// - /// # Panic - /// - /// Panics if the local writing side is already closed, which can happen if - /// [`SubstreamMut::close`] has already been called on this substream or if the remote has - /// reset the substream in the past. 
- /// - pub fn reset(&mut self) { - let substream_id = SubstreamId(*self.substream.key()); - - // Add an entry to the list of RST headers to send to the remote. - if let SubstreamState::Healthy { .. } = self.substream.get().state { - self.rsts_to_send.push_back(substream_id.0); - } - // TODO: else { panic!() } ?! - - let _was_inserted = self.dead_substreams.insert(substream_id.0); - debug_assert!(_was_inserted); - - // We might be currently writing a frame of data of the substream being reset. - // If that happens, we need to update some internal state regarding this frame of data. - match ( - &mut self.outgoing, - mem::replace(&mut self.substream.get_mut().state, SubstreamState::Reset), - ) { - ( - Outgoing::Header { - substream_data_frame: Some((data @ OutgoingSubstreamData::Healthy(_), _)), - .. - } - | Outgoing::SubstreamData { - data: data @ OutgoingSubstreamData::Healthy(_), - .. - }, - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. - }, - ) if *data == OutgoingSubstreamData::Healthy(substream_id) => { - *data = OutgoingSubstreamData::Obsolete { - write_buffers, - first_write_buffer_offset, - }; - } - _ => {} - } - } -} - -impl<'a, T> fmt::Debug for SubstreamMut<'a, T> -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Substream").field(self.user_data()).finish() - } -} - pub struct ExtractOut<'a, T> { yamux: &'a mut Yamux, size_bytes: usize, From abfeb59344889f9f7895fe48a23a0c1861ac8a8b Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 17:36:14 +0200 Subject: [PATCH 11/74] Fix faulty debug_assert --- lib/src/libp2p/connection/yamux.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index f1e686636d..3a43147748 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1923,7 +1923,7 @@ impl<'a, T> ExtractOut<'a, T> { *local_write = 
SubstreamStateLocalWrite::FinQueued; if *remote_write_closed { let _was_inserted = self.yamux.inner.dead_substreams.insert(id); - debug_assert!(!_was_inserted); + debug_assert!(_was_inserted); } } self.yamux From e285c545d1a9d472c5ac00fc88a92f3ee3a7ccc9 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 31 Mar 2023 17:48:17 +0200 Subject: [PATCH 12/74] Some docfixes --- lib/src/libp2p/connection/yamux.rs | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 3a43147748..bf87ba0b33 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -37,9 +37,8 @@ //! //! The generic parameter of [`Yamux`] is an opaque "user data" associated to each substream. //! -//! When [`SubstreamMut::write`] is called, the buffer of data to send out is stored within the -//! [`Yamux`] object. This data will then be progressively returned by -//! [`Yamux::extract_out`]. +//! When [`Yamux::write`] is called, the buffer of data to send out is stored within the +//! [`Yamux`] object. This data will then be progressively returned by [`Yamux::extract_out`]. //! //! It is the responsibility of the user to enforce a bound to the amount of enqueued data, as //! the [`Yamux`] itself doesn't enforce any limit. Enforcing such a bound must be done based @@ -545,9 +544,9 @@ impl Yamux { } } - /// Similar to [`SubstreamMut::add_remote_window`], but sets the number of allowed bytes to - /// be at least this value. In other words, if this method was to be twice with the same - /// parameter, the second call would have no effect. + /// Similar to [`Yamux::add_remote_window`], but sets the number of allowed bytes to be at + /// least this value. In other words, if this method was to be twice with the same parameter, + /// the second call would have no effect. 
/// /// # Panic /// @@ -594,7 +593,7 @@ impl Yamux { } /// Returns `false` if the remote has closed their writing side of this substream, or if - /// [`SubstreamMut::reset`] has been called on this substream, or if the substream has been + /// [`Yamux::reset`] has been called on this substream, or if the substream has been /// reset by the remote. /// /// # Panic @@ -609,8 +608,8 @@ impl Yamux { } if !remote_write_closed) } - /// Returns `false` if [`SubstreamMut::close`] or [`SubstreamMut::reset`] has been called on - /// this substream, or if the remote has . + /// Returns `false` if [`Yamux::close`] or [`Yamux::reset`] has been called on this substream, + /// or if the remote has . /// /// # Panic /// @@ -635,9 +634,9 @@ impl Yamux { /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. - /// Panics if the local writing side is already closed, which can happen if - /// [`SubstreamMut::close`] has already been called on this substream or if the remote has - /// reset the substream in the past. + /// Panics if the local writing side is already closed, which can happen if [`Yamux::close`] + /// has already been called on this substream or if the remote has reset the substream in the + /// past. /// // TODO: doc obsolete pub fn close(&mut self, substream_id: SubstreamId) { @@ -662,9 +661,9 @@ impl Yamux { /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. - /// Panics if the local writing side is already closed, which can happen if - /// [`SubstreamMut::close`] has already been called on this substream or if the remote has - /// reset the substream in the past. + /// Panics if the local writing side is already closed, which can happen if [`Yamux::close`] + /// has already been called on this substream or if the remote has reset the substream in the + /// past. /// pub fn reset(&mut self, substream_id: SubstreamId) { // Add an entry to the list of RST headers to send to the remote. 
From bc4940813b3ec7fcfe1c051434bee6f1ca542ff1 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 1 Apr 2023 11:33:08 +0200 Subject: [PATCH 13/74] Fix insertion in dead_substreams at the wrong time --- lib/src/libp2p/connection/yamux.rs | 35 ++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index bf87ba0b33..58b915eb74 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -902,6 +902,8 @@ impl Yamux { SubstreamState::Healthy { remote_write_closed: remote_write_closed @ false, local_write_close, + write_buffers, + first_write_buffer_offset, .. }, .. @@ -909,7 +911,12 @@ impl Yamux { { *remote_write_closed = true; - if matches!(*local_write_close, SubstreamStateLocalWrite::FinQueued) { + if matches!(*local_write_close, SubstreamStateLocalWrite::FinQueued) + && (write_buffers.is_empty() // TODO: cumbersome + || (write_buffers.len() == 1 + && write_buffers[0].len() + <= *first_write_buffer_offset)) + { let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); debug_assert!(_was_inserted); } @@ -1722,7 +1729,7 @@ impl<'a, T> ExtractOut<'a, T> { remaining_bytes: ref mut remain, ref mut data, } => { - let (write_buffers, first_write_buffer_offset) = match data { + let (write_buffers, first_write_buffer_offset, substream_id) = match data { OutgoingSubstreamData::Healthy(id) => { let substream = self.yamux.inner.substreams.get_mut(&id.0).unwrap(); if let SubstreamState::Healthy { @@ -1731,7 +1738,7 @@ impl<'a, T> ExtractOut<'a, T> { .. 
} = &mut substream.state { - (write_buffers, first_write_buffer_offset) + (write_buffers, first_write_buffer_offset, Some(*id)) } else { unreachable!() } @@ -1739,7 +1746,7 @@ impl<'a, T> ExtractOut<'a, T> { OutgoingSubstreamData::Obsolete { ref mut write_buffers, ref mut first_write_buffer_offset, - } => (write_buffers, first_write_buffer_offset), + } => (write_buffers, first_write_buffer_offset, None), }; let first_buf_avail = write_buffers[0].len() - *first_write_buffer_offset; @@ -1752,10 +1759,25 @@ impl<'a, T> ExtractOut<'a, T> { ); self.size_bytes -= first_buf_avail; *first_write_buffer_offset = 0; + let write_buffers_empty = write_buffers.is_empty(); match NonZeroUsize::new(remain.get() - first_buf_avail) { Some(r) => *remain = r, None => self.yamux.inner.outgoing = Outgoing::Idle, }; + if write_buffers_empty { + if let Some(id) = substream_id { + if let SubstreamState::Healthy { + local_write_close: SubstreamStateLocalWrite::FinQueued, + remote_write_closed: true, + .. + } = self.yamux.inner.substreams.get(&id.0).unwrap().state + { + let _was_inserted = + self.yamux.inner.dead_substreams.insert(id.0); + debug_assert!(_was_inserted); + } + } + } either::Right(out) } else if remain.get() <= self.size_bytes { self.size_bytes -= remain.get(); @@ -1900,7 +1922,6 @@ impl<'a, T> ExtractOut<'a, T> { { if let SubstreamState::Healthy { first_message_queued, - remote_write_closed, allowed_window, local_write_close: local_write, write_buffers, @@ -1920,10 +1941,6 @@ impl<'a, T> ExtractOut<'a, T> { && len_out_usize == pending_len; if fin_flag { *local_write = SubstreamStateLocalWrite::FinQueued; - if *remote_write_closed { - let _was_inserted = self.yamux.inner.dead_substreams.insert(id); - debug_assert!(_was_inserted); - } } self.yamux .queue_data_frame_header(syn_ack_flag, fin_flag, id, len_out); From b685ee836fdadcd39d47f03afff6086fef63334e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 1 Apr 2023 11:40:37 +0200 Subject: [PATCH 14/74] Remove the 
`ExtractOut` intermediary object --- .../connection/established/single_stream.rs | 9 +- lib/src/libp2p/connection/yamux.rs | 555 +++++++++--------- 2 files changed, 268 insertions(+), 296 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 08a42026c7..c3c3903106 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -504,7 +504,7 @@ where // Calculate number of bytes that we can extract from yamux. This is similar but not // exactly the same as the size of the outgoing buffer, as noise adds some headers to // the data. - let unencrypted_bytes_to_extract = self + let mut unencrypted_bytes_to_extract = self .encryption .encrypt_size_conv(read_write.outgoing_buffer_available()); @@ -512,9 +512,10 @@ where // Extract outgoing data that is buffered within yamux. // TODO: don't allocate an intermediary buffer, but instead pass them directly to the encryption let mut buffers = Vec::with_capacity(32); - let mut extract_out = self.inner.yamux.extract_out(unencrypted_bytes_to_extract); - while let Some(buffer) = extract_out.extract_next() { - buffers.push(buffer.as_ref().to_vec()); // TODO: copy + while let Some(buffer) = self.inner.yamux.extract_next(unencrypted_bytes_to_extract) { + let buffer = buffer.as_ref(); + unencrypted_bytes_to_extract -= buffer.len(); + buffers.push(buffer.to_vec()); // TODO: copy } if !buffers.is_empty() { diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 58b915eb74..6623c8406c 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1389,27 +1389,277 @@ impl Yamux { }) } - /// Returns an object that provides an iterator to a list of buffers whose content must be - /// sent out on the socket. + /// Builds the next buffer to send out on the socket and returns it. 
/// - /// The buffers produced by the iterator will never yield more than `size_bytes` bytes of - /// data. The user is expected to pass an exact amount of bytes that the next layer is ready - /// to accept. - /// - /// After the [`ExtractOut`] has been destroyed, the Yamux state machine will automatically - /// consider that these `size_bytes` have been sent out, even if the iterator has been - /// destroyed before finishing. It is a logic error to `mem::forget` the [`ExtractOut`]. + /// The buffer will never be larger than `size_bytes` bytes. The user is expected to pass an + /// exact amount of bytes that the next layer is ready to accept. /// /// > **Note**: Most other objects in the networking code have a "`read_write`" method that /// > writes the outgoing data to a buffer. This is an idiomatic way to do things in /// > situations where the data is generated on the fly. In the context of Yamux, /// > however, this would be rather sub-optimal considering that buffers to send out /// > are already stored in their final form in the state machine. - pub fn extract_out(&mut self, size_bytes: usize) -> ExtractOut { - ExtractOut { - yamux: self, - size_bytes, + pub fn extract_next(&'_ mut self, size_bytes: usize) -> Option + '_> { + while size_bytes != 0 { + match self.inner.outgoing { + Outgoing::Header { + ref mut header, + ref mut substream_data_frame, + ref is_goaway, + } => { + // Finish writing the header. 
+ debug_assert!(!header.is_empty()); + if size_bytes >= header.len() { + let out = mem::take(header); + if *is_goaway { + debug_assert!(matches!( + self.inner.outgoing_goaway, + OutgoingGoAway::Queued + )); + self.inner.outgoing_goaway = OutgoingGoAway::Sent; + } + self.inner.outgoing = + if let Some((data, remaining_bytes)) = substream_data_frame.take() { + Outgoing::SubstreamData { + data, + remaining_bytes, + } + } else { + Outgoing::Idle + }; + return Some(either::Left(out)); + } else { + let to_add = header[..size_bytes].to_vec(); + for _ in 0..size_bytes { + header.remove(0); + } + return Some(either::Right(VecWithOffset(to_add, 0))); + } + } + + Outgoing::SubstreamData { + remaining_bytes: ref mut remain, + ref mut data, + } => { + let (write_buffers, first_write_buffer_offset, substream_id) = match data { + OutgoingSubstreamData::Healthy(id) => { + let substream = self.inner.substreams.get_mut(&id.0).unwrap(); + if let SubstreamState::Healthy { + ref mut write_buffers, + ref mut first_write_buffer_offset, + .. + } = &mut substream.state + { + (write_buffers, first_write_buffer_offset, Some(*id)) + } else { + unreachable!() + } + } + OutgoingSubstreamData::Obsolete { + ref mut write_buffers, + ref mut first_write_buffer_offset, + } => (write_buffers, first_write_buffer_offset, None), + }; + + let first_buf_avail = write_buffers[0].len() - *first_write_buffer_offset; + let out = if first_buf_avail <= remain.get() + && first_buf_avail <= size_bytes + { + let out = VecWithOffset( + write_buffers.pop_front().unwrap(), + *first_write_buffer_offset, + ); + *first_write_buffer_offset = 0; + let write_buffers_empty = write_buffers.is_empty(); + match NonZeroUsize::new(remain.get() - first_buf_avail) { + Some(r) => *remain = r, + None => self.inner.outgoing = Outgoing::Idle, + }; + if write_buffers_empty { + if let Some(id) = substream_id { + if let SubstreamState::Healthy { + local_write_close: SubstreamStateLocalWrite::FinQueued, + remote_write_closed: true, + .. 
+ } = self.inner.substreams.get(&id.0).unwrap().state + { + let _was_inserted = + self.inner.dead_substreams.insert(id.0); + debug_assert!(_was_inserted); + } + } + } + either::Right(out) + } else if remain.get() <= size_bytes { + let out = VecWithOffset( + write_buffers[0][*first_write_buffer_offset..][..remain.get()].to_vec(), + 0, + ); + *first_write_buffer_offset += remain.get(); + self.inner.outgoing = Outgoing::Idle; + either::Right(out) + } else { + let out = VecWithOffset( + write_buffers[0][*first_write_buffer_offset..][..size_bytes] + .to_vec(), + 0, + ); + *first_write_buffer_offset += size_bytes; + *remain = NonZeroUsize::new(remain.get() - size_bytes).unwrap(); + either::Right(out) + }; + + return Some(out); + } + + Outgoing::Idle => { + // Send a `GoAway` frame if demanded. + if let OutgoingGoAway::Required(code) = self.inner.outgoing_goaway { + let mut header = arrayvec::ArrayVec::new(); + header.push(0); + header.push(3); + header.try_extend_from_slice(&0u16.to_be_bytes()).unwrap(); + header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); + header + .try_extend_from_slice( + &match code { + GoAwayErrorCode::NormalTermination => 0u32, + GoAwayErrorCode::ProtocolError => 1u32, + GoAwayErrorCode::InternalError => 2u32, + } + .to_be_bytes(), + ) + .unwrap(); + debug_assert_eq!(header.len(), 12); + + self.inner.outgoing = Outgoing::Header { + header, + substream_data_frame: None, + is_goaway: true, + }; + self.inner.outgoing_goaway = OutgoingGoAway::Queued; + continue; + } + + // Send RST frames. 
+ if let Some(substream_id) = self.inner.rsts_to_send.pop_front() { + let mut header = arrayvec::ArrayVec::new(); + header.push(0); + header.push(1); + header.try_extend_from_slice(&8u16.to_be_bytes()).unwrap(); + header + .try_extend_from_slice(&substream_id.get().to_be_bytes()) + .unwrap(); + header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); + debug_assert_eq!(header.len(), 12); + + self.inner.outgoing = Outgoing::Header { + header, + substream_data_frame: None, + is_goaway: false, + }; + continue; + } + + // Send outgoing pings. + if self.inner.pings_to_send > 0 { + self.inner.pings_to_send -= 1; + let opaque_value: u32 = self.inner.randomness.gen(); + self.queue_ping_request_header(opaque_value); + self.inner.pings_waiting_reply.push_back(opaque_value); + continue; + } + + // Send window update frames. + // TODO: O(n) + if let Some((id, sub)) = self + .inner + .substreams + .iter_mut() + .find(|(_, s)| { + matches!(&s.state, + SubstreamState::Healthy { + remote_window_pending_increase, + .. + } if *remote_window_pending_increase != 0) + }) + .map(|(id, sub)| (*id, sub)) + { + if let SubstreamState::Healthy { + first_message_queued, + remote_window_pending_increase, + remote_allowed_window, + .. + } = &mut sub.state + { + let syn_ack_flag = !*first_message_queued; + *first_message_queued = true; + + let update = u32::try_from(*remote_window_pending_increase) + .unwrap_or(u32::max_value()); + *remote_window_pending_increase -= u64::from(update); + *remote_allowed_window += u64::from(update); + self.queue_window_size_frame_header(syn_ack_flag, id, update); + continue; + } else { + unreachable!() + } + } + + // Start writing more data from another substream. + // TODO: O(n) + // TODO: choose substreams in some sort of round-robin way + if let Some((id, sub)) = self + .inner + .substreams + .iter_mut() + .find(|(_, s)| match &s.state { + SubstreamState::Healthy { + write_buffers, + local_write_close: local_write, + .. 
+ } => { + !write_buffers.is_empty() + || matches!(local_write, SubstreamStateLocalWrite::FinDesired) + } + _ => false, + }) + .map(|(id, sub)| (*id, sub)) + { + if let SubstreamState::Healthy { + first_message_queued, + allowed_window, + local_write_close: local_write, + write_buffers, + .. + } = &mut sub.state + { + let pending_len = write_buffers.iter().fold(0, |l, b| l + b.len()); + let len_out = cmp::min( + u32::try_from(pending_len).unwrap_or(u32::max_value()), + u32::try_from(*allowed_window).unwrap_or(u32::max_value()), + ); + let len_out_usize = usize::try_from(len_out).unwrap(); + *allowed_window -= u64::from(len_out); + let syn_ack_flag = !*first_message_queued; + *first_message_queued = true; + let fin_flag = !matches!(local_write, SubstreamStateLocalWrite::Open) + && len_out_usize == pending_len; + if fin_flag { + *local_write = SubstreamStateLocalWrite::FinQueued; + } + self.queue_data_frame_header(syn_ack_flag, fin_flag, id, len_out); + } else { + unreachable!() + } + } else { + break; + } + } + } } + + None } /// Accepts an incoming substream. @@ -1679,285 +1929,6 @@ where } } -pub struct ExtractOut<'a, T> { - yamux: &'a mut Yamux, - size_bytes: usize, -} - -impl<'a, T> ExtractOut<'a, T> { - /// Builds the next buffer to send out and returns it. - pub fn extract_next(&'_ mut self) -> Option + '_> { - while self.size_bytes != 0 { - match self.yamux.inner.outgoing { - Outgoing::Header { - ref mut header, - ref mut substream_data_frame, - ref is_goaway, - } => { - // Finish writing the header. 
- debug_assert!(!header.is_empty()); - if self.size_bytes >= header.len() { - self.size_bytes -= header.len(); - let out = mem::take(header); - if *is_goaway { - debug_assert!(matches!( - self.yamux.inner.outgoing_goaway, - OutgoingGoAway::Queued - )); - self.yamux.inner.outgoing_goaway = OutgoingGoAway::Sent; - } - self.yamux.inner.outgoing = - if let Some((data, remaining_bytes)) = substream_data_frame.take() { - Outgoing::SubstreamData { - data, - remaining_bytes, - } - } else { - Outgoing::Idle - }; - return Some(either::Left(out)); - } else { - let to_add = header[..self.size_bytes].to_vec(); - for _ in 0..self.size_bytes { - header.remove(0); - } - return Some(either::Right(VecWithOffset(to_add, 0))); - } - } - - Outgoing::SubstreamData { - remaining_bytes: ref mut remain, - ref mut data, - } => { - let (write_buffers, first_write_buffer_offset, substream_id) = match data { - OutgoingSubstreamData::Healthy(id) => { - let substream = self.yamux.inner.substreams.get_mut(&id.0).unwrap(); - if let SubstreamState::Healthy { - ref mut write_buffers, - ref mut first_write_buffer_offset, - .. 
- } = &mut substream.state - { - (write_buffers, first_write_buffer_offset, Some(*id)) - } else { - unreachable!() - } - } - OutgoingSubstreamData::Obsolete { - ref mut write_buffers, - ref mut first_write_buffer_offset, - } => (write_buffers, first_write_buffer_offset, None), - }; - - let first_buf_avail = write_buffers[0].len() - *first_write_buffer_offset; - let out = if first_buf_avail <= remain.get() - && first_buf_avail <= self.size_bytes - { - let out = VecWithOffset( - write_buffers.pop_front().unwrap(), - *first_write_buffer_offset, - ); - self.size_bytes -= first_buf_avail; - *first_write_buffer_offset = 0; - let write_buffers_empty = write_buffers.is_empty(); - match NonZeroUsize::new(remain.get() - first_buf_avail) { - Some(r) => *remain = r, - None => self.yamux.inner.outgoing = Outgoing::Idle, - }; - if write_buffers_empty { - if let Some(id) = substream_id { - if let SubstreamState::Healthy { - local_write_close: SubstreamStateLocalWrite::FinQueued, - remote_write_closed: true, - .. - } = self.yamux.inner.substreams.get(&id.0).unwrap().state - { - let _was_inserted = - self.yamux.inner.dead_substreams.insert(id.0); - debug_assert!(_was_inserted); - } - } - } - either::Right(out) - } else if remain.get() <= self.size_bytes { - self.size_bytes -= remain.get(); - let out = VecWithOffset( - write_buffers[0][*first_write_buffer_offset..][..remain.get()].to_vec(), - 0, - ); - *first_write_buffer_offset += remain.get(); - self.yamux.inner.outgoing = Outgoing::Idle; - either::Right(out) - } else { - let out = VecWithOffset( - write_buffers[0][*first_write_buffer_offset..][..self.size_bytes] - .to_vec(), - 0, - ); - *first_write_buffer_offset += self.size_bytes; - *remain = NonZeroUsize::new(remain.get() - self.size_bytes).unwrap(); - self.size_bytes = 0; - either::Right(out) - }; - - return Some(out); - } - - Outgoing::Idle => { - // Send a `GoAway` frame if demanded. 
- if let OutgoingGoAway::Required(code) = self.yamux.inner.outgoing_goaway { - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(3); - header.try_extend_from_slice(&0u16.to_be_bytes()).unwrap(); - header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); - header - .try_extend_from_slice( - &match code { - GoAwayErrorCode::NormalTermination => 0u32, - GoAwayErrorCode::ProtocolError => 1u32, - GoAwayErrorCode::InternalError => 2u32, - } - .to_be_bytes(), - ) - .unwrap(); - debug_assert_eq!(header.len(), 12); - - self.yamux.inner.outgoing = Outgoing::Header { - header, - substream_data_frame: None, - is_goaway: true, - }; - self.yamux.inner.outgoing_goaway = OutgoingGoAway::Queued; - continue; - } - - // Send RST frames. - if let Some(substream_id) = self.yamux.inner.rsts_to_send.pop_front() { - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(1); - header.try_extend_from_slice(&8u16.to_be_bytes()).unwrap(); - header - .try_extend_from_slice(&substream_id.get().to_be_bytes()) - .unwrap(); - header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); - debug_assert_eq!(header.len(), 12); - - self.yamux.inner.outgoing = Outgoing::Header { - header, - substream_data_frame: None, - is_goaway: false, - }; - continue; - } - - // Send outgoing pings. - if self.yamux.inner.pings_to_send > 0 { - self.yamux.inner.pings_to_send -= 1; - let opaque_value: u32 = self.yamux.inner.randomness.gen(); - self.yamux.queue_ping_request_header(opaque_value); - self.yamux.inner.pings_waiting_reply.push_back(opaque_value); - continue; - } - - // Send window update frames. - // TODO: O(n) - if let Some((id, sub)) = self - .yamux - .inner - .substreams - .iter_mut() - .find(|(_, s)| { - matches!(&s.state, - SubstreamState::Healthy { - remote_window_pending_increase, - .. 
- } if *remote_window_pending_increase != 0) - }) - .map(|(id, sub)| (*id, sub)) - { - if let SubstreamState::Healthy { - first_message_queued, - remote_window_pending_increase, - remote_allowed_window, - .. - } = &mut sub.state - { - let syn_ack_flag = !*first_message_queued; - *first_message_queued = true; - - let update = u32::try_from(*remote_window_pending_increase) - .unwrap_or(u32::max_value()); - *remote_window_pending_increase -= u64::from(update); - *remote_allowed_window += u64::from(update); - self.yamux - .queue_window_size_frame_header(syn_ack_flag, id, update); - continue; - } else { - unreachable!() - } - } - - // Start writing more data from another substream. - // TODO: O(n) - // TODO: choose substreams in some sort of round-robin way - if let Some((id, sub)) = self - .yamux - .inner - .substreams - .iter_mut() - .find(|(_, s)| match &s.state { - SubstreamState::Healthy { - write_buffers, - local_write_close: local_write, - .. - } => { - !write_buffers.is_empty() - || matches!(local_write, SubstreamStateLocalWrite::FinDesired) - } - _ => false, - }) - .map(|(id, sub)| (*id, sub)) - { - if let SubstreamState::Healthy { - first_message_queued, - allowed_window, - local_write_close: local_write, - write_buffers, - .. 
- } = &mut sub.state - { - let pending_len = write_buffers.iter().fold(0, |l, b| l + b.len()); - let len_out = cmp::min( - u32::try_from(pending_len).unwrap_or(u32::max_value()), - u32::try_from(*allowed_window).unwrap_or(u32::max_value()), - ); - let len_out_usize = usize::try_from(len_out).unwrap(); - *allowed_window -= u64::from(len_out); - let syn_ack_flag = !*first_message_queued; - *first_message_queued = true; - let fin_flag = !matches!(local_write, SubstreamStateLocalWrite::Open) - && len_out_usize == pending_len; - if fin_flag { - *local_write = SubstreamStateLocalWrite::FinQueued; - } - self.yamux - .queue_data_frame_header(syn_ack_flag, fin_flag, id, len_out); - } else { - unreachable!() - } - } else { - break; - } - } - } - } - - None - } -} - #[derive(Clone)] struct VecWithOffset(Vec, usize); impl AsRef<[u8]> for VecWithOffset { From ba2b99c55646f6f52da8655effa586fb6ba33251 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 1 Apr 2023 11:43:46 +0200 Subject: [PATCH 15/74] Fix faulty debug_assert --- lib/src/libp2p/connection/established/single_stream.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index c3c3903106..06ccef379b 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -529,7 +529,6 @@ where None => (&mut [], &mut []), }, ); - debug_assert!(_read <= unencrypted_bytes_to_extract); read_write.advance_write(written); } } From 5adc96e66a7f4589dd6b469a61e42ecb9e701022 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 1 Apr 2023 14:09:35 +0200 Subject: [PATCH 16/74] Turn an if into a debug_assert! 
--- lib/src/libp2p/connection/yamux.rs | 34 ++++++++++++------------------ 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 6623c8406c..66ee2d529a 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -807,22 +807,20 @@ impl Yamux { first_write_buffer_offset, .. } => { - if matches!(local_write_close, SubstreamStateLocalWrite::FinQueued) - && *remote_write_closed - && (write_buffers.is_empty() // TODO: cumbersome + debug_assert!( + matches!(local_write_close, SubstreamStateLocalWrite::FinQueued) + && *remote_write_closed + && (write_buffers.is_empty() // TODO: cumbersome || (write_buffers.len() == 1 && write_buffers[0].len() <= *first_write_buffer_offset)) - { - ( - SubstreamId(*id), - DeadSubstreamTy::ClosedGracefully, - &substream.user_data, - ) - } else { - // Substream shouldn't have been put in `dead_substreams`. - unreachable!() - } + ); + + ( + SubstreamId(*id), + DeadSubstreamTy::ClosedGracefully, + &substream.user_data, + ) } } }) @@ -1462,9 +1460,7 @@ impl Yamux { }; let first_buf_avail = write_buffers[0].len() - *first_write_buffer_offset; - let out = if first_buf_avail <= remain.get() - && first_buf_avail <= size_bytes - { + let out = if first_buf_avail <= remain.get() && first_buf_avail <= size_bytes { let out = VecWithOffset( write_buffers.pop_front().unwrap(), *first_write_buffer_offset, @@ -1483,8 +1479,7 @@ impl Yamux { .. 
} = self.inner.substreams.get(&id.0).unwrap().state { - let _was_inserted = - self.inner.dead_substreams.insert(id.0); + let _was_inserted = self.inner.dead_substreams.insert(id.0); debug_assert!(_was_inserted); } } @@ -1500,8 +1495,7 @@ impl Yamux { either::Right(out) } else { let out = VecWithOffset( - write_buffers[0][*first_write_buffer_offset..][..size_bytes] - .to_vec(), + write_buffers[0][*first_write_buffer_offset..][..size_bytes].to_vec(), 0, ); *first_write_buffer_offset += size_bytes; From 09429ce2208b44ba8bcaf5724627c883e906945b Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 1 Apr 2023 14:49:31 +0200 Subject: [PATCH 17/74] Tweaks to the `incoming_data` function --- lib/src/libp2p/connection/yamux.rs | 207 +++++++++++++++++------------ 1 file changed, 119 insertions(+), 88 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 66ee2d529a..8c381e5e22 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -893,9 +893,16 @@ impl Yamux { remaining_bytes: 0, fin: true, } => { + // End of the data frame. Proceed to receive new header at the next iteration. self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); - if let Some(Substream { + // Note that it is possible that we are receiving data corresponding to a + // substream for which a RST has been sent out by the local node. Since the + // local state machine doesn't keep track of RST'ted substreams, any + // frame concerning a substream that has been RST or doesn't exist is + // discarded and doesn't result in an error, under the presumption that we + // are in this situation. + let Some(Substream { state: SubstreamState::Healthy { remote_write_closed: remote_write_closed @ false, @@ -905,26 +912,25 @@ impl Yamux { .. }, .. 
- }) = self.inner.substreams.get_mut(&substream_id.0) - { - *remote_write_closed = true; + }) = self.inner.substreams.get_mut(&substream_id.0) else { continue; }; - if matches!(*local_write_close, SubstreamStateLocalWrite::FinQueued) - && (write_buffers.is_empty() // TODO: cumbersome - || (write_buffers.len() == 1 - && write_buffers[0].len() - <= *first_write_buffer_offset)) - { - let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); - debug_assert!(_was_inserted); - } + *remote_write_closed = true; - return Ok(IncomingDataOutcome { - yamux: self, - bytes_read: total_read, - detail: Some(IncomingDataDetail::StreamClosed { substream_id }), - }); + if matches!(*local_write_close, SubstreamStateLocalWrite::FinQueued) + && (write_buffers.is_empty() // TODO: cumbersome + || (write_buffers.len() == 1 + && write_buffers[0].len() + <= *first_write_buffer_offset)) + { + let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); + debug_assert!(_was_inserted); } + + return Ok(IncomingDataOutcome { + yamux: self, + bytes_read: total_read, + detail: Some(IncomingDataDetail::StreamClosed { substream_id }), + }); } Incoming::DataFrame { @@ -932,6 +938,7 @@ impl Yamux { fin: false, .. } => { + // End of the data frame. Proceed to receive new header at the next iteration. self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); } @@ -945,20 +952,22 @@ impl Yamux { debug_assert_ne!(*remaining_bytes, 0); + // Extract the data and update the local states. let pulled_data = cmp::min( *remaining_bytes, u32::try_from(data.len()).unwrap_or(u32::max_value()), ); - let pulled_data_usize = usize::try_from(pulled_data).unwrap(); *remaining_bytes -= pulled_data; - let start_offset = total_read; total_read += pulled_data_usize; data = &data[pulled_data_usize..]; - // Note that it is possible that we are receiving data corresponding to a - // substream for which a RST has been sent out by the local node. 
Since the + // If the substream still exists, report the event to the API user. + // If the substream doesn't exist anymore, just continue iterating. + // + // It is possible that we are receiving data corresponding to a substream for + // which a RST has been sent out by the local node. Since the // local state machine doesn't keep track of RST'ted substreams, any // frame concerning a substream that has been RST or doesn't exist is // discarded and doesn't result in an error, under the presumption that we @@ -983,9 +992,10 @@ impl Yamux { }); } - // Also note that we don't switch back `self.inner.incoming` to `Header`. - // Instead, the next iteration will pick up `DataFrame` again and transition - // again. This is necessary to handle the `fin` flag elegantly. + // We don't switch back `self.inner.incoming` to `Header` even if there's no + // bytes remaining in the data frame. Instead, the next iteration will pick up + // `DataFrame` again and transition again. This is necessary to handle the + // `fin` flag elegantly. } Incoming::DataFrame { @@ -1024,6 +1034,7 @@ impl Yamux { // be empty. If it is not the case, we simply leave the ping header // there and prevent any further data from being read. if !matches!(self.inner.outgoing, Outgoing::Idle) { + // TODO: this could trigger a deadlock if the send buffer is very small break; } @@ -1057,6 +1068,7 @@ impl Yamux { self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); } header::DecodedYamuxHeader::PingResponse { opaque_value } => { + // TODO: this is `O(n)` let pos = match self .inner .pings_waiting_reply @@ -1156,7 +1168,10 @@ impl Yamux { length, .. } => { - // Handle `RST` flag separately. + // Frame with the `RST` flag set. Destroy the substream. + + // It is invalid to have the `RST` flag set and data at the same time. + // TODO: why is it invalid? if matches!(decoded_header, header::DecodedYamuxHeader::Data { .. 
}) && length != 0 { @@ -1169,56 +1184,53 @@ impl Yamux { // which we have sent a RST frame earlier. Considering that we don't // always keep traces of old substreams, we have no way to know whether // this is the case or not. - if let Some(s) = self.inner.substreams.get_mut(&stream_id) { - let _was_inserted = self.inner.dead_substreams.insert(stream_id); - debug_assert!(_was_inserted); + let Some(s) = self.inner.substreams.get_mut(&stream_id) else { continue }; - // We might be currently writing a frame of data of the substream - // being reset. If that happens, we need to update some internal - // state regarding this frame of data. - match ( - &mut self.inner.outgoing, - mem::replace(&mut s.state, SubstreamState::Reset), - ) { - ( - Outgoing::Header { - substream_data_frame: - Some((data @ OutgoingSubstreamData::Healthy(_), _)), - .. - } - | Outgoing::SubstreamData { - data: data @ OutgoingSubstreamData::Healthy(_), - .. - }, - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. - }, - ) if *data - == OutgoingSubstreamData::Healthy(SubstreamId( - stream_id, - )) => - { - *data = OutgoingSubstreamData::Obsolete { - write_buffers, - first_write_buffer_offset, - }; + let _was_inserted = self.inner.dead_substreams.insert(stream_id); + debug_assert!(_was_inserted); + + // We might be currently writing a frame of data of the substream + // being reset. If that happens, we need to update some internal + // state regarding this frame of data. + match ( + &mut self.inner.outgoing, + mem::replace(&mut s.state, SubstreamState::Reset), + ) { + ( + Outgoing::Header { + substream_data_frame: + Some((data @ OutgoingSubstreamData::Healthy(_), _)), + .. } - _ => {} + | Outgoing::SubstreamData { + data: data @ OutgoingSubstreamData::Healthy(_), + .. + }, + SubstreamState::Healthy { + write_buffers, + first_write_buffer_offset, + .. 
+ }, + ) if *data + == OutgoingSubstreamData::Healthy(SubstreamId(stream_id)) => + { + *data = OutgoingSubstreamData::Obsolete { + write_buffers, + first_write_buffer_offset, + }; } - - return Ok(IncomingDataOutcome { - yamux: self, - bytes_read: total_read, - detail: Some(IncomingDataDetail::StreamReset { - substream_id: SubstreamId(stream_id), - }), - }); + _ => {} } + + return Ok(IncomingDataOutcome { + yamux: self, + bytes_read: total_read, + detail: Some(IncomingDataDetail::StreamReset { + substream_id: SubstreamId(stream_id), + }), + }); } - // Remote has sent a SYN flag. A new substream is to be opened. header::DecodedYamuxHeader::Data { syn: true, fin, @@ -1235,6 +1247,7 @@ impl Yamux { length, .. } => { + // Remote has sent a SYN flag. A new substream is to be opened. match self.inner.substreams.get(&stream_id) { Some(Substream { state: SubstreamState::Healthy { .. }, @@ -1250,18 +1263,46 @@ impl Yamux { // Because we don't immediately destroy substreams, the remote // might decide to re-use a substream ID that is still // allocated locally. If that happens, we block the reading. + // It will be unblocked when the API user destroys the old + // substream. break; } None => {} } + // When receiving a new substream, the outgoing state must always be + // `Outgoing::Idle`, in order to potentially queue the substream + // rejection message later. + // If it is not the case, we simply leave the header there and prevent + // any further data from being read. + if !matches!(self.inner.outgoing, Outgoing::Idle) { + break; + } + let is_data = matches!(decoded_header, header::DecodedYamuxHeader::Data { .. }); // If we have queued or sent a GoAway frame, then the substream is // automatically rejected. if !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) { - self.inner.incoming = if !is_data || length == 0 { + // Send the `RST` frame. 
+ let mut header = arrayvec::ArrayVec::new(); + header.push(0); + header.push(1); + header.try_extend_from_slice(&0x8u16.to_be_bytes()).unwrap(); + header + .try_extend_from_slice(&stream_id.get().to_be_bytes()) + .unwrap(); + header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); + debug_assert_eq!(header.len(), 12); + + self.inner.outgoing = Outgoing::Header { + header, + substream_data_frame: None, + is_goaway: false, + }; + + self.inner.incoming = if !is_data { Incoming::Header(arrayvec::ArrayVec::new()) } else { Incoming::DataFrame { @@ -1270,16 +1311,8 @@ impl Yamux { fin, } }; - continue; - } - // As documented, when in the `Incoming::PendingIncomingSubstream` - // state, the outgoing state must always be `Outgoing::Idle`, in - // order to potentially queue the substream rejection message later. - // If it is not the case, we simply leave the header there and prevent - // any further data from being read. - if !matches!(self.inner.outgoing, Outgoing::Idle) { - break; + continue; } self.inner.incoming = Incoming::PendingIncomingSubstream { @@ -1332,6 +1365,8 @@ impl Yamux { .ok_or(Error::CreditsExceeded)?; } + // Switch to the `DataFrame` state in order to process the frame, even + // if the substream no longer exists. self.inner.incoming = Incoming::DataFrame { substream_id: SubstreamId(stream_id), remaining_bytes: length, @@ -1743,14 +1778,10 @@ impl Yamux { fin, .. 
} => { - self.inner.incoming = if data_frame_size == 0 { - Incoming::Header(arrayvec::ArrayVec::new()) - } else { - Incoming::DataFrame { - substream_id, - remaining_bytes: data_frame_size, - fin, - } + self.inner.incoming = Incoming::DataFrame { + substream_id, + remaining_bytes: data_frame_size, + fin, }; let mut header = arrayvec::ArrayVec::new(); From 146ccc89ea939529af2737c51639eb13216b08ee Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 1 Apr 2023 15:11:56 +0200 Subject: [PATCH 18/74] Use a HashSet for outgoing pings --- lib/src/libp2p/connection/yamux.rs | 55 +++++++++++++++++++----------- 1 file changed, 36 insertions(+), 19 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 8c381e5e22..5ae8beaa3c 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -119,11 +119,12 @@ struct YamuxInner { next_outbound_substream: NonZeroU32, /// Number of pings to send out that haven't been queued yet. - pings_to_send: u32, + pings_to_send: usize, - /// List of pings that have been sent out but haven't been replied yet. For each ping, - /// contains the opaque value that has been sent out and that must be matched by the remote. - pings_waiting_reply: VecDeque, + /// List of pings that have been sent out but haven't been replied yet. Each ping has as key + /// the opaque value that has been sent out and that must be matched by the remote. + /// Since the opaque values are generated locally and randomly, we can use the `FNV` hasher. + pings_waiting_reply: hashbrown::HashSet, /// List of substream IDs that have been reset locally. For each entry, a RST header should /// be sent to the remote and the entry removed. @@ -285,6 +286,9 @@ enum OutgoingSubstreamData { }, } +/// Maximum number of simultaneous outgoing pings allowed. +const MAX_PINGS: usize = 100000; + impl Yamux { /// Initializes a new Yamux state machine. 
pub fn new(config: Config) -> Yamux { @@ -312,7 +316,7 @@ impl Yamux { }, pings_to_send: 0, // We leave the initial capacity at 0, as it is likely that no ping is sent at all. - pings_waiting_reply: VecDeque::new(), + pings_waiting_reply: hashbrown::HashSet::with_hasher(Default::default()), rsts_to_send: VecDeque::with_capacity(config.capacity), randomness, }), @@ -720,7 +724,21 @@ impl Yamux { } /// Queues sending out a ping to the remote. + /// + /// # Panic + /// + /// Panics if there are already [`MAX_PINGS`] pings that have been queued and that the remote + /// hasn't answered yet. [`MAX_PINGS`] is pretty large, and unless there is a bug in the API + /// user's code causing pings to be allocated in a loop, this limit is not likely to ever be + /// reached. + /// pub fn queue_ping(&mut self) { + // A maximum number of simultaneous pings (`MAX_PINGS`) is necessary because we don't + // support sending multiple identical ping opaque values. Since the ping opaque values + // are 32 bits, the actual maximum number of simultaneous pings is 2^32. But because we + // allocate ping values by looping until we find a not-yet-allocated value, the arbitrary + // self-enforced maximum needs to be way lower. 
+ assert!(self.inner.pings_to_send + self.inner.pings_waiting_reply.len() < MAX_PINGS); self.inner.pings_to_send += 1; } @@ -1068,18 +1086,10 @@ impl Yamux { self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); } header::DecodedYamuxHeader::PingResponse { opaque_value } => { - // TODO: this is `O(n)` - let pos = match self - .inner - .pings_waiting_reply - .iter() - .position(|v| *v == opaque_value) - { - Some(p) => p, - None => return Err(Error::PingResponseNotMatching), - }; + if !self.inner.pings_waiting_reply.remove(&opaque_value) { + return Err(Error::PingResponseNotMatching); + } - self.inner.pings_waiting_reply.remove(pos); self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); return Ok(IncomingDataOutcome { yamux: self, @@ -1593,9 +1603,16 @@ impl Yamux { // Send outgoing pings. if self.inner.pings_to_send > 0 { self.inner.pings_to_send -= 1; - let opaque_value: u32 = self.inner.randomness.gen(); - self.queue_ping_request_header(opaque_value); - self.inner.pings_waiting_reply.push_back(opaque_value); + // Generate opaque values in a loop until we don't hit a duplicate. 
+ loop { + let opaque_value: u32 = self.inner.randomness.gen(); + if !self.inner.pings_waiting_reply.insert(opaque_value) { + continue; + } + self.queue_ping_request_header(opaque_value); + break; + } + debug_assert!(self.inner.pings_waiting_reply.len() <= MAX_PINGS); continue; } From cb660040b3f6aea2a35b35ad93691b9a3ac489a3 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 2 Apr 2023 14:25:27 +0200 Subject: [PATCH 19/74] Add an `encode` function --- lib/src/libp2p/connection/yamux.rs | 197 +++++++--------------- lib/src/libp2p/connection/yamux/header.rs | 93 ++++++++++ 2 files changed, 154 insertions(+), 136 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 5ae8beaa3c..fc6660c971 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1047,7 +1047,7 @@ impl Yamux { // Handle any message other than data or window size. match decoded_header { - header::DecodedYamuxHeader::PingRequest { .. } => { + header::DecodedYamuxHeader::PingRequest { opaque_value } => { // Ping. In order to queue the pong message, the outgoing queue must // be empty. If it is not the case, we simply leave the ping header // there and prevent any further data from being read. @@ -1057,28 +1057,10 @@ impl Yamux { } self.inner.outgoing = Outgoing::Header { - header: { - let mut header = arrayvec::ArrayVec::new(); - header - .try_extend_from_slice( - &[ - 0, - 2, - 0x0, - 0x2, - 0, - 0, - 0, - 0, - incoming_header[8], - incoming_header[9], - incoming_header[10], - incoming_header[11], - ][..], - ) - .unwrap(); - header - }, + header: header::encode(&header::DecodedYamuxHeader::PingResponse { + opaque_value, + }) + .into(), substream_data_frame: None, is_goaway: false, }; @@ -1296,18 +1278,16 @@ impl Yamux { // automatically rejected. if !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) { // Send the `RST` frame. 
- let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(1); - header.try_extend_from_slice(&0x8u16.to_be_bytes()).unwrap(); - header - .try_extend_from_slice(&stream_id.get().to_be_bytes()) - .unwrap(); - header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); - debug_assert_eq!(header.len(), 12); - self.inner.outgoing = Outgoing::Header { - header, + header: header::encode(&header::DecodedYamuxHeader::Window { + syn: false, + ack: false, + fin: false, + rst: true, + stream_id, + length: 0, + }) + .into(), substream_data_frame: None, is_goaway: false, }; @@ -1553,26 +1533,12 @@ impl Yamux { Outgoing::Idle => { // Send a `GoAway` frame if demanded. - if let OutgoingGoAway::Required(code) = self.inner.outgoing_goaway { - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(3); - header.try_extend_from_slice(&0u16.to_be_bytes()).unwrap(); - header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); - header - .try_extend_from_slice( - &match code { - GoAwayErrorCode::NormalTermination => 0u32, - GoAwayErrorCode::ProtocolError => 1u32, - GoAwayErrorCode::InternalError => 2u32, - } - .to_be_bytes(), - ) - .unwrap(); - debug_assert_eq!(header.len(), 12); - + if let OutgoingGoAway::Required(error_code) = self.inner.outgoing_goaway { self.inner.outgoing = Outgoing::Header { - header, + header: header::encode(&header::DecodedYamuxHeader::GoAway { + error_code, + }) + .into(), substream_data_frame: None, is_goaway: true, }; @@ -1582,18 +1548,16 @@ impl Yamux { // Send RST frames. 
if let Some(substream_id) = self.inner.rsts_to_send.pop_front() { - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(1); - header.try_extend_from_slice(&8u16.to_be_bytes()).unwrap(); - header - .try_extend_from_slice(&substream_id.get().to_be_bytes()) - .unwrap(); - header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); - debug_assert_eq!(header.len(), 12); - self.inner.outgoing = Outgoing::Header { - header, + header: header::encode(&header::DecodedYamuxHeader::Window { + syn: false, + ack: false, + fin: false, + rst: true, + stream_id: substream_id, + length: 0, + }) + .into(), substream_data_frame: None, is_goaway: false, }; @@ -1801,19 +1765,17 @@ impl Yamux { fin, }; - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(1); - header.try_extend_from_slice(&0x8u16.to_be_bytes()).unwrap(); - header - .try_extend_from_slice(&substream_id.0.get().to_be_bytes()) - .unwrap(); - header.try_extend_from_slice(&0u32.to_be_bytes()).unwrap(); - debug_assert_eq!(header.len(), 12); - debug_assert!(matches!(self.inner.outgoing, Outgoing::Idle)); self.inner.outgoing = Outgoing::Header { - header, + header: header::encode(&header::DecodedYamuxHeader::Window { + syn: false, + ack: false, + fin: false, + rst: true, + stream_id: substream_id.0, + length: 0, + }) + .into(), substream_data_frame: None, is_goaway: false, }; @@ -1837,34 +1799,19 @@ impl Yamux { ) { assert!(matches!(self.inner.outgoing, Outgoing::Idle)); - let mut flags: u16 = 0; - if syn_ack_flag { - if (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2) { - // SYN - flags |= 0x1; - } else { - // ACK - flags |= 0x2; - } - } - if fin_flag { - flags |= 0x4; - } - - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(0); - header.try_extend_from_slice(&flags.to_be_bytes()).unwrap(); - header - .try_extend_from_slice(&substream_id.get().to_be_bytes()) - .unwrap(); - header - 
.try_extend_from_slice(&data_length.to_be_bytes()) - .unwrap(); - debug_assert_eq!(header.len(), 12); + let is_outbound = + (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2); self.inner.outgoing = Outgoing::Header { - header, + header: header::encode(&header::DecodedYamuxHeader::Data { + syn: syn_ack_flag && is_outbound, + ack: syn_ack_flag && !is_outbound, + fin: fin_flag, + rst: false, + stream_id: substream_id, + length: data_length, + }) + .into(), is_goaway: false, substream_data_frame: NonZeroUsize::new(usize::try_from(data_length).unwrap()).map( |length| { @@ -1891,31 +1838,19 @@ impl Yamux { ) { assert!(matches!(self.inner.outgoing, Outgoing::Idle)); - let mut flags: u16 = 0; - if syn_ack_flag { - if (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2) { - // SYN - flags |= 0x1; - } else { - // ACK - flags |= 0x2; - } - } - - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(1); - header.try_extend_from_slice(&flags.to_be_bytes()).unwrap(); - header - .try_extend_from_slice(&substream_id.get().to_be_bytes()) - .unwrap(); - header - .try_extend_from_slice(&window_size.to_be_bytes()) - .unwrap(); - debug_assert_eq!(header.len(), 12); + let is_outbound = + (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2); self.inner.outgoing = Outgoing::Header { - header, + header: header::encode(&header::DecodedYamuxHeader::Window { + syn: syn_ack_flag && is_outbound, + ack: syn_ack_flag && !is_outbound, + fin: false, + rst: false, + stream_id: substream_id, + length: window_size, + }) + .into(), substream_data_frame: None, is_goaway: false, }; @@ -1929,19 +1864,9 @@ impl Yamux { /// fn queue_ping_request_header(&mut self, opaque_value: u32) { assert!(matches!(self.inner.outgoing, Outgoing::Idle)); - - let mut header = arrayvec::ArrayVec::new(); - header.push(0); - header.push(2); - header.try_extend_from_slice(&[0, 1]).unwrap(); - header.try_extend_from_slice(&[0, 0, 0, 
0]).unwrap(); - header - .try_extend_from_slice(&opaque_value.to_be_bytes()) - .unwrap(); - debug_assert_eq!(header.len(), 12); - self.inner.outgoing = Outgoing::Header { - header, + header: header::encode(&header::DecodedYamuxHeader::PingRequest { opaque_value }) + .into(), substream_data_frame: None, is_goaway: false, }; diff --git a/lib/src/libp2p/connection/yamux/header.rs b/lib/src/libp2p/connection/yamux/header.rs index 0286ac2fbc..0c91201ea4 100644 --- a/lib/src/libp2p/connection/yamux/header.rs +++ b/lib/src/libp2p/connection/yamux/header.rs @@ -66,6 +66,99 @@ pub enum GoAwayErrorCode { InternalError = 0x2, } +pub fn encode(header: &DecodedYamuxHeader) -> [u8; 12] { + match header { + DecodedYamuxHeader::Data { + syn, + ack, + fin, + rst, + stream_id, + length, + } + | DecodedYamuxHeader::Window { + syn, + ack, + fin, + rst, + stream_id, + length, + } => { + let ty = match header { + DecodedYamuxHeader::Data { .. } => 0, + DecodedYamuxHeader::Window { .. } => 1, + _ => unreachable!(), + }; + + let mut flags: u8 = 0; + if *syn { + flags |= 0x1; + } + if *ack { + flags |= 0x2; + } + if *fin { + flags |= 0x4; + } + if *rst { + flags |= 0x8; + } + + let stream_id = stream_id.get().to_be_bytes(); + let length = length.to_be_bytes(); + + [ + 0, + ty, + 0, + flags, + stream_id[0], + stream_id[1], + stream_id[2], + stream_id[3], + length[0], + length[1], + length[2], + length[3], + ] + } + DecodedYamuxHeader::PingRequest { opaque_value } + | DecodedYamuxHeader::PingResponse { opaque_value } => { + let flags = match header { + DecodedYamuxHeader::PingRequest { .. } => 1, + DecodedYamuxHeader::PingResponse { .. 
} => 2, + _ => unreachable!(), + }; + + let opaque_value = opaque_value.to_be_bytes(); + + [ + 0, + 2, + 0, + flags, + 0, + 0, + 0, + 0, + opaque_value[0], + opaque_value[1], + opaque_value[2], + opaque_value[3], + ] + } + DecodedYamuxHeader::GoAway { error_code } => { + let code = match error_code { + GoAwayErrorCode::NormalTermination => 0, + GoAwayErrorCode::ProtocolError => 1, + GoAwayErrorCode::InternalError => 2, + }; + + [0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, code] + } + } +} + /// Decodes a Yamux header. pub fn decode_yamux_header(bytes: &[u8]) -> Result { match nom::combinator::all_consuming(nom::combinator::complete(decode))(bytes) { From f51bd6f8a274e2a1b74b9ff6423e55721aee26fe Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 2 Apr 2023 14:32:54 +0200 Subject: [PATCH 20/74] Store the decoded header in Outgoing --- lib/src/libp2p/connection/yamux.rs | 74 ++++++++++++++++-------------- 1 file changed, 40 insertions(+), 34 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index fc6660c971..0d57944f5d 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -221,13 +221,17 @@ enum Outgoing { /// Writing out a header. Header { - /// Bytes of the header to write out. + /// Header to write out. /// /// The length of this buffer might not be equal to 12 in case some parts of the header have /// already been written out but not all. /// /// Never empty (as otherwise the state must have been transitioned to something else). - header: arrayvec::ArrayVec, + header: header::DecodedYamuxHeader, + + /// Number of bytes from `header` that have already been sent out. Always strictly + /// inferior to 12. + header_already_sent: u8, /// If `Some`, then the header is data frame header and we must then transition the /// state to [`Outgoing::SubstreamData`]. 
@@ -1057,10 +1061,8 @@ impl Yamux { } self.inner.outgoing = Outgoing::Header { - header: header::encode(&header::DecodedYamuxHeader::PingResponse { - opaque_value, - }) - .into(), + header: header::DecodedYamuxHeader::PingResponse { opaque_value }, + header_already_sent: 0, substream_data_frame: None, is_goaway: false, }; @@ -1279,15 +1281,15 @@ impl Yamux { if !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) { // Send the `RST` frame. self.inner.outgoing = Outgoing::Header { - header: header::encode(&header::DecodedYamuxHeader::Window { + header: header::DecodedYamuxHeader::Window { syn: false, ack: false, fin: false, rst: true, stream_id, length: 0, - }) - .into(), + }, + header_already_sent: 0, substream_data_frame: None, is_goaway: false, }; @@ -1427,13 +1429,20 @@ impl Yamux { match self.inner.outgoing { Outgoing::Header { ref mut header, + ref mut header_already_sent, ref mut substream_data_frame, ref is_goaway, } => { // Finish writing the header. - debug_assert!(!header.is_empty()); - if size_bytes >= header.len() { - let out = mem::take(header); + debug_assert!(*header_already_sent < 12); + let encoded_header = header::encode(header); + let encoded_header_remains_to_write = + &encoded_header[usize::from(*header_already_sent)..]; + + if size_bytes >= encoded_header_remains_to_write.len() { + let out = + arrayvec::ArrayVec::::try_from(encoded_header_remains_to_write) + .unwrap(); if *is_goaway { debug_assert!(matches!( self.inner.outgoing_goaway, @@ -1452,10 +1461,9 @@ impl Yamux { }; return Some(either::Left(out)); } else { - let to_add = header[..size_bytes].to_vec(); - for _ in 0..size_bytes { - header.remove(0); - } + let to_add = encoded_header_remains_to_write.to_vec(); + *header_already_sent += u8::try_from(size_bytes).unwrap(); + debug_assert!(*header_already_sent < 12); return Some(either::Right(VecWithOffset(to_add, 0))); } } @@ -1535,10 +1543,8 @@ impl Yamux { // Send a `GoAway` frame if demanded. 
if let OutgoingGoAway::Required(error_code) = self.inner.outgoing_goaway { self.inner.outgoing = Outgoing::Header { - header: header::encode(&header::DecodedYamuxHeader::GoAway { - error_code, - }) - .into(), + header: header::DecodedYamuxHeader::GoAway { error_code }, + header_already_sent: 0, substream_data_frame: None, is_goaway: true, }; @@ -1549,15 +1555,15 @@ impl Yamux { // Send RST frames. if let Some(substream_id) = self.inner.rsts_to_send.pop_front() { self.inner.outgoing = Outgoing::Header { - header: header::encode(&header::DecodedYamuxHeader::Window { + header: header::DecodedYamuxHeader::Window { syn: false, ack: false, fin: false, rst: true, stream_id: substream_id, length: 0, - }) - .into(), + }, + header_already_sent: 0, substream_data_frame: None, is_goaway: false, }; @@ -1767,15 +1773,15 @@ impl Yamux { debug_assert!(matches!(self.inner.outgoing, Outgoing::Idle)); self.inner.outgoing = Outgoing::Header { - header: header::encode(&header::DecodedYamuxHeader::Window { + header: header::DecodedYamuxHeader::Window { syn: false, ack: false, fin: false, rst: true, stream_id: substream_id.0, length: 0, - }) - .into(), + }, + header_already_sent: 0, substream_data_frame: None, is_goaway: false, }; @@ -1803,15 +1809,15 @@ impl Yamux { (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2); self.inner.outgoing = Outgoing::Header { - header: header::encode(&header::DecodedYamuxHeader::Data { + header: header::DecodedYamuxHeader::Data { syn: syn_ack_flag && is_outbound, ack: syn_ack_flag && !is_outbound, fin: fin_flag, rst: false, stream_id: substream_id, length: data_length, - }) - .into(), + }, + header_already_sent: 0, is_goaway: false, substream_data_frame: NonZeroUsize::new(usize::try_from(data_length).unwrap()).map( |length| { @@ -1842,15 +1848,15 @@ impl Yamux { (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2); self.inner.outgoing = Outgoing::Header { - header: 
header::encode(&header::DecodedYamuxHeader::Window { + header: header::DecodedYamuxHeader::Window { syn: syn_ack_flag && is_outbound, ack: syn_ack_flag && !is_outbound, fin: false, rst: false, stream_id: substream_id, length: window_size, - }) - .into(), + }, + header_already_sent: 0, substream_data_frame: None, is_goaway: false, }; @@ -1865,8 +1871,8 @@ impl Yamux { fn queue_ping_request_header(&mut self, opaque_value: u32) { assert!(matches!(self.inner.outgoing, Outgoing::Idle)); self.inner.outgoing = Outgoing::Header { - header: header::encode(&header::DecodedYamuxHeader::PingRequest { opaque_value }) - .into(), + header: header::DecodedYamuxHeader::PingRequest { opaque_value }, + header_already_sent: 0, substream_data_frame: None, is_goaway: false, }; From 3f6b9ec8414d8d3e0004a30aeb762657eec78062 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 2 Apr 2023 14:34:13 +0200 Subject: [PATCH 21/74] Remove is_goaway flag --- lib/src/libp2p/connection/yamux.rs | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 0d57944f5d..1795edc141 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -236,11 +236,6 @@ enum Outgoing { /// If `Some`, then the header is data frame header and we must then transition the /// state to [`Outgoing::SubstreamData`]. substream_data_frame: Option<(OutgoingSubstreamData, NonZeroUsize)>, - - /// `true` if this header contains a `GoAway` frame. This indicates that - /// [`Yamux::outgoing_goaway`] must be transitioned to [`OutgoingGoAway::Sent`] after - /// this header has been extracted. - is_goaway: bool, }, /// Writing out data from a substream. 
@@ -1064,7 +1059,6 @@ impl Yamux { header: header::DecodedYamuxHeader::PingResponse { opaque_value }, header_already_sent: 0, substream_data_frame: None, - is_goaway: false, }; self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); @@ -1291,7 +1285,6 @@ impl Yamux { }, header_already_sent: 0, substream_data_frame: None, - is_goaway: false, }; self.inner.incoming = if !is_data { @@ -1431,7 +1424,6 @@ impl Yamux { ref mut header, ref mut header_already_sent, ref mut substream_data_frame, - ref is_goaway, } => { // Finish writing the header. debug_assert!(*header_already_sent < 12); @@ -1443,7 +1435,7 @@ impl Yamux { let out = arrayvec::ArrayVec::::try_from(encoded_header_remains_to_write) .unwrap(); - if *is_goaway { + if matches!(header, header::DecodedYamuxHeader::GoAway { .. }) { debug_assert!(matches!( self.inner.outgoing_goaway, OutgoingGoAway::Queued @@ -1546,7 +1538,6 @@ impl Yamux { header: header::DecodedYamuxHeader::GoAway { error_code }, header_already_sent: 0, substream_data_frame: None, - is_goaway: true, }; self.inner.outgoing_goaway = OutgoingGoAway::Queued; continue; @@ -1565,7 +1556,6 @@ impl Yamux { }, header_already_sent: 0, substream_data_frame: None, - is_goaway: false, }; continue; } @@ -1783,7 +1773,6 @@ impl Yamux { }, header_already_sent: 0, substream_data_frame: None, - is_goaway: false, }; } _ => panic!(), @@ -1818,7 +1807,6 @@ impl Yamux { length: data_length, }, header_already_sent: 0, - is_goaway: false, substream_data_frame: NonZeroUsize::new(usize::try_from(data_length).unwrap()).map( |length| { ( @@ -1858,7 +1846,6 @@ impl Yamux { }, header_already_sent: 0, substream_data_frame: None, - is_goaway: false, }; } @@ -1874,7 +1861,6 @@ impl Yamux { header: header::DecodedYamuxHeader::PingRequest { opaque_value }, header_already_sent: 0, substream_data_frame: None, - is_goaway: false, }; } } From 76cd2bcb344bf3899d8d273b3a9ee5db0ce37aeb Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 2 Apr 2023 14:44:31 +0200 Subject: 
[PATCH 22/74] Remove the header queuing private functions
impl Yamux { _ => panic!(), } } - - /// Writes a data frame header in `self.inner.outgoing`. - /// - /// # Panic - /// - /// Panics if `self.inner.outgoing` is not `Idle`. - /// - fn queue_data_frame_header( - &mut self, - syn_ack_flag: bool, - fin_flag: bool, - substream_id: NonZeroU32, - data_length: u32, - ) { - assert!(matches!(self.inner.outgoing, Outgoing::Idle)); - - let is_outbound = - (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2); - - self.inner.outgoing = Outgoing::Header { - header: header::DecodedYamuxHeader::Data { - syn: syn_ack_flag && is_outbound, - ack: syn_ack_flag && !is_outbound, - fin: fin_flag, - rst: false, - stream_id: substream_id, - length: data_length, - }, - header_already_sent: 0, - substream_data_frame: NonZeroUsize::new(usize::try_from(data_length).unwrap()).map( - |length| { - ( - OutgoingSubstreamData::Healthy(SubstreamId(substream_id)), - length, - ) - }, - ), - }; - } - - /// Writes a window size update frame header in `self.inner.outgoing`. - /// - /// # Panic - /// - /// Panics if `self.inner.outgoing` is not `Idle`. - /// - fn queue_window_size_frame_header( - &mut self, - syn_ack_flag: bool, - substream_id: NonZeroU32, - window_size: u32, - ) { - assert!(matches!(self.inner.outgoing, Outgoing::Idle)); - - let is_outbound = - (substream_id.get() % 2) == (self.inner.next_outbound_substream.get() % 2); - - self.inner.outgoing = Outgoing::Header { - header: header::DecodedYamuxHeader::Window { - syn: syn_ack_flag && is_outbound, - ack: syn_ack_flag && !is_outbound, - fin: false, - rst: false, - stream_id: substream_id, - length: window_size, - }, - header_already_sent: 0, - substream_data_frame: None, - }; - } - - /// Writes a ping frame header in `self.inner.outgoing`. - /// - /// # Panic - /// - /// Panics if `self.inner.outgoing` is not `Idle`. 
- /// - fn queue_ping_request_header(&mut self, opaque_value: u32) { - assert!(matches!(self.inner.outgoing, Outgoing::Idle)); - self.inner.outgoing = Outgoing::Header { - header: header::DecodedYamuxHeader::PingRequest { opaque_value }, - header_already_sent: 0, - substream_data_frame: None, - }; - } } impl fmt::Debug for Yamux From aa3d25b90469b58b27b63a1009e3430fca0ed499 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 2 Apr 2023 16:23:51 +0200 Subject: [PATCH 23/74] `decode_yamux_header` now accepts a precise length --- lib/src/libp2p/connection/yamux.rs | 22 +++++++++++++--------- lib/src/libp2p/connection/yamux/header.rs | 9 +-------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index ef6e1dfbbe..61b2c911b5 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1032,16 +1032,20 @@ impl Yamux { data = &data[1..]; } - // Not enough data to finish receiving header. Nothing more can be done. - if incoming_header.len() != 12 { - debug_assert!(data.is_empty()); - break; - } + // Decode the header in `incoming_header`. + let decoded_header = { + let Ok(full_header) = <&[u8; 12]>::try_from(&incoming_header[..]) + else { + // Not enough data to finish receiving header. Nothing more can be + // done. + debug_assert!(data.is_empty()); + break; + }; - // Full header available to decode in `incoming_header`. - let decoded_header = match header::decode_yamux_header(incoming_header) { - Ok(h) => h, - Err(err) => return Err(Error::HeaderDecode(err)), + match header::decode_yamux_header(full_header) { + Ok(h) => h, + Err(err) => return Err(Error::HeaderDecode(err)), + } }; // Handle any message other than data or window size. 
diff --git a/lib/src/libp2p/connection/yamux/header.rs b/lib/src/libp2p/connection/yamux/header.rs index 0c91201ea4..afb17e9970 100644 --- a/lib/src/libp2p/connection/yamux/header.rs +++ b/lib/src/libp2p/connection/yamux/header.rs @@ -160,7 +160,7 @@ pub fn encode(header: &DecodedYamuxHeader) -> [u8; 12] { } /// Decodes a Yamux header. -pub fn decode_yamux_header(bytes: &[u8]) -> Result { +pub fn decode_yamux_header(bytes: &[u8; 12]) -> Result { match nom::combinator::all_consuming(nom::combinator::complete(decode))(bytes) { Ok((_, h)) => Ok(h), Err(nom::Err::Incomplete(_)) => unreachable!(), @@ -349,11 +349,4 @@ mod tests { assert!(super::decode_yamux_header(&[0, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2, 65]).is_ok()); assert!(super::decode_yamux_header(&[2, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2, 65]).is_err()); } - - #[test] - fn length_check() { - assert!(super::decode_yamux_header(&[0, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2, 65]).is_ok()); - assert!(super::decode_yamux_header(&[0, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2, 65, 0]).is_err()); - assert!(super::decode_yamux_header(&[0, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2]).is_err()); - } } From 39ba2cfa85cefd1b89e8296a342510ec04605055 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 2 Apr 2023 16:24:30 +0200 Subject: [PATCH 24/74] Remove old comment --- lib/src/libp2p/connection/yamux.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 61b2c911b5..139587fc17 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1048,7 +1048,6 @@ impl Yamux { } }; - // Handle any message other than data or window size. match decoded_header { header::DecodedYamuxHeader::PingRequest { opaque_value } => { // Ping. 
In order to queue the pong message, the outgoing queue must From cb67914eb33b12bf116005a3983160f5eb8aaa0b Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 2 Apr 2023 16:35:21 +0200 Subject: [PATCH 25/74] Add TODO --- lib/src/libp2p/connection/yamux.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 139587fc17..ee0870c75c 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1266,6 +1266,7 @@ impl Yamux { // rejection message later. // If it is not the case, we simply leave the header there and prevent // any further data from being read. + // TODO: could deadlock if the write buffer is very small if !matches!(self.inner.outgoing, Outgoing::Idle) { break; } From 0576892f317cdee51cd989f0030a57eedfdc584a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 3 Apr 2023 11:26:52 +0200 Subject: [PATCH 26/74] Add yamux header encoding tests --- lib/src/libp2p/connection/yamux/header.rs | 71 ++++++++++++++++++++++- 1 file changed, 68 insertions(+), 3 deletions(-) diff --git a/lib/src/libp2p/connection/yamux/header.rs b/lib/src/libp2p/connection/yamux/header.rs index afb17e9970..81cb52bd1c 100644 --- a/lib/src/libp2p/connection/yamux/header.rs +++ b/lib/src/libp2p/connection/yamux/header.rs @@ -66,6 +66,7 @@ pub enum GoAwayErrorCode { InternalError = 0x2, } +/// Encodes a Yamux header. 
pub fn encode(header: &DecodedYamuxHeader) -> [u8; 12] { match header { DecodedYamuxHeader::Data { @@ -272,7 +273,7 @@ mod tests { use core::num::NonZeroU32; #[test] - fn basic_data() { + fn decode_data_frame() { assert_eq!( super::decode_yamux_header(&[0, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2, 65]).unwrap(), super::DecodedYamuxHeader::Data { @@ -287,7 +288,7 @@ mod tests { } #[test] - fn basic_ping() { + fn decode_ping_frame() { assert_eq!( super::decode_yamux_header(&[0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 1, 12]).unwrap(), super::DecodedYamuxHeader::PingRequest { opaque_value: 268 } @@ -313,7 +314,7 @@ mod tests { } #[test] - fn basic_goaway() { + fn decode_goaway() { assert_eq!( super::decode_yamux_header(&[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(), super::DecodedYamuxHeader::GoAway { @@ -349,4 +350,68 @@ mod tests { assert!(super::decode_yamux_header(&[0, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2, 65]).is_ok()); assert!(super::decode_yamux_header(&[2, 0, 0, 1, 0, 0, 0, 15, 0, 0, 2, 65]).is_err()); } + + macro_rules! 
check_encode_reendoces { + ($payload:expr) => {{ + let payload = $payload; + assert_eq!( + super::decode_yamux_header(&super::encode(&payload)).unwrap(), + payload + ); + }}; + } + + #[test] + fn encode_data() { + for _ in 0..500 { + check_encode_reendoces!(super::DecodedYamuxHeader::Data { + syn: rand::random(), + ack: rand::random(), + fin: rand::random(), + rst: rand::random(), + stream_id: rand::random(), + length: rand::random() + }); + } + } + + #[test] + fn encode_window() { + for _ in 0..500 { + check_encode_reendoces!(super::DecodedYamuxHeader::Window { + syn: rand::random(), + ack: rand::random(), + fin: rand::random(), + rst: rand::random(), + stream_id: rand::random(), + length: rand::random() + }); + } + } + + #[test] + fn encode_ping() { + check_encode_reendoces!(super::DecodedYamuxHeader::PingRequest { + opaque_value: rand::random(), + }); + + check_encode_reendoces!(super::DecodedYamuxHeader::PingResponse { + opaque_value: rand::random(), + }); + } + + #[test] + fn encode_goaway() { + check_encode_reendoces!(super::DecodedYamuxHeader::GoAway { + error_code: super::GoAwayErrorCode::NormalTermination, + }); + + check_encode_reendoces!(super::DecodedYamuxHeader::GoAway { + error_code: super::GoAwayErrorCode::ProtocolError, + }); + + check_encode_reendoces!(super::DecodedYamuxHeader::GoAway { + error_code: super::GoAwayErrorCode::InternalError, + }); + } } From 326361ee23381b0cf30d20deb31be13520cb02c3 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 16:43:51 +0200 Subject: [PATCH 27/74] Macro name --- lib/src/libp2p/connection/yamux/header.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/src/libp2p/connection/yamux/header.rs b/lib/src/libp2p/connection/yamux/header.rs index 81cb52bd1c..ec13850424 100644 --- a/lib/src/libp2p/connection/yamux/header.rs +++ b/lib/src/libp2p/connection/yamux/header.rs @@ -351,7 +351,7 @@ mod tests { assert!(super::decode_yamux_header(&[2, 0, 0, 1, 0, 0, 0, 15, 0, 
0, 2, 65]).is_err()); } - macro_rules! check_encode_reendoces { + macro_rules! check_encode_redecodes { ($payload:expr) => {{ let payload = $payload; assert_eq!( @@ -364,7 +364,7 @@ mod tests { #[test] fn encode_data() { for _ in 0..500 { - check_encode_reendoces!(super::DecodedYamuxHeader::Data { + check_encode_redecodes!(super::DecodedYamuxHeader::Data { syn: rand::random(), ack: rand::random(), fin: rand::random(), @@ -378,7 +378,7 @@ mod tests { #[test] fn encode_window() { for _ in 0..500 { - check_encode_reendoces!(super::DecodedYamuxHeader::Window { + check_encode_redecodes!(super::DecodedYamuxHeader::Window { syn: rand::random(), ack: rand::random(), fin: rand::random(), @@ -391,26 +391,26 @@ mod tests { #[test] fn encode_ping() { - check_encode_reendoces!(super::DecodedYamuxHeader::PingRequest { + check_encode_redecodes!(super::DecodedYamuxHeader::PingRequest { opaque_value: rand::random(), }); - check_encode_reendoces!(super::DecodedYamuxHeader::PingResponse { + check_encode_redecodes!(super::DecodedYamuxHeader::PingResponse { opaque_value: rand::random(), }); } #[test] fn encode_goaway() { - check_encode_reendoces!(super::DecodedYamuxHeader::GoAway { + check_encode_redecodes!(super::DecodedYamuxHeader::GoAway { error_code: super::GoAwayErrorCode::NormalTermination, }); - check_encode_reendoces!(super::DecodedYamuxHeader::GoAway { + check_encode_redecodes!(super::DecodedYamuxHeader::GoAway { error_code: super::GoAwayErrorCode::ProtocolError, }); - check_encode_reendoces!(super::DecodedYamuxHeader::GoAway { + check_encode_redecodes!(super::DecodedYamuxHeader::GoAway { error_code: super::GoAwayErrorCode::InternalError, }); } From e676553672ba55319372087dda2e647db1c5db14 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 16:44:59 +0200 Subject: [PATCH 28/74] Docfix --- lib/src/libp2p/connection/yamux.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs 
b/lib/src/libp2p/connection/yamux.rs index ee0870c75c..25a061d44b 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -33,12 +33,12 @@ //! Call [`Yamux::incoming_data`] when data is available on the socket. This function parses //! the received data, updates the internal state machine, and possibly returns an //! [`IncomingDataDetail`]. -//! Call [`Yamux::extract_out`] when the remote is ready to accept more data. +//! Call [`Yamux::extract_next`] when the remote is ready to accept more data. //! //! The generic parameter of [`Yamux`] is an opaque "user data" associated to each substream. //! //! When [`Yamux::write`] is called, the buffer of data to send out is stored within the -//! [`Yamux`] object. This data will then be progressively returned by [`Yamux::extract_out`]. +//! [`Yamux`] object. This data will then be progressively returned by [`Yamux::extract_next`]. //! //! It is the responsibility of the user to enforce a bound to the amount of enqueued data, as //! the [`Yamux`] itself doesn't enforce any limit. Enforcing such a bound must be done based @@ -263,7 +263,7 @@ enum OutgoingGoAway { /// A `GoAway` frame has been queued into [`Yamux::outgoing`] in the past. Queued, - /// A `GoAway` frame has been extracted through [`Yamux::extract_out`]. + /// A `GoAway` frame has been extracted through [`Yamux::extract_next`]. Sent, } @@ -286,7 +286,7 @@ enum OutgoingSubstreamData { } /// Maximum number of simultaneous outgoing pings allowed. -const MAX_PINGS: usize = 100000; +pub const MAX_PINGS: usize = 100000; impl Yamux { /// Initializes a new Yamux state machine. @@ -744,13 +744,13 @@ impl Yamux { /// Returns `true` if [`Yamux::send_goaway`] has been called in the past. /// /// In other words, returns `true` if a `GoAway` frame has been either queued for sending - /// (and is available through [`Yamux::extract_out`]) or has already been sent out. 
+ /// (and is available through [`Yamux::extract_next`]) or has already been sent out. pub fn goaway_queued_or_sent(&self) -> bool { !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) } /// Returns `true` if [`Yamux::send_goaway`] has been called in the past and that this - /// `GoAway` frame has been extracted through [`Yamux::extract_out`]. + /// `GoAway` frame has been extracted through [`Yamux::extract_next`]. pub fn goaway_sent(&self) -> bool { matches!(self.inner.outgoing_goaway, OutgoingGoAway::Sent) } From 1de223c42bdc31ec95c3bf793bdef40577e5d3ef Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 16:45:09 +0200 Subject: [PATCH 29/74] Rustfmt --- lib/src/libp2p/connection/established/single_stream.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 0717f7a8b7..7b24cf77c6 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -506,7 +506,8 @@ where // Extract outgoing data that is buffered within yamux. 
// TODO: don't allocate an intermediary buffer, but instead pass them directly to the encryption let mut buffers = Vec::with_capacity(32); - while let Some(buffer) = self.inner.yamux.extract_next(unencrypted_bytes_to_extract) { + while let Some(buffer) = self.inner.yamux.extract_next(unencrypted_bytes_to_extract) + { let buffer = buffer.as_ref(); unencrypted_bytes_to_extract -= buffer.len(); buffers.push(buffer.to_vec()); // TODO: copy From 1c3a3c419ad90cd3053fc76236c2994431a0e400 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 17:32:09 +0200 Subject: [PATCH 30/74] Move the write queue to a separate module --- lib/src/libp2p/connection/yamux.rs | 186 ++++++------------ .../libp2p/connection/yamux/write_queue.rs | 83 ++++++++ 2 files changed, 141 insertions(+), 128 deletions(-) create mode 100644 lib/src/libp2p/connection/yamux/write_queue.rs diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 25a061d44b..0eb808997f 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -63,6 +63,7 @@ use rand_chacha::{rand_core::SeedableRng as _, ChaCha20Rng}; pub use header::GoAwayErrorCode; mod header; +mod write_queue; /// Name of the protocol, typically used when negotiated it using *multistream-select*. pub const PROTOCOL_NAME: &str = "/yamux/1.0.0"; @@ -163,13 +164,7 @@ enum SubstreamState { /// True if the writing side of the remote node is closed for this substream. remote_write_closed: bool, /// Buffer of buffers to be written out to the socket. - // TODO: is it a good idea to have an unbounded VecDeque? - // TODO: call shrink_to_fit from time to time? - // TODO: instead of storing `Vec`s, consider storing a generic `B` and let the user manually write a `B` to the output buffer - write_buffers: VecDeque>, - /// Number of bytes in `self.write_buffers[0]` has have already been written out to the - /// socket. 
- first_write_buffer_offset: usize, + write_queue: write_queue::WriteQueue, }, /// The substream has been reset, either locally or by the remote. Its entire purpose is to @@ -277,11 +272,7 @@ enum OutgoingSubstreamData { /// Data is coming from a substream in a reset state. Obsolete { /// Buffer of buffers to be written out to the socket. - write_buffers: VecDeque>, - - /// Number of bytes in `self.inner.write_buffers[0]` has have already been written out to - /// the socket. - first_write_buffer_offset: usize, + write_queue: write_queue::WriteQueue, }, } @@ -422,8 +413,7 @@ impl Yamux { allowed_window: DEFAULT_FRAME_SIZE, local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: false, - write_buffers: VecDeque::with_capacity(16), - first_write_buffer_offset: 0, + write_queue: write_queue::WriteQueue::new(), }, inbound: false, user_data, @@ -511,14 +501,11 @@ impl Yamux { SubstreamState::Reset => {} SubstreamState::Healthy { local_write_close: local_write, - write_buffers, - first_write_buffer_offset, + write_queue, .. } => { - debug_assert!(!write_buffers.is_empty() || *first_write_buffer_offset == 0); - if matches!(local_write, SubstreamStateLocalWrite::Open) { - write_buffers.push_back(data); + write_queue.push_back(data); } } } @@ -586,11 +573,7 @@ impl Yamux { .unwrap_or_else(|| panic!()) .state { - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. - } => write_buffers.iter().fold(0, |n, buf| n + buf.len()) - first_write_buffer_offset, + SubstreamState::Healthy { write_queue, .. } => write_queue.queued_bytes(), SubstreamState::Reset => 0, } } @@ -707,16 +690,9 @@ impl Yamux { data: data @ OutgoingSubstreamData::Healthy(_), .. }, - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. - }, + SubstreamState::Healthy { write_queue, .. 
}, ) if *data == OutgoingSubstreamData::Healthy(substream_id) => { - *data = OutgoingSubstreamData::Obsolete { - write_buffers, - first_write_buffer_offset, - }; + *data = OutgoingSubstreamData::Obsolete { write_queue }; } _ => {} } @@ -820,17 +796,13 @@ impl Yamux { SubstreamState::Healthy { local_write_close, remote_write_closed, - write_buffers, - first_write_buffer_offset, + write_queue, .. } => { debug_assert!( matches!(local_write_close, SubstreamStateLocalWrite::FinQueued) && *remote_write_closed - && (write_buffers.is_empty() // TODO: cumbersome - || (write_buffers.len() == 1 - && write_buffers[0].len() - <= *first_write_buffer_offset)) + && write_queue.is_empty() ); ( @@ -924,8 +896,7 @@ impl Yamux { SubstreamState::Healthy { remote_write_closed: remote_write_closed @ false, local_write_close, - write_buffers, - first_write_buffer_offset, + write_queue, .. }, .. @@ -934,10 +905,7 @@ impl Yamux { *remote_write_closed = true; if matches!(*local_write_close, SubstreamStateLocalWrite::FinQueued) - && (write_buffers.is_empty() // TODO: cumbersome - || (write_buffers.len() == 1 - && write_buffers[0].len() - <= *first_write_buffer_offset)) + && write_queue.is_empty() { let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); debug_assert!(_was_inserted); @@ -1119,20 +1087,13 @@ impl Yamux { data: data @ OutgoingSubstreamData::Healthy(_), .. }, - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. - }, + SubstreamState::Healthy { write_queue, .. }, ) if *data == OutgoingSubstreamData::Healthy(SubstreamId( *substream_id, )) => { - *data = OutgoingSubstreamData::Obsolete { - write_buffers, - first_write_buffer_offset, - }; + *data = OutgoingSubstreamData::Obsolete { write_queue }; } _ => {} } @@ -1197,18 +1158,11 @@ impl Yamux { data: data @ OutgoingSubstreamData::Healthy(_), .. }, - SubstreamState::Healthy { - write_buffers, - first_write_buffer_offset, - .. - }, + SubstreamState::Healthy { write_queue, .. 
}, ) if *data == OutgoingSubstreamData::Healthy(SubstreamId(stream_id)) => { - *data = OutgoingSubstreamData::Obsolete { - write_buffers, - first_write_buffer_offset, - }; + *data = OutgoingSubstreamData::Obsolete { write_queue }; } _ => {} } @@ -1460,7 +1414,7 @@ impl Yamux { let to_add = encoded_header_remains_to_write.to_vec(); *header_already_sent += u8::try_from(size_bytes).unwrap(); debug_assert!(*header_already_sent < 12); - return Some(either::Right(VecWithOffset(to_add, 0))); + return Some(either::Right(write_queue::VecWithOffset(to_add, 0))); } } @@ -1468,71 +1422,56 @@ impl Yamux { remaining_bytes: ref mut remain, ref mut data, } => { - let (write_buffers, first_write_buffer_offset, substream_id) = match data { + let (write_queue, substream_id) = match data { OutgoingSubstreamData::Healthy(id) => { - let substream = self.inner.substreams.get_mut(&id.0).unwrap(); if let SubstreamState::Healthy { - ref mut write_buffers, - ref mut first_write_buffer_offset, + ref mut write_queue, .. 
- } = &mut substream.state + } = &mut self.inner.substreams.get_mut(&id.0).unwrap().state { - (write_buffers, first_write_buffer_offset, Some(*id)) + (write_queue, Some(*id)) } else { unreachable!() } } OutgoingSubstreamData::Obsolete { - ref mut write_buffers, - ref mut first_write_buffer_offset, - } => (write_buffers, first_write_buffer_offset, None), + ref mut write_queue, + } => (write_queue, None), }; - let first_buf_avail = write_buffers[0].len() - *first_write_buffer_offset; - let out = if first_buf_avail <= remain.get() && first_buf_avail <= size_bytes { - let out = VecWithOffset( - write_buffers.pop_front().unwrap(), - *first_write_buffer_offset, - ); - *first_write_buffer_offset = 0; - let write_buffers_empty = write_buffers.is_empty(); - match NonZeroUsize::new(remain.get() - first_buf_avail) { - Some(r) => *remain = r, - None => self.inner.outgoing = Outgoing::Idle, - }; - if write_buffers_empty { - if let Some(id) = substream_id { - if let SubstreamState::Healthy { - local_write_close: SubstreamStateLocalWrite::FinQueued, - remote_write_closed: true, - .. - } = self.inner.substreams.get(&id.0).unwrap().state - { - let _was_inserted = self.inner.dead_substreams.insert(id.0); - debug_assert!(_was_inserted); - } + // We only reach here if `size_bytes` and `remain` are non-zero. + // Also, `write_queue` must always have a size >= `remain`. + // Consequently, `out` can also never be empty. + debug_assert!(write_queue.queued_bytes() >= remain.get()); + let out = write_queue.extract_some(cmp::min(remain.get(), size_bytes)); + debug_assert!(!out.as_ref().is_empty()); + + // Since we are sure that `write_queue` wasn't empty beforehand, if it is + // now empty it means that we have sent out all the queued data. If a `FIN` + // was received and queued in the past, the substream is now dead. 
+ if write_queue.is_empty() { + if let Some(id) = substream_id { + if let SubstreamState::Healthy { + local_write_close: SubstreamStateLocalWrite::FinQueued, + remote_write_closed: true, + .. + } = self.inner.substreams.get(&id.0).unwrap().state + { + let _was_inserted = self.inner.dead_substreams.insert(id.0); + debug_assert!(_was_inserted); } } - either::Right(out) - } else if remain.get() <= size_bytes { - let out = VecWithOffset( - write_buffers[0][*first_write_buffer_offset..][..remain.get()].to_vec(), - 0, - ); - *first_write_buffer_offset += remain.get(); - self.inner.outgoing = Outgoing::Idle; - either::Right(out) + } + + if let Some(still_some_remain) = + NonZeroUsize::new(remain.get() - out.as_ref().len()) + { + *remain = still_some_remain; } else { - let out = VecWithOffset( - write_buffers[0][*first_write_buffer_offset..][..size_bytes].to_vec(), - 0, - ); - *first_write_buffer_offset += size_bytes; - *remain = NonZeroUsize::new(remain.get() - size_bytes).unwrap(); - either::Right(out) - }; + self.inner.outgoing = Outgoing::Idle; + } - return Some(out); + return Some(either::Right(out)); } Outgoing::Idle => { @@ -1640,11 +1579,11 @@ impl Yamux { .iter_mut() .find(|(_, s)| match &s.state { SubstreamState::Healthy { - write_buffers, + write_queue, local_write_close: local_write, .. } => { - !write_buffers.is_empty() + !write_queue.is_empty() || matches!(local_write, SubstreamStateLocalWrite::FinDesired) } _ => false, @@ -1655,11 +1594,11 @@ impl Yamux { first_message_queued, allowed_window, local_write_close: local_write, - write_buffers, + write_queue, .. 
} = &mut sub.state { - let pending_len = write_buffers.iter().fold(0, |l, b| l + b.len()); + let pending_len = write_queue.queued_bytes(); let len_out = cmp::min( u32::try_from(pending_len).unwrap_or(u32::max_value()), u32::try_from(*allowed_window).unwrap_or(u32::max_value()), @@ -1736,8 +1675,7 @@ impl Yamux { allowed_window: DEFAULT_FRAME_SIZE + u64::from(extra_window), local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: data_frame_size == 0 && fin, - write_buffers: VecDeque::new(), - first_write_buffer_offset: 0, + write_queue: write_queue::WriteQueue::new(), }, inbound: true, user_data, @@ -1838,14 +1776,6 @@ where } } -#[derive(Clone)] -struct VecWithOffset(Vec, usize); -impl AsRef<[u8]> for VecWithOffset { - fn as_ref(&self) -> &[u8] { - &self.0[self.1..] - } -} - /// Identifier of a substream in the context of a connection. #[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, derive_more::From)] pub struct SubstreamId(NonZeroU32); diff --git a/lib/src/libp2p/connection/yamux/write_queue.rs b/lib/src/libp2p/connection/yamux/write_queue.rs new file mode 100644 index 0000000000..804660e33d --- /dev/null +++ b/lib/src/libp2p/connection/yamux/write_queue.rs @@ -0,0 +1,83 @@ +// Smoldot +// Copyright (C) 2023 Pierre Krieger +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use alloc::{collections::VecDeque, vec::Vec}; + +#[derive(Clone)] +pub struct VecWithOffset(pub Vec, pub usize); + +impl AsRef<[u8]> for VecWithOffset { + fn as_ref(&self) -> &[u8] { + &self.0[self.1..] + } +} + +// TODO: PartialEq/Eq?! +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WriteQueue { + /// Buffer of buffers to be written out. + // TODO: is it a good idea to have an unbounded VecDeque? + // TODO: call shrink_to_fit from time to time? + // TODO: instead of storing `Vec`s, consider storing a generic `B` and let the user manually write a `B` to the output buffer + write_buffers: VecDeque>, + /// Number of bytes in `self.write_buffers[0]` has have already been written out to the + /// socket. + first_write_buffer_offset: usize, +} + +impl WriteQueue { + pub fn new() -> Self { + WriteQueue { + write_buffers: VecDeque::with_capacity(16), + first_write_buffer_offset: 0, + } + } + + pub fn is_empty(&self) -> bool { + debug_assert!(!self.write_buffers.is_empty() || self.first_write_buffer_offset == 0); + self.write_buffers.is_empty() + } + + pub fn push_back(&mut self, data: Vec) { + debug_assert!(!self.write_buffers.is_empty() || self.first_write_buffer_offset == 0); + self.write_buffers.push_back(data); + } + + pub fn queued_bytes(&self) -> usize { + self.write_buffers.iter().fold(0, |n, buf| n + buf.len()) - self.first_write_buffer_offset + } + + pub fn extract_some(&mut self, max_size: usize) -> VecWithOffset { + let first_buf_avail = self.write_buffers[0].len() - self.first_write_buffer_offset; + + if first_buf_avail <= max_size { + let out = VecWithOffset( + self.write_buffers.pop_front().unwrap(), + self.first_write_buffer_offset, + ); + self.first_write_buffer_offset = 0; + out + } else { + let out = VecWithOffset( + self.write_buffers[0][self.first_write_buffer_offset..][..max_size].to_vec(), + 0, + ); + self.first_write_buffer_offset += max_size; + out + } + } +} From 78ae8cac28bfdcf19a27decbbfb9ab4ed7ade5bc Mon Sep 17 00:00:00 2001 From: Pierre 
Krieger Date: Tue, 4 Apr 2023 17:33:35 +0200 Subject: [PATCH 31/74] Doclinks --- lib/src/libp2p/connection/yamux.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 0eb808997f..b72b42c033 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -99,7 +99,8 @@ struct YamuxInner { /// that it is returned by [`Yamux::dead_substreams`]. dead_substreams: hashbrown::HashSet, - /// Number of substreams within [`Yamux::substreams`] whose [`Substream::inbound`] is `true`. + /// Number of substreams within [`YamuxInner::substreams`] whose [`Substream::inbound`] is + /// `true`. num_inbound: usize, /// `Some` if a `GoAway` frame has been received in the past. @@ -195,7 +196,7 @@ enum Incoming { /// A header referring to a new substream has been received. The reception of any further data /// is blocked waiting for the API user to accept or reject this substream. /// - /// Note that [`Yamux::outgoing`] must always be [`Outgoing::Idle`], in order to give the + /// Note that [`YamuxInner::outgoing`] must always be [`Outgoing::Idle`], in order to give the /// possibility to send back a RST frame for the new substream. PendingIncomingSubstream { /// Identifier of the pending substream. @@ -252,10 +253,10 @@ enum OutgoingGoAway { NotRequired, /// API user has asked to send a `GoAway` frame. This frame hasn't been queued into - /// [`Yamux::outgoing`] yet. + /// [`YamuxInner::outgoing`] yet. Required(GoAwayErrorCode), - /// A `GoAway` frame has been queued into [`Yamux::outgoing`] in the past. + /// A `GoAway` frame has been queued into [`YamuxInner::outgoing`] in the past. Queued, /// A `GoAway` frame has been extracted through [`Yamux::extract_next`]. 
From 9f80f10cce4a6abebf8a77c59448f6754372983c Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 20:06:24 +0200 Subject: [PATCH 32/74] Add `InvalidInboundStreamId` --- lib/src/libp2p/connection/yamux.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index b72b42c033..4a67d749df 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -63,6 +63,7 @@ use rand_chacha::{rand_core::SeedableRng as _, ChaCha20Rng}; pub use header::GoAwayErrorCode; mod header; +mod tests; mod write_queue; /// Name of the protocol, typically used when negotiated it using *multistream-select*. @@ -1193,6 +1194,16 @@ impl Yamux { length, .. } => { + // The initiator should only allocate uneven substream IDs, and the + // other side only even IDs. We don't know anymore whether we're + // initiator at this point, but we can compare with the even-ness of + // the IDs that we allocate locally. + if (self.inner.next_outbound_substream.get() % 2) + == (stream_id.get() % 2) + { + return Err(Error::InvalidInboundStreamId(stream_id)); + } + // Remote has sent a SYN flag. A new substream is to be opened. match self.inner.substreams.get(&stream_id) { Some(Substream { @@ -1863,6 +1874,8 @@ pub enum IncomingDataDetail { pub enum Error { /// Failed to decode an incoming Yamux header. HeaderDecode(header::YamuxHeaderDecodeError), + /// Received a SYN flag with a substream ID that is of the same side as the local side. + InvalidInboundStreamId(NonZeroU32), /// Received a SYN flag with a known substream ID. 
#[display(fmt = "Received a SYN flag with a known substream ID")] UnexpectedSyn(NonZeroU32), From 93abd839fdbd06233b1a76ad8beae458e4fa7635 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 20:09:53 +0200 Subject: [PATCH 33/74] Some tests --- lib/src/libp2p/connection/yamux/tests.rs | 177 +++++++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 lib/src/libp2p/connection/yamux/tests.rs diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs new file mode 100644 index 0000000000..2c4ddccdab --- /dev/null +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -0,0 +1,177 @@ +// Smoldot +// Copyright (C) 2023 Pierre Krieger +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#![cfg(test)] + +use super::{Config, Error, IncomingDataDetail, Yamux}; + +#[test] +fn not_immediate_data_send_when_opening_substream() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let _ = yamux.open_substream(()); + assert!(yamux.extract_next(usize::max_value()).is_none()) +} + +#[test] +fn syn_sent() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"foo".to_vec()); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + assert!(output.starts_with(&[0, 0, 0, 1])); + assert!(output.ends_with(&[0, 0, 0, 3, 102, 111, 111])); +} + +#[test] +fn ack_sent() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let mut opened_substream = None; + + { + let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + let outcome = yamux.incoming_data(&data[cursor..]).unwrap(); + yamux = outcome.yamux; + cursor += outcome.bytes_read; + match outcome.detail { + Some(IncomingDataDetail::IncomingSubstream) => { + assert!(opened_substream.is_none()); + opened_substream = Some(yamux.accept_pending_substream(())) + } + _ => {} + } + } + } + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + assert_eq!( + output, + &[0, 0, 0, 2, 0, 0, 0, 84, 0, 0, 0, 3, 102, 111, 111] + ); +} + +#[test] +fn invalid_inbound_substream_id() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let data = [0, 0, 0, 1, 0, 0, 0, 83, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + 
Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::InvalidInboundStreamId(v)) if v.get() == 83 => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn substream_opened_twice() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let data = [ + 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, + ]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(Error::UnexpectedSyn(v)) if v.get() == 84 => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn substream_opened_back_after_rst() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let data = [ + 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, + 0, 84, 0, 0, 0, 0, + ]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(_) => panic!(), + } + } + + // Test success. 
+} From 29a54baf31e0619c3ac1b74753502fd5e5a506af Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 20:31:20 +0200 Subject: [PATCH 34/74] Fixes and more tests --- lib/src/libp2p/connection/yamux.rs | 8 +- lib/src/libp2p/connection/yamux/tests.rs | 107 +++++++++++++++++++++++ wasm-node/CHANGELOG.md | 5 ++ 3 files changed, 119 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 4a67d749df..74dbdfba3b 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1270,6 +1270,10 @@ impl Yamux { continue; } + if is_data && u64::from(length) > DEFAULT_FRAME_SIZE { + return Err(Error::CreditsExceeded); + } + self.inner.incoming = Incoming::PendingIncomingSubstream { substream_id: SubstreamId(stream_id), extra_window: if !is_data { length } else { 0 }, @@ -1676,13 +1680,15 @@ impl Yamux { data_frame_size, fin, } => { + debug_assert!(u64::from(data_frame_size) <= DEFAULT_FRAME_SIZE); + let _was_before = self.inner.substreams.insert( substream_id.0, Substream { state: SubstreamState::Healthy { first_message_queued: false, remote_syn_acked: true, - remote_allowed_window: DEFAULT_FRAME_SIZE, + remote_allowed_window: DEFAULT_FRAME_SIZE - u64::from(data_frame_size), remote_window_pending_increase: 0, allowed_window: DEFAULT_FRAME_SIZE + u64::from(extra_window), local_write_close: SubstreamStateLocalWrite::Open, diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 2c4ddccdab..7739ca9d83 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -175,3 +175,110 @@ fn substream_opened_back_after_rst() { // Test success. } + +#[test] +fn credits_exceeded_checked_before_data_is_received() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with a SYN flag, then data frame with a ton of data. 
+ // Note that the data isn't actually there. We only *announce* that we're going to send a ton + // of data. The error should happen anyway, if the data isn't here, as we don't want to buffer + // data that exceeds the credits limit. + let data = [ + 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84, 5, 0, 0, 0, 0xff, 0xff, 0xff, + ]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(Error::CreditsExceeded) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn credits_exceeded_checked_at_the_syn() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with a SYN flag and a ton of data. + // Note that the data isn't actually there. We only *announce* that we're going to send a ton + // of data. The error should happen anyway, if the data isn't here, as we don't want to buffer + // data that exceeds the credits limit. + let data = [0, 0, 0, 1, 0, 0, 0, 84, 5, 0, 0, 0, 0xff, 0xff, 0xff]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(Error::CreditsExceeded) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn data_coming_with_the_syn_taken_into_account() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with a SYN flag and 200kiB of data, followed with data frame with 100kiB of + // data. 
The limit is 256kiB, so the combination of both exceeds the limit. + let mut data = [0, 0, 0, 1, 0, 0, 0, 84].to_vec(); + data.extend_from_slice(&(200 * 1024u32).to_be_bytes()[..]); + data.extend((0..200 * 1024).map(|_| 0u8)); + data.extend_from_slice(&[0, 0, 0, 0, 0, 0, 0, 84]); + data.extend_from_slice(&(100 * 1024u32).to_be_bytes()[..]); + data.extend((0..100 * 1024).map(|_| 0u8)); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(Error::CreditsExceeded) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} diff --git a/wasm-node/CHANGELOG.md b/wasm-node/CHANGELOG.md index f33f310653..5ef1bb06ec 100644 --- a/wasm-node/CHANGELOG.md +++ b/wasm-node/CHANGELOG.md @@ -6,6 +6,11 @@ - Removed support for the `ls` message in the multistream-select protocol, in accordance with the rest of the libp2p ecosystem. This message was in practice never used, and removing support for it simplifies the implementation. ([#379](https://github.com/smol-dot/smoldot/pull/379)) +### Fixed + +- Properly check whether Yamux substream IDs allocated by the remote are valid. +- Fix the size of the data of Yamux frames with the `SYN` flag not being verified against the allowed credits. 
+ ## 1.0.1 - 2023-03-29 ### Changed From be99ed99f6693b123404540d7f44b7841d747471 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 20:32:32 +0200 Subject: [PATCH 35/74] Fix test --- lib/src/libp2p/connection/yamux/tests.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 7739ca9d83..eb89324522 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -78,6 +78,8 @@ fn ack_sent() { } } + yamux.write(opened_substream.unwrap(), b"foo".to_vec()); + let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { output.extend_from_slice(out.as_ref()); From 77284f57d6781d0c288b5bb4225443bb05d33a48 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 4 Apr 2023 20:38:19 +0200 Subject: [PATCH 36/74] Fix test --- lib/src/libp2p/connection/yamux/tests.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index eb89324522..cd31b1f62e 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -155,6 +155,7 @@ fn substream_opened_back_after_rst() { randomness_seed: [0; 32], }); + // One SYN frame, one RST frame, one SYN frame again. All using the same substream ID. let data = [ 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, @@ -170,12 +171,18 @@ fn substream_opened_back_after_rst() { if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { yamux.accept_pending_substream(()); } + + let dead_substream = yamux.dead_substreams().next().map(|(s, ..)| s); + if let Some(substream_id) = dead_substream { + yamux.remove_dead_substream(substream_id); + } } Err(_) => panic!(), } } // Test success. 
+ assert_eq!(cursor, data.len()); } #[test] From ca3be4613e2ba04741f0b8a49608f9ea393b0181 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 08:51:34 +0200 Subject: [PATCH 37/74] More tests --- lib/src/libp2p/connection/yamux/tests.rs | 117 +++++++++++++++++++++++ 1 file changed, 117 insertions(+) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index cd31b1f62e..fdf99f8b25 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -91,6 +91,36 @@ fn ack_sent() { ); } +#[test] +fn rst_sent_when_rejecting() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + { + let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + let outcome = yamux.incoming_data(&data[cursor..]).unwrap(); + yamux = outcome.yamux; + cursor += outcome.bytes_read; + match outcome.detail { + Some(IncomingDataDetail::IncomingSubstream) => yamux.reject_pending_substream(), + _ => {} + } + } + } + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + assert!(output.ends_with(&[0, 8, 0, 0, 0, 84, 0, 0, 0, 0])); +} + #[test] fn invalid_inbound_substream_id() { let mut yamux = Yamux::<()>::new(Config { @@ -291,3 +321,90 @@ fn data_coming_with_the_syn_taken_into_account() { // Test failed. panic!() } + +#[test] +fn reserve_window_works() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with a SYN flag and 200kiB of data, followed with data frame with 100kiB of + // data. The limit is 256kiB, so the combination of both exceeds the limit. 
+ let mut data = [0, 0, 0, 1, 0, 0, 0, 84].to_vec(); + data.extend_from_slice(&(200 * 1024u32).to_be_bytes()[..]); + data.extend((0..200 * 1024).map(|_| 0u8)); + data.extend_from_slice(&[0, 0, 0, 0, 0, 0, 0, 84]); + data.extend_from_slice(&(100 * 1024u32).to_be_bytes()[..]); + data.extend((0..100 * 1024).map(|_| 0u8)); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + let substream_id = yamux.accept_pending_substream(()); + + // `reserve_window` doesn't immediately raise the limit, so we flush the + // output buffer in order to obtain a window frame. + yamux.reserve_window(substream_id, 100 * 1024); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + // `[0, 1, 144, 0]` is 102400 + assert_eq!(output, &[0, 1, 0, 2, 0, 0, 0, 84, 0, 1, 144, 0]); + } + } + Err(_) => panic!(), + } + } + + // Test succeeded. + assert_eq!(cursor, data.len()); +} + +#[test] +fn reserve_window_doesnt_immediately_raise_limit() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with a SYN flag and 200kiB of data, followed with data frame with 100kiB of + // data. The limit is 256kiB, so the combination of both exceeds the limit. 
+ let mut data = [0, 0, 0, 1, 0, 0, 0, 84].to_vec(); + data.extend_from_slice(&(200 * 1024u32).to_be_bytes()[..]); + data.extend((0..200 * 1024).map(|_| 0u8)); + data.extend_from_slice(&[0, 0, 0, 0, 0, 0, 0, 84]); + data.extend_from_slice(&(100 * 1024u32).to_be_bytes()[..]); + data.extend((0..100 * 1024).map(|_| 0u8)); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + let substream_id = yamux.accept_pending_substream(()); + + // `reserve_window` shouldn't immediately raise the limit. + yamux.reserve_window(substream_id, 100 * 1024); + } + } + Err(Error::CreditsExceeded) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} From 6b0fced888dff403572c5e449e88681ec56c81ec Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 09:17:35 +0200 Subject: [PATCH 38/74] Remove `reserve_window` function --- .../connection/established/single_stream.rs | 5 +- lib/src/libp2p/connection/yamux.rs | 46 +++++++------------ 2 files changed, 19 insertions(+), 32 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 7b24cf77c6..3d08035fc8 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -811,11 +811,12 @@ where ))); // TODO: we add some bytes due to the length prefix, this is a bit hacky as we should ask this information from the substream - self.inner.yamux.reserve_window( + self.inner.yamux.add_remote_window( substream_id, u64::try_from(self.inner.request_protocols[protocol_index].max_response_size) .unwrap_or(u64::max_value()) - .saturating_add(64), + .saturating_add(64) + .saturating_sub(yamux::NEW_SUBSTREAMS_FRAME_SIZE), ); 
Ok(SubstreamId(SubstreamIdInner::SingleStream(substream_id))) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 74dbdfba3b..f1a6fbe331 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -410,9 +410,9 @@ impl Yamux { state: SubstreamState::Healthy { first_message_queued: false, remote_syn_acked: false, - remote_allowed_window: DEFAULT_FRAME_SIZE, + remote_allowed_window: NEW_SUBSTREAMS_FRAME_SIZE, remote_window_pending_increase: 0, - allowed_window: DEFAULT_FRAME_SIZE, + allowed_window: NEW_SUBSTREAMS_FRAME_SIZE, local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: false, write_queue: write_queue::WriteQueue::new(), @@ -516,6 +516,14 @@ impl Yamux { /// Adds `bytes` to the number of bytes the remote is allowed to send at once in the next /// packet. /// + /// > **Note**: When a substream has just been opened or accepted, it starts with an initial + /// > window of [`NEW_SUBSTREAMS_FRAME_SIZE`]. + /// + /// > **Note**: It is only possible to add more bytes to the window and not set or reduce this + /// > number of bytes, and it is also not possible to obtain the number of bytes the + /// > remote is allowed. That's because it would be ambiguous whether bytes possibly + /// > in the receive queue should be counted or not. + /// /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. @@ -536,29 +544,6 @@ impl Yamux { } } - /// Similar to [`Yamux::add_remote_window`], but sets the number of allowed bytes to be at - /// least this value. In other words, if this method was to be twice with the same parameter, - /// the second call would have no effect. - /// - /// # Panic - /// - /// Panics if the [`SubstreamId`] is invalid. - /// - pub fn reserve_window(&mut self, substream_id: SubstreamId, bytes: u64) { - if let SubstreamState::Healthy { - remote_window_pending_increase, - .. 
- } = &mut self - .inner - .substreams - .get_mut(&substream_id.0) - .unwrap_or_else(|| panic!()) - .state - { - *remote_window_pending_increase = cmp::max(*remote_window_pending_increase, bytes); - } - } - /// Returns the number of bytes queued for writing on this substream. /// /// Returns 0 if the substream is in a reset state. @@ -1270,7 +1255,7 @@ impl Yamux { continue; } - if is_data && u64::from(length) > DEFAULT_FRAME_SIZE { + if is_data && u64::from(length) > NEW_SUBSTREAMS_FRAME_SIZE { return Err(Error::CreditsExceeded); } @@ -1680,7 +1665,7 @@ impl Yamux { data_frame_size, fin, } => { - debug_assert!(u64::from(data_frame_size) <= DEFAULT_FRAME_SIZE); + debug_assert!(u64::from(data_frame_size) <= NEW_SUBSTREAMS_FRAME_SIZE); let _was_before = self.inner.substreams.insert( substream_id.0, @@ -1688,9 +1673,10 @@ impl Yamux { state: SubstreamState::Healthy { first_message_queued: false, remote_syn_acked: true, - remote_allowed_window: DEFAULT_FRAME_SIZE - u64::from(data_frame_size), + remote_allowed_window: NEW_SUBSTREAMS_FRAME_SIZE + - u64::from(data_frame_size), remote_window_pending_increase: 0, - allowed_window: DEFAULT_FRAME_SIZE + u64::from(extra_window), + allowed_window: NEW_SUBSTREAMS_FRAME_SIZE + u64::from(extra_window), local_write_close: SubstreamStateLocalWrite::Open, remote_write_closed: data_frame_size == 0 && fin, write_queue: write_queue::WriteQueue::new(), @@ -1905,4 +1891,4 @@ pub enum DeadSubstreamTy { } /// By default, all new substreams have this implicit window size. 
-const DEFAULT_FRAME_SIZE: u64 = 256 * 1024; +pub const NEW_SUBSTREAMS_FRAME_SIZE: u64 = 256 * 1024; From a42223dbd863eeec1e4677e77aeb4840e19ae032 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 09:23:36 +0200 Subject: [PATCH 39/74] More testing --- lib/src/libp2p/connection/yamux/tests.rs | 39 ++++++++++++++++++++---- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index fdf99f8b25..57d6462cbc 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -19,6 +19,33 @@ use super::{Config, Error, IncomingDataDetail, Yamux}; +#[test] +fn bad_header_data() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + { + let data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::HeaderDecode(_)) => return, + Err(_) => panic!(), + } + } + } + + // Test failed. + panic!() +} + #[test] fn not_immediate_data_send_when_opening_substream() { let mut yamux = Yamux::new(Config { @@ -323,7 +350,7 @@ fn data_coming_with_the_syn_taken_into_account() { } #[test] -fn reserve_window_works() { +fn add_remote_window_works() { let mut yamux = Yamux::<()>::new(Config { capacity: 0, is_initiator: true, @@ -349,9 +376,9 @@ fn reserve_window_works() { if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { let substream_id = yamux.accept_pending_substream(()); - // `reserve_window` doesn't immediately raise the limit, so we flush the + // `add_remote_window` doesn't immediately raise the limit, so we flush the // output buffer in order to obtain a window frame. 
- yamux.reserve_window(substream_id, 100 * 1024); + yamux.add_remote_window(substream_id, 100 * 1024); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -370,7 +397,7 @@ fn reserve_window_works() { } #[test] -fn reserve_window_doesnt_immediately_raise_limit() { +fn add_remote_window_doesnt_immediately_raise_limit() { let mut yamux = Yamux::<()>::new(Config { capacity: 0, is_initiator: true, @@ -396,8 +423,8 @@ fn reserve_window_doesnt_immediately_raise_limit() { if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { let substream_id = yamux.accept_pending_substream(()); - // `reserve_window` shouldn't immediately raise the limit. - yamux.reserve_window(substream_id, 100 * 1024); + // `add_remote_window` shouldn't immediately raise the limit. + yamux.add_remote_window(substream_id, 100 * 1024); } } Err(Error::CreditsExceeded) => return, From c37ab7d366fe83398ed7d28503439ced40322c2f Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 09:29:51 +0200 Subject: [PATCH 40/74] Bugfix and more test --- lib/src/libp2p/connection/yamux.rs | 2 +- lib/src/libp2p/connection/yamux/tests.rs | 26 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index f1a6fbe331..cf73ddaa3b 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1412,7 +1412,7 @@ impl Yamux { }; return Some(either::Left(out)); } else { - let to_add = encoded_header_remains_to_write.to_vec(); + let to_add = encoded_header_remains_to_write[..size_bytes].to_vec(); *header_already_sent += u8::try_from(size_bytes).unwrap(); debug_assert!(*header_already_sent < 12); return Some(either::Right(write_queue::VecWithOffset(to_add, 0))); diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 57d6462cbc..888fc29a81 100644 --- 
a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -242,6 +242,32 @@ fn substream_opened_back_after_rst() { assert_eq!(cursor, data.len()); } +#[test] +fn multiple_writes_combined_into_one() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let substream_id = yamux.open_substream(()); + + // Write multiple times. All these writes should be combined into a single data frame. + yamux.write(substream_id, b"aaaa".to_vec()); + yamux.write(substream_id, b"cc".to_vec()); + yamux.write(substream_id, b"bbbbbb".to_vec()); + + let mut output = Vec::new(); + // We read 7 bytes at a time, in order to land in-between the buffers. + while let Some(out) = yamux.extract_next(7) { + assert!(out.as_ref().len() <= 7); + output.extend_from_slice(out.as_ref()); + } + + assert!(output.starts_with(&[0, 0, 0, 1])); + assert!(output.ends_with(&[0, 0, 0, 12, 97, 97, 97, 97, 99, 99, 98, 98, 98, 98, 98, 98])); +} + #[test] fn credits_exceeded_checked_before_data_is_received() { let mut yamux = Yamux::<()>::new(Config { From 4f918263abba7c4585722cb9fe676f8e529240b0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 09:41:56 +0200 Subject: [PATCH 41/74] More tests --- lib/src/libp2p/connection/yamux.rs | 11 +-- lib/src/libp2p/connection/yamux/tests.rs | 95 ++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 5 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index cf73ddaa3b..48431eedc2 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -498,17 +498,18 @@ impl Yamux { .inner .substreams .get_mut(&substream_id.0) - .unwrap_or_else(|| panic!()); + .unwrap_or_else(|| panic!("invalid substream")); match &mut substream.state { SubstreamState::Reset => {} SubstreamState::Healthy { - local_write_close: local_write, + local_write_close: SubstreamStateLocalWrite::Open, write_queue, .. 
} => { - if matches!(local_write, SubstreamStateLocalWrite::Open) { - write_queue.push_back(data); - } + write_queue.push_back(data); + } + SubstreamState::Healthy { .. } => { + panic!("write after close") } } } diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 888fc29a81..ddea4f54e0 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -78,6 +78,62 @@ fn syn_sent() { assert!(output.ends_with(&[0, 0, 0, 3, 102, 111, 111])); } +#[test] +fn extract_bytes_one_by_one() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"foo".to_vec()); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(1) { + assert_eq!(out.as_ref().len(), 1); + output.extend_from_slice(out.as_ref()); + } + + assert!(output.starts_with(&[0, 0, 0, 1])); + assert!(output.ends_with(&[0, 0, 0, 3, 102, 111, 111])); +} + +#[test] +fn inject_bytes_one_by_one() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 5, 255, 255, 255]; + let mut cursor = 0; + + while cursor < data.len() { + let outcome = yamux.incoming_data(&data[cursor..][..1]).unwrap(); + yamux = outcome.yamux; + assert_eq!(outcome.bytes_read, 1); + + match outcome.detail { + Some(IncomingDataDetail::IncomingSubstream) => { + assert_eq!(cursor, 11); // We've read 12 bytes but `cursor` is still 11 + yamux.accept_pending_substream(()); + } + Some(IncomingDataDetail::DataFrame { start_offset, .. 
}) => { + assert_eq!(start_offset, 0); + assert!(cursor >= 12); + assert_eq!(data[cursor], 255); + } + _ => {} + } + + cursor += 1; + } + + assert_eq!(cursor, data.len()); +} + #[test] fn ack_sent() { let mut yamux = Yamux::new(Config { @@ -268,6 +324,45 @@ fn multiple_writes_combined_into_one() { assert!(output.ends_with(&[0, 0, 0, 12, 97, 97, 97, 97, 99, 99, 98, 98, 98, 98, 98, 98])); } +#[test] +fn close_before_syn_sent() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"foo".to_vec()); + yamux.close(substream_id); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + assert!(output.starts_with(&[0, 0, 0, 1 | 4])); + assert!(output.ends_with(&[0, 0, 0, 3, 102, 111, 111])); +} + +#[test] +#[should_panic = "write after close"] +fn write_after_close_illegal() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"foo".to_vec()); + assert!(yamux.can_send(substream_id)); + yamux.close(substream_id); + assert!(!yamux.can_send(substream_id)); + + yamux.write(substream_id, b"test".to_vec()); +} + #[test] fn credits_exceeded_checked_before_data_is_received() { let mut yamux = Yamux::<()>::new(Config { From 3112fb45f3c446808e256091c0b9b6bc92f3dd77 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 09:53:29 +0200 Subject: [PATCH 42/74] More tests and bugfix --- lib/src/libp2p/connection/yamux.rs | 4 +- lib/src/libp2p/connection/yamux/tests.rs | 70 ++++++++++++++++++++++++ wasm-node/CHANGELOG.md | 1 + 3 files changed, 74 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 48431eedc2..5b796e3fa5 100644 --- 
a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1583,9 +1583,10 @@ impl Yamux { SubstreamState::Healthy { write_queue, local_write_close: local_write, + allowed_window, .. } => { - !write_queue.is_empty() + (*allowed_window != 0 && !write_queue.is_empty()) || matches!(local_write, SubstreamStateLocalWrite::FinDesired) } _ => false, @@ -1605,6 +1606,7 @@ impl Yamux { u32::try_from(pending_len).unwrap_or(u32::max_value()), u32::try_from(*allowed_window).unwrap_or(u32::max_value()), ); + debug_assert_ne!(len_out, 0); let len_out_usize = usize::try_from(len_out).unwrap(); *allowed_window -= u64::from(len_out); let syn_ack_flag = !*first_message_queued; diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index ddea4f54e0..6e95399925 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -556,3 +556,73 @@ fn add_remote_window_doesnt_immediately_raise_limit() { // Test failed. panic!() } + +#[test] +fn remote_default_window_respected() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, vec![255; 300 * 1024]); // Exceeds default limit. + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + if output.len() >= 50 { + panic!("{:?}", out.as_ref().len()) + } + output.extend_from_slice(out.as_ref()); + } + + assert!(output.starts_with(&[0, 0, 0, 1])); + assert_eq!(&output[8..12], &[0, 4, 0, 0]); // 256 * 1024 + assert_eq!(output.len(), 12 + 256 * 1024); +} + +#[test] +fn remote_window_frames_respected() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Window frame with a SYN flag and 5 bytes of window. 
+ let data = [0, 1, 0, 1, 0, 0, 0, 84, 0, 0, 0, 5]; + + let mut accepted_substream = None; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + assert!(accepted_substream.is_none()); + accepted_substream = Some(yamux.accept_pending_substream(())); + } + } + Err(_) => panic!(), + } + } + + let substream_id = accepted_substream.unwrap(); + + yamux.write(substream_id, vec![255; 300 * 1024]); // Exceeds default limit. + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + if output.len() >= 50 { + panic!("{:?}", out.as_ref().len()) + } + output.extend_from_slice(out.as_ref()); + } + + assert!(output.starts_with(&[0, 0, 0, 2])); + assert_eq!(&output[8..12], &[0, 4, 0, 5]); // 256 * 1024 + 5 + assert_eq!(output.len(), 12 + 256 * 1024 + 5); +} diff --git a/wasm-node/CHANGELOG.md b/wasm-node/CHANGELOG.md index 5ef1bb06ec..3045b911db 100644 --- a/wasm-node/CHANGELOG.md +++ b/wasm-node/CHANGELOG.md @@ -10,6 +10,7 @@ - Properly check whether Yamux substream IDs allocated by the remote are valid. - Fix the size of the data of Yamux frames with the `SYN` flag not being verified against the allowed credits. +- Fix Yamux repeatedly sending empty data frames when the allowed window size is 0. 
## 1.0.1 - 2023-03-29 From 1959613a2fc8dd5ff0cb74546755b8a4c35de35a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 10:06:29 +0200 Subject: [PATCH 43/74] More tests --- lib/src/libp2p/connection/yamux/tests.rs | 132 +++++++++++++++++++++++ 1 file changed, 132 insertions(+) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 6e95399925..96f6897d45 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -626,3 +626,135 @@ fn remote_window_frames_respected() { assert_eq!(&output[8..12], &[0, 4, 0, 5]); // 256 * 1024 + 5 assert_eq!(output.len(), 12 + 256 * 1024 + 5); } + +#[test] +fn write_after_fin() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with SYN|FIN flags, then data frame again. + let data = [ + 0, 0, 0, 5, 0, 0, 0, 84, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84, 0, 0, 0, 2, 0, 0, + ]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(Error::WriteAfterFin) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn write_after_fin_even_with_empty_frame() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with SYN|FIN flags, then empty data frame. 
+ let data = [ + 0, 0, 0, 5, 0, 0, 0, 84, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84, 0, 0, 0, 0, + ]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(Error::WriteAfterFin) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn window_frame_with_fin_after_fin() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with SYN|FIN flags, then window frame with FIN flag. + // The spec is really ambiguous about whether post-FIN window frames must have a FIN flag as + // well, so when in doubt we accept it. + let data = [ + 0, 0, 0, 5, 0, 0, 0, 84, 0, 0, 0, 2, 0, 0, 0, 1, 0, 4, 0, 0, 0, 84, 0, 0, 0, 5, + ]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(_) => panic!(), + } + } + + assert_eq!(cursor, data.len()); +} + +#[test] +fn window_frame_without_fin_after_fin() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + }); + + // Data frame with SYN|FIN flags, then window frame without FIN flag. + // The spec is really ambiguous about whether post-FIN window frames must have a FIN flag as + // well, so when in doubt we accept it. 
+ let data = [ + 0, 0, 0, 5, 0, 0, 0, 84, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 84, 0, 0, 0, 5, + ]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + yamux.accept_pending_substream(()); + } + } + Err(_) => panic!(), + } + } + + assert_eq!(cursor, data.len()); +} From 0274e35a6707e6bfa78ec04c92948bc2648986f2 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 10:23:46 +0200 Subject: [PATCH 44/74] Add `max_simultaneous_rst_substreams` config option --- .../connection/established/single_stream.rs | 1 + lib/src/libp2p/connection/yamux.rs | 69 ++++++++----------- lib/src/libp2p/connection/yamux/tests.rs | 60 ++++++++++++++++ 3 files changed, 89 insertions(+), 41 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 3d08035fc8..7e7a1a7d2d 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -1116,6 +1116,7 @@ impl ConnectionPrototype { is_initiator: self.encryption.is_initiator(), capacity: 64, // TODO: ? randomness_seed: randomness.sample(rand::distributions::Standard), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let outgoing_pings = yamux.open_substream(Some(substream::Substream::ping_out( diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 5b796e3fa5..c0b55ddc30 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -82,6 +82,13 @@ pub struct Config { /// Seed used for the randomness. Used to avoid HashDoS attack and determines the order in /// which the data on substreams is sent out. 
pub randomness_seed: [u8; 32], + + /// When the remote sends a substream, and this substream gets rejected by the API user, some + /// data needs to be sent out. However, the remote could refuse reading any additional data + /// and continue sending new substream requests, thus increasing the local buffer size + /// indefinitely. In order to protect against this attack, there exists a maximum number of + /// queued substream rejections after which the connection will be shut down abruptly. + pub max_simultaneous_rst_substreams: NonZeroUsize, } pub struct Yamux { @@ -130,9 +137,12 @@ struct YamuxInner { pings_waiting_reply: hashbrown::HashSet, /// List of substream IDs that have been reset locally. For each entry, a RST header should - /// be sent to the remote and the entry removed. + /// be sent to the remote. rsts_to_send: VecDeque, + /// See [`Config::max_simultaneous_rst_substreams`]. + max_simultaneous_rst_substreams: NonZeroUsize, + /// Source of randomness used for various purposes. randomness: ChaCha20Rng, } @@ -309,7 +319,8 @@ impl Yamux { pings_to_send: 0, // We leave the initial capacity at 0, as it is likely that no ping is sent at all. pings_waiting_reply: hashbrown::HashSet::with_hasher(Default::default()), - rsts_to_send: VecDeque::with_capacity(config.capacity), + rsts_to_send: VecDeque::with_capacity(4), + max_simultaneous_rst_substreams: config.max_simultaneous_rst_substreams, randomness, }), } @@ -648,6 +659,9 @@ impl Yamux { .unwrap_or_else(|| panic!()) .state { + // Note that we intentionally don't check the size against + // `max_simultaneous_rst_substreams`, as locally-emitted RST frames aren't the + // remote's fault. self.inner.rsts_to_send.push_back(substream_id.0); } // TODO: else { panic!() } ?! @@ -1213,14 +1227,14 @@ impl Yamux { None => {} } - // When receiving a new substream, the outgoing state must always be - // `Outgoing::Idle`, in order to potentially queue the substream - // rejection message later. 
- // If it is not the case, we simply leave the header there and prevent - // any further data from being read. - // TODO: could deadlock if the write buffer is very small - if !matches!(self.inner.outgoing, Outgoing::Idle) { - break; + // When receiving a new substream, we might have to potentially queue + // a substream rejection message later. + // In order to ensure that there is enough space in `rsts_to_send`, + // we check it against the limit now. + if self.inner.rsts_to_send.len() + >= self.inner.max_simultaneous_rst_substreams.get() + { + return Err(Error::MaxSimultaneousRstSubstreamsExceeded); } let is_data = @@ -1229,19 +1243,7 @@ impl Yamux { // If we have queued or sent a GoAway frame, then the substream is // automatically rejected. if !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) { - // Send the `RST` frame. - self.inner.outgoing = Outgoing::Header { - header: header::DecodedYamuxHeader::Window { - syn: false, - ack: false, - fin: false, - rst: true, - stream_id, - length: 0, - }, - header_already_sent: 0, - substream_data_frame: None, - }; + self.inner.rsts_to_send.push_back(stream_id); self.inner.incoming = if !is_data { Incoming::Header(arrayvec::ArrayVec::new()) @@ -1723,11 +1725,6 @@ impl Yamux { /// Panics if no incoming substream is currently pending. /// pub fn reject_pending_substream(&mut self) { - // Implementation note: the rejection mechanism could alternatively be implemented by - // queuing the substream rejection, rather than immediately putting it in `self.inner.outgoing`. - // However, this could open a DoS attack vector, as the remote could send a huge number - // of substream open request which would inevitably increase the memory consumption of the - // local node. 
match self.inner.incoming { Incoming::PendingIncomingSubstream { substream_id, @@ -1741,19 +1738,7 @@ impl Yamux { fin, }; - debug_assert!(matches!(self.inner.outgoing, Outgoing::Idle)); - self.inner.outgoing = Outgoing::Header { - header: header::DecodedYamuxHeader::Window { - syn: false, - ack: false, - fin: false, - rst: true, - stream_id: substream_id.0, - length: 0, - }, - header_already_sent: 0, - substream_data_frame: None, - }; + self.inner.rsts_to_send.push_back(substream_id.0); } _ => panic!(), } @@ -1885,6 +1870,8 @@ pub enum Error { /// Remote has sent a ping response, but its opaque data didn't match any of the ping that /// have been sent out in the past. PingResponseNotMatching, + /// Maximum number of simultaneous RST frames to send out has been exceeded. + MaxSimultaneousRstSubstreamsExceeded, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 96f6897d45..03cc8af0d2 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -18,6 +18,7 @@ #![cfg(test)] use super::{Config, Error, IncomingDataDetail, Yamux}; +use core::num::NonZeroUsize; #[test] fn bad_header_data() { @@ -25,6 +26,7 @@ fn bad_header_data() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); { @@ -52,6 +54,7 @@ fn not_immediate_data_send_when_opening_substream() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let _ = yamux.open_substream(()); @@ -64,6 +67,7 @@ fn syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let substream_id = yamux.open_substream(()); @@ -84,6 +88,7 @@ fn extract_bytes_one_by_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + 
max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let substream_id = yamux.open_substream(()); @@ -105,6 +110,7 @@ fn inject_bytes_one_by_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 5, 255, 255, 255]; @@ -140,6 +146,7 @@ fn ack_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let mut opened_substream = None; @@ -180,6 +187,7 @@ fn rst_sent_when_rejecting() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); { @@ -204,12 +212,48 @@ fn rst_sent_when_rejecting() { assert!(output.ends_with(&[0, 8, 0, 0, 0, 84, 0, 0, 0, 0])); } +#[test] +fn max_simultaneous_rst_substreams() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(16).unwrap(), + }); + + let mut data = Vec::new(); + + // Queue many new substreams. 
+ for n in 1..32 { + data.extend_from_slice(&[0, 0, 0, 1]); + data.extend_from_slice(&u32::to_be_bytes(n * 2)[..]); + data.extend_from_slice(&[0, 0, 0, 0]); + } + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + match outcome.detail { + Some(IncomingDataDetail::IncomingSubstream) => yamux.reject_pending_substream(), + _ => {} + } + } + Err(Error::MaxSimultaneousRstSubstreamsExceeded) => return, + Err(_) => panic!(), + } + } +} + #[test] fn invalid_inbound_substream_id() { let mut yamux = Yamux::<()>::new(Config { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let data = [0, 0, 0, 1, 0, 0, 0, 83, 0, 0, 0, 0]; @@ -235,6 +279,7 @@ fn substream_opened_twice() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let data = [ @@ -266,6 +311,7 @@ fn substream_opened_back_after_rst() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // One SYN frame, one RST frame, one SYN frame again. All using the same substream ID. 
@@ -304,6 +350,7 @@ fn multiple_writes_combined_into_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let substream_id = yamux.open_substream(()); @@ -330,6 +377,7 @@ fn close_before_syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let substream_id = yamux.open_substream(()); @@ -352,6 +400,7 @@ fn write_after_close_illegal() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let substream_id = yamux.open_substream(()); @@ -369,6 +418,7 @@ fn credits_exceeded_checked_before_data_is_received() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with a SYN flag, then data frame with a ton of data. @@ -405,6 +455,7 @@ fn credits_exceeded_checked_at_the_syn() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with a SYN flag and a ton of data. 
@@ -439,6 +490,7 @@ fn data_coming_with_the_syn_taken_into_account() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with a SYN flag and 200kiB of data, followed with data frame with 100kiB of @@ -476,6 +528,7 @@ fn add_remote_window_works() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with a SYN flag and 200kiB of data, followed with data frame with 100kiB of @@ -523,6 +576,7 @@ fn add_remote_window_doesnt_immediately_raise_limit() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with a SYN flag and 200kiB of data, followed with data frame with 100kiB of @@ -563,6 +617,7 @@ fn remote_default_window_respected() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); let substream_id = yamux.open_substream(()); @@ -587,6 +642,7 @@ fn remote_window_frames_respected() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Window frame with a SYN flag and 5 bytes of window. @@ -633,6 +689,7 @@ fn write_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with SYN|FIN flags, then data frame again. @@ -666,6 +723,7 @@ fn write_after_fin_even_with_empty_frame() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with SYN|FIN flags, then empty data frame. 
@@ -699,6 +757,7 @@ fn window_frame_with_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with SYN|FIN flags, then window frame with FIN flag. @@ -732,6 +791,7 @@ fn window_frame_without_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); // Data frame with SYN|FIN flags, then window frame without FIN flag. From 15e9153411bc968a44a3e946b78709336bfea6c6 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 10:25:51 +0200 Subject: [PATCH 45/74] Remove debugging thing --- lib/src/libp2p/connection/yamux/tests.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 03cc8af0d2..a03eea5f42 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -625,9 +625,6 @@ fn remote_default_window_respected() { let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { - if output.len() >= 50 { - panic!("{:?}", out.as_ref().len()) - } output.extend_from_slice(out.as_ref()); } From c65f09c48eda7a98215c87b5556419397feb95fb Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 10:31:46 +0200 Subject: [PATCH 46/74] More tests --- lib/src/libp2p/connection/yamux/tests.rs | 138 +++++++++++++++++++++++ 1 file changed, 138 insertions(+) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index a03eea5f42..b621500d5c 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -815,3 +815,141 @@ fn window_frame_without_fin_after_fin() { assert_eq!(cursor, data.len()); } + +#[test] +fn send_ping() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 
32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + yamux.queue_ping(); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + assert_eq!(&output[0..8], &[0, 2, 0, 1, 0, 0, 0, 0]); + + // Ping response frame. + let mut data = vec![0, 2, 0, 2, 0, 0, 0, 0]; + data.extend_from_slice(&output[8..12]); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::PingResponse)) { + return; + } + } + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn remote_pong_wrong_opaque_value() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + yamux.queue_ping(); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + assert_eq!(&output[0..8], &[0, 2, 0, 1, 0, 0, 0, 0]); + + // Ping response frame. + let mut data = vec![0, 2, 0, 2, 0, 0, 0, 0]; + data.extend_from_slice(&output[8..12]); + + // Intentionally modify the opaque value to not match. + data[10] = data[10].overflowing_add(1).0; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::PingResponseNotMatching) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn remote_pong_out_of_nowhere() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + // Ping response frame. 
+ let data = &[0, 2, 0, 2, 0, 0, 0, 0, 1, 2, 3, 4]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::PingResponseNotMatching) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn answer_remote_ping() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + // Ping request frame. + let data = &[0, 2, 0, 1, 0, 0, 0, 0, 1, 2, 3, 4]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(_) => panic!(), + } + } + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + assert_eq!(output, &[0, 2, 0, 2, 0, 0, 0, 0, 1, 2, 3, 4]); +} From bd29aaaafc9190e718bff249bfb7b366c578b81a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 10:54:03 +0200 Subject: [PATCH 47/74] Check the ACK flag --- lib/src/libp2p/connection/yamux.rs | 56 ++++++- lib/src/libp2p/connection/yamux/tests.rs | 185 +++++++++++++++++++++++ 2 files changed, 240 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index c0b55ddc30..63de1db9fd 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1112,12 +1112,14 @@ impl Yamux { } header::DecodedYamuxHeader::Data { rst: true, + ack, stream_id, length, .. } | header::DecodedYamuxHeader::Window { rst: true, + ack, stream_id, length, .. @@ -1143,6 +1145,18 @@ impl Yamux { let _was_inserted = self.inner.dead_substreams.insert(stream_id); debug_assert!(_was_inserted); + // Check whether the remote has ACKed multiple times. 
+ if matches!( + s.state, + SubstreamState::Healthy { + remote_syn_acked: true, + .. + } + ) && ack + { + return Err(Error::UnexpectedAck); + } + // We might be currently writing a frame of data of the substream // being reset. If that happens, we need to update some internal // state regarding this frame of data. @@ -1178,6 +1192,20 @@ impl Yamux { }); } + header::DecodedYamuxHeader::Data { + syn: true, + ack: true, + .. + } + | header::DecodedYamuxHeader::Window { + syn: true, + ack: true, + .. + } => { + // You're never supposed to send a SYN and ACK at the same time. + return Err(Error::UnexpectedAck); + } + header::DecodedYamuxHeader::Data { syn: true, fin, @@ -1281,6 +1309,7 @@ impl Yamux { rst: false, stream_id, length, + ack, fin, .. } => { @@ -1295,11 +1324,19 @@ impl Yamux { SubstreamState::Healthy { remote_write_closed, remote_allowed_window, + remote_syn_acked, .. }, .. }) = self.inner.substreams.get_mut(&stream_id) { + match (ack, remote_syn_acked) { + (false, true) => {} + (true, acked @ false) => *acked = true, + (true, true) => return Err(Error::UnexpectedAck), + (false, false) => return Err(Error::ExpectedAck), + } + if *remote_write_closed { return Err(Error::WriteAfterFin); } @@ -1326,6 +1363,7 @@ impl Yamux { rst: false, stream_id, length, + ack, fin, .. } => { @@ -1336,10 +1374,22 @@ impl Yamux { // id is discarded and doesn't result in an error, under the // presumption that we are in this situation. if let Some(Substream { - state: SubstreamState::Healthy { allowed_window, .. }, + state: + SubstreamState::Healthy { + remote_syn_acked, + allowed_window, + .. + }, .. 
}) = self.inner.substreams.get_mut(&stream_id) { + match (ack, remote_syn_acked) { + (false, true) => {} + (true, acked @ false) => *acked = true, + (true, true) => return Err(Error::UnexpectedAck), + (false, false) => return Err(Error::ExpectedAck), + } + *allowed_window = allowed_window .checked_add(u64::from(length)) .ok_or(Error::LocalCreditsOverflow)?; @@ -1872,6 +1922,10 @@ pub enum Error { PingResponseNotMatching, /// Maximum number of simultaneous RST frames to send out has been exceeded. MaxSimultaneousRstSubstreamsExceeded, + /// The remote should have sent an ACK flag but didn't. + ExpectedAck, + /// The remote sent an ACK flag but shouldn't have. + UnexpectedAck, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index b621500d5c..6b0e7f9b4a 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -344,6 +344,87 @@ fn substream_opened_back_after_rst() { assert_eq!(cursor, data.len()); } +#[test] +fn missing_ack() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"hello world".to_vec()); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + // Data frame without an ACK. + let mut data = Vec::new(); + data.extend_from_slice(&[0, 0, 0, 0]); + data.extend_from_slice(&output[4..8]); + data.extend_from_slice(&[0, 0, 0, 1, 0xff]); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::ExpectedAck) => return, + Err(_) => panic!(), + } + } + + // Test failed. 
+ panic!() +} + +#[test] +fn multiple_acks() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"hello world".to_vec()); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + // Two data frames with an ACK. + let mut data = Vec::new(); + data.extend_from_slice(&[0, 0, 0, 2]); + data.extend_from_slice(&output[4..8]); + data.extend_from_slice(&[0, 0, 0, 1, 0xff]); + data.extend_from_slice(&[0, 0, 0, 2]); + data.extend_from_slice(&output[4..8]); + data.extend_from_slice(&[0, 0, 0, 1, 0xff]); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::UnexpectedAck) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + #[test] fn multiple_writes_combined_into_one() { let mut yamux = Yamux::new(Config { @@ -953,3 +1034,107 @@ fn answer_remote_ping() { } assert_eq!(output, &[0, 2, 0, 2, 0, 0, 0, 0, 1, 2, 3, 4]); } + +#[test] +fn dont_send_syn_after_goaway() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"foo".to_vec()); + assert!(yamux.can_send(substream_id)); + + // GoAway frame. 
+ let data = &[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(_) => panic!(), + } + } + + assert!(!yamux.can_send(substream_id)); + assert!(yamux.extract_next(usize::max_value()).is_none()); +} + +#[test] +fn substream_reset_on_goaway_if_not_acked() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"foo".to_vec()); + while let Some(_) = yamux.extract_next(usize::max_value()) {} + + // GoAway frame. + let data = &[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(_) => panic!(), + } + } + + assert!(!yamux.can_send(substream_id)); +} + +#[test] +fn can_still_send_after_goaway_if_acked() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"hello world".to_vec()); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + // ACK frame followed with GoAway frame. 
+ let mut data = Vec::new(); + data.extend_from_slice(&[0, 0, 0, 2]); + data.extend_from_slice(&output[4..8]); + data.extend_from_slice(&[0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(_) => panic!(), + } + } + + assert!(yamux.can_send(substream_id)); + + yamux.write(substream_id, b"foo".to_vec()); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + assert!(output.ends_with(&[3, 101, 101, 101])); +} From 0d578d512a7b1a57e279428cba08e1cdb1653fd5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 10:57:10 +0200 Subject: [PATCH 48/74] Fix test --- lib/src/libp2p/connection/yamux/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 6b0e7f9b4a..72a8c68abe 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -1136,5 +1136,5 @@ fn can_still_send_after_goaway_if_acked() { while let Some(out) = yamux.extract_next(usize::max_value()) { output.extend_from_slice(out.as_ref()); } - assert!(output.ends_with(&[3, 101, 101, 101])); + assert!(output.ends_with(&[3, 102, 111, 111])); } From 2540767dde8cdae9533d0ddf7eab7ad2b5f3e0ac Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 10:59:31 +0200 Subject: [PATCH 49/74] Error on multiple GoAways --- lib/src/libp2p/connection/yamux.rs | 7 +++++- lib/src/libp2p/connection/yamux/tests.rs | 31 ++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 63de1db9fd..639109ce43 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ 
-1049,8 +1049,11 @@ impl Yamux { }); } header::DecodedYamuxHeader::GoAway { error_code } => { + if self.inner.received_goaway.is_some() { + return Err(Error::MultipleGoAways); + } + self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); - // TODO: error if we have received one in the past before? self.inner.received_goaway = Some(error_code); let mut reset_substreams = @@ -1926,6 +1929,8 @@ pub enum Error { ExpectedAck, /// The remote sent an ACK flag but shouldn't have. UnexpectedAck, + /// Received multiple GoAway frames. + MultipleGoAways, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 72a8c68abe..620e2ade36 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -1063,6 +1063,7 @@ fn dont_send_syn_after_goaway() { assert!(!yamux.can_send(substream_id)); assert!(yamux.extract_next(usize::max_value()).is_none()); + assert_eq!(yamux.dead_substreams().next().unwrap().0, substream_id); } #[test] @@ -1092,6 +1093,7 @@ fn substream_reset_on_goaway_if_not_acked() { } assert!(!yamux.can_send(substream_id)); + assert_eq!(yamux.dead_substreams().next().unwrap().0, substream_id); } #[test] @@ -1138,3 +1140,32 @@ fn can_still_send_after_goaway_if_acked() { } assert!(output.ends_with(&[3, 102, 111, 111])); } + +#[test] +fn receive_multiple_goaways() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + // Two GoAway frames. + let data = &[ + 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::MultipleGoAways) => return, + Err(_) => panic!(), + } + } + + // Test failed. 
+ panic!() +} From 1c605e5c82dadb50cf89aa884c46ff15ec6f21ae Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 11:15:31 +0200 Subject: [PATCH 50/74] Fix sending a RST after a GoAway --- lib/src/libp2p/connection/yamux.rs | 8 +++--- lib/src/libp2p/connection/yamux/tests.rs | 36 +++++++++++++++++++++++- 2 files changed, 39 insertions(+), 5 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 639109ce43..8ddf298d3b 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -756,7 +756,8 @@ impl Yamux { _ => panic!("send_goaway called multiple times"), } - // If the remote is currently opening a substream, automatically reject it. + // If the remote is currently opening a substream, ignore it. The remote understands when + // receiving the GoAway that the substream has been rejected. if let Incoming::PendingIncomingSubstream { substream_id, data_frame_size, @@ -1272,10 +1273,9 @@ impl Yamux { matches!(decoded_header, header::DecodedYamuxHeader::Data { .. }); // If we have queued or sent a GoAway frame, then the substream is - // automatically rejected. + // ignored. The remote understands when receiving the GoAway that the + // substream has been rejected. if !matches!(self.inner.outgoing_goaway, OutgoingGoAway::NotRequired) { - self.inner.rsts_to_send.push_back(stream_id); - self.inner.incoming = if !is_data { Incoming::Header(arrayvec::ArrayVec::new()) } else { diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 620e2ade36..b7903c8a85 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -17,7 +17,7 @@ #![cfg(test)] -use super::{Config, Error, IncomingDataDetail, Yamux}; +use super::{Config, Error, GoAwayErrorCode, IncomingDataDetail, Yamux}; use core::num::NonZeroUsize; #[test] @@ -1169,3 +1169,37 @@ fn receive_multiple_goaways() { // Test failed. 
panic!() } + +#[test] +fn ignore_incoming_substreams_after_goaway() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + yamux.send_goaway(GoAwayErrorCode::NormalTermination); + + // New substream. + let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + let outcome = yamux.incoming_data(&data[cursor..]).unwrap(); + yamux = outcome.yamux; + cursor += outcome.bytes_read; + match outcome.detail { + Some(IncomingDataDetail::IncomingSubstream) => panic!(), + _ => {} + } + } + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + assert_eq!( + output, + &[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); +} From dd369b1822bd9a7551f3ece6e79550902d4c2003 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 11:23:45 +0200 Subject: [PATCH 51/74] Fix debug_assert! and more tests --- lib/src/libp2p/connection/yamux.rs | 16 +++++-- lib/src/libp2p/connection/yamux/tests.rs | 60 ++++++++++++++++++++++-- 2 files changed, 68 insertions(+), 8 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 8ddf298d3b..d91973ae9d 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1239,10 +1239,14 @@ impl Yamux { // Remote has sent a SYN flag. A new substream is to be opened. match self.inner.substreams.get(&stream_id) { Some(Substream { - state: SubstreamState::Healthy { .. }, + state: + SubstreamState::Healthy { + local_write_close: SubstreamStateLocalWrite::FinQueued, + remote_write_closed: true, + .. + }, .. }) => { - // TODO: also check whether substream is still open return Err(Error::UnexpectedSyn(stream_id)); } Some(Substream { @@ -1256,7 +1260,11 @@ impl Yamux { // substream. 
break; } - None => {} + Some(Substream { + state: SubstreamState::Healthy { .. }, + .. + }) + | None => {} } // When receiving a new substream, we might have to potentially queue @@ -1661,7 +1669,6 @@ impl Yamux { u32::try_from(pending_len).unwrap_or(u32::max_value()), u32::try_from(*allowed_window).unwrap_or(u32::max_value()), ); - debug_assert_ne!(len_out, 0); let len_out_usize = usize::try_from(len_out).unwrap(); *allowed_window -= u64::from(len_out); let syn_ack_flag = !*first_message_queued; @@ -1671,6 +1678,7 @@ impl Yamux { if fin_flag { *local_write = SubstreamStateLocalWrite::FinQueued; } + debug_assert!(len_out != 0 || fin_flag); self.inner.outgoing = Outgoing::Header { header: header::DecodedYamuxHeader::Data { syn: syn_ack_flag && !sub.inbound, diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index b7903c8a85..92082848f9 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -344,6 +344,61 @@ fn substream_opened_back_after_rst() { assert_eq!(cursor, data.len()); } +#[test] +fn substream_opened_back_after_graceful_closing() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + // One SYN|FIN frame. + let data = [0, 0, 0, 1 | 4, 0, 0, 0, 84, 0, 0, 0, 1, 255]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + let substream_id = yamux.accept_pending_substream(()); + + // Close the substream gracefully. + yamux.close(substream_id); + } + } + Err(_) => panic!(), + } + } + + // Flush the queue in order to send out the FIN. 
+ while yamux.extract_next(usize::max_value()).is_some() {} + + // One SYN frame again, using the same substream ID as earlier. + let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 1, 255]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + return; + } + } + Err(_) => panic!(), + } + } + + // Test failure. + panic!() +} + #[test] fn missing_ack() { let mut yamux = Yamux::new(Config { @@ -1198,8 +1253,5 @@ fn ignore_incoming_substreams_after_goaway() { while let Some(out) = yamux.extract_next(usize::max_value()) { output.extend_from_slice(out.as_ref()); } - assert_eq!( - output, - &[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); + assert_eq!(output, &[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); } From fa72d45e9716c01f85427e9318dafa927693fbc5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 11:24:59 +0200 Subject: [PATCH 52/74] Fix code --- lib/src/libp2p/connection/yamux.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index d91973ae9d..45e02864b3 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1238,6 +1238,7 @@ impl Yamux { // Remote has sent a SYN flag. A new substream is to be opened. match self.inner.substreams.get(&stream_id) { + None => {} Some(Substream { state: SubstreamState::Healthy { @@ -1246,10 +1247,8 @@ impl Yamux { .. }, .. - }) => { - return Err(Error::UnexpectedSyn(stream_id)); - } - Some(Substream { + }) + | Some(Substream { state: SubstreamState::Reset, .. }) => { @@ -1263,8 +1262,9 @@ impl Yamux { Some(Substream { state: SubstreamState::Healthy { .. }, .. 
- }) - | None => {} + }) => { + return Err(Error::UnexpectedSyn(stream_id)); + } } // When receiving a new substream, we might have to potentially queue From dc84f6233e475dc1fbe29a76d9c3e1b2518dd14f Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 11:29:03 +0200 Subject: [PATCH 53/74] Fix test --- lib/src/libp2p/connection/yamux/tests.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 92082848f9..0f692a81d9 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -381,13 +381,28 @@ fn substream_opened_back_after_graceful_closing() { let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 1, 255]; let mut cursor = 0; + let mut killed_substream = false; + while cursor < data.len() { match yamux.incoming_data(&data[cursor..]) { Ok(outcome) => { yamux = outcome.yamux; + + // Because we can't have two substreams with the same ID at the same time, the + // reading of the new SYN frame will be blocked until we've removed the dead + // substream. + if outcome.bytes_read == 0 { + let substream_id = yamux.dead_substreams().next().unwrap().0; + yamux.remove_dead_substream(substream_id); + killed_substream = true; + continue; + } + cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { + // Make sure we've removed the dead substream. 
+ assert!(killed_substream); return; } } From b8f9427c06a9b033dc530b3dd782a26650bfcd9a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 11:38:40 +0200 Subject: [PATCH 54/74] Add a limit to the number of pongs --- .../connection/established/single_stream.rs | 1 + lib/src/libp2p/connection/yamux.rs | 44 ++++++-- lib/src/libp2p/connection/yamux/tests.rs | 106 +++++++++++++++++- 3 files changed, 138 insertions(+), 13 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 7e7a1a7d2d..45a00a174c 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -1116,6 +1116,7 @@ impl ConnectionPrototype { is_initiator: self.encryption.is_initiator(), capacity: 64, // TODO: ? randomness_seed: randomness.sample(rand::distributions::Standard), + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 45e02864b3..65860a701a 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -83,6 +83,12 @@ pub struct Config { /// which the data on substreams is sent out. pub randomness_seed: [u8; 32], + /// When the remote sends a ping, we need to send out a pong. However, the remote could refuse + /// to read any additional data from the socket and continue sending pings, thus increasing + /// the local buffer size indefinitely. In order to protect against this attack, there exists + /// a maximum number of queued pongs, after which the connection will be shut down abruptly. + pub max_simultaneous_queued_pongs: NonZeroUsize, + /// When the remote sends a substream, and this substream gets rejected by the API user, some /// data needs to be sent out. 
However, the remote could refuse reading any additional data /// and continue sending new substream requests, thus increasing the local buffer size @@ -136,6 +142,13 @@ struct YamuxInner { /// Since the opaque values are generated locally and randomly, we can use the `FNV` hasher. pings_waiting_reply: hashbrown::HashSet, + /// List of opaque values corresponding to ping requests sent by the remote. For each entry, + /// a PONG header should be sent to the remote. + pongs_to_send: VecDeque, + + /// See [`Config::max_simultaneous_queued_pongs`]. + max_simultaneous_queued_pongs: NonZeroUsize, + /// List of substream IDs that have been reset locally. For each entry, a RST header should /// be sent to the remote. rsts_to_send: VecDeque, @@ -319,6 +332,8 @@ impl Yamux { pings_to_send: 0, // We leave the initial capacity at 0, as it is likely that no ping is sent at all. pings_waiting_reply: hashbrown::HashSet::with_hasher(Default::default()), + pongs_to_send: VecDeque::with_capacity(4), + max_simultaneous_queued_pongs: config.max_simultaneous_queued_pongs, rsts_to_send: VecDeque::with_capacity(4), max_simultaneous_rst_substreams: config.max_simultaneous_rst_substreams, randomness, @@ -1021,20 +1036,13 @@ impl Yamux { match decoded_header { header::DecodedYamuxHeader::PingRequest { opaque_value } => { - // Ping. In order to queue the pong message, the outgoing queue must - // be empty. If it is not the case, we simply leave the ping header - // there and prevent any further data from being read. 
- if !matches!(self.inner.outgoing, Outgoing::Idle) { - // TODO: this could trigger a deadlock if the send buffer is very small - break; + if self.inner.pongs_to_send.len() + >= self.inner.max_simultaneous_queued_pongs.get() + { + return Err(Error::MaxSimultaneousPingsExceeded); } - self.inner.outgoing = Outgoing::Header { - header: header::DecodedYamuxHeader::PingResponse { opaque_value }, - header_already_sent: 0, - substream_data_frame: None, - }; - + self.inner.pongs_to_send.push_back(opaque_value); self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); } header::DecodedYamuxHeader::PingResponse { opaque_value } => { @@ -1588,6 +1596,16 @@ impl Yamux { continue; } + // Send outgoing pongs. + if let Some(opaque_value) = self.inner.pongs_to_send.pop_front() { + self.inner.outgoing = Outgoing::Header { + header: header::DecodedYamuxHeader::PingResponse { opaque_value }, + header_already_sent: 0, + substream_data_frame: None, + }; + continue; + } + // Send window update frames. // TODO: O(n) if let Some((id, sub)) = self @@ -1933,6 +1951,8 @@ pub enum Error { PingResponseNotMatching, /// Maximum number of simultaneous RST frames to send out has been exceeded. MaxSimultaneousRstSubstreamsExceeded, + /// Maximum number of simultaneous PONG frames to send out has been exceeded. + MaxSimultaneousPingsExceeded, /// The remote should have sent an ACK flag but didn't. ExpectedAck, /// The remote sent an ACK flag but shouldn't have. 
diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 0f692a81d9..701e0776ca 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -18,7 +18,7 @@ #![cfg(test)] use super::{Config, Error, GoAwayErrorCode, IncomingDataDetail, Yamux}; -use core::num::NonZeroUsize; +use core::{cmp, num::NonZeroUsize}; #[test] fn bad_header_data() { @@ -26,6 +26,7 @@ fn bad_header_data() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -54,6 +55,7 @@ fn not_immediate_data_send_when_opening_substream() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -67,6 +69,7 @@ fn syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -88,6 +91,7 @@ fn extract_bytes_one_by_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -110,6 +114,7 @@ fn inject_bytes_one_by_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -146,6 +151,7 @@ fn ack_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -187,6 +193,7 @@ fn rst_sent_when_rejecting() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + 
max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -218,6 +225,7 @@ fn max_simultaneous_rst_substreams() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(16).unwrap(), }); @@ -253,6 +261,7 @@ fn invalid_inbound_substream_id() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -279,6 +288,7 @@ fn substream_opened_twice() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -311,6 +321,7 @@ fn substream_opened_back_after_rst() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -350,6 +361,7 @@ fn substream_opened_back_after_graceful_closing() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -420,6 +432,7 @@ fn missing_ack() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -459,6 +472,7 @@ fn multiple_acks() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -501,6 +515,7 @@ fn multiple_writes_combined_into_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + 
max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -528,6 +543,7 @@ fn close_before_syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -551,6 +567,7 @@ fn write_after_close_illegal() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -569,6 +586,7 @@ fn credits_exceeded_checked_before_data_is_received() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -606,6 +624,7 @@ fn credits_exceeded_checked_at_the_syn() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -641,6 +660,7 @@ fn data_coming_with_the_syn_taken_into_account() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -679,6 +699,7 @@ fn add_remote_window_works() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -727,6 +748,7 @@ fn add_remote_window_doesnt_immediately_raise_limit() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -768,6 +790,7 @@ fn remote_default_window_respected() { capacity: 
0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -790,6 +813,7 @@ fn remote_window_frames_respected() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -837,6 +861,7 @@ fn write_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -871,6 +896,7 @@ fn write_after_fin_even_with_empty_frame() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -905,6 +931,7 @@ fn window_frame_with_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -939,6 +966,7 @@ fn window_frame_without_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -973,6 +1001,7 @@ fn send_ping() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1013,6 +1042,7 @@ fn remote_pong_wrong_opaque_value() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1053,6 +1083,7 @@ fn remote_pong_out_of_nowhere() { capacity: 0, 
is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1081,6 +1112,7 @@ fn answer_remote_ping() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1105,12 +1137,80 @@ fn answer_remote_ping() { assert_eq!(output, &[0, 2, 0, 2, 0, 0, 0, 0, 1, 2, 3, 4]); } +#[test] +fn max_simultaneous_queued_pongs() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let mut data = Vec::new(); + + // Queue many new pings. + for _ in 1..16 { + data.extend_from_slice(&[0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]); + } + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::MaxSimultaneousPingsExceeded) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + +#[test] +fn simultaneous_pongs_flushed() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let mut data = Vec::new(); + + // Queue many new pings. + for _ in 1..16 { + data.extend_from_slice(&[0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]); + } + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..][..cmp::min(12, data.len() - cursor)]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + // Flush out in order to send the pong. 
+ while yamux.extract_next(usize::max_value()).is_some() {}
+ }
+ Err(_) => panic!(),
+ }
+ }
+
+ // Test succeeded.
+}
+
 #[test]
 fn dont_send_syn_after_goaway() {
 let mut yamux = Yamux::new(Config {
 capacity: 0,
 is_initiator: true,
 randomness_seed: [0; 32],
+ max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(),
 max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(),
 });
@@ -1142,6 +1242,7 @@ fn substream_reset_on_goaway_if_not_acked() {
 capacity: 0,
 is_initiator: true,
 randomness_seed: [0; 32],
+ max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(),
 max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(),
 });
@@ -1172,6 +1273,7 @@ fn can_still_send_after_goaway_if_acked() {
 capacity: 0,
 is_initiator: true,
 randomness_seed: [0; 32],
+ max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(),
 max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(),
 });
@@ -1217,6 +1319,7 @@ fn receive_multiple_goaways() {
 capacity: 0,
 is_initiator: true,
 randomness_seed: [0; 32],
+ max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(),
 max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(),
 });
@@ -1246,6 +1349,7 @@ fn ignore_incoming_substreams_after_goaway() {
 capacity: 0,
 is_initiator: true,
 randomness_seed: [0; 32],
+ max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(),
 max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(),
 });

From c82c694d4100c2279c0a280283612bcc542584d7 Mon Sep 17 00:00:00 2001
From: Pierre Krieger
Date: Wed, 5 Apr 2023 11:47:14 +0200
Subject: [PATCH 55/74] Clarify data with RST

---
 lib/src/libp2p/connection/yamux.rs | 11 ++---
 lib/src/libp2p/connection/yamux/tests.rs | 54 ++++++++++++++++++++++++
 2 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs
index 65860a701a..bde10bd904 100644
--- a/lib/src/libp2p/connection/yamux.rs
+++ b/lib/src/libp2p/connection/yamux.rs
@@ -1126,23 +1126,20 @@ 
impl Yamux { rst: true, ack, stream_id, - length, .. } | header::DecodedYamuxHeader::Window { rst: true, ack, stream_id, - length, .. } => { // Frame with the `RST` flag set. Destroy the substream. - // It is invalid to have the `RST` flag set and data at the same time. - // TODO: why is it invalid? - if matches!(decoded_header, header::DecodedYamuxHeader::Data { .. }) - && length != 0 - { + // Sending a `RST` flag and data together is a weird corner case and + // is difficult to handle. It is unclear whether it is allowed at all. + // We thus consider it as invalid. + if matches!(decoded_header, header::DecodedYamuxHeader::Data { .. }) { return Err(Error::DataWithRst); } diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 701e0776ca..bdf9f90084 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -187,6 +187,60 @@ fn ack_sent() { ); } +#[test] +fn syn_and_ack_together() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let data = [0, 0, 0, 1 | 2, 0, 0, 0, 84, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::UnexpectedAck) => return, + Err(_) => panic!(), + } + } + + // Test failed. 
+ panic!() +} + +#[test] +fn syn_and_rst_together() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let data = [0, 0, 0, 1 | 8, 0, 0, 0, 84, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::DataWithRst) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + #[test] fn rst_sent_when_rejecting() { let mut yamux = Yamux::<()>::new(Config { From 3c0d246edcfbeaca6e2eb50803b0d3ec5f480a5e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 11:50:45 +0200 Subject: [PATCH 56/74] Test can't open after goaway --- lib/src/libp2p/connection/yamux.rs | 2 +- lib/src/libp2p/connection/yamux/tests.rs | 28 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index bde10bd904..07b3cbf57a 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -390,7 +390,7 @@ impl Yamux { /// pub fn open_substream(&mut self, user_data: T) -> SubstreamId { // It is forbidden to open new substreams if a `GoAway` frame has been received. - assert!(self.inner.received_goaway.is_none()); + assert!(self.inner.received_goaway.is_none(), "can't open substream after goaway"); // Make sure that the `loop` below can finish. 
assert!( diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index bdf9f90084..11f760fb11 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -1428,3 +1428,31 @@ fn ignore_incoming_substreams_after_goaway() { } assert_eq!(output, &[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); } + +#[test] +#[should_panic = "can't open substream after goaway"] +fn opening_forbidden_after_goaway() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + // GoAway frame. + let data = &[0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(_) => panic!(), + } + } + + // Panics. + yamux.open_substream(()); +} From ded9dfc6c2f2cf4153188c82138bb1fa49569f46 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 11:56:58 +0200 Subject: [PATCH 57/74] Simplify pings handling --- lib/src/libp2p/connection/yamux.rs | 39 +++++++++++------------ lib/src/libp2p/connection/yamux/tests.rs | 40 ++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 21 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 07b3cbf57a..a08ff44e65 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -137,10 +137,8 @@ struct YamuxInner { /// Number of pings to send out that haven't been queued yet. pings_to_send: usize, - /// List of pings that have been sent out but haven't been replied yet. Each ping has as key - /// the opaque value that has been sent out and that must be matched by the remote. 
- /// Since the opaque values are generated locally and randomly, we can use the `FNV` hasher. - pings_waiting_reply: hashbrown::HashSet, + /// List of pings that have been sent out but haven't been replied yet. + pings_waiting_reply: VecDeque, /// List of opaque values corresponding to ping requests sent by the remote. For each entry, /// a PONG header should be sent to the remote. @@ -331,7 +329,7 @@ impl Yamux { }, pings_to_send: 0, // We leave the initial capacity at 0, as it is likely that no ping is sent at all. - pings_waiting_reply: hashbrown::HashSet::with_hasher(Default::default()), + pings_waiting_reply: VecDeque::with_capacity(0), pongs_to_send: VecDeque::with_capacity(4), max_simultaneous_queued_pongs: config.max_simultaneous_queued_pongs, rsts_to_send: VecDeque::with_capacity(4), @@ -390,7 +388,10 @@ impl Yamux { /// pub fn open_substream(&mut self, user_data: T) -> SubstreamId { // It is forbidden to open new substreams if a `GoAway` frame has been received. - assert!(self.inner.received_goaway.is_none(), "can't open substream after goaway"); + assert!( + self.inner.received_goaway.is_none(), + "can't open substream after goaway" + ); // Make sure that the `loop` below can finish. assert!( @@ -1046,7 +1047,7 @@ impl Yamux { self.inner.incoming = Incoming::Header(arrayvec::ArrayVec::new()); } header::DecodedYamuxHeader::PingResponse { opaque_value } => { - if !self.inner.pings_waiting_reply.remove(&opaque_value) { + if self.inner.pings_waiting_reply.pop_front() != Some(opaque_value) { return Err(Error::PingResponseNotMatching); } @@ -1576,19 +1577,13 @@ impl Yamux { // Send outgoing pings. if self.inner.pings_to_send > 0 { self.inner.pings_to_send -= 1; - // Generate opaque values in a loop until we don't hit a duplicate. 
- loop { - let opaque_value: u32 = self.inner.randomness.gen(); - if !self.inner.pings_waiting_reply.insert(opaque_value) { - continue; - } - self.inner.outgoing = Outgoing::Header { - header: header::DecodedYamuxHeader::PingRequest { opaque_value }, - header_already_sent: 0, - substream_data_frame: None, - }; - break; - } + let opaque_value: u32 = self.inner.randomness.gen(); + self.inner.pings_waiting_reply.push_back(opaque_value); + self.inner.outgoing = Outgoing::Header { + header: header::DecodedYamuxHeader::PingRequest { opaque_value }, + header_already_sent: 0, + substream_data_frame: None, + }; debug_assert!(self.inner.pings_waiting_reply.len() <= MAX_PINGS); continue; } @@ -1921,7 +1916,9 @@ pub enum IncomingDataDetail { }, /// Received a response to a ping that has been sent out earlier. - // TODO: associate some data with the ping? in case they're answered in a different order? + /// + /// If multiple pings have been sent out simultaneously, they are always answered in the same + /// order as they have been sent out. 
PingResponse, } diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 11f760fb11..9a5cc28138 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -1131,6 +1131,46 @@ fn remote_pong_wrong_opaque_value() { panic!() } +#[test] +fn pings_answered_in_wrong_order() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + yamux.queue_ping(); + yamux.queue_ping(); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + assert_eq!(&output[0..8], &[0, 2, 0, 1, 0, 0, 0, 0]); + assert_eq!(&output[12..20], &[0, 2, 0, 1, 0, 0, 0, 0]); + + // Ping response frame of the second ping. + let mut data = vec![0, 2, 0, 2, 0, 0, 0, 0]; + data.extend_from_slice(&output[20..24]); + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + } + Err(Error::PingResponseNotMatching) => return, + Err(_) => panic!(), + } + } + + // Test failed. + panic!() +} + #[test] fn remote_pong_out_of_nowhere() { let mut yamux = Yamux::<()>::new(Config { From 3aa99aeac632e44ff1f41b56c5a25ae4c7d664d6 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 12:01:31 +0200 Subject: [PATCH 58/74] CHANGELOG entry --- wasm-node/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/wasm-node/CHANGELOG.md b/wasm-node/CHANGELOG.md index 3045b911db..1427c63884 100644 --- a/wasm-node/CHANGELOG.md +++ b/wasm-node/CHANGELOG.md @@ -5,6 +5,7 @@ ### Changed - Removed support for the `ls` message in the multistream-select protocol, in accordance with the rest of the libp2p ecosystem. 
This message was in practice never used, and removing support for it simplifies the implementation. ([#379](https://github.com/smol-dot/smoldot/pull/379)) +- Yamux now considers answering pings in the wrong order as invalid. ### Fixed From 5fef22308ec6a6b83b9cba6f90e71f8a65d1fcbd Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 12:11:46 +0200 Subject: [PATCH 59/74] Clarify add_remote_window behaviour --- .../connection/established/single_stream.rs | 4 ++-- lib/src/libp2p/connection/yamux.rs | 8 ++++++-- lib/src/libp2p/connection/yamux/tests.rs | 20 +++++++++++++++++-- 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 45a00a174c..4b95842dc5 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -257,7 +257,7 @@ where // TODO: only do that for notification substreams? 
because for requests we already set the value to the maximum when the substream is created self.inner .yamux - .add_remote_window(substream_id, u64::try_from(num_read).unwrap()); + .add_remote_window_saturating(substream_id, u64::try_from(num_read).unwrap()); if let Some(event) = event { return Ok((self, Some(event))); @@ -811,7 +811,7 @@ where ))); // TODO: we add some bytes due to the length prefix, this is a bit hacky as we should ask this information from the substream - self.inner.yamux.add_remote_window( + self.inner.yamux.add_remote_window_saturating( substream_id, u64::try_from(self.inner.request_protocols[protocol_index].max_response_size) .unwrap_or(u64::max_value()) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index a08ff44e65..1fbb9b07c9 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -544,6 +544,11 @@ impl Yamux { /// Adds `bytes` to the number of bytes the remote is allowed to send at once in the next /// packet. /// + /// The counter saturates if its maximum is reached. This could cause stalls if the + /// remote sends more data than the maximum. However, the number of bytes is stored in a `u64`, + /// the remote would have to send 2^64 bytes in order to reach this situation, making it + /// basically impossible. + /// /// > **Note**: When a substream has just been opened or accepted, it starts with an initial /// > window of [`NEW_SUBSTREAMS_FRAME_SIZE`]. /// @@ -556,8 +561,7 @@ impl Yamux { /// /// Panics if the [`SubstreamId`] is invalid. /// - // TODO: properly define behavior in case of overflow? - pub fn add_remote_window(&mut self, substream_id: SubstreamId, bytes: u64) { + pub fn add_remote_window_saturating(&mut self, substream_id: SubstreamId, bytes: u64) { if let SubstreamState::Healthy { remote_window_pending_increase, .. 
diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 9a5cc28138..cce4f69574 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -778,7 +778,7 @@ fn add_remote_window_works() { // `add_remote_window` doesn't immediately raise the limit, so we flush the // output buffer in order to obtain a window frame. - yamux.add_remote_window(substream_id, 100 * 1024); + yamux.add_remote_window_saturating(substream_id, 100 * 1024); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -826,7 +826,7 @@ fn add_remote_window_doesnt_immediately_raise_limit() { let substream_id = yamux.accept_pending_substream(()); // `add_remote_window` shouldn't immediately raise the limit. - yamux.add_remote_window(substream_id, 100 * 1024); + yamux.add_remote_window_saturating(substream_id, 100 * 1024); } } Err(Error::CreditsExceeded) => return, @@ -838,6 +838,22 @@ fn add_remote_window_doesnt_immediately_raise_limit() { panic!() } +#[test] +fn add_remote_window_saturates() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()); + + // Check that `add_remote_window_saturating` doesn't panic. 
+ yamux.add_remote_window_saturating(substream_id, u64::max_value()); +} + #[test] fn remote_default_window_respected() { let mut yamux = Yamux::new(Config { From 40a617dfeb93287434b8df33caf7de2585180ab8 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 12:41:41 +0200 Subject: [PATCH 60/74] Add `max_out_data_frame_size` --- .../connection/established/single_stream.rs | 3 +- lib/src/libp2p/connection/yamux.rs | 24 +++++- lib/src/libp2p/connection/yamux/tests.rs | 75 ++++++++++++++++++- 3 files changed, 98 insertions(+), 4 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 4b95842dc5..9ca5d19a46 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -60,7 +60,7 @@ use super::{ use alloc::{boxed::Box, string::String, vec, vec::Vec}; use core::{ fmt, - num::NonZeroUsize, + num::{NonZeroU32, NonZeroUsize}, ops::{Add, Sub}, time::Duration, }; @@ -1116,6 +1116,7 @@ impl ConnectionPrototype { is_initiator: self.encryption.is_initiator(), capacity: 64, // TODO: ? randomness_seed: randomness.sample(rand::distributions::Standard), + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), // TODO: make configurable? max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 1fbb9b07c9..82167da9c1 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -83,6 +83,19 @@ pub struct Config { /// which the data on substreams is sent out. pub randomness_seed: [u8; 32], + /// Maximum size of data frames to send out. + /// + /// A higher value increases the variance of the latency of the data sent on the substreams, + /// which is undesirable. A lower value increases the overhead of the Yamux protocol. 
This + /// overhead is equal to `1200 / (max_out_data_frame_size + 12)` %, for example setting + /// `max_out_data_frame_size` to 24 incurs a 33% overhead. + /// + /// The "best" value depends on the bandwidth speed of the underlying connection, and is thus + /// impossible to tell. + /// + /// A typical value is `8192`. + pub max_out_data_frame_size: NonZeroU32, + /// When the remote sends a ping, we need to send out a pong. However, the remote could refuse /// to read any additional data from the socket and continue sending pings, thus increasing /// the local buffer size indefinitely. In order to protect against this attack, there exists @@ -129,6 +142,9 @@ struct YamuxInner { /// Whether to send out a `GoAway` frame. outgoing_goaway: OutgoingGoAway, + /// See [`Config::max_out_data_frame_size`]. + max_out_data_frame_size: NonZeroU32, + /// Id of the next outgoing substream to open. /// This implementation allocates identifiers linearly. Every time a substream is open, its /// value is incremented by two. 
@@ -322,6 +338,7 @@ impl Yamux { incoming: Incoming::Header(arrayvec::ArrayVec::new()), outgoing: Outgoing::Idle, outgoing_goaway: OutgoingGoAway::NotRequired, + max_out_data_frame_size: config.max_out_data_frame_size, next_outbound_substream: if config.is_initiator { NonZeroU32::new(1).unwrap() } else { @@ -1680,8 +1697,11 @@ impl Yamux { { let pending_len = write_queue.queued_bytes(); let len_out = cmp::min( - u32::try_from(pending_len).unwrap_or(u32::max_value()), - u32::try_from(*allowed_window).unwrap_or(u32::max_value()), + self.inner.max_out_data_frame_size.get(), + cmp::min( + u32::try_from(pending_len).unwrap_or(u32::max_value()), + u32::try_from(*allowed_window).unwrap_or(u32::max_value()), + ), ); let len_out_usize = usize::try_from(len_out).unwrap(); *allowed_window -= u64::from(len_out); diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index cce4f69574..b087e7e1fa 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -18,7 +18,10 @@ #![cfg(test)] use super::{Config, Error, GoAwayErrorCode, IncomingDataDetail, Yamux}; -use core::{cmp, num::NonZeroUsize}; +use core::{ + cmp, + num::{NonZeroU32, NonZeroUsize}, +}; #[test] fn bad_header_data() { @@ -26,6 +29,7 @@ fn bad_header_data() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -55,6 +59,7 @@ fn not_immediate_data_send_when_opening_substream() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -69,6 +74,7 @@ fn syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + 
max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -85,12 +91,39 @@ fn syn_sent() { assert!(output.ends_with(&[0, 0, 0, 3, 102, 111, 111])); } +#[test] +fn max_out_data_frame_size_works() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(2).unwrap(), + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()); + yamux.write(substream_id, b"foo".to_vec()); + + let mut output = Vec::new(); + while let Some(out) = yamux.extract_next(usize::max_value()) { + output.extend_from_slice(out.as_ref()); + } + + assert_eq!(&output[0..4], &[0, 0, 0, 1]); + assert_eq!(&output[8..14], &[0, 0, 0, 2, 102, 111]); + assert_eq!(&output[14..18], &[0, 0, 0, 0]); + assert_eq!(&output[22..27], &[0, 0, 0, 1, 111]); + assert_eq!(output.len(), 27); +} + #[test] fn extract_bytes_one_by_one() { let mut yamux = Yamux::new(Config { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -114,6 +147,7 @@ fn inject_bytes_one_by_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -151,6 +185,7 @@ fn ack_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: 
NonZeroUsize::new(1024).unwrap(), }); @@ -193,6 +228,7 @@ fn syn_and_ack_together() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -220,6 +256,7 @@ fn syn_and_rst_together() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -247,6 +284,7 @@ fn rst_sent_when_rejecting() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -279,6 +317,7 @@ fn max_simultaneous_rst_substreams() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(16).unwrap(), }); @@ -315,6 +354,7 @@ fn invalid_inbound_substream_id() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -342,6 +382,7 @@ fn substream_opened_twice() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -375,6 +416,7 @@ fn substream_opened_back_after_rst() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: 
NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -415,6 +457,7 @@ fn substream_opened_back_after_graceful_closing() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -486,6 +529,7 @@ fn missing_ack() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -526,6 +570,7 @@ fn multiple_acks() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -569,6 +614,7 @@ fn multiple_writes_combined_into_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -597,6 +643,7 @@ fn close_before_syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -621,6 +668,7 @@ fn write_after_close_illegal() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -640,6 
+688,7 @@ fn credits_exceeded_checked_before_data_is_received() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -678,6 +727,7 @@ fn credits_exceeded_checked_at_the_syn() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -714,6 +764,7 @@ fn data_coming_with_the_syn_taken_into_account() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -753,6 +804,7 @@ fn add_remote_window_works() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -802,6 +854,7 @@ fn add_remote_window_doesnt_immediately_raise_limit() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -844,6 +897,7 @@ fn add_remote_window_saturates() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -860,6 +914,7 @@ fn remote_default_window_respected() { capacity: 0, is_initiator: true, randomness_seed: 
[0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -883,6 +938,7 @@ fn remote_window_frames_respected() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -931,6 +987,7 @@ fn write_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -966,6 +1023,7 @@ fn write_after_fin_even_with_empty_frame() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1001,6 +1059,7 @@ fn window_frame_with_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1036,6 +1095,7 @@ fn window_frame_without_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1071,6 +1131,7 @@ fn send_ping() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: 
NonZeroUsize::new(1024).unwrap(), }); @@ -1112,6 +1173,7 @@ fn remote_pong_wrong_opaque_value() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1153,6 +1215,7 @@ fn pings_answered_in_wrong_order() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1193,6 +1256,7 @@ fn remote_pong_out_of_nowhere() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1222,6 +1286,7 @@ fn answer_remote_ping() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1253,6 +1318,7 @@ fn max_simultaneous_queued_pongs() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1286,6 +1352,7 @@ fn simultaneous_pongs_flushed() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1320,6 +1387,7 @@ fn dont_send_syn_after_goaway() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + 
max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1352,6 +1420,7 @@ fn substream_reset_on_goaway_if_not_acked() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1383,6 +1452,7 @@ fn can_still_send_after_goaway_if_acked() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1429,6 +1499,7 @@ fn receive_multiple_goaways() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1459,6 +1530,7 @@ fn ignore_incoming_substreams_after_goaway() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1492,6 +1564,7 @@ fn opening_forbidden_after_goaway() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); From 8b604eb96bc656dd0386b2639aaa89e6c43b6fd0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 12:43:18 +0200 Subject: [PATCH 61/74] Fix tests --- lib/src/libp2p/connection/yamux/tests.rs | 88 ++++++++++++------------ 
1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index b087e7e1fa..fed70c7d40 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -29,7 +29,7 @@ fn bad_header_data() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -59,7 +59,7 @@ fn not_immediate_data_send_when_opening_substream() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -74,7 +74,7 @@ fn syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -123,7 +123,7 @@ fn extract_bytes_one_by_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -147,7 +147,7 @@ fn inject_bytes_one_by_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: 
NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -185,7 +185,7 @@ fn ack_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -228,7 +228,7 @@ fn syn_and_ack_together() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -256,7 +256,7 @@ fn syn_and_rst_together() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -284,7 +284,7 @@ fn rst_sent_when_rejecting() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -317,7 +317,7 @@ fn max_simultaneous_rst_substreams() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: 
NonZeroUsize::new(16).unwrap(), }); @@ -354,7 +354,7 @@ fn invalid_inbound_substream_id() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -382,7 +382,7 @@ fn substream_opened_twice() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -416,7 +416,7 @@ fn substream_opened_back_after_rst() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -457,7 +457,7 @@ fn substream_opened_back_after_graceful_closing() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -529,7 +529,7 @@ fn missing_ack() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -570,7 +570,7 @@ fn multiple_acks() { capacity: 0, is_initiator: true, 
randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -614,7 +614,7 @@ fn multiple_writes_combined_into_one() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -643,7 +643,7 @@ fn close_before_syn_sent() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -668,7 +668,7 @@ fn write_after_close_illegal() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -688,7 +688,7 @@ fn credits_exceeded_checked_before_data_is_received() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -727,7 +727,7 @@ fn credits_exceeded_checked_at_the_syn() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + 
max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -764,7 +764,7 @@ fn data_coming_with_the_syn_taken_into_account() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -804,7 +804,7 @@ fn add_remote_window_works() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -854,7 +854,7 @@ fn add_remote_window_doesnt_immediately_raise_limit() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -897,7 +897,7 @@ fn add_remote_window_saturates() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -914,7 +914,7 @@ fn remote_default_window_respected() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), 
max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -938,7 +938,7 @@ fn remote_window_frames_respected() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -987,7 +987,7 @@ fn write_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1023,7 +1023,7 @@ fn write_after_fin_even_with_empty_frame() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1059,7 +1059,7 @@ fn window_frame_with_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1095,7 +1095,7 @@ fn window_frame_without_fin_after_fin() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: 
NonZeroUsize::new(1024).unwrap(), }); @@ -1131,7 +1131,7 @@ fn send_ping() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1173,7 +1173,7 @@ fn remote_pong_wrong_opaque_value() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1215,7 +1215,7 @@ fn pings_answered_in_wrong_order() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1256,7 +1256,7 @@ fn remote_pong_out_of_nowhere() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1286,7 +1286,7 @@ fn answer_remote_ping() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1318,7 +1318,7 @@ fn max_simultaneous_queued_pongs() { capacity: 0, is_initiator: 
true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1352,7 +1352,7 @@ fn simultaneous_pongs_flushed() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1387,7 +1387,7 @@ fn dont_send_syn_after_goaway() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1420,7 +1420,7 @@ fn substream_reset_on_goaway_if_not_acked() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1452,7 +1452,7 @@ fn can_still_send_after_goaway_if_acked() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1499,7 +1499,7 @@ fn receive_multiple_goaways() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: 
NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1530,7 +1530,7 @@ fn ignore_incoming_substreams_after_goaway() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); @@ -1564,7 +1564,7 @@ fn opening_forbidden_after_goaway() { capacity: 0, is_initiator: true, randomness_seed: [0; 32], - max_out_data_frame_size: NonZeroU32::new(8192).unwrap(), + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); From 7460e59031c7f20e809bcd97b45dc8ca6a70af15 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 12:49:30 +0200 Subject: [PATCH 62/74] More proper handling of RST with data --- lib/src/libp2p/connection/yamux.rs | 6 +- lib/src/libp2p/connection/yamux/tests.rs | 79 ++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 82167da9c1..4289457d44 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1148,12 +1148,14 @@ impl Yamux { rst: true, ack, stream_id, + length, .. } | header::DecodedYamuxHeader::Window { rst: true, ack, stream_id, + length, .. } => { // Frame with the `RST` flag set. Destroy the substream. @@ -1161,7 +1163,9 @@ impl Yamux { // Sending a `RST` flag and data together is a weird corner case and // is difficult to handle. It is unclear whether it is allowed at all. // We thus consider it as invalid. 
- if matches!(decoded_header, header::DecodedYamuxHeader::Data { .. }) { + if matches!(decoded_header, header::DecodedYamuxHeader::Data { .. }) + && length != 0 + { return Err(Error::DataWithRst); } diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index fed70c7d40..7d934c825a 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -261,13 +261,55 @@ fn syn_and_rst_together() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); + // SYN and RST together. The new substream is simply ignored. let data = [0, 0, 0, 1 | 8, 0, 0, 0, 84, 0, 0, 0, 0]; + let mut cursor = 0; while cursor < data.len() { match yamux.incoming_data(&data[cursor..]) { Ok(outcome) => { yamux = outcome.yamux; cursor += outcome.bytes_read; + + assert!(!matches!( + outcome.detail, + Some(IncomingDataDetail::IncomingSubstream) + )); + } + Err(_) => panic!(), + } + } + + // Test succeeded. +} + +#[test] +fn data_with_rst() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let data = [ + 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 84, 0, 0, 0, 2, 255, 255, + ]; + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + match outcome.detail { + Some(IncomingDataDetail::IncomingSubstream) => { + yamux.accept_pending_substream(()); + } + _ => {} + } } Err(Error::DataWithRst) => return, Err(_) => panic!(), @@ -278,6 +320,43 @@ fn syn_and_rst_together() { panic!() } +#[test] +fn empty_data_frame_with_rst() { + let mut yamux = Yamux::<()>::new(Config { + capacity: 0, + is_initiator: true, + 
randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + // Normal SYN frame then normal RST frame. + let data = [ + 0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 84, 0, 0, 0, 0, + ]; + + let mut cursor = 0; + while cursor < data.len() { + match yamux.incoming_data(&data[cursor..]) { + Ok(outcome) => { + yamux = outcome.yamux; + cursor += outcome.bytes_read; + + match outcome.detail { + Some(IncomingDataDetail::IncomingSubstream) => { + yamux.accept_pending_substream(()); + } + _ => {} + } + } + Err(_) => panic!(), + } + } + + // Test succeeded. +} + #[test] fn rst_sent_when_rejecting() { let mut yamux = Yamux::<()>::new(Config { From 194184f73e04956c6ba1c3185ae10ee5f554291e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 12:52:51 +0200 Subject: [PATCH 63/74] Choose substreams to send on semi-randomly --- lib/src/libp2p/connection/yamux.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 4289457d44..e7ab52d678 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -57,7 +57,7 @@ use core::{ num::{NonZeroU32, NonZeroUsize}, }; use hashbrown::hash_map::Entry; -use rand::Rng as _; +use rand::{seq::IteratorRandom as _, Rng as _}; use rand_chacha::{rand_core::SeedableRng as _, ChaCha20Rng}; pub use header::GoAwayErrorCode; @@ -1629,13 +1629,14 @@ impl Yamux { .inner .substreams .iter_mut() - .find(|(_, s)| { + .filter(|(_, s)| { matches!(&s.state, SubstreamState::Healthy { remote_window_pending_increase, .. 
} if *remote_window_pending_increase != 0) }) + .choose(&mut self.inner.randomness) .map(|(id, sub)| (*id, sub)) { if let SubstreamState::Healthy { @@ -1672,12 +1673,11 @@ impl Yamux { // Start writing more data from another substream. // TODO: O(n) - // TODO: choose substreams in some sort of round-robin way if let Some((id, sub)) = self .inner .substreams .iter_mut() - .find(|(_, s)| match &s.state { + .filter(|(_, s)| match &s.state { SubstreamState::Healthy { write_queue, local_write_close: local_write, @@ -1689,6 +1689,7 @@ impl Yamux { } _ => false, }) + .choose(&mut self.inner.randomness) .map(|(id, sub)| (*id, sub)) { if let SubstreamState::Healthy { From 3070e75d6eac9cff5a8815d5053dec2251b09797 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 12:55:32 +0200 Subject: [PATCH 64/74] Clarify a bit doc --- lib/src/libp2p/connection/yamux.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index e7ab52d678..2124f83271 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -572,7 +572,7 @@ impl Yamux { /// > **Note**: It is only possible to add more bytes to the window and not set or reduce this /// > number of bytes, and it is also not possible to obtain the number of bytes the /// > remote is allowed. That's because it would be ambiguous whether bytes possibly - /// > in the receive queue should be counted or not. + /// > in the send or receive queue should be counted or not. 
/// /// # Panic /// From ff43b5ffa737d5d8c984e010cf40166319476325 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 13:12:45 +0200 Subject: [PATCH 65/74] Add a cache for substreams to write out --- lib/src/libp2p/connection/yamux.rs | 154 +++++++++++++++++------------ 1 file changed, 92 insertions(+), 62 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 2124f83271..79b58fed0b 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -126,6 +126,11 @@ struct YamuxInner { /// that it is returned by [`Yamux::dead_substreams`]. dead_substreams: hashbrown::HashSet, + /// Subset of the content of [`YamuxInner::substreams`] that requires some outgoing data to + /// be sent out, either because they have data to send out or because they have a window + /// update to send out. + outgoing_req_substreams: hashbrown::HashSet, + /// Number of substreams within [`YamuxInner::substreams`] whose [`Substream::inbound`] is /// `true`. num_inbound: usize, @@ -333,6 +338,10 @@ impl Yamux { config.capacity, SipHasherBuild::new(randomness.gen()), ), + outgoing_req_substreams: hashbrown::HashSet::with_capacity_and_hasher( + config.capacity, + SipHasherBuild::new(randomness.gen()), + ), num_inbound: 0, received_goaway: None, incoming: Incoming::Header(arrayvec::ArrayVec::new()), @@ -548,8 +557,22 @@ impl Yamux { SubstreamState::Healthy { local_write_close: SubstreamStateLocalWrite::Open, write_queue, + allowed_window, .. } => { + // Don't push empty data onto the queue. + if data.is_empty() { + return; + } + + // If the write queue switches from empty to non-empty, queue the substream for + // writing. + if *allowed_window != 0 && write_queue.is_empty() { + // Note that the substream might already be queued if it has a window update + // to write. + self.inner.outgoing_req_substreams.insert(substream_id.0); + } + write_queue.push_back(data); } SubstreamState::Healthy { .. 
} => { @@ -589,6 +612,11 @@ impl Yamux { .unwrap_or_else(|| panic!()) .state { + if *remote_window_pending_increase == 0 && bytes != 0 { + // Note that the substream might already be queued if it has data to write. + self.inner.outgoing_req_substreams.insert(substream_id.0); + } + *remote_window_pending_increase = remote_window_pending_increase.saturating_add(bytes); } } @@ -673,6 +701,7 @@ impl Yamux { } = substream.state { *local_write = SubstreamStateLocalWrite::FinDesired; + self.inner.outgoing_req_substreams.insert(substream_id.0); } } @@ -706,6 +735,8 @@ impl Yamux { let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); debug_assert!(_was_inserted); + self.inner.outgoing_req_substreams.remove(&substream_id.0); + // We might be currently writing a frame of data of the substream being reset. // If that happens, we need to update some internal state regarding this frame of data. match ( @@ -878,6 +909,8 @@ impl Yamux { panic!() } + debug_assert!(!self.inner.outgoing_req_substreams.contains(&id.0)); + let substream = self.inner.substreams.remove(&id.0).unwrap(); if substream.inbound { @@ -1106,6 +1139,8 @@ impl Yamux { self.inner.dead_substreams.insert(*substream_id); debug_assert!(_was_inserted); + self.inner.outgoing_req_substreams.remove(substream_id); + // We might be currently writing a frame of data of the substream // being reset. If that happens, we need to update some internal // state regarding this frame of data. @@ -1180,6 +1215,8 @@ impl Yamux { let _was_inserted = self.inner.dead_substreams.insert(stream_id); debug_assert!(_was_inserted); + self.inner.outgoing_req_substreams.remove(&stream_id); + // Check whether the remote has ACKed multiple times. if matches!( s.state, @@ -1420,6 +1457,7 @@ impl Yamux { SubstreamState::Healthy { remote_syn_acked, allowed_window, + write_queue, .. }, .. 
@@ -1432,6 +1470,10 @@ impl Yamux { (false, false) => return Err(Error::ExpectedAck), } + if *allowed_window == 0 && length != 0 && !write_queue.is_empty() { + self.inner.outgoing_req_substreams.insert(stream_id); + } + *allowed_window = allowed_window .checked_add(u64::from(length)) .ok_or(Error::LocalCreditsOverflow)?; @@ -1623,29 +1665,31 @@ impl Yamux { continue; } - // Send window update frames. - // TODO: O(n) - if let Some((id, sub)) = self + // Send either window update frames or data frames. + if let Some(substream_id) = self .inner - .substreams - .iter_mut() - .filter(|(_, s)| { - matches!(&s.state, - SubstreamState::Healthy { - remote_window_pending_increase, - .. - } if *remote_window_pending_increase != 0) - }) + .outgoing_req_substreams + .iter() .choose(&mut self.inner.randomness) - .map(|(id, sub)| (*id, sub)) + .cloned() { - if let SubstreamState::Healthy { + let sub = self.inner.substreams.get_mut(&substream_id).unwrap(); + + let SubstreamState::Healthy { first_message_queued, remote_window_pending_increase, remote_allowed_window, + write_queue, + local_write_close: local_write, + allowed_window, .. 
- } = &mut sub.state - { + } = &mut sub.state else { unreachable!() }; + + let has_data_to_write = (*allowed_window != 0 && !write_queue.is_empty()) + || matches!(local_write, SubstreamStateLocalWrite::FinDesired); + let has_window_size_update_to_write = *remote_window_pending_increase != 0; + + if has_window_size_update_to_write { let syn_ack_flag = !*first_message_queued; *first_message_queued = true; @@ -1653,61 +1697,32 @@ impl Yamux { .unwrap_or(u32::max_value()); *remote_window_pending_increase -= u64::from(update); *remote_allowed_window += u64::from(update); + + if *remote_window_pending_increase == 0 && !has_data_to_write { + self.inner.outgoing_req_substreams.remove(&substream_id); + } + self.inner.outgoing = Outgoing::Header { header: header::DecodedYamuxHeader::Window { syn: syn_ack_flag && !sub.inbound, ack: syn_ack_flag && sub.inbound, fin: false, rst: false, - stream_id: id, + stream_id: substream_id, length: update, }, header_already_sent: 0, substream_data_frame: None, }; continue; - } else { - unreachable!() - } - } - - // Start writing more data from another substream. - // TODO: O(n) - if let Some((id, sub)) = self - .inner - .substreams - .iter_mut() - .filter(|(_, s)| match &s.state { - SubstreamState::Healthy { - write_queue, - local_write_close: local_write, - allowed_window, - .. - } => { - (*allowed_window != 0 && !write_queue.is_empty()) - || matches!(local_write, SubstreamStateLocalWrite::FinDesired) - } - _ => false, - }) - .choose(&mut self.inner.randomness) - .map(|(id, sub)| (*id, sub)) - { - if let SubstreamState::Healthy { - first_message_queued, - allowed_window, - local_write_close: local_write, - write_queue, - .. 
- } = &mut sub.state - { + } else if has_data_to_write { let pending_len = write_queue.queued_bytes(); - let len_out = cmp::min( - self.inner.max_out_data_frame_size.get(), - cmp::min( - u32::try_from(pending_len).unwrap_or(u32::max_value()), - u32::try_from(*allowed_window).unwrap_or(u32::max_value()), - ), + let max_possible = cmp::min( + u32::try_from(pending_len).unwrap_or(u32::max_value()), + u32::try_from(*allowed_window).unwrap_or(u32::max_value()), ); + let len_out = + cmp::min(self.inner.max_out_data_frame_size.get(), max_possible); let len_out_usize = usize::try_from(len_out).unwrap(); *allowed_window -= u64::from(len_out); let syn_ack_flag = !*first_message_queued; @@ -1718,13 +1733,18 @@ impl Yamux { *local_write = SubstreamStateLocalWrite::FinQueued; } debug_assert!(len_out != 0 || fin_flag); + + if max_possible == len_out && !has_window_size_update_to_write { + self.inner.outgoing_req_substreams.remove(&substream_id); + } + self.inner.outgoing = Outgoing::Header { header: header::DecodedYamuxHeader::Data { syn: syn_ack_flag && !sub.inbound, ack: syn_ack_flag && sub.inbound, fin: fin_flag, rst: false, - stream_id: id, + stream_id: substream_id, length: len_out, }, header_already_sent: 0, @@ -1732,15 +1752,25 @@ impl Yamux { usize::try_from(len_out).unwrap(), ) .map(|length| { - (OutgoingSubstreamData::Healthy(SubstreamId(id)), length) + ( + OutgoingSubstreamData::Healthy(SubstreamId(substream_id)), + length, + ) }), }; + + continue; } else { - unreachable!() + // Substream was queued but there's nothing to do. Should never + // happen. We use a `debug_assert!` as to not panic in that situation, + // as the queue is just a cache and not a critical state. + debug_assert!(false); + self.inner.outgoing_req_substreams.remove(&substream_id); } - } else { - break; } + + // Nothing to send out. 
+ break; } } } From 22bfb8626a7bece27ca71b2cef07cb3799036127 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 13:18:19 +0200 Subject: [PATCH 66/74] Remove some TODOs --- lib/src/libp2p/connection/yamux.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 79b58fed0b..cc4313e855 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -47,8 +47,6 @@ // TODO: write example -// TODO: the code of this module is rather complicated; either simplify it or write a lot of tests, including fuzzing tests - use crate::util::SipHasherBuild; use alloc::{boxed::Box, collections::VecDeque, vec::Vec}; @@ -434,8 +432,7 @@ impl Yamux { // identifier are possible if the software runs for a very long time. // Rather than naively incrementing the id by two and assuming that no substream with // this ID exists, the code below properly handles wrapping around and ignores IDs - // already in use . - // TODO: simply skill whole connection if overflow + // already in use. let id_attempt = self.inner.next_outbound_substream; self.inner.next_outbound_substream = { let mut id = self.inner.next_outbound_substream.get(); From 0d402d985f87ad45e914000ced0ab62ff1e70d77 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 13:22:58 +0200 Subject: [PATCH 67/74] Spellcheck --- lib/src/libp2p/connection/yamux.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index cc4313e855..5f178f7e98 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -85,8 +85,8 @@ pub struct Config { /// /// A higher value increases the variance of the latency of the data sent on the substreams, /// which is undesirable. A lower value increases the overhead of the Yamux protocol. 
This - /// overhead is equal to `1200 / (max_out_data_frame_size + 12)` %, for example setting - /// `max_out_data_frame_size` to 24 incurs a 33% overhead. + /// overhead is equal to `1200 / (max_out_data_frame_size + 12)` per cent, for example setting + /// `max_out_data_frame_size` to 24 incurs a `33%` overhead. /// /// The "best" value depends on the bandwidth speed of the underlying connection, and is thus /// impossible to tell. @@ -582,9 +582,13 @@ impl Yamux { /// packet. /// /// The counter saturates if its maximum is reached. This could cause stalls if the - /// remote sends more data than the maximum. However, the number of bytes is stored in a `u64`, - /// the remote would have to send 2^64 bytes in order to reach this situation, making it - /// basically impossible. + /// remote sends a ton of data. However, given that the number of bytes is stored in a `u64`, + /// the remote would have to send at least `2^64` bytes in order to reach this situation, + /// making it basically impossible. + /// + /// It is, furthermore, a bad idea to increase this counter by an immense number ahead of + /// time, as the remote can shut down the connection if its own counter overflows. The way + /// this counter is supposed to be used is in a "streaming" way. /// /// > **Note**: When a substream has just been opened or accepted, it starts with an initial /// > window of [`NEW_SUBSTREAMS_FRAME_SIZE`]. @@ -2007,7 +2011,7 @@ pub enum Error { ExpectedAck, /// The remote sent an ACK flag but shouldn't have. UnexpectedAck, - /// Received multiple GoAway frames. + /// Received multiple `GoAway` frames. 
MultipleGoAways, } From f68652ddbfb644230cadea8e13ee747e3eadb487 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 13:23:20 +0200 Subject: [PATCH 68/74] PR links --- wasm-node/CHANGELOG.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/wasm-node/CHANGELOG.md b/wasm-node/CHANGELOG.md index 1427c63884..8ee6d6e5d5 100644 --- a/wasm-node/CHANGELOG.md +++ b/wasm-node/CHANGELOG.md @@ -5,13 +5,13 @@ ### Changed - Removed support for the `ls` message in the multistream-select protocol, in accordance with the rest of the libp2p ecosystem. This message was in practice never used, and removing support for it simplifies the implementation. ([#379](https://github.com/smol-dot/smoldot/pull/379)) -- Yamux now considers answering pings in the wrong order as invalid. +- Yamux now considers answering pings in the wrong order as invalid. ([#383](https://github.com/smol-dot/smoldot/pull/383)) ### Fixed -- Properly check whether Yamux substream IDs allocated by the remote are valid. -- Fix the size of the data of Yamux frames with the `SYN` flag not being verified against the allowed credits. -- Fix Yamux repeatedly sending empty data frames when the allowed window size is 0. +- Properly check whether Yamux substream IDs allocated by the remote are valid. ([#383](https://github.com/smol-dot/smoldot/pull/383)) +- Fix the size of the data of Yamux frames with the `SYN` flag not being verified against the allowed credits. ([#383](https://github.com/smol-dot/smoldot/pull/383)) +- Fix Yamux repeatedly sending empty data frames when the allowed window size is 0. 
([#383](https://github.com/smol-dot/smoldot/pull/383)) ## 1.0.1 - 2023-03-29 From 63a7a14a8defb3eb434c1a1c5d3efbd3bc85a09f Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 13:53:01 +0200 Subject: [PATCH 69/74] Fix no checking if substream still healthy --- lib/src/libp2p/connection/yamux.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 5f178f7e98..4fd1c31d5a 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1212,6 +1212,9 @@ impl Yamux { // always keep traces of old substreams, we have no way to know whether // this is the case or not. let Some(s) = self.inner.substreams.get_mut(&stream_id) else { continue }; + if !matches!(s.state, SubstreamState::Healthy { .. }) { + continue; + } let _was_inserted = self.inner.dead_substreams.insert(stream_id); debug_assert!(_was_inserted); From 58f770414e59e5c51ca62144f2d480ec8efadd19 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 14:31:39 +0200 Subject: [PATCH 70/74] No longer panic anywhere in the public API, except invalid substreams --- .../connection/established/single_stream.rs | 92 ++++--- lib/src/libp2p/connection/yamux.rs | 248 ++++++++++-------- lib/src/libp2p/connection/yamux/tests.rs | 170 +++++++----- 3 files changed, 309 insertions(+), 201 deletions(-) diff --git a/lib/src/libp2p/connection/established/single_stream.rs b/lib/src/libp2p/connection/established/single_stream.rs index 9ca5d19a46..030de52947 100644 --- a/lib/src/libp2p/connection/established/single_stream.rs +++ b/lib/src/libp2p/connection/established/single_stream.rs @@ -327,7 +327,12 @@ where // subtle way. At the time of writing of this comment the limit should be // properly enforced, however it is not considered problematic if it weren't. 
if self.inner.yamux.num_inbound() >= self.inner.max_inbound_substreams { - self.inner.yamux.reject_pending_substream(); + // Can only panic if there's no incoming substream, which we know for sure + // is the case here. + self.inner + .yamux + .reject_pending_substream() + .unwrap_or_else(|_| panic!()); continue; } @@ -340,11 +345,14 @@ where .max() .unwrap_or(0); + // Can only panic if there's no incoming substream, which we know for sure + // is the case here. self.inner .yamux .accept_pending_substream(Some(substream::Substream::ingoing( max_protocol_name_len, - ))); + ))) + .unwrap_or_else(|_| panic!()); } Some( @@ -597,14 +605,17 @@ where let written_bytes = substream_read_write.written_bytes; if written_bytes != 0 { debug_assert!(!write_is_closed); - inner.yamux.write( - substream_id, - inner.intermediary_buffer[..written_bytes].to_vec(), - ); + inner + .yamux + .write( + substream_id, + inner.intermediary_buffer[..written_bytes].to_vec(), + ) + .unwrap(); } if !write_is_closed && closed_after { debug_assert_eq!(written_bytes, 0); - inner.yamux.close(substream_id); + inner.yamux.close(substream_id).unwrap(); } match substream_update { @@ -612,7 +623,7 @@ where None => { if !closed_after || !read_is_closed { // TODO: what we do here is definitely correct, but the docs of `reset()` seem sketchy, investigate - inner.yamux.reset(substream_id); + inner.yamux.reset(substream_id).unwrap(); } } }; @@ -750,6 +761,7 @@ where self.inner .yamux .send_goaway(yamux::GoAwayErrorCode::NormalTermination) + .unwrap() } /// Sends a request to the remote. 
@@ -795,20 +807,21 @@ where } }; - let substream_id = - self.inner - .yamux - .open_substream(Some(substream::Substream::request_out( - self.inner.request_protocols[protocol_index].name.clone(), // TODO: clone :-/ - timeout, - if has_length_prefix { - Some(request) - } else { - None - }, - self.inner.request_protocols[protocol_index].max_response_size, - user_data, - ))); + let substream_id = self + .inner + .yamux + .open_substream(Some(substream::Substream::request_out( + self.inner.request_protocols[protocol_index].name.clone(), // TODO: clone :-/ + timeout, + if has_length_prefix { + Some(request) + } else { + None + }, + self.inner.request_protocols[protocol_index].max_response_size, + user_data, + ))) + .unwrap(); // TODO: consider not panicking // TODO: we add some bytes due to the length prefix, this is a bit hacky as we should ask this information from the substream self.inner.yamux.add_remote_window_saturating( @@ -877,18 +890,19 @@ where // TODO: turn this assert into something that can't panic? 
assert!(handshake.len() <= max_handshake_size); - let substream = - self.inner - .yamux - .open_substream(Some(substream::Substream::notifications_out( - timeout, - self.inner.notifications_protocols[protocol_index] - .name - .clone(), // TODO: clone :-/, - handshake, - max_handshake_size, - user_data, - ))); + let substream = self + .inner + .yamux + .open_substream(Some(substream::Substream::notifications_out( + timeout, + self.inner.notifications_protocols[protocol_index] + .name + .clone(), // TODO: clone :-/, + handshake, + max_handshake_size, + user_data, + ))) + .unwrap(); // TODO: consider not panicking SubstreamId(SubstreamIdInner::SingleStream(substream)) } @@ -1121,9 +1135,13 @@ impl ConnectionPrototype { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let outgoing_pings = yamux.open_substream(Some(substream::Substream::ping_out( - config.ping_protocol.clone(), - ))); + let outgoing_pings = yamux + .open_substream(Some(substream::Substream::ping_out( + config.ping_protocol.clone(), + ))) + // Can only panic if a `GoAway` has been received, or if there are too many substreams + // already open, which we know for sure can't happen here + .unwrap_or_else(|_| panic!()); SingleStream { encryption: self.encryption, diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 4fd1c31d5a..65c2a50250 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -54,7 +54,6 @@ use core::{ cmp, fmt, mem, num::{NonZeroU32, NonZeroUsize}, }; -use hashbrown::hash_map::Entry; use rand::{seq::IteratorRandom as _, Rng as _}; use rand_chacha::{rand_core::SeedableRng as _, ChaCha20Rng}; @@ -401,77 +400,46 @@ impl Yamux { /// > protocol, all substreams in the context of libp2p start with a /// > multistream-select negotiation, and this scenario can therefore never happen. /// - /// # Panic - /// - /// Panics if all possible substream IDs are already taken. 
This happen if there exists more - /// than approximately `2^31` substreams, which is very unlikely to happen unless there exists - /// a bug in the code. + /// Returns an error if a [`IncomingDataDetail::GoAway`] event has been generated. This can + /// also be checked by calling [`Yamux::received_goaway`]. /// - /// Panics if a [`IncomingDataDetail::GoAway`] event has been generated. This can also be - /// checked by calling [`Yamux::received_goaway`]. + /// Returns an error if all possible substream IDs are already taken. This happen if there + /// exists more than approximately `2^31` substreams, which is very unlikely to happen unless + /// there exists a bug in the code. /// - pub fn open_substream(&mut self, user_data: T) -> SubstreamId { - // It is forbidden to open new substreams if a `GoAway` frame has been received. - assert!( - self.inner.received_goaway.is_none(), - "can't open substream after goaway" - ); + pub fn open_substream(&mut self, user_data: T) -> Result { + if self.inner.received_goaway.is_some() { + return Err(OpenSubstreamError::GoAwayReceived); + } - // Make sure that the `loop` below can finish. - assert!( - usize::try_from(u32::max_value() / 2 - 1).map_or(true, |full_len| self - .inner - .substreams - .len() - < full_len) - ); + let substream_id = self.inner.next_outbound_substream.clone(); - // Grab a `VacantEntry` in `self.inner.substreams`. - let entry = loop { - // Allocating a substream ID is surprisingly difficult because overflows in the - // identifier are possible if the software runs for a very long time. - // Rather than naively incrementing the id by two and assuming that no substream with - // this ID exists, the code below properly handles wrapping around and ignores IDs - // already in use. 
- let id_attempt = self.inner.next_outbound_substream; - self.inner.next_outbound_substream = { - let mut id = self.inner.next_outbound_substream.get(); - loop { - // Odd ids are reserved for the initiator and even ids are reserved for the - // listener. Assuming that the current id is valid, incrementing by 2 will - // lead to a valid id as well. - id = id.wrapping_add(2); - // However, the substream ID `0` is always invalid. - match NonZeroU32::new(id) { - Some(v) => break v, - None => continue, - } - } - }; - if let Entry::Vacant(e) = self.inner.substreams.entry(id_attempt) { - break e; - } + self.inner.next_outbound_substream = match self.inner.next_outbound_substream.checked_add(1) + { + Some(new_id) => new_id, + None => return Err(OpenSubstreamError::NoFreeSubstreamId), }; - // ID that was just allocated. - let substream_id = SubstreamId(*entry.key()); - - entry.insert(Substream { - state: SubstreamState::Healthy { - first_message_queued: false, - remote_syn_acked: false, - remote_allowed_window: NEW_SUBSTREAMS_FRAME_SIZE, - remote_window_pending_increase: 0, - allowed_window: NEW_SUBSTREAMS_FRAME_SIZE, - local_write_close: SubstreamStateLocalWrite::Open, - remote_write_closed: false, - write_queue: write_queue::WriteQueue::new(), + let _prev_value = self.inner.substreams.insert( + substream_id, + Substream { + state: SubstreamState::Healthy { + first_message_queued: false, + remote_syn_acked: false, + remote_allowed_window: NEW_SUBSTREAMS_FRAME_SIZE, + remote_window_pending_increase: 0, + allowed_window: NEW_SUBSTREAMS_FRAME_SIZE, + local_write_close: SubstreamStateLocalWrite::Open, + remote_write_closed: false, + write_queue: write_queue::WriteQueue::new(), + }, + inbound: false, + user_data, }, - inbound: false, - user_data, - }); + ); + debug_assert!(_prev_value.is_none()); - substream_id + Ok(SubstreamId(substream_id)) } /// Returns `Some` if a [`IncomingDataDetail::GoAway`] event has been generated in the past, @@ -537,20 +505,21 @@ impl Yamux { /// 
Appends data to the buffer of data to send out on this substream. /// + /// Returns an error if [`Yamux::close`] or [`Yamux::reset`] has been called on this substream, + /// or if the substream has been reset by the remote. + /// /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. - /// Panics if [`Yamux::close`] has already been called on this substream. /// - // TODO: doc obsolete - pub fn write(&mut self, substream_id: SubstreamId, data: Vec) { + pub fn write(&mut self, substream_id: SubstreamId, data: Vec) -> Result<(), WriteError> { let substream = self .inner .substreams .get_mut(&substream_id.0) - .unwrap_or_else(|| panic!("invalid substream")); + .unwrap_or_else(|| panic!()); + match &mut substream.state { - SubstreamState::Reset => {} SubstreamState::Healthy { local_write_close: SubstreamStateLocalWrite::Open, write_queue, @@ -559,7 +528,7 @@ impl Yamux { } => { // Don't push empty data onto the queue. if data.is_empty() { - return; + return Ok(()); } // If the write queue switches from empty to non-empty, queue the substream for @@ -571,10 +540,14 @@ impl Yamux { } write_queue.push_back(data); + Ok(()) } - SubstreamState::Healthy { .. } => { - panic!("write after close") - } + SubstreamState::Reset => Err(WriteError::Reset), + SubstreamState::Healthy { + local_write_close: + SubstreamStateLocalWrite::FinDesired | SubstreamStateLocalWrite::FinQueued, + .. + } => Err(WriteError::Closed), } } @@ -598,6 +571,8 @@ impl Yamux { /// > remote is allowed. That's because it would be ambiguous whether bytes possibly /// > in the send or receive queue should be counted or not. /// + /// Has no effect if the remote has already closed their writing side. + /// /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. @@ -605,6 +580,7 @@ impl Yamux { pub fn add_remote_window_saturating(&mut self, substream_id: SubstreamId, bytes: u64) { if let SubstreamState::Healthy { remote_window_pending_increase, + remote_write_closed: false, .. 
} = &mut self .inner @@ -626,6 +602,9 @@ impl Yamux { /// /// Returns 0 if the substream is in a reset state. /// + /// > **Note**: Might return non-zero even if [`Yamux::close`] has been called, as this counts + /// > the number of bytes still waiting to be written out. + /// /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. @@ -682,27 +661,37 @@ impl Yamux { /// Marks the substream as closed. It is no longer possible to write data on it. /// + /// Returns an error if the local writing side is already closed, which can happen if + /// [`Yamux::close`] has already been called on this substream. + /// Returns an error if [`Yamux::reset`] has been called on this substream, or if the remote + /// has reset the substream in the past. + /// /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. - /// Panics if the local writing side is already closed, which can happen if [`Yamux::close`] - /// has already been called on this substream or if the remote has reset the substream in the - /// past. /// - // TODO: doc obsolete - pub fn close(&mut self, substream_id: SubstreamId) { + pub fn close(&mut self, substream_id: SubstreamId) -> Result<(), CloseError> { let substream = self .inner .substreams .get_mut(&substream_id.0) .unwrap_or_else(|| panic!()); - if let SubstreamState::Healthy { - local_write_close: ref mut local_write @ SubstreamStateLocalWrite::Open, - .. - } = substream.state - { - *local_write = SubstreamStateLocalWrite::FinDesired; - self.inner.outgoing_req_substreams.insert(substream_id.0); + + match substream.state { + SubstreamState::Healthy { + local_write_close: ref mut local_write @ SubstreamStateLocalWrite::Open, + .. + } => { + *local_write = SubstreamStateLocalWrite::FinDesired; + self.inner.outgoing_req_substreams.insert(substream_id.0); + Ok(()) + } + SubstreamState::Healthy { + local_write_close: + SubstreamStateLocalWrite::FinDesired | SubstreamStateLocalWrite::FinQueued, + .. 
+ } => Err(CloseError::AlreadyClosed), + SubstreamState::Reset => Err(CloseError::Reset), } } @@ -710,14 +699,14 @@ impl Yamux { /// /// Use this method when a protocol error happens on a substream. /// + /// Returns an error if [`Yamux::reset`] has already been called on this substream or if the + /// remote has reset the substream in the past. + /// /// # Panic /// /// Panics if the [`SubstreamId`] is invalid. - /// Panics if the local writing side is already closed, which can happen if [`Yamux::close`] - /// has already been called on this substream or if the remote has reset the substream in the - /// past. /// - pub fn reset(&mut self, substream_id: SubstreamId) { + pub fn reset(&mut self, substream_id: SubstreamId) -> Result<(), ResetError> { // Add an entry to the list of RST headers to send to the remote. if let SubstreamState::Healthy { .. } = self .inner @@ -730,8 +719,9 @@ impl Yamux { // `max_simultaneous_rst_substreams`, as locally-emitted RST frames aren't the // remote's fault. self.inner.rsts_to_send.push_back(substream_id.0); + } else { + return Err(ResetError::AlreadyReset); } - // TODO: else { panic!() } ?! let _was_inserted = self.inner.dead_substreams.insert(substream_id.0); debug_assert!(_was_inserted); @@ -767,6 +757,8 @@ impl Yamux { } _ => {} } + + Ok(()) } /// Queues sending out a ping to the remote. @@ -810,19 +802,12 @@ impl Yamux { /// /// All follow-up requests for new substreams from the remote are automatically rejected. /// [`IncomingDataDetail::IncomingSubstream`] events can no longer happen. - /// - /// # Panic - /// - /// Panics if this function has already been called in the past. This can be verified using - /// [`Yamux::goaway_queued_or_sent`]. It is illegal to call this function more than once on - /// the same instance of [`Yamux`]. 
- /// - pub fn send_goaway(&mut self, code: GoAwayErrorCode) { + pub fn send_goaway(&mut self, code: GoAwayErrorCode) -> Result<(), SendGoAwayError> { match self.inner.outgoing_goaway { OutgoingGoAway::NotRequired => { self.inner.outgoing_goaway = OutgoingGoAway::Required(code) } - _ => panic!("send_goaway called multiple times"), + _ => return Err(SendGoAwayError::AlreadySent), } // If the remote is currently opening a substream, ignore it. The remote understands when @@ -844,6 +829,8 @@ impl Yamux { } }; } + + Ok(()) } /// Returns the list of all substreams that have been closed or reset. @@ -1796,7 +1783,10 @@ impl Yamux { /// /// Panics if no incoming substream is currently pending. /// - pub fn accept_pending_substream(&mut self, user_data: T) -> SubstreamId { + pub fn accept_pending_substream( + &mut self, + user_data: T, + ) -> Result { match self.inner.incoming { Incoming::PendingIncomingSubstream { substream_id, @@ -1838,9 +1828,9 @@ impl Yamux { } }; - substream_id + Ok(substream_id) } - _ => panic!(), + _ => Err(PendingSubstreamError::NoPendingSubstream), } } @@ -1858,7 +1848,7 @@ impl Yamux { /// /// Panics if no incoming substream is currently pending. /// - pub fn reject_pending_substream(&mut self) { + pub fn reject_pending_substream(&mut self) -> Result<(), PendingSubstreamError> { match self.inner.incoming { Incoming::PendingIncomingSubstream { substream_id, @@ -1873,8 +1863,9 @@ impl Yamux { }; self.inner.rsts_to_send.push_back(substream_id.0); + Ok(()) } - _ => panic!(), + _ => Err(PendingSubstreamError::NoPendingSubstream), } } } @@ -1985,6 +1976,55 @@ pub enum IncomingDataDetail { PingResponse, } +/// Error potentially returned by [`Yamux::open_substream`]. +#[derive(Debug, derive_more::Display)] +pub enum OpenSubstreamError { + /// A `GoAway` frame has been received in the past. + GoAwayReceived, + /// Impossible to allocate a new substream. + NoFreeSubstreamId, +} + +/// Error potentially returned by [`Yamux::write`]. 
+#[derive(Debug, derive_more::Display)] +pub enum WriteError { + /// Substream was already closed. + Closed, + /// Substream was reset. + Reset, +} + +/// Error potentially returned by [`Yamux::close`]. +#[derive(Debug, derive_more::Display)] +pub enum CloseError { + /// Substream was already closed. + AlreadyClosed, + /// Substream was reset. + Reset, +} + +/// Error potentially returned by [`Yamux::reset`]. +#[derive(Debug, derive_more::Display)] +pub enum ResetError { + /// Substream was already reset. + AlreadyReset, +} + +/// Error potentially returned by [`Yamux::send_goaway`]. +#[derive(Debug, derive_more::Display)] +pub enum SendGoAwayError { + /// A `GoAway` has already been sent. + AlreadySent, +} + +/// Error potentially returned by [`Yamux::accept_pending_substream`] or +/// [`Yamux::reject_pending_substream`]. +#[derive(Debug, derive_more::Display)] +pub enum PendingSubstreamError { + /// No substream is pending. + NoPendingSubstream, +} + /// Error while decoding the Yamux stream. 
#[derive(Debug, derive_more::Display)] pub enum Error { diff --git a/lib/src/libp2p/connection/yamux/tests.rs b/lib/src/libp2p/connection/yamux/tests.rs index 7d934c825a..9cf1e6a1b0 100644 --- a/lib/src/libp2p/connection/yamux/tests.rs +++ b/lib/src/libp2p/connection/yamux/tests.rs @@ -17,7 +17,11 @@ #![cfg(test)] -use super::{Config, Error, GoAwayErrorCode, IncomingDataDetail, Yamux}; +use super::{ + CloseError, Config, Error, GoAwayErrorCode, IncomingDataDetail, OpenSubstreamError, WriteError, + Yamux, +}; + use core::{ cmp, num::{NonZeroU32, NonZeroUsize}, @@ -64,7 +68,7 @@ fn not_immediate_data_send_when_opening_substream() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let _ = yamux.open_substream(()); + let _ = yamux.open_substream(()).unwrap(); assert!(yamux.extract_next(usize::max_value()).is_none()) } @@ -79,8 +83,8 @@ fn syn_sent() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"foo".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"foo".to_vec()).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -102,8 +106,8 @@ fn max_out_data_frame_size_works() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"foo".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"foo".to_vec()).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -128,8 +132,8 @@ fn extract_bytes_one_by_one() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"foo".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, 
b"foo".to_vec()).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(1) { @@ -163,7 +167,7 @@ fn inject_bytes_one_by_one() { match outcome.detail { Some(IncomingDataDetail::IncomingSubstream) => { assert_eq!(cursor, 11); // We've read 12 bytes but `cursor` is still 11 - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } Some(IncomingDataDetail::DataFrame { start_offset, .. }) => { assert_eq!(start_offset, 0); @@ -202,14 +206,16 @@ fn ack_sent() { match outcome.detail { Some(IncomingDataDetail::IncomingSubstream) => { assert!(opened_substream.is_none()); - opened_substream = Some(yamux.accept_pending_substream(())) + opened_substream = Some(yamux.accept_pending_substream(()).unwrap()) } _ => {} } } } - yamux.write(opened_substream.unwrap(), b"foo".to_vec()); + yamux + .write(opened_substream.unwrap(), b"foo".to_vec()) + .unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -306,7 +312,7 @@ fn data_with_rst() { match outcome.detail { Some(IncomingDataDetail::IncomingSubstream) => { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } _ => {} } @@ -345,7 +351,7 @@ fn empty_data_frame_with_rst() { match outcome.detail { Some(IncomingDataDetail::IncomingSubstream) => { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } _ => {} } @@ -376,7 +382,9 @@ fn rst_sent_when_rejecting() { yamux = outcome.yamux; cursor += outcome.bytes_read; match outcome.detail { - Some(IncomingDataDetail::IncomingSubstream) => yamux.reject_pending_substream(), + Some(IncomingDataDetail::IncomingSubstream) => { + yamux.reject_pending_substream().unwrap() + } _ => {} } } @@ -417,7 +425,9 @@ fn max_simultaneous_rst_substreams() { yamux = outcome.yamux; cursor += outcome.bytes_read; match outcome.detail { - Some(IncomingDataDetail::IncomingSubstream) => yamux.reject_pending_substream(), + 
Some(IncomingDataDetail::IncomingSubstream) => { + yamux.reject_pending_substream().unwrap() + } _ => {} } } @@ -477,7 +487,7 @@ fn substream_opened_twice() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(Error::UnexpectedSyn(v)) if v.get() == 84 => return, @@ -514,7 +524,7 @@ fn substream_opened_back_after_rst() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } let dead_substream = yamux.dead_substreams().next().map(|(s, ..)| s); @@ -552,10 +562,10 @@ fn substream_opened_back_after_graceful_closing() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - let substream_id = yamux.accept_pending_substream(()); + let substream_id = yamux.accept_pending_substream(()).unwrap(); // Close the substream gracefully. 
- yamux.close(substream_id); + yamux.close(substream_id).unwrap(); } } Err(_) => panic!(), @@ -613,8 +623,8 @@ fn missing_ack() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"hello world".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"hello world".to_vec()).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -654,8 +664,8 @@ fn multiple_acks() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"hello world".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"hello world".to_vec()).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -698,12 +708,12 @@ fn multiple_writes_combined_into_one() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); + let substream_id = yamux.open_substream(()).unwrap(); // Write multiple times. All these writes should be combined into a single data frame. - yamux.write(substream_id, b"aaaa".to_vec()); - yamux.write(substream_id, b"cc".to_vec()); - yamux.write(substream_id, b"bbbbbb".to_vec()); + yamux.write(substream_id, b"aaaa".to_vec()).unwrap(); + yamux.write(substream_id, b"cc".to_vec()).unwrap(); + yamux.write(substream_id, b"bbbbbb".to_vec()).unwrap(); let mut output = Vec::new(); // We read 7 bytes at a time, in order to land in-between the buffers. 
@@ -727,9 +737,9 @@ fn close_before_syn_sent() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"foo".to_vec()); - yamux.close(substream_id); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"foo".to_vec()).unwrap(); + yamux.close(substream_id).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -741,7 +751,41 @@ fn close_before_syn_sent() { } #[test] -#[should_panic = "write after close"] +fn close_twice() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()).unwrap(); + yamux.close(substream_id).unwrap(); + assert!(matches!( + yamux.close(substream_id), + Err(CloseError::AlreadyClosed) + )); +} + +#[test] +fn close_after_reset() { + let mut yamux = Yamux::new(Config { + capacity: 0, + is_initiator: true, + randomness_seed: [0; 32], + max_out_data_frame_size: NonZeroU32::new(u32::max_value()).unwrap(), + max_simultaneous_queued_pongs: NonZeroUsize::new(4).unwrap(), + max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), + }); + + let substream_id = yamux.open_substream(()).unwrap(); + yamux.reset(substream_id).unwrap(); + assert!(matches!(yamux.close(substream_id), Err(CloseError::Reset))); +} + +#[test] fn write_after_close_illegal() { let mut yamux = Yamux::new(Config { capacity: 0, @@ -752,13 +796,16 @@ fn write_after_close_illegal() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"foo".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + 
yamux.write(substream_id, b"foo".to_vec()).unwrap(); assert!(yamux.can_send(substream_id)); - yamux.close(substream_id); + yamux.close(substream_id).unwrap(); assert!(!yamux.can_send(substream_id)); - yamux.write(substream_id, b"test".to_vec()); + assert!(matches!( + yamux.write(substream_id, b"test".to_vec()), + Err(WriteError::Closed) + )); } #[test] @@ -788,7 +835,7 @@ fn credits_exceeded_checked_before_data_is_received() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(Error::CreditsExceeded) => return, @@ -825,7 +872,7 @@ fn credits_exceeded_checked_at_the_syn() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(Error::CreditsExceeded) => return, @@ -865,7 +912,7 @@ fn data_coming_with_the_syn_taken_into_account() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(Error::CreditsExceeded) => return, @@ -905,7 +952,7 @@ fn add_remote_window_works() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - let substream_id = yamux.accept_pending_substream(()); + let substream_id = yamux.accept_pending_substream(()).unwrap(); // `add_remote_window` doesn't immediately raise the limit, so we flush the // output buffer in order to obtain a window frame. 
@@ -955,7 +1002,7 @@ fn add_remote_window_doesnt_immediately_raise_limit() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - let substream_id = yamux.accept_pending_substream(()); + let substream_id = yamux.accept_pending_substream(()).unwrap(); // `add_remote_window` shouldn't immediately raise the limit. yamux.add_remote_window_saturating(substream_id, 100 * 1024); @@ -981,7 +1028,7 @@ fn add_remote_window_saturates() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); + let substream_id = yamux.open_substream(()).unwrap(); // Check that `add_remote_window_saturating` doesn't panic. yamux.add_remote_window_saturating(substream_id, u64::max_value()); @@ -998,8 +1045,8 @@ fn remote_default_window_respected() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, vec![255; 300 * 1024]); // Exceeds default limit. + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, vec![255; 300 * 1024]).unwrap(); // Exceeds default limit. let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -1036,7 +1083,7 @@ fn remote_window_frames_respected() { if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { assert!(accepted_substream.is_none()); - accepted_substream = Some(yamux.accept_pending_substream(())); + accepted_substream = Some(yamux.accept_pending_substream(()).unwrap()); } } Err(_) => panic!(), @@ -1045,7 +1092,7 @@ fn remote_window_frames_respected() { let substream_id = accepted_substream.unwrap(); - yamux.write(substream_id, vec![255; 300 * 1024]); // Exceeds default limit. + yamux.write(substream_id, vec![255; 300 * 1024]).unwrap(); // Exceeds default limit. 
let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -1084,7 +1131,7 @@ fn write_after_fin() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(Error::WriteAfterFin) => return, @@ -1120,7 +1167,7 @@ fn write_after_fin_even_with_empty_frame() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(Error::WriteAfterFin) => return, @@ -1158,7 +1205,7 @@ fn window_frame_with_fin_after_fin() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(_) => panic!(), @@ -1194,7 +1241,7 @@ fn window_frame_without_fin_after_fin() { cursor += outcome.bytes_read; if matches!(outcome.detail, Some(IncomingDataDetail::IncomingSubstream)) { - yamux.accept_pending_substream(()); + yamux.accept_pending_substream(()).unwrap(); } } Err(_) => panic!(), @@ -1471,8 +1518,8 @@ fn dont_send_syn_after_goaway() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"foo".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"foo".to_vec()).unwrap(); assert!(yamux.can_send(substream_id)); // GoAway frame. 
@@ -1504,8 +1551,8 @@ fn substream_reset_on_goaway_if_not_acked() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"foo".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"foo".to_vec()).unwrap(); while let Some(_) = yamux.extract_next(usize::max_value()) {} // GoAway frame. @@ -1536,8 +1583,8 @@ fn can_still_send_after_goaway_if_acked() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - let substream_id = yamux.open_substream(()); - yamux.write(substream_id, b"hello world".to_vec()); + let substream_id = yamux.open_substream(()).unwrap(); + yamux.write(substream_id, b"hello world".to_vec()).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -1563,7 +1610,7 @@ fn can_still_send_after_goaway_if_acked() { assert!(yamux.can_send(substream_id)); - yamux.write(substream_id, b"foo".to_vec()); + yamux.write(substream_id, b"foo".to_vec()).unwrap(); let mut output = Vec::new(); while let Some(out) = yamux.extract_next(usize::max_value()) { @@ -1614,7 +1661,9 @@ fn ignore_incoming_substreams_after_goaway() { max_simultaneous_rst_substreams: NonZeroUsize::new(1024).unwrap(), }); - yamux.send_goaway(GoAwayErrorCode::NormalTermination); + yamux + .send_goaway(GoAwayErrorCode::NormalTermination) + .unwrap(); // New substream. let data = [0, 0, 0, 1, 0, 0, 0, 84, 0, 0, 0, 0]; @@ -1637,7 +1686,6 @@ fn ignore_incoming_substreams_after_goaway() { } #[test] -#[should_panic = "can't open substream after goaway"] fn opening_forbidden_after_goaway() { let mut yamux = Yamux::new(Config { capacity: 0, @@ -1661,6 +1709,8 @@ fn opening_forbidden_after_goaway() { } } - // Panics. 
- yamux.open_substream(()); + assert!(matches!( + yamux.open_substream(()), + Err(OpenSubstreamError::GoAwayReceived) + )); } From 4a3aad0d45d34cab9945984d14a3c4bd88de2184 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 14:33:39 +0200 Subject: [PATCH 71/74] Doc and TODO fix --- lib/src/libp2p/connection/yamux.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 65c2a50250..03f358b4b4 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -45,8 +45,6 @@ //! on the logic of the higher-level protocols. Failing to do so might lead to potential DoS //! attack vectors. -// TODO: write example - use crate::util::SipHasherBuild; use alloc::{boxed::Box, collections::VecDeque, vec::Vec}; @@ -107,6 +105,7 @@ pub struct Config { pub max_simultaneous_rst_substreams: NonZeroUsize, } +/// Yamux state machine. See [the module-level documentation](..) for more information. pub struct Yamux { /// The actual fields are wrapped in a `Box` because the `Yamux` object is moved around pretty /// often. 
From 35d6e5095ad322273cafc933a915b8de8ea2a32c Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 14:48:44 +0200 Subject: [PATCH 72/74] Oops, substream IDs must increment by 2 --- lib/src/libp2p/connection/yamux.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index 03f358b4b4..e4cdc4cc81 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -413,7 +413,7 @@ impl Yamux { let substream_id = self.inner.next_outbound_substream.clone(); - self.inner.next_outbound_substream = match self.inner.next_outbound_substream.checked_add(1) + self.inner.next_outbound_substream = match self.inner.next_outbound_substream.checked_add(2) { Some(new_id) => new_id, None => return Err(OpenSubstreamError::NoFreeSubstreamId), From 64205f7ef82d07d9096801e267138ff3131bb648 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 14:54:59 +0200 Subject: [PATCH 73/74] Fix clippy warning --- lib/src/libp2p/connection/yamux.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index e4cdc4cc81..ee82d89e07 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -1504,7 +1504,11 @@ impl Yamux { /// > however, this would be rather sub-optimal considering that buffers to send out /// > are already stored in their final form in the state machine. 
pub fn extract_next(&'_ mut self, size_bytes: usize) -> Option + '_> { - while size_bytes != 0 { + if size_bytes == 0 { + return None; + } + + loop { match self.inner.outgoing { Outgoing::Header { ref mut header, From 3ea28b94c2bbd14fca0c120e7dc942f36f02ed64 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Apr 2023 14:56:36 +0200 Subject: [PATCH 74/74] Docfix --- lib/src/libp2p/connection/yamux.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/lib/src/libp2p/connection/yamux.rs b/lib/src/libp2p/connection/yamux.rs index ee82d89e07..baafea1eee 100644 --- a/lib/src/libp2p/connection/yamux.rs +++ b/lib/src/libp2p/connection/yamux.rs @@ -801,6 +801,7 @@ impl Yamux { /// /// All follow-up requests for new substreams from the remote are automatically rejected. /// [`IncomingDataDetail::IncomingSubstream`] events can no longer happen. + /// pub fn send_goaway(&mut self, code: GoAwayErrorCode) -> Result<(), SendGoAwayError> { match self.inner.outgoing_goaway { OutgoingGoAway::NotRequired => { @@ -1782,9 +1783,7 @@ impl Yamux { /// the substream is either accepted or rejected. This function should thus be called as /// soon as possible. /// - /// # Panic - /// - /// Panics if no incoming substream is currently pending. + /// Returns an error if no incoming substream is currently pending. /// pub fn accept_pending_substream( &mut self, @@ -1847,9 +1846,7 @@ impl Yamux { /// the substream is either accepted or rejected. This function should thus be called as /// soon as possible. /// - /// # Panic - /// - /// Panics if no incoming substream is currently pending. + /// Returns an error if no incoming substream is currently pending. /// pub fn reject_pending_substream(&mut self) -> Result<(), PendingSubstreamError> { match self.inner.incoming {