chore: clippy, fmt, warnings (#64)

* clippy fixes

* fmt fixes

* migrate to 2024 and fix workspace warning

* fix profile

* rustfmt

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
This commit is contained in:
th4s
2025-05-01 19:09:50 +02:00
committed by GitHub
parent 6168663495
commit 6c9c40e68a
43 changed files with 201 additions and 152 deletions

View File

@@ -12,6 +12,7 @@ members = [
"rangeset",
"rangeset/fuzz",
]
resolver = "3"
[workspace.dependencies]
futures-plex = { path = "futures-plex" }
@@ -44,3 +45,6 @@ tokio-serde = "0.8"
tokio-util = "0.7"
tracing = "0.1"
tracing-subscriber = "0.3"
[profile.release.package.rangeset-fuzz]
debug = 1

View File

@@ -13,7 +13,8 @@ pub trait Rand0_6CompatExt {
/// Wraps `self` in a compatibility wrapper that implements `0.6` traits.
///
/// Same as [`Rand0_6CompatExt::compat`] but instead of taking ownership it borrows.
/// Same as [`Rand0_6CompatExt::compat`] but instead of taking ownership it
/// borrows.
fn compat_by_ref(&mut self) -> Rand0_6CompatWrapper<&mut Self>
where
Self: Sized,

View File

@@ -21,16 +21,17 @@ pub fn criterion_benchmark(c: &mut Criterion) {
});
}
// To benchmark the worst case where [range.start] is close to [other.end()], i.e. N
// iterations are needed if there is no boundary short citcuit (N == other.len_ranges()).
// To benchmark the worst case where [range.start] is close to [other.end()],
// i.e. N iterations are needed if there is no boundary short circuit (N ==
// other.len_ranges()).
fn boundary_range_subset_of_rangeset(other: &RangeSet<u32>) {
let range = 9997..10005;
let _ = range.is_subset(other);
}
// To benchmark the worst case where [rangeset.last().start] is close to [other.end()],
// i.e. N iterations of [is_subset()] check are needed if there is no boundary short
// citcuit (N ~= rangeset.len_ranges()).
// To benchmark the worst case where [rangeset.last().start] is close to
// [other.end()], i.e. N iterations of [is_subset()] check are needed if there
// is no boundary short circuit (N ~= rangeset.len_ranges()).
#[allow(clippy::single_range_in_vec_init)]
fn rangeset_subset_of_boundary_rangeset(rangeset: &RangeSet<u32>) {
let other = RangeSet::from(vec![0..9998]);

View File

@@ -1,8 +1,8 @@
[package]
name = "rangeset-fuzz"
version = "0.0.0"
edition = "2024"
publish = false
edition = "2021"
[package.metadata]
cargo-fuzz = true
@@ -13,9 +13,6 @@ libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
[dependencies.rangeset]
path = ".."
[profile.release]
debug = 1
[[bin]]
name = "range_union_range"
path = "fuzz_targets/range_union_range.rs"

View File

@@ -4,7 +4,7 @@ use std::ops::Range;
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -1,11 +1,10 @@
#![no_main]
use std::collections::HashSet;
use std::ops::Range;
use std::{collections::HashSet, ops::Range};
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -1,7 +1,6 @@
#![no_main]
use std::collections::HashSet;
use std::ops::Range;
use std::{collections::HashSet, ops::Range};
use libfuzzer_sys::fuzz_target;

View File

@@ -4,7 +4,7 @@ use std::ops::Range;
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -4,7 +4,7 @@ use std::ops::Range;
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -2,7 +2,7 @@
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -4,7 +4,7 @@ use std::collections::HashSet;
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -4,7 +4,7 @@ use std::ops::Range;
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -2,7 +2,7 @@
use libfuzzer_sys::fuzz_target;
use rangeset_fuzz::{assert_invariants, SmallSet};
use rangeset_fuzz::{SmallSet, assert_invariants};
use rangeset::*;

View File

@@ -37,7 +37,8 @@ impl<'a> Arbitrary<'a> for SmallSet {
}
}
/// Asserts that the ranges of the given set are sorted, non-adjacent, non-intersecting, and non-empty.
/// Asserts that the ranges of the given set are sorted, non-adjacent,
/// non-intersecting, and non-empty.
pub fn assert_invariants(set: RangeSet<u8>) {
assert!(set.into_inner().windows(2).all(|w| w[0].start < w[1].start
&& w[0].end < w[1].start

View File

@@ -8,7 +8,8 @@ pub trait Cover<Rhs> {
/// cover `self`.
///
/// Returns a tuple containing:
/// * A vector of indices of the sets that cover `self` (empty if no coverage at all).
/// * A vector of indices of the sets that cover `self` (empty if no
/// coverage at all).
/// * Any uncovered elements (empty if complete coverage is achieved).
fn find_cover<'a>(&self, others: impl IntoIterator<Item = &'a Rhs>) -> (Vec<usize>, Rhs)
where

View File

@@ -9,7 +9,8 @@ pub trait IndexRanges<T: Copy + Ord = usize> {
///
/// # Panics
///
/// Panics if any of the indices in the range set are out of bounds of the collection.
/// Panics if any of the indices in the range set are out of bounds of the
/// collection.
///
/// # Examples
///

View File

@@ -36,8 +36,9 @@ impl<T: Copy + Ord> Intersection<RangeSet<T>> for Range<T> {
// `self` is leftward of `other`, so we can break early.
break;
} else if let Some(intersection) = self.intersection(other) {
// Given that `other` contains sorted, non-adjacent, non-intersecting, and non-empty
// ranges, the new set will also have these properties.
// Given that `other` contains sorted, non-adjacent, non-intersecting, and
// non-empty ranges, the new set will also have these
// properties.
set.ranges.push(intersection);
}
}
@@ -74,8 +75,9 @@ impl<T: Copy + Ord> Intersection<RangeSet<T>> for RangeSet<T> {
// `b` is leftward of `a`, so we can proceed to the next range in `other`.
j += 1;
} else if let Some(intersection) = a.intersection(b) {
// Given that `self` and `other` contain sorted, non-adjacent, non-intersecting, and
// non-empty ranges, the new set will also have these properties.
// Given that `self` and `other` contain sorted, non-adjacent, non-intersecting,
// and non-empty ranges, the new set will also have these
// properties.
set.ranges.push(intersection);
if a.end <= b.end {

View File

@@ -20,8 +20,9 @@ use std::ops::{Add, Range, Sub};
/// A set of values represented using ranges.
///
/// A `RangeSet` is similar to any other kind of set, such as `HashSet`, with the difference being that the
/// values in the set are represented using ranges rather than storing each value individually.
/// A `RangeSet` is similar to any other kind of set, such as `HashSet`, with
/// the difference being that the values in the set are represented using ranges
/// rather than storing each value individually.
///
/// # Invariants
///
@@ -32,7 +33,8 @@ use std::ops::{Add, Range, Sub};
/// - The ranges are non-intersecting.
/// - The ranges are non-empty.
///
/// This is enforced in the constructor, and guaranteed to hold after applying any operation on a range or set.
/// This is enforced in the constructor, and guaranteed to hold after applying
/// any operation on a range or set.
///
/// # Examples
///
@@ -67,7 +69,8 @@ use std::ops::{Add, Range, Sub};
pub struct RangeSet<T> {
/// The ranges of the set.
///
/// The ranges *MUST* be sorted, non-adjacent, non-intersecting, and non-empty.
/// The ranges *MUST* be sorted, non-adjacent, non-intersecting, and
/// non-empty.
ranges: Vec<Range<T>>,
}
@@ -116,7 +119,8 @@ impl<T> RangeSet<T> {
impl<T: Copy + Ord> RangeSet<T> {
/// Returns a new `RangeSet` from the given ranges.
///
/// The `RangeSet` is constructed by computing the union of the given ranges.
/// The `RangeSet` is constructed by computing the union of the given
/// ranges.
pub fn new(ranges: &[Range<T>]) -> Self
where
Self: Union<Range<T>, Output = Self>,
@@ -155,12 +159,13 @@ impl<T: Copy + Ord> RangeSet<T> {
self.ranges.first().map(|range| range.start)
}
/// Returns the end of right-most range in the set, or `None` if the set is empty.
/// Returns the end of right-most range in the set, or `None` if the set is
/// empty.
///
/// # Note
///
/// This is the *non-inclusive* bound of the right-most range. See `RangeSet::max` for the
/// maximum value in the set.
/// This is the *non-inclusive* bound of the right-most range. See
/// `RangeSet::max` for the maximum value in the set.
pub fn end(&self) -> Option<T> {
self.ranges.last().map(|range| range.end)
}
@@ -178,8 +183,9 @@ impl<T: Copy + Ord + Step + Sub<Output = T>> RangeSet<T> {
/// Splits the set into two at the provided value.
///
/// Returns a new set containing all the existing elements `>= at`. After the call,
/// the original set will be left containing the elements `< at`.
/// Returns a new set containing all the existing elements `>= at`. After
/// the call, the original set will be left containing the elements `<
/// at`.
///
/// # Panics
///
@@ -253,8 +259,8 @@ where
impl<T: Copy + Ord> TryFrom<RangeSet<T>> for Range<T> {
type Error = RangeSet<T>;
/// Attempts to convert a `RangeSet` into a single `Range`, returning the set if it
/// does not contain exactly one range.
/// Attempts to convert a `RangeSet` into a single `Range`, returning the
/// set if it does not contain exactly one range.
fn try_from(set: RangeSet<T>) -> Result<Self, Self::Error> {
if set.len_ranges() == 1 {
Ok(set.ranges.into_iter().next().unwrap())
@@ -466,7 +472,8 @@ impl<T: Copy + Ord> Disjoint<Range<T>> for RangeSet<T> {
}
}
/// Asserts that the ranges of the given set are sorted, non-adjacent, non-intersecting, and non-empty.
/// Asserts that the ranges of the given set are sorted, non-adjacent,
/// non-intersecting, and non-empty.
#[cfg(test)]
pub fn assert_invariants<T: Copy + Ord>(set: &RangeSet<T>) {
assert!(set.ranges.windows(2).all(|w| w[0].start < w[1].start

View File

@@ -1,7 +1,7 @@
[package]
name = "serio"
version = "0.2.0"
edition = "2021"
edition = "2024"
authors = ["TLSNotary Contributors"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/tlsnotary/tlsn-utils"

View File

@@ -1,7 +1,7 @@
use serde::{Deserialize, Serialize};
use serio::{
codec::{Bincode, Framed},
IoSink, IoStream, SinkExt as _, StreamExt as _,
codec::{Bincode, Framed},
};
use std::io::Result;
use tokio::io::duplex;

View File

@@ -1,4 +1,5 @@
//! Memory channels for sending and receiving serializable types. Useful for testing.
//! Memory channels for sending and receiving serializable types. Useful for
//! testing.
use std::{
any::Any,
@@ -15,7 +16,8 @@ use crate::{Deserialize, Serialize, Sink, Stream};
type Item = Box<dyn Any + Send + Sync + 'static>;
/// A memory sink that can be used to send any serializable type to the receiver.
/// A memory sink that can be used to send any serializable type to the
/// receiver.
#[derive(Debug, Clone)]
pub struct MemorySink(mpsc::Sender<Item>);
@@ -50,7 +52,8 @@ impl Sink for MemorySink {
}
}
/// A memory stream that can be used to receive any deserializable type from the sender.
/// A memory stream that can be used to receive any deserializable type from the
/// sender.
#[derive(Debug)]
pub struct MemoryStream(mpsc::Receiver<Item>);
@@ -77,7 +80,8 @@ pub fn channel(buffer: usize) -> (MemorySink, MemoryStream) {
(MemorySink(sender), MemoryStream(receiver))
}
/// An unbounded memory sink that can be used to send any serializable type to the receiver.
/// An unbounded memory sink that can be used to send any serializable type to
/// the receiver.
#[derive(Debug, Clone)]
pub struct UnboundedMemorySink(mpsc::UnboundedSender<Item>);
@@ -112,7 +116,8 @@ impl Sink for UnboundedMemorySink {
}
}
/// An unbounded memory stream that can be used to receive any deserializable type from the sender.
/// An unbounded memory stream that can be used to receive any deserializable
/// type from the sender.
#[derive(Debug)]
pub struct UnboundedMemoryStream(mpsc::UnboundedReceiver<Item>);
@@ -207,7 +212,8 @@ pub fn duplex(buffer: usize) -> (MemoryDuplex, MemoryDuplex) {
)
}
/// An unbounded memory duplex that can be used to send and receive any serializable types.
/// An unbounded memory duplex that can be used to send and receive any
/// serializable types.
#[derive(Debug)]
pub struct UnboundedMemoryDuplex {
sink: UnboundedMemorySink,

View File

@@ -1,9 +1,10 @@
//! Utilities for converting framed transports to streams and sinks using a codec.
//! Utilities for converting framed transports to streams and sinks using a
//! codec.
use std::{
io::{Error, ErrorKind},
pin::Pin,
task::{ready, Context, Poll},
task::{Context, Poll, ready},
};
use bytes::{Bytes, BytesMut};

View File

@@ -4,12 +4,12 @@ use std::{
future::Future,
ops::DerefMut,
pin::Pin,
task::{ready, Context, Poll},
task::{Context, Poll, ready},
};
#[cfg(feature = "compat")]
use crate::FuturesCompat;
use crate::{future::assert_future, Serialize};
use crate::{Serialize, future::assert_future};
/// A sink with an error type of `std::io::Error`.
pub trait IoSink: Sink<Error = std::io::Error> {}
@@ -32,8 +32,8 @@ pub trait Sink {
///
/// This method returns `Poll::Ready` once the underlying sink is ready to
/// receive data. If this method returns `Poll::Pending`, the current task
/// is registered to be notified (via `cx.waker().wake_by_ref()`) when `poll_ready`
/// should be called again.
/// is registered to be notified (via `cx.waker().wake_by_ref()`) when
/// `poll_ready` should be called again.
///
/// In most cases, if the sink encounters an error, the sink will
/// permanently be unable to receive items.
@@ -67,8 +67,8 @@ pub trait Sink {
/// via `start_send` have been flushed.
///
/// Returns `Poll::Pending` if there is more work left to do, in which
/// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
/// `poll_flush` should be called again.
/// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to
/// wake up when `poll_flush` should be called again.
///
/// In most cases, if the sink encounters an error, the sink will
/// permanently be unable to receive items.
@@ -80,8 +80,8 @@ pub trait Sink {
/// has been successfully closed.
///
/// Returns `Poll::Pending` if there is more work left to do, in which
/// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
/// `poll_close` should be called again.
/// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to
/// wake up when `poll_close` should be called again.
///
/// If this function encounters an error, the sink should be considered to
/// have failed permanently, and no more `Sink` methods should be called.
@@ -135,7 +135,8 @@ where
}
}
/// An extension trait for Sinks that provides a variety of convenient functions.
/// An extension trait for Sinks that provides a variety of convenient
/// functions.
pub trait SinkExt: Sink {
/// Close the sink.
fn close(&mut self) -> Close<'_, Self>

View File

@@ -5,14 +5,14 @@ use std::{
marker::PhantomData,
ops::DerefMut,
pin::Pin,
task::{ready, Context, Poll},
task::{Context, Poll, ready},
};
use futures_core::FusedFuture;
#[cfg(feature = "compat")]
use crate::FuturesCompat;
use crate::{future::assert_future, Deserialize};
use crate::{Deserialize, future::assert_future};
/// A stream with an error type of `std::io::Error`.
pub trait IoStream: Stream<Error = std::io::Error> {}
@@ -22,7 +22,8 @@ impl<T: ?Sized> IoStream for T where T: Stream<Error = std::io::Error> {}
/// A stream producing any kind of value which implements `Deserialize`.
///
/// This trait is similar to [`futures::Stream`](https://docs.rs/futures/latest/futures/stream/trait.Stream.html),
/// but facilitates receiving of any deserializable type instead of a single type.
/// but facilitates receiving of any deserializable type instead of a single
/// type.
#[must_use = "streams do nothing unless polled"]
pub trait Stream {
/// The type of value produced by the stream when an error occurs.
@@ -37,8 +38,8 @@ pub trait Stream {
/// There are several possible return values, each indicating a distinct
/// stream state:
///
/// - `Poll::Pending` means that this stream's next value is not ready
/// yet. Implementations will ensure that the current task will be notified
/// - `Poll::Pending` means that this stream's next value is not ready yet.
/// Implementations will ensure that the current task will be notified
/// when the next value may be ready.
///
/// - `Poll::Ready(Some(val))` means that the stream has successfully
@@ -57,9 +58,10 @@ pub trait Stream {
/// Specifically, `size_hint()` returns a tuple where the first element
/// is the lower bound, and the second element is the upper bound.
///
/// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`.
/// A [`None`] here means that either there is no known upper bound, or the
/// upper bound is larger than [`usize`].
/// The second half of the tuple that is returned is an
/// [`Option`]`<`[`usize`]`>`. A [`None`] here means that either there
/// is no known upper bound, or the upper bound is larger than
/// [`usize`].
///
/// # Implementation notes
///
@@ -76,8 +78,8 @@ pub trait Stream {
/// That said, the implementation should provide a correct estimation,
/// because otherwise it would be a violation of the trait's protocol.
///
/// The default implementation returns `(0, `[`None`]`)` which is correct for any
/// stream.
/// The default implementation returns `(0, `[`None`]`)` which is correct
/// for any stream.
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
@@ -147,7 +149,8 @@ where
}
}
/// An extension trait for Streams that provides a variety of convenient functions.
/// An extension trait for Streams that provides a variety of convenient
/// functions.
pub trait StreamExt: Stream {
/// Creates a future that resolves to the next item in the stream.
///
@@ -237,7 +240,8 @@ impl<St: ?Sized + Stream + Unpin, Item: Deserialize> Future for Next<'_, St, Ite
}
}
/// An extension trait for [`IoStream`] which provides a variety of convenient functions.
/// An extension trait for [`IoStream`] which provides a variety of convenient
/// functions.
pub trait IoStreamExt: IoStream {
/// Creates a future that resolves to the next item in the stream, returning
/// an error if the stream is exhausted.

View File

@@ -1,7 +1,7 @@
[package]
name = "spansy"
version = "0.1.0"
edition = "2021"
edition = "2024"
description = "Parsing with span information"
repository = "https://github.com/tlsnotary/tlsn-utils"
license = "MIT OR Apache-2.0"

View File

@@ -3,12 +3,13 @@ use std::ops::Range;
use bytes::Bytes;
use crate::{
ParseError, Span,
helpers::get_span_range,
http::{
Body, BodyContent, Code, Header, HeaderName, HeaderValue, Method, Reason, Request,
RequestLine, Response, Status, Target,
},
json, ParseError, Span,
json,
};
const MAX_HEADERS: usize = 128;
@@ -28,7 +29,7 @@ pub(crate) fn parse_request_from_bytes(src: &Bytes, offset: usize) -> Result<Req
let head_end = match request.parse(&src[offset..]) {
Ok(httparse::Status::Complete(head_end)) => head_end + offset,
Ok(httparse::Status::Partial) => {
return Err(ParseError(format!("incomplete request: {:?}", src)))
return Err(ParseError(format!("incomplete request: {src:?}")));
}
Err(err) => return Err(ParseError(err.to_string())),
};
@@ -120,7 +121,7 @@ pub(crate) fn parse_response_from_bytes(
let head_end = match response.parse(&src[offset..]) {
Ok(httparse::Status::Complete(head_end)) => head_end + offset,
Ok(httparse::Status::Partial) => {
return Err(ParseError(format!("incomplete response: {:?}", src)))
return Err(ParseError(format!("incomplete response: {src:?}")));
}
Err(err) => return Err(ParseError(err.to_string())),
};
@@ -218,8 +219,8 @@ fn request_body_len(request: &Request) -> Result<usize, ParseError> {
// The presence of a message body in a request is signaled by a Content-Length
// or Transfer-Encoding header field.
// If a message is received with both a Transfer-Encoding and a Content-Length header field,
// the Transfer-Encoding overrides the Content-Length
// If a message is received with both a Transfer-Encoding and a Content-Length
// header field, the Transfer-Encoding overrides the Content-Length
if request
.headers_with_name("Transfer-Encoding")
.next()
@@ -229,22 +230,25 @@ fn request_body_len(request: &Request) -> Result<usize, ParseError> {
"Transfer-Encoding not supported yet".to_string(),
))
} else if let Some(h) = request.headers_with_name("Content-Length").next() {
// If a valid Content-Length header field is present without Transfer-Encoding, its decimal value
// defines the expected message body length in octets.
// If a valid Content-Length header field is present without Transfer-Encoding,
// its decimal value defines the expected message body length in octets.
std::str::from_utf8(h.value.0.as_bytes())?
.parse::<usize>()
.map_err(|err| ParseError(format!("failed to parse Content-Length value: {err}")))
} else {
// If this is a request message and none of the above are true, then the message body length is zero
// If this is a request message and none of the above are true, then the message
// body length is zero
Ok(0)
}
}
/// Calculates the length of the response body according to RFC 9112, section 6.
fn response_body_len(response: &Response) -> Result<usize, ParseError> {
// Any response to a HEAD request and any response with a 1xx (Informational), 204 (No Content), or 304 (Not Modified)
// status code is always terminated by the first empty line after the header fields, regardless of the header fields
// present in the message, and thus cannot contain a message body or trailer section.
// Any response to a HEAD request and any response with a 1xx (Informational),
// 204 (No Content), or 304 (Not Modified) status code is always terminated
// by the first empty line after the header fields, regardless of the header
// fields present in the message, and thus cannot contain a message body or
// trailer section.
match response
.status
.code
@@ -265,16 +269,18 @@ fn response_body_len(response: &Response) -> Result<usize, ParseError> {
"Transfer-Encoding not supported yet".to_string(),
))
} else if let Some(h) = response.headers_with_name("Content-Length").next() {
// If a valid Content-Length header field is present without Transfer-Encoding, its decimal value
// defines the expected message body length in octets.
// If a valid Content-Length header field is present without Transfer-Encoding,
// its decimal value defines the expected message body length in octets.
std::str::from_utf8(h.value.0.as_bytes())?
.parse::<usize>()
.map_err(|err| ParseError(format!("failed to parse Content-Length value: {err}")))
} else {
// If this is a response message and none of the above are true, then there is no way to
// determine the length of the message body except by reading it until the connection is closed.
// If this is a response message and none of the above are true, then there is
// no way to determine the length of the message body except by reading
// it until the connection is closed.
// We currently consider this an error because we have no outer context information.
// We currently consider this an error because we have no outer context
// information.
Err(ParseError(
"A response with a body must contain either a Content-Length or Transfer-Encoding header".to_string(),
))

View File

@@ -1,6 +1,6 @@
use rangeset::{Difference, RangeSet, ToRangeSet};
use crate::{json::JsonValue, Span, Spanned};
use crate::{Span, Spanned, json::JsonValue};
/// An HTTP header name.
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -209,17 +209,19 @@ pub struct Request {
}
impl Request {
/// Returns an iterator of request headers with the given name (case-insensitive).
/// Returns an iterator of request headers with the given name
/// (case-insensitive).
///
/// This method returns an iterator because it is valid for HTTP records to contain
/// duplicate header names.
/// This method returns an iterator because it is valid for HTTP records to
/// contain duplicate header names.
pub fn headers_with_name<'a>(&'a self, name: &'a str) -> impl Iterator<Item = &'a Header> {
self.headers
.iter()
.filter(|h| h.name.0.as_str().eq_ignore_ascii_case(name))
}
/// Returns the indices of the request excluding the target, headers and body.
/// Returns the indices of the request excluding the target, headers and
/// body.
pub fn without_data(&self) -> RangeSet<usize> {
let mut indices = self.span.indices.difference(&self.request.target.0.indices);
for header in &self.headers {
@@ -361,10 +363,11 @@ pub struct Response {
}
impl Response {
/// Returns an iterator of response headers with the given name (case-insensitive).
/// Returns an iterator of response headers with the given name
/// (case-insensitive).
///
/// This method returns an iterator because it is valid for HTTP records to contain
/// duplicate header names.
/// This method returns an iterator because it is valid for HTTP records to
/// contain duplicate header names.
pub fn headers_with_name<'a>(&'a self, name: &'a str) -> impl Iterator<Item = &'a Header> {
self.headers
.iter()

View File

@@ -1,11 +1,11 @@
//! JSON span parsing.
//!
//! This module provides a JSON parser that can be used to parse span information for each JSON value within
//! a source string.
//! This module provides a JSON parser that can be used to parse span
//! information for each JSON value within a source string.
//!
//! Note that the parser does *not* fully parse values, it simply computes the span of the corresponding
//! characters in the source string. Thus, this parser should not be expected to perform any kind of
//! validation of the JSON.
//! Note that the parser does *not* fully parse values, it simply computes the
//! span of the corresponding characters in the source string. Thus, this parser
//! should not be expected to perform any kind of validation of the JSON.
//!
//! # Example
//!

View File

@@ -1,5 +1,5 @@
use bytes::Bytes;
use pest::{iterators::Pair as PestPair, Parser};
use pest::{Parser, iterators::Pair as PestPair};
use types::KeyValue;
use super::types::{self, JsonValue};

View File

@@ -127,7 +127,8 @@ impl<T: ?Sized> Span<T> {
/// Returns the length of the span in bytes.
///
/// Just like `str::len()`, this is not necessarily the number of characters.
/// Just like `str::len()`, this is not necessarily the number of
/// characters.
pub fn len(&self) -> usize {
self.indices.len()
}

View File

@@ -1,13 +1,13 @@
[package]
name = "uid-mux"
version = "0.2.0"
edition = "2024"
authors = ["TLSNotary Contributors"]
description = "Async multiplexing library with user provided stream ids."
keywords = ["multiplex", "futures", "async"]
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/tlsnotary/tlsn-utils"
edition = "2021"
[features]
default = ["tracing", "serio"]

View File

@@ -1,14 +1,14 @@
use std::{
pin::{pin, Pin},
pin::{Pin, pin},
task::{Context, Poll},
};
use futures::{ready, AsyncRead, AsyncWrite, Future};
use futures::{AsyncRead, AsyncWrite, Future, ready};
use tokio::sync::oneshot;
use crate::{
log::{error, trace},
InternalId,
log::{error, trace},
};
const BUF: usize = 32;

View File

@@ -35,7 +35,7 @@ impl InternalId {
impl fmt::Display for InternalId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for byte in &self.0[..4] {
write!(f, "{:02x}", byte)?;
write!(f, "{byte:02x}")?;
}
Ok(())
}
@@ -105,5 +105,9 @@ pub(crate) mod log {
};
}
pub(crate) use {debug, error, info, trace, warn_ as warn};
pub(crate) use debug;
pub(crate) use error;
pub(crate) use info;
pub(crate) use trace;
pub(crate) use warn_ as warn;
}

View File

@@ -1,4 +1,4 @@
use ::serio::{codec::Codec, IoDuplex};
use ::serio::{IoDuplex, codec::Codec};
use async_trait::async_trait;
use crate::UidMux;
@@ -90,7 +90,7 @@ mod tests {
use crate::yamux::{Config, Mode, Yamux};
use ::serio::codec::Bincode;
use serio::{stream::IoStreamExt, SinkExt};
use serio::{SinkExt, stream::IoStreamExt};
use tokio::io::duplex;
use tokio_util::compat::TokioAsyncReadCompatExt;

View File

@@ -1,12 +1,12 @@
//! Test utilities.
use tokio::io::{duplex, DuplexStream};
use tokio::io::{DuplexStream, duplex};
use tokio_util::compat::{Compat, TokioAsyncReadCompatExt};
use yamux::{Config, Mode};
use crate::{
yamux::{Yamux, YamuxCtrl},
FramedMux,
yamux::{Yamux, YamuxCtrl},
};
/// Creates a test pair of yamux instances.
@@ -56,7 +56,7 @@ mod serio {
};
use async_trait::async_trait;
use serio::channel::{duplex, MemoryDuplex};
use serio::channel::{MemoryDuplex, duplex};
use crate::serio::FramedUidMux;

View File

@@ -8,21 +8,21 @@ use std::{
future::IntoFuture,
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex,
atomic::{AtomicBool, Ordering},
},
task::{Context, Poll, Waker},
};
use async_trait::async_trait;
use futures::{stream::FuturesUnordered, AsyncRead, AsyncWrite, Future, FutureExt, StreamExt};
use tokio::sync::{oneshot, Notify};
use futures::{AsyncRead, AsyncWrite, Future, FutureExt, StreamExt, stream::FuturesUnordered};
use tokio::sync::{Notify, oneshot};
use yamux::Connection;
use crate::{
InternalId, UidMux,
future::{ReadId, ReturnStream},
log::{debug, error, info, trace, warn},
InternalId, UidMux,
};
pub use yamux::{Config, ConnectionError, Mode, Stream};
@@ -344,7 +344,8 @@ pub struct YamuxCtrl {
impl YamuxCtrl {
/// Allocates `count` streams.
///
/// This can be used to efficiently pre-allocate streams prior to assigning ids to them.
/// This can be used to efficiently pre-allocate streams prior to assigning
/// ids to them.
///
/// # Note
///
@@ -497,7 +498,8 @@ mod tests {
fut.await.unwrap();
}
// Test the case where the client closes the connection while the server is expecting a new stream.
// Test the case where the client closes the connection while the server is
// expecting a new stream.
#[tokio::test]
async fn test_yamux_client_close_early() {
let (client_io, server_io) = duplex(1024);
@@ -539,7 +541,8 @@ mod tests {
fut.await.unwrap();
}
// Test the case where the server closes the connection while the client is opening a new stream.
// Test the case where the server closes the connection while the client is
// opening a new stream.
#[tokio::test]
async fn test_yamux_server_close_early() {
let (client_io, server_io) = duplex(1024);

View File

@@ -1,7 +1,7 @@
[package]
name = "tlsn-utils"
version = "0.1.0"
edition = "2021"
edition = "2024"
[lib]
name = "utils"

View File

@@ -10,7 +10,8 @@ use core::{ptr, slice};
///
/// See [tracking issue](https://github.com/rust-lang/rust/issues/43244)
///
/// We call this `FilterDrain` to avoid the naming conflict with the standard library.
/// We call this `FilterDrain` to avoid the naming conflict with the standard
/// library.
pub trait FilterDrain<'a, T, F> {
type Item;
type Iter: Iterator<Item = Self::Item> + 'a;
@@ -44,7 +45,8 @@ where
}
}
/// An iterator which uses a closure to determine if an element should be removed.
/// An iterator which uses a closure to determine if an element should be
/// removed.
#[derive(Debug)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct FilterDrainIter<'a, T, F>
@@ -291,8 +293,7 @@ mod tests {
#[cfg(not(target_os = "emscripten"))]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn filter_drain_consumed_panic() {
use std::rc::Rc;
use std::sync::Mutex;
use std::{rc::Rc, sync::Mutex};
struct Check {
index: usize,
@@ -340,8 +341,7 @@ mod tests {
for (index, count) in drop_counts.iter().cloned().enumerate() {
assert_eq!(
1, count,
"unexpected drop count at index: {} (count: {})",
index, count
"unexpected drop count at index: {index} (count: {count})"
);
}
}
@@ -351,8 +351,7 @@ mod tests {
#[cfg(not(target_os = "emscripten"))]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn filter_drain_unconsumed_panic() {
use std::rc::Rc;
use std::sync::Mutex;
use std::{rc::Rc, sync::Mutex};
struct Check {
index: usize,
@@ -399,8 +398,7 @@ mod tests {
for (index, count) in drop_counts.iter().cloned().enumerate() {
assert_eq!(
1, count,
"unexpected drop count at index: {} (count: {})",
index, count
"unexpected drop count at index: {index} (count: {count})"
);
}
}

View File

@@ -107,12 +107,12 @@ impl std::fmt::Display for NestedId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
NestedId::String { id, root } => match root {
Some(root) => write!(f, "{}/{}", root, id),
None => write!(f, "{}", id),
Some(root) => write!(f, "{root}/{id}"),
None => write!(f, "{id}"),
},
NestedId::Counter { value, root } => match root {
Some(root) => write!(f, "{}/{}", root, value),
None => write!(f, "{}", value),
Some(root) => write!(f, "{root}/{value}"),
None => write!(f, "{value}"),
},
}
}

View File

@@ -24,7 +24,8 @@ pub fn choose<T: Copy>(items: &[[T; 2]], choice: &[bool]) -> Vec<T> {
.collect()
}
/// Returns a subset of items in a collection which corresponds to provided indices
/// Returns a subset of items in a collection which corresponds to provided
/// indices
///
/// Panics if index is out of bounds
#[inline]
@@ -32,7 +33,8 @@ pub fn pick<T: Copy>(items: &[T], idx: &[usize]) -> Vec<T> {
idx.iter().map(|i| items[*i]).collect()
}
/// This trait provides a helper method to determine whether an Iterator contains any duplicates.
/// This trait provides a helper method to determine whether an Iterator
/// contains any duplicates.
pub trait DuplicateCheck<'a, T>
where
Self: Iterator<Item = &'a T>,
@@ -60,8 +62,8 @@ where
{
}
/// This trait provides a helper method to determine whether an Iterator contains any duplicates
/// using an accessor function.
/// This trait provides a helper method to determine whether an Iterator
/// contains any duplicates using an accessor function.
pub trait DuplicateCheckBy<'a, F, T, U>
where
Self: Iterator<Item = &'a T>,

View File

@@ -58,12 +58,20 @@ impl_tuple_option_transpose!(
(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13)
);
impl_tuple_option_transpose!(
(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14),
(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14)
(
T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14
),
(
t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14
)
);
impl_tuple_option_transpose!(
(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15),
(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15)
(
T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15
),
(
t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15
)
);
#[cfg(test)]

View File

@@ -1,7 +1,7 @@
[package]
name = "websocket-relay"
version = "0.1.0"
edition = "2021"
edition = "2024"
authors = ["TLSNotary Contributors"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/tlsnotary/tlsn-utils"

View File

@@ -2,12 +2,12 @@ use std::{
collections::HashMap,
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex,
atomic::{AtomicBool, Ordering},
},
};
use anyhow::{anyhow, Result};
use anyhow::{Result, anyhow};
use futures::{SinkExt, StreamExt as _};
use once_cell::sync::Lazy;
use tokio::{
@@ -15,9 +15,8 @@ use tokio::{
net::{TcpListener, TcpStream},
};
use tokio_tungstenite::{
accept_hdr_async,
tungstenite::{http::Request, Message},
WebSocketStream,
WebSocketStream, accept_hdr_async,
tungstenite::{Message, http::Request},
};
use tracing::{debug, info, instrument};