rustls/msgs/deframer/buffers.rs

use alloc::vec::Vec;
use core::mem;
use core::ops::Range;
#[cfg(feature = "std")]
use std::io;

#[cfg(feature = "std")]
use crate::msgs::message::MAX_WIRE_SIZE;

/// Conversion from a slice within a larger buffer into
/// a `Range` of offsets within it.
#[derive(Debug)]
pub(crate) struct Locator {
    bounds: Range<*const u8>,
}

impl Locator {
    #[inline]
    pub(crate) fn new(slice: &[u8]) -> Self {
        Self {
            bounds: slice.as_ptr_range(),
        }
    }

    #[inline]
    pub(crate) fn locate(&self, slice: &[u8]) -> Range<usize> {
        let bounds = slice.as_ptr_range();
        debug_assert!(self.fully_contains(slice));
        let start = bounds.start as usize - self.bounds.start as usize;
        let len = bounds.end as usize - bounds.start as usize;
        Range {
            start,
            end: start + len,
        }
    }

    #[inline]
    pub(crate) fn fully_contains(&self, slice: &[u8]) -> bool {
        let bounds = slice.as_ptr_range();
        bounds.start >= self.bounds.start && bounds.end <= self.bounds.end
    }
}
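
// A minimal usage sketch (illustrative test, not part of the API surface):
// `locate()` maps a sub-slice back to the range of offsets it occupies
// within the buffer the `Locator` was built over.
#[cfg(test)]
#[test]
fn locator_example() {
    let buf = [0u8; 16];
    let locator = Locator::new(&buf);
    assert!(locator.fully_contains(&buf[4..8]));
    assert_eq!(locator.locate(&buf[4..8]), Range { start: 4, end: 8 });
}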

/// Conversion from a `Range` of offsets back into a slice
/// of the original buffer.
pub(crate) struct Delocator<'b> {
    slice: &'b [u8],
}

impl<'b> Delocator<'b> {
    #[inline]
    pub(crate) fn new(slice: &'b [u8]) -> Self {
        Self { slice }
    }

    #[inline]
    pub(crate) fn slice_from_range(&'_ self, range: &Range<usize>) -> &'b [u8] {
        // safety: this unwrap is safe so long as `range` came from `locate()`
        // for the same buffer
        self.slice.get(range.clone()).unwrap()
    }

    #[inline]
    pub(crate) fn locator(self) -> Locator {
        Locator::new(self.slice)
    }
}
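
// A minimal usage sketch (illustrative test): a `Range` produced by
// `Locator::locate()` resolves back to the equivalent slice of the
// same buffer.
#[cfg(test)]
#[test]
fn delocator_example() {
    let buf = *b"hello, world";
    let range = Locator::new(&buf).locate(&buf[7..12]);
    assert_eq!(Delocator::new(&buf).slice_from_range(&range), b"world");
}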

/// Reordering the underlying buffer based on ranges.
pub(crate) struct Coalescer<'b> {
    slice: &'b mut [u8],
}

impl<'b> Coalescer<'b> {
    #[inline]
    pub(crate) fn new(slice: &'b mut [u8]) -> Self {
        Self { slice }
    }

    #[inline]
    pub(crate) fn copy_within(&mut self, from: Range<usize>, to: Range<usize>) {
        debug_assert!(from.len() == to.len());
        debug_assert!(self.slice.get(from.clone()).is_some());
        debug_assert!(self.slice.get(to.clone()).is_some());
        self.slice.copy_within(from, to.start);
    }

    #[inline]
    pub(crate) fn delocator(self) -> Delocator<'b> {
        Delocator::new(self.slice)
    }
}
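
// A minimal usage sketch (illustrative test): `copy_within()` slides one
// range of bytes over another, e.g. to butt two fragmented payloads up
// against each other in place.
#[cfg(test)]
#[test]
fn coalescer_example() {
    let mut buf = *b"abcdXXXefgh";
    let mut coalescer = Coalescer::new(&mut buf[..]);
    coalescer.copy_within(Range { start: 7, end: 11 }, Range { start: 4, end: 8 });
    assert_eq!(&buf[..8], b"abcdefgh");
}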

/// Accounting structure tracking progress in parsing a single buffer.
#[derive(Clone, Debug, Default)]
pub(crate) struct BufferProgress {
    /// Prefix of the buffer that has been processed so far.
    ///
    /// `processed` may exceed `discard`, which means we have parsed
    /// some buffer, but are still using it.  This happens due to
    /// in-place decryption of incoming records, and in-place
    /// reassembly of handshake messages.
    ///
    /// `0 <= processed <= len`
    processed: usize,

    /// Prefix of the buffer that can be removed.
    ///
    /// If `discard` exceeds `processed`, that means we are ignoring
    /// data without processing it.
    ///
    /// `0 <= discard <= len`
    discard: usize,
}

impl BufferProgress {
    #[inline]
    pub(crate) fn add_discard(&mut self, discard: usize) {
        self.discard += discard;
    }

    #[inline]
    pub(crate) fn add_processed(&mut self, processed: usize) {
        self.processed += processed;
    }

    #[inline]
    pub(crate) fn take_discard(&mut self) -> usize {
        // the caller is about to discard `discard` bytes
        // from the front of the buffer.  adjust `processed`
        // down by the same amount.
        self.processed = self
            .processed
            .saturating_sub(self.discard);
        mem::take(&mut self.discard)
    }

    #[inline]
    pub(crate) fn processed(&self) -> usize {
        self.processed
    }
}
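
// A minimal usage sketch (illustrative test): bytes are counted as processed
// once parsed, and `take_discard()` re-bases `processed` against the prefix
// the caller is about to drop.
#[cfg(test)]
#[test]
fn buffer_progress_example() {
    let mut progress = BufferProgress::default();
    progress.add_processed(10);
    progress.add_discard(4);
    assert_eq!(progress.take_discard(), 4);
    // the four discarded bytes no longer count towards `processed`
    assert_eq!(progress.processed(), 6);
}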

#[derive(Default, Debug)]
pub(crate) struct DeframerVecBuffer {
    /// Buffer of data read from the socket, in the process of being parsed into messages.
    ///
    /// For buffer size management, check out the [`DeframerVecBuffer::prepare_read()`] method.
    buf: Vec<u8>,

    /// What size prefix of `buf` is used.
    used: usize,

    /// What size prefix of `buf` has been processed so far.
    pub(crate) processed: usize,
}

impl DeframerVecBuffer {
    /// Discard `taken` bytes from the start of our buffer.
    pub(crate) fn discard(&mut self, taken: usize) {
        #[allow(clippy::comparison_chain)]
        if taken < self.used {
            /* Before:
             * +----------+----------+----------+
             * | taken    | pending  |xxxxxxxxxx|
             * +----------+----------+----------+
             * 0          ^ taken    ^ self.used
             *
             * After:
             * +----------+----------+----------+
             * | pending  |xxxxxxxxxxxxxxxxxxxxx|
             * +----------+----------+----------+
             * 0          ^ self.used
             */

            self.buf
                .copy_within(taken..self.used, 0);
            self.used -= taken;
            self.processed = self.processed.saturating_sub(taken);
        } else if taken == self.used {
            self.used = 0;
            self.processed = 0;
        }
    }

    pub(crate) fn filled_mut(&mut self) -> &mut [u8] {
        &mut self.buf[..self.used]
    }

    pub(crate) fn filled(&self) -> &[u8] {
        &self.buf[..self.used]
    }
}
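
// A minimal usage sketch (illustrative test; the struct literal stands in
// for filling the buffer via `read()` or `extend()`): discarding a prefix
// slides the pending bytes to the front, as in the diagram above.
#[cfg(test)]
#[test]
fn deframer_vec_buffer_discard_example() {
    let mut buffer = DeframerVecBuffer {
        buf: b"takenpending".to_vec(),
        used: 12,
        processed: 12,
    };
    buffer.discard(5);
    assert_eq!(buffer.filled(), b"pending");
    assert_eq!(buffer.processed, 7);
}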

#[cfg(feature = "std")]
impl DeframerVecBuffer {
    /// Read some bytes from `rd`, and add them to the buffer.
    pub(crate) fn read(&mut self, rd: &mut dyn io::Read, in_handshake: bool) -> io::Result<usize> {
        if let Err(err) = self.prepare_read(in_handshake) {
            return Err(io::Error::new(io::ErrorKind::InvalidData, err));
        }

        // Try to do the largest reads possible. Note that if
        // we get a message with a length field out of range here,
        // we do a zero length read.  That looks like an EOF to
        // the next layer up, which is fine.
        let new_bytes = rd.read(&mut self.buf[self.used..])?;
        self.used += new_bytes;
        Ok(new_bytes)
    }

    /// Resize the internal `buf` if necessary for reading more bytes.
    fn prepare_read(&mut self, is_joining_hs: bool) -> Result<(), &'static str> {
        /// TLS allows for handshake messages of up to 16MB.  We
        /// restrict that to 64KB to limit potential for denial-of-
        /// service.
        const MAX_HANDSHAKE_SIZE: u32 = 0xffff;

        const READ_SIZE: usize = 4096;

        // We allow a maximum of 64KB of buffered data for handshake messages only. Enforce this
        // by varying the maximum allowed buffer size here based on whether a prefix of a
        // handshake payload is currently being buffered. Given that the first read of such a
        // payload will only ever be 4KB, the next time we come around here we allow a
        // larger buffer size. Once the large message and any following handshake messages in
        // the same flight have been consumed, `pop()` will call `discard()` to reset `used`.
        // At this point, the buffer resizing logic below should reduce the buffer size.
        let allow_max = match is_joining_hs {
            true => MAX_HANDSHAKE_SIZE as usize,
            false => MAX_WIRE_SIZE,
        };

        if self.used >= allow_max {
            return Err("message buffer full");
        }

        // If we can and need to increase the buffer size to allow a 4KB read, do so. After
        // dealing with a large handshake message (exceeding `MAX_WIRE_SIZE`),
        // make sure to reduce the buffer size again (large messages should be rare).
        // Also, reduce the buffer size if there are neither full nor partial messages in it,
        // which usually means that the other side suspended sending data.
        let need_capacity = Ord::min(allow_max, self.used + READ_SIZE);
        if need_capacity > self.buf.len() {
            self.buf.resize(need_capacity, 0);
        } else if self.used == 0 || self.buf.len() > allow_max {
            self.buf.resize(need_capacity, 0);
            self.buf.shrink_to(need_capacity);
        }

        Ok(())
    }

    /// Append `bytes` to the end of this buffer.
    ///
    /// Return a `Range` saying where it went.
    pub(crate) fn extend(&mut self, bytes: &[u8]) -> Range<usize> {
        let len = bytes.len();
        let start = self.used;
        let end = start + len;
        if self.buf.len() < end {
            self.buf.resize(end, 0);
        }
        self.buf[start..end].copy_from_slice(bytes);
        self.used += len;
        Range { start, end }
    }
}
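
// A minimal usage sketch (illustrative test): `extend()` reports where the
// appended bytes landed, and a later `discard()` drops a consumed prefix.
#[cfg(all(feature = "std", test))]
#[test]
fn deframer_vec_buffer_extend_example() {
    let mut buffer = DeframerVecBuffer::default();
    assert_eq!(buffer.extend(b"hello"), Range { start: 0, end: 5 });
    assert_eq!(buffer.extend(b"world"), Range { start: 5, end: 10 });
    buffer.discard(5);
    assert_eq!(buffer.filled(), b"world");
}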

/// A borrowed version of [`DeframerVecBuffer`] that tracks discard operations
#[derive(Debug)]
pub(crate) struct DeframerSliceBuffer<'a> {
    // a fully initialized buffer that will be deframed
    buf: &'a mut [u8],
    // number of bytes to discard from the front of `buf` at a later time
    discard: usize,
}

impl<'a> DeframerSliceBuffer<'a> {
    pub(crate) fn new(buf: &'a mut [u8]) -> Self {
        Self { buf, discard: 0 }
    }

    /// Tracks a pending discard operation of `num_bytes`
    pub(crate) fn queue_discard(&mut self, num_bytes: usize) {
        self.discard += num_bytes;
    }

    pub(crate) fn pending_discard(&self) -> usize {
        self.discard
    }

    pub(crate) fn filled_mut(&mut self) -> &mut [u8] {
        &mut self.buf[self.discard..]
    }
}
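
// A minimal usage sketch (illustrative test): discards against a borrowed
// buffer are only queued; `filled_mut()` skips the queued prefix, and the
// owner of the backing buffer applies the discard later.
#[cfg(test)]
#[test]
fn deframer_slice_buffer_example() {
    let mut backing = *b"takenpending";
    let mut buffer = DeframerSliceBuffer::new(&mut backing);
    buffer.queue_discard(5);
    assert_eq!(buffer.pending_discard(), 5);
    assert_eq!(buffer.filled_mut(), b"pending");
}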