// Copyright 2022 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Sphinx packet building.

use super::{
crypto::*,
delay::Delay,
packet::*,
target::{MixnodeIndex, Target},
};
use arrayref::{array_mut_ref, array_refs, mut_array_refs};
use arrayvec::ArrayVec;
use rand::{CryptoRng, Rng};
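
/// Returns a mutable reference to the `N`-byte array at `offset` in `slice`. Panics if the slice
/// does not extend to `offset + N`.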
fn mut_arr_at<T, const N: usize>(slice: &mut [T], offset: usize) -> &mut [T; N] {
(&mut slice[offset..offset + N])
.try_into()
.expect("Slice length is fixed and matches array length")
}

enum PacketKind<'a> {
Request,
Reply(&'a SurbId),
Cover(Option<&'a CoverId>),
}

/// Build a Sphinx header. `targets` should not include the first hop. At most one target may be a
/// peer ID; all others should be mixnode indices. Returns the total forwarding delay across all
/// hops.
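///
/// The header consists of the generated key-exchange public, the MAC for the first hop, and the
/// encrypted routing actions; the key-exchange shared secrets for all hops are written to
/// `kx_shared_secrets`.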
fn build_header(
header: &mut Header,
kx_shared_secrets: &mut ArrayVec<SharedSecret, MAX_HOPS>,
rng: &mut (impl Rng + CryptoRng),
targets: &[Target],
their_kx_publics: &[KxPublic],
kind: PacketKind,
) -> Delay {
debug_assert_eq!(targets.len() + 1, their_kx_publics.len());
debug_assert!(their_kx_publics.len() <= MAX_HOPS);
let (kx_public, mac_plus_actions) =
mut_array_refs![header, KX_PUBLIC_SIZE, MAC_SIZE + ACTIONS_SIZE];
gen_kx_public_and_shared_secrets(kx_public, kx_shared_secrets, rng, their_kx_publics);
	// Routing actions, and the current write offset into them
let actions = array_mut_ref![mac_plus_actions, MAC_SIZE, ACTIONS_SIZE];
let mut offset = 0;
// Total forwarding delay across all hops
let mut total_delay = Delay::zero();
	// We loop over the hops forward and then backward. Data generated by the first pass for use
	// by the second pass is stashed here. The last hop is handled specially, so it is excluded.
struct Hop {
mac_key: MacKey,
keystream: [u8; ACTIONS_SIZE + MAX_ACTIONS_PAD_SIZE],
start_offset: u16, // Starting offset of hop in actions
}
let mut hops: ArrayVec<Hop, { MAX_HOPS - 1 }> = ArrayVec::new();
// Padding for length invariance, generated from the keystreams. This is only needed for
// computing the MACs.
let mut pad = [0; ACTIONS_SIZE - RAW_ACTION_SIZE];
// Loop over hops forward (excluding the last hop)
for (target, kx_shared_secret) in targets.iter().zip(kx_shared_secrets.iter()) {
// Write forward action
let start_offset = offset;
offset += RAW_ACTION_SIZE;
let raw_action = match target {
Target::MixnodeIndex(mixnode_index) => mixnode_index.get(),
Target::PeerId(peer_id) => {
*mut_arr_at(actions, offset) = *peer_id;
offset += PEER_ID_SIZE;
RAW_ACTION_FORWARD_TO_PEER_ID
},
};
*mut_arr_at(actions, start_offset) = raw_action.to_le_bytes();
// The MAC for the next hop can't be computed yet. Leave a gap for it. Note that this is
// always the last part of the action; this is assumed by the backward loop.
offset += MAC_SIZE;
let sds = SmallDerivedSecrets::new(kx_shared_secret);
total_delay += Delay::exp(sds.delay_seed());
hops.push(Hop {
mac_key: *sds.mac_key(),
keystream: [0; ACTIONS_SIZE + MAX_ACTIONS_PAD_SIZE],
start_offset: start_offset as u16,
});
let keystream = &mut hops.last_mut().expect("Just pushed, so not empty").keystream;
apply_actions_encryption_keystream(keystream, sds.actions_encryption_key());
// At the end of the loop, pad will contain the padding as seen by the last hop (before
// decryption)
apply_keystream(&mut pad[..offset], &keystream[ACTIONS_SIZE - start_offset..]);
}
// Handle the last hop
{
// Write deliver action
let start_offset = offset;
offset += RAW_ACTION_SIZE;
let raw_action = match kind {
PacketKind::Request => RAW_ACTION_DELIVER_REQUEST,
PacketKind::Reply(surb_id) => {
*mut_arr_at(actions, offset) = *surb_id;
offset += SURB_ID_SIZE;
RAW_ACTION_DELIVER_REPLY
},
PacketKind::Cover(None) => RAW_ACTION_DELIVER_COVER,
PacketKind::Cover(Some(cover_id)) => {
*mut_arr_at(actions, offset) = *cover_id;
offset += COVER_ID_SIZE;
RAW_ACTION_DELIVER_COVER_WITH_ID
},
};
*mut_arr_at(actions, start_offset) = raw_action.to_le_bytes();
// Fill the remainder of the routing actions field with random bytes, so the last hop
// cannot determine the path length
rng.fill_bytes(&mut actions[offset..]);
let sds =
SmallDerivedSecrets::new(kx_shared_secrets.last().expect("There is at least one hop"));
// Encrypt the deliver action (and the random bytes, although this isn't really necessary).
// Note that the padding is not touched here; it is generated entirely from the keystreams
// for earlier hops, and effectively gets scrambled even further when the last hop
// "decrypts" it.
apply_actions_encryption_keystream(
&mut actions[start_offset..],
sds.actions_encryption_key(),
);
// Compute the MAC for the last hop and place it in the appropriate place (right before the
// deliver action)
*mut_arr_at(mac_plus_actions, start_offset) =
compute_mac(&actions[start_offset..], &pad[..start_offset], sds.mac_key());
}
// Loop over hops backward (excluding the last hop, which has already been handled)
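	// A hop's MAC must cover the actions and padding exactly as that hop will receive them,
	// which is only known once the encryption layers of that hop and all later hops have been
	// applied; hence the backward pass.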
for hop in hops.iter().rev() {
let start_offset = hop.start_offset as usize;
// Encrypt the actions and padding for the hop
apply_keystream(&mut mac_plus_actions[MAC_SIZE + start_offset..], &hop.keystream);
apply_keystream(&mut pad[..start_offset], &hop.keystream[ACTIONS_SIZE - start_offset..]);
// Compute the MAC for the hop and place it in the appropriate place (right before the hop
// action)
*mut_arr_at(mac_plus_actions, start_offset) = compute_mac(
&mac_plus_actions[MAC_SIZE + start_offset..],
&pad[..start_offset],
&hop.mac_key,
);
}
total_delay
}

/// Returns a mutable reference to the payload data in `packet`. This is only really useful for
/// filling in the payload data prior to calling [`complete_request_packet`] or
/// [`complete_reply_packet`].
pub fn mut_payload_data(packet: &mut Packet) -> &mut PayloadData {
array_mut_ref![packet, HEADER_SIZE, PAYLOAD_DATA_SIZE]
}

/// Complete a Sphinx request packet. The unencrypted payload data should be written to
/// [`mut_payload_data(packet)`](mut_payload_data) before calling this function. `targets` should
/// not include the first hop. At most one target may be a peer ID; all others should be mixnode
/// indices. Returns the total forwarding delay across all hops.
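///
/// # Example
///
/// A minimal usage sketch (not compiled): `packet`, `rng`, `targets`, `their_kx_publics` and
/// `payload_data` are placeholders that the caller must provide.
///
/// ```ignore
/// // Fill in the plaintext payload data first...
/// mut_payload_data(&mut packet).copy_from_slice(&payload_data);
/// // ...then build the header and encrypt the payload in place. `targets` excludes the first
/// // hop; `their_kx_publics` has one entry per hop, including the first.
/// let total_delay =
///     complete_request_packet(&mut packet, &mut rng, &targets, &their_kx_publics);
/// ```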
pub fn complete_request_packet(
packet: &mut Packet,
rng: &mut (impl Rng + CryptoRng),
targets: &[Target],
their_kx_publics: &[KxPublic],
) -> Delay {
debug_assert_eq!(targets.len() + 1, their_kx_publics.len());
debug_assert!(their_kx_publics.len() <= MAX_HOPS);
let (header, payload) = mut_array_refs![packet, HEADER_SIZE, PAYLOAD_SIZE];
// Build the header
let mut kx_shared_secrets = ArrayVec::new();
let total_delay = build_header(
header,
&mut kx_shared_secrets,
rng,
targets,
their_kx_publics,
PacketKind::Request,
);
// Force the payload tag
*array_mut_ref![payload, PAYLOAD_DATA_SIZE, PAYLOAD_TAG_SIZE] = PAYLOAD_TAG;
// Encrypt the payload
for kx_shared_secret in kx_shared_secrets.iter().rev() {
encrypt_payload(payload, &derive_payload_encryption_key(kx_shared_secret));
}
total_delay
}

/// Size in bytes of a [`Surb`].
pub const SURB_SIZE: usize = RAW_MIXNODE_INDEX_SIZE + HEADER_SIZE + SHARED_SECRET_SIZE;
/// A "single-use reply block". This should be treated as an opaque type.
pub type Surb = [u8; SURB_SIZE];
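
/// Payload encryption keys generated alongside a SURB by [`build_surb`]; these are needed later
/// to decrypt the payload of the corresponding reply.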
pub type SurbPayloadEncryptionKeys = ArrayVec<PayloadEncryptionKey, MAX_HOPS>;

/// Build a SURB. Note that unlike in the Sphinx paper, the last hop (which should be this node)
/// decrypts the payload, rather than adding another layer of encryption and forwarding to the
/// "destination". So the number of payload encryption keys matches the number of hops. The first
/// hop must have a mixnode index, specified by `first_mixnode_index`. `targets` specifies the
/// remaining hops. At most one target may be a peer ID; all others should be mixnode indices.
/// Returns the total forwarding delay across all hops.
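///
/// # Example
///
/// A minimal usage sketch (not compiled): `rng`, `first_mixnode_index`, `targets`,
/// `their_kx_publics` and `surb_id` are placeholders that the caller must provide.
///
/// ```ignore
/// let mut surb = [0u8; SURB_SIZE];
/// let mut payload_encryption_keys = SurbPayloadEncryptionKeys::new();
/// let total_delay = build_surb(
///     &mut surb,
///     &mut payload_encryption_keys,
///     &mut rng,
///     first_mixnode_index,
///     &targets,
///     &their_kx_publics,
///     &surb_id,
/// );
/// // Retain `payload_encryption_keys` (eg looked up by the SURB ID) so the payload of the
/// // reply can be decrypted when it arrives.
/// ```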
pub fn build_surb(
surb: &mut Surb,
payload_encryption_keys: &mut SurbPayloadEncryptionKeys,
rng: &mut (impl Rng + CryptoRng),
first_mixnode_index: MixnodeIndex,
targets: &[Target],
their_kx_publics: &[KxPublic],
id: &SurbId,
) -> Delay {
debug_assert_eq!(targets.len() + 1, their_kx_publics.len());
debug_assert!(their_kx_publics.len() <= MAX_HOPS);
let (raw_first_mixnode_index, header, shared_secret) =
mut_array_refs![surb, RAW_MIXNODE_INDEX_SIZE, HEADER_SIZE, SHARED_SECRET_SIZE];
*raw_first_mixnode_index = first_mixnode_index.get().to_le_bytes();
// Build the header
let mut kx_shared_secrets = ArrayVec::new();
let total_delay = build_header(
header,
&mut kx_shared_secrets,
rng,
targets,
their_kx_publics,
PacketKind::Reply(id),
);
// Generate the payload encryption keys. The first key is derived from a totally random shared
// secret, the rest are derived from the key-exchange shared secrets. Note that we _could_ just
// return the shared secrets here and derive the encryption keys in decrypt_reply_payload. This
// wouldn't save much time/space though, and it seems better from a security perspective to not
// keep the shared secrets around.
rng.fill_bytes(shared_secret);
payload_encryption_keys.push(derive_payload_encryption_key(shared_secret));
kx_shared_secrets.pop(); // Last hop does not encrypt
for kx_shared_secret in &kx_shared_secrets {
payload_encryption_keys.push(derive_payload_encryption_key(kx_shared_secret));
}
total_delay
}

/// Complete a Sphinx reply packet. The unencrypted payload data should be written to
/// [`mut_payload_data(packet)`](mut_payload_data) before calling this function. `surb` should be a
/// SURB built by the receiving node using [`build_surb`]. Returns the mixnode index of the first
/// hop; this will only be [`None`] if the SURB is malformed.
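///
/// # Example
///
/// A minimal usage sketch (not compiled): `packet`, `reply_data` and the received `surb` are
/// placeholders that the caller must provide.
///
/// ```ignore
/// mut_payload_data(&mut packet).copy_from_slice(&reply_data);
/// let first_mixnode_index =
///     complete_reply_packet(&mut packet, &surb).expect("Well-formed SURB");
/// // The completed packet should be sent to the mixnode with this index.
/// ```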
pub fn complete_reply_packet(packet: &mut Packet, surb: &Surb) -> Option<MixnodeIndex> {
let (header, payload) = mut_array_refs![packet, HEADER_SIZE, PAYLOAD_SIZE];
let (raw_first_mixnode_index, surb_header, shared_secret) =
array_refs![surb, RAW_MIXNODE_INDEX_SIZE, HEADER_SIZE, SHARED_SECRET_SIZE];
// Copy the header from the SURB across as-is. We can't really check it; we just have to trust
// it.
*header = *surb_header;
// Force the payload tag
*array_mut_ref![payload, PAYLOAD_DATA_SIZE, PAYLOAD_TAG_SIZE] = PAYLOAD_TAG;
// Encrypt the payload. Actually "decrypt" to make decrypt_reply_payload slightly simpler.
decrypt_payload(payload, &derive_payload_encryption_key(shared_secret));
// Return the mixnode index of the first hop from the SURB
let raw_first_mixnode_index = RawMixnodeIndex::from_le_bytes(*raw_first_mixnode_index);
raw_first_mixnode_index.try_into().ok()
}

/// Build a Sphinx cover packet. `targets` should not include the first hop. At most one target may
/// be a peer ID; all others should be mixnode indices. Returns the total forwarding delay across
/// all hops.
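///
/// # Example
///
/// A minimal usage sketch (not compiled): `packet`, `rng`, `targets` and `their_kx_publics` are
/// placeholders that the caller must provide.
///
/// ```ignore
/// // No payload data needs to be written beforehand; the payload is randomised internally.
/// let total_delay =
///     build_cover_packet(&mut packet, &mut rng, &targets, &their_kx_publics, None);
/// ```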
pub fn build_cover_packet(
packet: &mut Packet,
rng: &mut (impl Rng + CryptoRng),
targets: &[Target],
their_kx_publics: &[KxPublic],
id: Option<&CoverId>,
) -> Delay {
debug_assert_eq!(targets.len() + 1, their_kx_publics.len());
debug_assert!(their_kx_publics.len() <= MAX_HOPS);
let (header, payload) = mut_array_refs![packet, HEADER_SIZE, PAYLOAD_SIZE];
// Build the header
let mut kx_shared_secrets = ArrayVec::new();
let total_delay = build_header(
header,
&mut kx_shared_secrets,
rng,
targets,
their_kx_publics,
PacketKind::Cover(id),
);
// Randomise the payload. It will be ignored by the destination, but needs to be
// indistinguishable from a normal encrypted payload.
rng.fill_bytes(payload);
total_delay
}