initial commit

This commit is contained in:
themighty1
2021-10-13 11:16:45 +03:00
commit 6d257fe2cc
15 changed files with 727 additions and 0 deletions

23
Dockerfile.nitro-cli Normal file
View File

@@ -0,0 +1,23 @@
# Base pinned by digest for reproducible builds
FROM ubuntu@sha256:aba80b77e27148d99c034a987e7da3a287ed455390352663418c0f2ed40417fe
# these 2 lines prevent tzdata from hanging on an interactive prompt
ENV TZ=Asia/Dubai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
# build deps for nitro-cli; docker.io is needed because nitro-cli drives docker
RUN apt-get update && apt-get install -y gcc curl unzip libssl-dev openssl pkg-config llvm-dev libclang-dev clang docker.io
# use a specific rust version for deterministic builds
# (fix: rustup-init takes `--default-toolchain 1.55.0`; the extra `=` made the
# toolchain name `=1.55.0`, which is invalid)
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.55.0
ENV PATH="/root/.cargo/bin:${PATH}"
# fetch a pinned revision of aws-nitro-enclaves-cli
RUN curl https://codeload.github.com/aws/aws-nitro-enclaves-cli/zip/8af39b8cdcda6cc50549dee0d3f5c5c89d940e67 -o nitro-cli.zip
RUN unzip nitro-cli.zip
ARG SRCDIR=aws-nitro-enclaves-cli-8af39b8cdcda6cc50549dee0d3f5c5c89d940e67
RUN cd $SRCDIR && cargo build --release
# nitro-cli expects this log file to exist
RUN mkdir /var/log/nitro_enclaves && > /var/log/nitro_enclaves/nitro_enclaves.log
# nitro-cli loads its bootstrap blobs from this folder
RUN mkdir -p /usr/share/nitro_enclaves/blobs
RUN cp $SRCDIR/blobs/x86_64/* /usr/share/nitro_enclaves/blobs
COPY nitro-cli.sh make_enclave.sh app/
RUN chmod +x app/nitro-cli.sh app/make_enclave.sh
CMD ["./app/nitro-cli.sh"]

30
Dockerfile.urlfetcher Normal file
View File

@@ -0,0 +1,30 @@
# syntax=docker/dockerfile:1
# deterministically build the rust app (which retrieves the attestation doc from the enclave)
# then copy the app into the enclave image
FROM ubuntu@sha256:aba80b77e27148d99c034a987e7da3a287ed455390352663418c0f2ed40417fe AS rustapp_builder
COPY rs app/rs
# rust needs gcc's linker. I was unable to pin gcc's version because Ubuntu repos update gcc
# with new security patches and don't keep old versions.
# It appears that gcc's linker does not have an effect on reproducibility of rust build process.
RUN apt update && apt install -y gcc curl
# use a specific rust version for deterministic builds
# (fix: rustup-init takes `--default-toolchain 1.55.0`; the extra `=` made the
# toolchain name `=1.55.0`, which is invalid)
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.55.0
ENV PATH="/root/.cargo/bin:${PATH}"
# install target to build static binaries for the enclave environment
RUN rustup +1.55.0 target add x86_64-unknown-linux-musl --toolchain 1.55.0
# all rust packages are pinned in Cargo.lock
RUN cd app/rs && cargo +1.55.0 build --release --target x86_64-unknown-linux-musl
# final (enclave) image: every layer must stay byte-for-byte reproducible,
# since the enclave PCR measurements depend on the resulting filesystem
FROM ubuntu@sha256:aba80b77e27148d99c034a987e7da3a287ed455390352663418c0f2ed40417fe
COPY --from=rustapp_builder app/rs/target/x86_64-unknown-linux-musl/release/attestation_retriever app/attestation_retriever
COPY --from=rustapp_builder app/rs/target/x86_64-unknown-linux-musl/release/entropy_retriever app/entropy_retriever
COPY server.py urlfetcher.sh traffic-forwarder.py dpkg_pinned app/
# restrict apt to the frozen focal release pocket so the exact package
# versions listed in dpkg_pinned resolve
RUN echo "deb http://archive.ubuntu.com/ubuntu/ focal main universe" > /etc/apt/sources.list
RUN apt update --assume-no && apt install -y $(cat app/dpkg_pinned)
RUN chmod +x ./app/urlfetcher.sh
CMD ["./app/urlfetcher.sh"]

31
README Normal file
View File

@@ -0,0 +1,31 @@
URLFetcher is a deterministically built Nitro enclave which fetches a list of URLs and returns an attestation document signed by Amazon with a hash of the whole request/response transcript.
This allows anyone to cryptographically prove the contents of any publicly accessible URL on the web.
You need docker to build URLFetcher deterministically (this will take ~15 min):
docker build --no-cache -t urlfetcher -f Dockerfile.urlfetcher .
docker build --no-cache -t nitro-cli -f Dockerfile.nitro-cli .
docker run -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -ti nitro-cli
The final output must be:
{
"Measurements": {
"HashAlgorithm": "Sha384 { ... }",
"PCR0": "f70217239e8a1cb0f3c010b842a279e2b8d30d3700d7e4722fef22291763479a13783dc76d5219fabbd7e5aa92a7b255",
"PCR1": "c35e620586e91ed40ca5ce360eedf77ba673719135951e293121cb3931220b00f87b5a15e94e25c01fecd08fc9139342",
"PCR2": "efba114128ccd6af1d1366a12c1ac89e4a4ca5ea1434d779efadfd3ec0d1da5b7c0d8525239fac29ffde2946e07d1c16"
}
}
Cleaning up:
docker image rm -f urlfetcher nitro-cli
The resulting /tmp/urlfetcher.eif can be launched in an AWS EC2 Nitro-capable instance.
After running the enclave on EC2:
1. make sure that an HTTP proxy is listening on port 8888 on AWS host machine (e.g. tinyproxy)
2. run python3 tcp_proxy.py on the host machine
3. send urls.json formatted like ["url1","url2",...] to the enclave from the host machine with:
curl -d '@urls.json' 127.0.0.1:10011 --http0.9 -o enclaveResponse
4. enclaveResponse is a concatenation of :
<4 bytes> length of the transcript | transcript | attestation document with transcript hash in the user_data field

114
dpkg_pinned Normal file
View File

@@ -0,0 +1,114 @@
adduser=3.118ubuntu2
apt=2.0.5
base-files=11ubuntu5.3
base-passwd=3.5.47
bash=5.0-6ubuntu1.1
bsdutils=1:2.34-0.1ubuntu9.1
bzip2=1.0.8-2
coreutils=8.30-3ubuntu2
curl=7.68.0-1ubuntu2
dash=0.5.10.2-6
debconf=1.5.73
debianutils=4.9.1
diffutils=1:3.7-3
dpkg=1.19.7ubuntu3
e2fsprogs=1.45.5-2ubuntu1
fdisk=2.34-0.1ubuntu9.1
file=1:5.38-4
findutils=4.7.0-1ubuntu1
gcc-10-base:amd64=10.2.0-5ubuntu1~20.04
gpgv=2.2.19-3ubuntu2.1
grep=3.4-1
gzip=1.10-0ubuntu4
haveged=1.9.1-6ubuntu1
hostname=3.23
init-system-helpers=1.57
libacl1:amd64=2.2.53-6
libapt-pkg6.0:amd64=2.0.5
libattr1:amd64=1:2.4.48-5
libaudit-common=1:2.8.5-2ubuntu6
libaudit1:amd64=1:2.8.5-2ubuntu6
libblkid1:amd64=2.34-0.1ubuntu9.1
libbz2-1.0:amd64=1.0.8-2
libc-bin=2.31-0ubuntu9.2
libc6:amd64=2.31-0ubuntu9.2
libcap-ng0:amd64=0.7.9-2.1build1
libcom-err2:amd64=1.45.5-2ubuntu1
libcrypt1:amd64=1:4.4.10-10ubuntu4
libdb5.3:amd64=5.3.28+dfsg1-0.6ubuntu2
libdebconfclient0:amd64=0.251ubuntu1
libexpat1:amd64=2.2.9-1build1
libext2fs2:amd64=1.45.5-2ubuntu1
libfdisk1:amd64=2.34-0.1ubuntu9.1
libffi7:amd64=3.3-4
libgcc-s1:amd64=10.2.0-5ubuntu1~20.04
libgcrypt20:amd64=1.8.5-5ubuntu1
libgmp10:amd64=2:6.2.0+dfsg-4
libgnutls30:amd64=3.6.13-2ubuntu1.3
libgpg-error0:amd64=1.37-1
libhogweed5:amd64=3.5.1+really3.5.1-2ubuntu0.1
libidn2-0:amd64=2.2.0-2
liblz4-1:amd64=1.9.2-2ubuntu0.20.04.1
liblzma5:amd64=5.2.4-1ubuntu1
libmagic-mgc=1:5.38-4
libmagic1:amd64=1:5.38-4
libmount1:amd64=2.34-0.1ubuntu9.1
libmpdec2:amd64=2.4.2-3
libncurses6:amd64=6.2-0ubuntu2
libncursesw6:amd64=6.2-0ubuntu2
libnettle7:amd64=3.5.1+really3.5.1-2ubuntu0.1
libp11-kit0:amd64=0.23.20-1ubuntu0.1
libpam-modules:amd64=1.3.1-5ubuntu4.2
libpam-modules-bin=1.3.1-5ubuntu4.2
libpam-runtime=1.3.1-5ubuntu4.2
libpam0g:amd64=1.3.1-5ubuntu4.2
libpcre2-8-0:amd64=10.34-7
libpcre3:amd64=2:8.39-12build1
libprocps8:amd64=2:3.3.16-1ubuntu2.1
libpython3-stdlib:amd64=3.8.2-0ubuntu2
libpython3.8-minimal:amd64=3.8.2-1ubuntu1
libpython3.8-stdlib:amd64=3.8.2-1ubuntu1
libreadline8:amd64=8.0-4
libseccomp2:amd64=2.5.1-1ubuntu1~20.04.1
libselinux1:amd64=3.0-1build2
libsemanage-common=3.0-1build2
libsemanage1:amd64=3.0-1build2
libsepol1:amd64=3.0-1
libsmartcols1:amd64=2.34-0.1ubuntu9.1
libsqlite3-0:amd64=3.31.1-4
libss2:amd64=1.45.5-2ubuntu1
libssl1.1:amd64=1.1.1f-1ubuntu2
libstdc++6:amd64=10.2.0-5ubuntu1~20.04
libsystemd0:amd64=245.4-4ubuntu3.6
libtasn1-6:amd64=4.16.0-2
libtinfo6:amd64=6.2-0ubuntu2
libudev1:amd64=245.4-4ubuntu3.6
libunistring2:amd64=0.9.10-2
libuuid1:amd64=2.34-0.1ubuntu9.1
libzstd1:amd64=1.4.4+dfsg-3ubuntu0.1
login=1:4.8.1-1ubuntu5.20.04
logsave=1.45.5-2ubuntu1
lsb-base=11.1.0ubuntu2
mawk=1.3.4.20200120-2
mime-support=3.64ubuntu1
mount=2.34-0.1ubuntu9.1
ncurses-base=6.2-0ubuntu2
ncurses-bin=6.2-0ubuntu2
net-tools=1.60+git20180626.aebd88e-1ubuntu1
passwd=1:4.8.1-1ubuntu5.20.04
perl-base=5.30.0-9ubuntu0.2
procps=2:3.3.16-1ubuntu2.1
python3=3.8.2-0ubuntu2
python3-minimal=3.8.2-0ubuntu2
python3.8=3.8.2-1ubuntu1
python3.8-minimal=3.8.2-1ubuntu1
readline-common=8.0-4
rng-tools=5-1ubuntu2
sed=4.7-1
sensible-utils=0.0.12+nmu1
sysvinit-utils=2.96-2.1ubuntu1
tar=1.30+dfsg-7ubuntu0.20.04.1
ubuntu-keyring=2020.02.11.4
util-linux=2.34-0.1ubuntu9.1
xz-utils=5.2.4-1
zlib1g:amd64=1:1.2.11.dfsg-2ubuntu1.2

36
make_enclave.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Repackage the "urlfetcher" docker image into a byte-for-byte deterministic
# image, then build the enclave image (/tmp/urlfetcher.eif) from it.
echo "asking docker for image urlfetcher..."
tmp_dir1=$(mktemp -d)
docker save urlfetcher > "$tmp_dir1/urlfetcher.tar"
tmp_dir2=$(mktemp -d)
tar -xvf "$tmp_dir1/urlfetcher.tar" -C "$tmp_dir2"
# manifest.json points at "<sha>.json" — the image config file
configId=$(awk -F 'Config\":\"' '{print $2}' "$tmp_dir2/manifest.json" | awk -F '.json' '{print $1}')
configPath=$tmp_dir2/$configId.json
# every top-level directory of the unpacked image is one layer
dirlist=$(find "$tmp_dir2" -mindepth 1 -maxdepth 1 -type d)
for dir in $dirlist
do
    # extract layer into a new dir
    # (basename is robust to TMPDIR depth, unlike the old `cut -f4 -d/`)
    id=$(basename "$dir")
    mkdir "$tmp_dir1/$id"
    tar -xvf "$dir/layer.tar" -C "$tmp_dir1/$id"
    # remove non-deterministic data
    find "$tmp_dir1/$id" -type d -iname __pycache__ -exec rm -rv {} +
    rm -rf "$tmp_dir1/$id/root"
    rm -rf "$tmp_dir1/$id/var"
    # replace orig layer with a deterministic one (fixed mtimes) and update
    # the layer hash recorded in the image config
    tar --mtime='UTC 2020-01-01' -cf "$tmp_dir1/$id.tar" -C "$tmp_dir1/$id" .
    oldSha=$(sha256sum "$dir/layer.tar" | cut -f1 -d' ')
    newSha=$(sha256sum "$tmp_dir1/$id.tar" | cut -f1 -d' ')
    cp "$tmp_dir1/$id.tar" "$dir/layer.tar"
    sed -i "s/$oldSha/$newSha/" "$configPath"
done
# re-pack and re-load the deterministic image under the same name
tar -cf "$tmp_dir1/deterministic.tar" -C "$tmp_dir2/" .
docker image rm urlfetcher
docker load -i "$tmp_dir1/deterministic.tar"
nitro-cli build-enclave --docker-uri urlfetcher --output-file /tmp/urlfetcher.eif
echo "Please make sure that PCR0 PCR1 PCR2 are deterministic"
rm -rf "$tmp_dir1" "$tmp_dir2"

7
nitro-cli.sh Normal file
View File

@@ -0,0 +1,7 @@
#!/bin/bash
#this is the main entrypoint of the nitro-cli container
# add the freshly built nitro-cli binary's dir to the PATH
# (PATH is already exported, so make_enclave.sh inherits the change)
PATH=$PATH:/aws-nitro-enclaves-cli-8af39b8cdcda6cc50549dee0d3f5c5c89d940e67/target/release
# build the deterministic enclave image from the urlfetcher docker image
./app/make_enclave.sh

173
rs/Cargo.lock generated Normal file
View File

@@ -0,0 +1,173 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "att_doc_retriever_sample"
version = "0.1.0"
dependencies = [
"nsm-driver",
"nsm-io",
"serde_bytes",
]
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "cc"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "half"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3"
[[package]]
name = "libc"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7f823d141fe0a24df1e23b4af4e3c7ba9e5966ec514ea068c93024aa7deb765"
[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
"cfg-if 1.0.0",
]
[[package]]
name = "nix"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b2e0b4f3320ed72aaedb9a5ac838690a8047c7b275da22711fddff4f8a14229"
dependencies = [
"bitflags",
"cc",
"cfg-if 0.1.10",
"libc",
"void",
]
[[package]]
name = "nsm-driver"
version = "0.1.0"
source = "git+https://github.com/aws/aws-nitro-enclaves-nsm-api.git?rev=4f468c4#4f468c467583bbd55429935c4f09448dd43f48a0"
dependencies = [
"libc",
"log",
"nix",
"nsm-io",
"serde_cbor",
]
[[package]]
name = "nsm-io"
version = "0.1.0"
source = "git+https://github.com/aws/aws-nitro-enclaves-nsm-api.git?rev=4f468c4#4f468c467583bbd55429935c4f09448dd43f48a0"
dependencies = [
"libc",
"log",
"nix",
"serde",
"serde_bytes",
"serde_cbor",
]
[[package]]
name = "proc-macro2"
version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "serde"
version = "1.0.127"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f03b9878abf6d14e6779d3f24f07b2cfa90352cfec4acc5aab8f1ac7f146fae8"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_bytes"
version = "0.11.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9"
dependencies = [
"serde",
]
[[package]]
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = [
"half",
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.127"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a024926d3432516606328597e0f224a51355a493b49fdd67e9209187cbe55ecc"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "syn"
version = "1.0.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]]
name = "void"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"

26
rs/Cargo.toml Normal file
View File

@@ -0,0 +1,26 @@
[package]
name = "att_doc_retriever_sample"
version = "0.1.0"
authors = ["Doru-Florin Blanzeanu <blanzed@amazon.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[[bin]]
name = "attestation_retriever"
path = "src/attestation_retriever.rs"
[[bin]]
name = "entropy_retriever"
path = "src/entropy_retriever.rs"
[dependencies]
serde_bytes = "0.11"
[dependencies.nsm-driver]
git = "https://github.com/aws/aws-nitro-enclaves-nsm-api.git"
rev = "4f468c4"
[dependencies.nsm-io]
git = "https://github.com/aws/aws-nitro-enclaves-nsm-api.git"
rev = "4f468c4"

View File

@@ -0,0 +1,44 @@
use nsm_io::Request;
use nsm_io::Response;
use serde_bytes::ByteBuf;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
fn main() {
    // Usage: attestation_retriever <user_data_path> <out_path>
    // Reads the bytes at <user_data_path>, asks the Nitro Security Module for
    // an attestation document with those bytes in the user_data field, and
    // writes the raw document to <out_path>.
    let user_data_path = std::env::args().nth(1).expect("no user_data_path given");
    let out_path = std::env::args().nth(2).expect("no outpath given");
    let path = Path::new(&out_path);
    let display = path.display();
    // Open the output file in write-only mode before talking to the NSM so we
    // fail early if the path is not writable.
    let mut file = match File::create(&path) {
        Err(why) => panic!("couldn't create {}: {}", display, why),
        Ok(file) => file,
    };
    let nsm_fd = nsm_driver::nsm_init();
    let user_data = std::fs::read(user_data_path).unwrap();
    let user_data_bytes = ByteBuf::from(user_data);
    // Only user_data is set; public_key and nonce are not used by this tool.
    let request = Request::Attestation {
        public_key: None,
        user_data: Some(user_data_bytes),
        nonce: None,
    };
    let response = nsm_driver::nsm_process_request(nsm_fd, request);
    // Fix: fail loudly instead of silently writing an empty file when the NSM
    // returns anything other than an attestation document.
    let attest_doc = match response {
        Response::Attestation { document: docu } => docu,
        _ => panic!("NSM did not return an attestation document"),
    };
    match file.write_all(&attest_doc) {
        Err(why) => panic!("couldn't write to {}: {}", display, why),
        Ok(_) => println!("successfully wrote to {}", display),
    }
    nsm_driver::nsm_exit(nsm_fd);
}

View File

@@ -0,0 +1,14 @@
use nsm_io::Request;
use nsm_io::Response;
fn main() {
    // Ask the Nitro Security Module for random bytes and print them as a
    // debug-formatted Vec<u8> (e.g. "[1, 2, 3]"); server.py parses this
    // output as JSON to seed the enclave's entropy pool.
    let nsm_fd = nsm_driver::nsm_init();
    let request = Request::GetRandom;
    let response = nsm_driver::nsm_process_request(nsm_fd, request);
    // On any non-GetRandom response we fall through with an empty vec —
    // deliberate best-effort: the caller (server.py) retries until it has
    // collected enough bytes.
    let mut random = vec![];
    if let Response::GetRandom{random: rnd} = response {
        random = rnd;
    }
    println!("{:?}", random);
    nsm_driver::nsm_exit(nsm_fd);
}

75
server.py Normal file
View File

@@ -0,0 +1,75 @@
import socket, json, time, hashlib, subprocess, threading, os
arPath = 'app/attestation_retriever' #attestation retriever app's path
erPath = 'app/entropy_retriever' #entropy retriever app's path
def haveged():
    """Run the haveged entropy daemon in the foreground.

    Launched in a thread so the daemon stays alive for the lifetime
    of the enclave process.
    """
    print("starting haveged")
    subprocess.run(['haveged', '-F', '-v', '1'])
def rngd():
    """Run rngd in the foreground, feeding /tmp/rnd into the kernel pool.

    Launched in a thread so the daemon stays alive for the lifetime
    of the enclave process.
    """
    print("starting rngd")
    subprocess.run(['rngd', '-f', '-r', '/tmp/rnd'])
if __name__ == "__main__":
    # Seed the enclave's entropy pool before serving any requests.
    threading.Thread(target=haveged).start()
    # Gather at least 10000 random bytes from the Nitro Security Module.
    nsm_random = b''
    i = 0
    while len(nsm_random) < 10000:
        out = subprocess.check_output([erPath])
        # entropy_retriever prints a debug-formatted byte list, e.g. [1, 2, 3]
        nsm_random += bytes(json.loads(out.decode()))
        i += 1
        if i > 1000:
            # fix: raising a plain string is a TypeError in Python 3
            raise Exception('Could not get 10000 random bytes from NSM')
    print("got entropy bytes from NSM:", len(nsm_random))
    with open('/tmp/rnd', 'wb+') as f:
        f.write(nsm_random)
    # rngd feeds /tmp/rnd into the kernel entropy pool
    threading.Thread(target=rngd).start()
    # allow haveged to populate
    time.sleep(5)
    # check 1: enough kernel entropy must be available
    with open("/proc/sys/kernel/random/entropy_avail", 'r') as f:
        data = f.read()
    entropy = int(data.strip())
    print("/proc/sys/kernel/random/entropy_avail is ", entropy)
    if entropy < 2000:
        raise Exception("not enough entropy in /proc/sys/kernel/random/entropy_avail")
    # check 2. Returns non-0 if randomness is bad
    rv = os.system("cat /dev/random | rngtest -c 100")
    if rv != 0:
        raise Exception("rngtest failed")
    print("rngtest passed")
    print("server is listening")
    # Accept exactly one request from the host over vsock.
    sock = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
    sock.bind((socket.VMADDR_CID_ANY, 10011))
    sock.listen(1)
    connection, client_address = sock.accept()
    connection.settimeout(2)
    # fix: initialize raw so a timeout on the first recv doesn't leave it unbound
    raw = b''
    try:
        raw = connection.recv(10000)
        # usually the body arrives separately from the headers, so give the TCP buffer some time to fill up
        time.sleep(1)
        raw += connection.recv(10000)
    except:
        # socket timeout — best-effort: whatever arrived so far is used
        pass
    print('received', raw)
    # ignore HTTP headers, we just need the payload in the body
    body = raw.decode().split('\r\n\r\n')[1]
    json_object = json.loads(body)
    final_json = []
    for url in json_object:
        # fetch each URL through the local proxy at 127.0.0.1:8001
        contents = subprocess.check_output(["curl", "-x", "127.0.0.1:8001", url])
        final_json.append({"request": url, "response": contents.decode()})
    print(final_json)
    # hash the full request/response transcript and attest to the digest
    data_to_be_hashed = json.dumps(final_json).encode()
    digest_for_attestation = hashlib.sha256(data_to_be_hashed).digest()
    with open('/tmp/digest', 'wb+') as f:
        f.write(digest_for_attestation)
    subprocess.call([arPath, '/tmp/digest', '/tmp/attest'])
    with open('/tmp/attest', 'rb') as f:
        attestDoc = f.read()
    # response layout: 4-byte big-endian transcript length | transcript | attestation doc
    connection.send(len(data_to_be_hashed).to_bytes(4, 'big') + data_to_be_hashed + attestDoc)
    connection.close()

86
tcp_proxy.py Normal file
View File

@@ -0,0 +1,86 @@
# tcp_proxy.py does 2 things:
# 1. forwards requests from host's port 10011 to enclave's port 10011
# 2. allows the enclave to make http requests to the outside world by running the vsock-proxy utility
# to forward requests from host's vsock port 8002 to host's port 8888 on which
# an http proxy should be listening (e.g. tinyproxy).
# Inside the enclave traffic-forwarder.py must be run so that traffic from enclave's vsock port
# could be forwarded to host's vsock port 8002
import socket, time
import subprocess
import threading
def handler(sock):
    """Handle one HTTP POST: read the full payload, forward the raw request
    to the enclave over vsock, relay the enclave's response back, then close
    the socket."""
    print('Handling a new connection', sock.fileno())
    raw = None
    try:
        sock.settimeout(20)
        raw = sock.recv(1000000)
        if not raw:
            print('No data received', sock.fileno())
            sock.close()
            return
        # \r\n\r\n separates the headers from the POST payload
        headers = raw.decode().split('\r\n\r\n')[0].split('\r\n')
        expectedPayloadLength = None
        headerFound = False
        for h in headers:
            if h.startswith('Content-Length'):
                expectedPayloadLength = int(h.split(':')[1].strip())
                headerFound = True
                break
        if not headerFound:
            print('Error: Content-Length header not found')
            return
        payload = raw.decode().split('\r\n\r\n')[1]
        payloadLengthIsCorrect = False
        # poll (up to 10 reads) until the whole body declared by
        # Content-Length has arrived
        for x in range(10):
            if len(payload) < expectedPayloadLength:
                raw += sock.recv(1000000)
                payload = raw.decode().split('\r\n\r\n')[1]
                time.sleep(0.1)
                print('waiting for more payload')
            else:
                payloadLengthIsCorrect = True
                break  # fix: stop polling once the payload is complete
        if not payloadLengthIsCorrect:
            print('Error: payload length is incorrect')
            return
        # forward raw data to the enclave
        # NOTE(review): CID 16 is assumed to be the enclave's vsock CID —
        # confirm against the nitro-cli run-enclave settings
        client_sock = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        client_sock.settimeout(60)
        client_sock.connect((16, 10011))
        client_sock.sendall(raw)
        response = client_sock.recv(1000000)
        sock.send(response)
        sock.close()
    except Exception as e: #e.g. timeout or malformed request
        print('Exception while handling connection', sock.fileno(), e, raw)
        sock.close()
def vsock():
    """Run vsock-proxy in the foreground: forwards host vsock port 8002 to
    the HTTP proxy listening on 127.0.0.1:8888."""
    cmd = ['vsock-proxy', '8002', '127.0.0.1', '8888', '--config', 'vsockconfig.yaml']
    subprocess.call(cmd)
if __name__ == "__main__":
    # run vsock-proxy in the background so the enclave can reach the host's HTTP proxy
    threading.Thread(target=vsock).start()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_address = ('0.0.0.0', 10011)
    sock.bind(server_address)
    sock.listen(100) #as many as possible
    connection_number = 0
    # accept forever; each connection is handled in its own thread
    while True:
        try:
            print('Waiting for a new connection')
            connection, client_address = sock.accept()
            connection_number += 1
            print('Connection accepted', connection_number)
            threading.Thread(target=handler, args=(connection,)).start()
        except Exception as e:
            # fix: the log message referenced the wrong file (notaryserver.py)
            print('Exception in tcp_proxy.py', e)
            pass

51
traffic-forwarder.py Normal file
View File

@@ -0,0 +1,51 @@
import socket
import sys
import threading
import time
def server(local_port, remote_cid, remote_port):
    # Accept TCP connections on `local_port` and pipe each one to
    # (`remote_cid`, `remote_port`) over vsock, one pair of forward()
    # threads per connection (one per direction).
    try:
        dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        dock_socket.bind(('', local_port))
        dock_socket.listen(5)
        while True:
            client_socket = dock_socket.accept()[0]
            server_socket = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
            server_socket.connect((remote_cid, remote_port))
            outgoing_thread = threading.Thread(target = forward, args = (client_socket, server_socket))
            incoming_thread = threading.Thread(target = forward, args = (server_socket, client_socket))
            outgoing_thread.start()
            incoming_thread.start()
    finally:
        # NOTE(review): if the accept loop ever dies, restart it in a fresh
        # thread. If bind() itself fails this respawns in a tight loop —
        # presumably acceptable inside the enclave, but worth confirming.
        new_thread = threading.Thread(target = server, args = (local_port, remote_cid, remote_port))
        new_thread.start()
        return
def forward(source, destination):
    """Copy bytes from `source` to `destination` until `source` reports EOF,
    then half-close both ends so each peer observes the EOF too."""
    while True:
        chunk = source.recv(1024)
        if not chunk:
            # empty recv => peer closed its write side
            source.shutdown(socket.SHUT_RD)
            destination.shutdown(socket.SHUT_WR)
            return
        destination.sendall(chunk)
def main(args):
    """Start the TCP->vsock forwarder.

    args: [local_port, remote_cid, remote_port] as strings (from sys.argv).
    """
    local_port = int(args[0])
    remote_cid = int(args[1])
    remote_port = int(args[2])
    worker = threading.Thread(target = server, args = (local_port, remote_cid, remote_port))
    worker.start()
    # park the main thread forever; all forwarding happens in worker threads
    while True:
        time.sleep(60)
if __name__ == '__main__':
    main(sys.argv[1:])

15
urlfetcher.sh Normal file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
#this is the main entrypoint of the urlfetcher container
# bring up the loopback interface inside the enclave
ifconfig lo 127.0.0.1
# traffic-forwarder.py takes data from the enclave's vsock at port 8001
# and forwards it to the host's vsock port 8002,
# where vsock-proxy takes data from the host's vsock 8002 and
# forwards it to a host TCP port (e.g. 8888) where an HTTP proxy
# should be listening
# (on the host machine we run: vsock-proxy 8002 127.0.0.1 8888 --config config.yaml)
python3 /app/traffic-forwarder.py 8001 3 8002 &
python3 /app/server.py

2
vsockconfig.yaml Normal file
View File

@@ -0,0 +1,2 @@
allowlist:
- {address: 127.0.0.1, port: 8888}