initial commit

themighty1
2021-11-08 21:43:29 +03:00
commit e49a273b6d
10 changed files with 3919 additions and 0 deletions

3
.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
[submodule "circuits"]
path = circuits
url = https://github.com/tlsnotary/circuits

12
README Normal file

@@ -0,0 +1,12 @@
This is the notary server for the TLSNotary protocol.
It is primarily intended to be run inside a sandboxed AWS EC2 instance (https://github.com/tlsnotary/pagesigner-oracles). It can also be run as a regular server (you'll have to start it with --no-sandbox and pass the file public.key to the client).
To compile:
go mod init notary
go get github.com/bwesterb/go-ristretto@b51b4774df9150ea7d7616f76e77f745a464bbe3
go get github.com/roasbeef/go-go-gadget-paillier@14f1f86b60008ece97b6233ed246373e555fc79f
go get golang.org/x/crypto/blake2b
go get golang.org/x/crypto/nacl/secretbox
go build -o notary
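For example, to run it locally without the sandbox (a sketch based on the flags defined in src/notary.go):
./notary --no-sandbox
The master public key is written to public.key next to the binary; give that file to the client.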

1
circuits Submodule

Submodule circuits added at cb87bc30e7

500
src/evaluator/evaluator.go Normal file

@@ -0,0 +1,500 @@
package evaluator
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"log"
"math"
"math/rand"
"notary/garbler"
u "notary/utils"
"time"
"github.com/bwesterb/go-ristretto"
)
type Evaluator struct {
g *garbler.Garbler
// fixed inputs for each circuit (circuit count starts at 1)
FixedInputs [][]int
// OT for bits 0/1 format: ({k:[]byte, B:[]byte})
OT0 []OTmap
OT1 []OTmap
A []byte // client-garbler's A
fixedLabels [][][]byte
OTFixedK [][]byte
ttBlobs [][]byte // truth table blobs for each circuit
olBlobs [][]byte // output labels blobs for each circuit
nonFixedOTBits [][]OTmap
Salt [][]byte // commitment salt for each circuit
CommitHash [][]byte // hash of output for each circuit
}
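// OTmap holds one receiver-side OT instance: K is the symmetric key derived from the
// shared secret, B is the point sent to the garbler, and idx is this B's position in the
// shuffled batch produced by PreComputeOT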
type OTmap struct {
K []byte
B []byte
idx int
}
func (e *Evaluator) Init(g *garbler.Garbler) {
e.g = g
e.FixedInputs = make([][]int, len(g.Cs))
e.fixedLabels = make([][][]byte, len(g.Cs))
e.ttBlobs = make([][]byte, len(g.Cs))
e.olBlobs = make([][]byte, len(g.Cs))
e.Salt = make([][]byte, len(g.Cs))
e.CommitHash = make([][]byte, len(g.Cs))
e.nonFixedOTBits = make([][]OTmap, len(g.Cs))
}
// SetFixedInputs is called after we know the number of c6 circuits.
// Consult each circuit's .casm file for an explanation of what each mask does.
func (e *Evaluator) SetFixedInputs() {
for i := 1; i < len(e.g.Cs); i++ {
c := e.g.Cs[i]
if i == 1 {
e.FixedInputs[1] = u.BytesToBits(c.Masks[1])
log.Println("e.FixedInputs[1] ", len(e.FixedInputs[1]))
}
if i == 2 {
e.FixedInputs[2] = u.BytesToBits(c.Masks[1])
log.Println("e.FixedInputs[2] ", len(e.FixedInputs[2]))
}
if i == 3 {
var allMasks []byte
allMasks = append(allMasks, c.Masks[6]...)
allMasks = append(allMasks, c.Masks[5]...)
allMasks = append(allMasks, c.Masks[4]...)
allMasks = append(allMasks, c.Masks[3]...)
allMasks = append(allMasks, c.Masks[2]...)
allMasks = append(allMasks, c.Masks[1]...)
e.FixedInputs[3] = u.BytesToBits(allMasks)
log.Println("e.FixedInputs[3] ", len(e.FixedInputs[3]))
}
if i == 4 {
var allMasks []byte
allMasks = append(allMasks, c.Masks[2]...)
allMasks = append(allMasks, c.Masks[1]...)
e.FixedInputs[4] = u.BytesToBits(allMasks)
log.Println("e.FixedInputs[4] ", len(e.FixedInputs[4]))
}
if i == 5 {
var allMasks []byte
allMasks = append(allMasks, e.g.Cs[3].Masks[4]...) // civ mask
allMasks = append(allMasks, e.g.Cs[3].Masks[2]...) // cwk mask
e.FixedInputs[5] = u.BytesToBits(allMasks)
log.Println("e.FixedInputs[5] ", len(e.FixedInputs[5]))
}
if i == 6 {
var allMasks []byte
for j := e.g.C6Count; j > 0; j-- {
allMasks = append(allMasks, e.g.Cs[6].Masks[j]...)
}
allMasks = append(allMasks, e.g.Cs[3].Masks[4]...) // civ mask
allMasks = append(allMasks, e.g.Cs[3].Masks[2]...) // cwk mask
e.FixedInputs[6] = u.BytesToBits(allMasks)
log.Println("e.FixedInputs[6] ", len(e.FixedInputs[6]))
}
}
}
// client's A for OT must be available at this point
func (e *Evaluator) PreComputeOT() []byte {
var allFixedInputs []int
allNonFixedInputsSize := 0
for i := 1; i < len(e.g.Cs); i++ {
allFixedInputs = append(allFixedInputs, e.FixedInputs[i]...)
allNonFixedInputsSize += e.g.Cs[i].NotaryNonFixedInputSize
}
log.Println("len(allFixedInputs)", len(allFixedInputs))
log.Println("allNonFixedInputsSize", allNonFixedInputsSize)
var buf [32]byte
copy(buf[:], e.A[:])
A := new(ristretto.Point)
A.SetBytes(&buf)
e.OTFixedK = nil
var OTFixedB [][]byte
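// we act as the OT receiver here: B = b*G selects bit 0, B = A + b*G selects bit 1,
// and the key k = H(b*A) will later decrypt exactly the label for our bit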
for i := 0; i < len(allFixedInputs); i++ {
bit := allFixedInputs[i]
b := new(ristretto.Scalar).Rand()
B := new(ristretto.Point).ScalarMultBase(b)
if bit == 1 {
B = new(ristretto.Point).Add(A, B)
}
k := u.Generichash(16, new(ristretto.Point).ScalarMult(A, b).Bytes())
e.OTFixedK = append(e.OTFixedK, k)
OTFixedB = append(OTFixedB, B.Bytes())
}
// we prepare OT for roughly 60% (plus a fixed margin) of 1s and of 0s of all non-fixed inputs,
// because we don't know in advance exactly how many 1s and 0s the non-fixed
// inputs will contain
e.OT0 = nil
e.OT1 = nil
for i := 0; i < int(math.Ceil(float64(allNonFixedInputsSize/2)*1.2))+3000; i++ {
b := new(ristretto.Scalar).Rand()
B := new(ristretto.Point).ScalarMultBase(b)
k := u.Generichash(16, new(ristretto.Point).ScalarMult(A, b).Bytes())
var m OTmap
m.K = k
m.B = B.Bytes()
e.OT0 = append(e.OT0, m)
}
for i := 0; i < int(math.Ceil(float64(allNonFixedInputsSize/2)*1.2))+3000; i++ {
b := new(ristretto.Scalar).Rand()
B := new(ristretto.Point).ScalarMultBase(b)
B = new(ristretto.Point).Add(A, B)
k := u.Generichash(16, new(ristretto.Point).ScalarMult(A, b).Bytes())
var m OTmap
m.K = k
m.B = B.Bytes()
e.OT1 = append(e.OT1, m)
}
log.Println("e.OT0/1 len is", len(e.OT0), len(e.OT1))
// send the non-fixed OT points in a random order, remembering each one's index in that order
var OTNonFixedToSend []byte = nil
allOTLen := len(e.OT0) + len(e.OT1)
var idxSeen []int
rand.Seed(time.Now().UnixNano())
for i := 0; i < allOTLen; i++ {
var ot *[]OTmap
randIdx := rand.Intn(allOTLen)
if isIntInArray(randIdx, idxSeen) {
// this index was already seen, try again
i--
continue
}
idxSeen = append(idxSeen, randIdx)
if randIdx >= len(e.OT0) {
ot = &e.OT1
// adjust the index to become an OT1 index
randIdx = randIdx - len(e.OT0)
} else {
ot = &e.OT0
}
(*ot)[randIdx].idx = i
OTNonFixedToSend = append(OTNonFixedToSend, (*ot)[randIdx].B...)
}
var payload []byte
for i := 0; i < len(OTFixedB); i++ {
payload = append(payload, OTFixedB[i]...)
}
payload = append(payload, OTNonFixedToSend...)
log.Println("returning payload for garbler, size ", len(payload))
return payload
}
func (e *Evaluator) SetA(A []byte) {
e.A = A
}
func (e *Evaluator) ProcessEncryptedLabels(labelsBlob []byte) {
allFICount := 0 //count of all fixed inputs from all circuits
for i := 1; i < len(e.g.Cs); i++ {
allFICount += len(e.FixedInputs[i])
}
if len(labelsBlob) != allFICount*32 {
log.Println(len(labelsBlob), allFICount)
panic("len(labelsBlob) != allFICount*32")
}
idx := 0
for i := 1; i < len(e.g.Cs); i++ {
e.fixedLabels[i] = make([][]byte, len(e.FixedInputs[i]))
for j := 0; j < len(e.FixedInputs[i]); j++ {
bit := e.FixedInputs[i][j]
if bit != 0 && bit != 1 {
panic("bit != 0 || bit != 1")
}
e_ := labelsBlob[idx*32+16*bit : idx*32+16*bit+16]
inputLabel := u.Decrypt_generic(e_, e.OTFixedK[idx], 0)
idx += 1
e.fixedLabels[i][j] = inputLabel
}
}
}
// GetCircuitBlobOffset returns the offset of the tt+ol blob for circuit cNo
// within the blob of all circuits, together with the tt and ol lengths
func (e *Evaluator) GetCircuitBlobOffset(cNo int) (int, int, int) {
offset := 0
var ttLen, olLen int
for i := 1; i < len(e.g.Cs); i++ {
ttLen = e.g.Cs[i].Circuit.AndGateCount * 64
olLen = e.g.Cs[i].Circuit.OutputSize * 32
if i == 5 {
ttLen = e.g.C5Count * ttLen
olLen = e.g.C5Count * olLen
}
if i == 6 {
ttLen = e.g.C6Count * ttLen
olLen = e.g.C6Count * olLen
}
if i == cNo {
break
}
offset += ttLen
offset += olLen
}
return offset, ttLen, olLen
}
func (e *Evaluator) SetBlob(blob []byte) {
offset := 0
for i := 1; i < len(e.g.Cs); i++ {
ttLen := e.g.Cs[i].Circuit.AndGateCount * 64
olLen := e.g.Cs[i].Circuit.OutputSize * 32
if i == 5 {
ttLen = e.g.C5Count * ttLen
olLen = e.g.C5Count * olLen
}
if i == 6 {
ttLen = e.g.C6Count * ttLen
olLen = e.g.C6Count * olLen
}
e.ttBlobs[i] = blob[offset : offset+ttLen]
offset += ttLen
e.olBlobs[i] = blob[offset : offset+olLen]
offset += olLen
}
if len(blob) != offset {
panic("len(blob) != offset")
}
}
func (e *Evaluator) GetNonFixedIndexes(cNo int) []byte {
c := &e.g.Cs[cNo]
inputBits := u.BytesToBits(c.Input)
nonFixedBits := inputBits[:c.NotaryNonFixedInputSize]
//get OT indexes for bits in the non-fixed inputs
idxArray, otArray := e.DoGetNonFixedIndexes(nonFixedBits)
e.nonFixedOTBits[cNo] = otArray
return idxArray
}
// return indexes from the OT pool as well as OTmap for each OT
func (e *Evaluator) DoGetNonFixedIndexes(bits []int) ([]byte, []OTmap) {
var idxArray []byte //flat array of 2-byte indexes
otArray := make([]OTmap, len(bits))
for i := 0; i < len(bits); i++ {
bit := bits[i]
if bit == 0 {
// take element from the end of slice and shrink slice
ot0 := e.OT0[len(e.OT0)-1]
e.OT0 = e.OT0[:len(e.OT0)-1]
idx := make([]byte, 2)
binary.BigEndian.PutUint16(idx, uint16(ot0.idx))
idxArray = append(idxArray, idx...)
otArray[i] = ot0
} else {
// take element from the end of slice and shrink slice
ot1 := e.OT1[len(e.OT1)-1]
e.OT1 = e.OT1[:len(e.OT1)-1]
idx := make([]byte, 2)
binary.BigEndian.PutUint16(idx, uint16(ot1.idx))
idxArray = append(idxArray, idx...)
otArray[i] = ot1
}
}
if len(e.OT0) < 1 || len(e.OT1) < 1 {
panic("len(e.OT0) < 1 || len(e.OT1) < 1")
}
return idxArray, otArray
}
func (e *Evaluator) Evaluate(cNo int, blob, ttBlob, olBlob []byte) []byte {
type batchType struct {
ga *[][]byte
tt *[]byte
}
c := &e.g.Cs[cNo]
repeatCount := []int{0, 1, 1, 1, 1, e.g.C5Count, 1, e.g.C6Count}[cNo]
ttLen := c.Circuit.AndGateCount * 64
if len(blob) != c.NotaryNonFixedInputSize*32+
c.ClientNonFixedInputSize*16+
c.ClientFixedInputSize*16*repeatCount {
panic("in SetLabels")
}
nonFixedEncLabelsBlob := blob[:c.NotaryNonFixedInputSize*32]
clientLabelsBlob := blob[c.NotaryNonFixedInputSize*32:]
inputBits := u.BytesToBits(c.Input)
nonFixedLabels := make([][]byte, c.NotaryNonFixedInputSize)
// we only need non-fixed inputs
for i := 0; i < c.NotaryNonFixedInputSize; i++ {
bit := inputBits[i]
e_ := nonFixedEncLabelsBlob[i*32+16*bit : i*32+16*bit+16]
k := e.nonFixedOTBits[cNo][i].K
label := u.Decrypt_generic(e_, k, 0)
nonFixedLabels[i] = label
}
allClientLabels := make([][]byte, c.ClientNonFixedInputSize+c.ClientFixedInputSize*repeatCount)
for i := 0; i < len(allClientLabels); i++ {
allClientLabels[i] = clientLabelsBlob[i*16 : i*16+16]
}
batch := make([]batchType, repeatCount)
for r := 0; r < repeatCount; r++ {
fixedLabels := e.fixedLabels[cNo]
clientLabels := allClientLabels
if cNo == 5 {
clientNonFixed := allClientLabels[0:c.ClientNonFixedInputSize]
start := c.ClientNonFixedInputSize + r*c.ClientFixedInputSize
clientFixed := allClientLabels[start : start+c.ClientFixedInputSize]
var concat [][]byte = nil
concat = append(concat, clientNonFixed...)
concat = append(concat, clientFixed...)
clientLabels = concat
} else if cNo == 6 {
fixedCommon := e.fixedLabels[cNo][0:160]
fixedUnique := e.fixedLabels[cNo][160+r*128 : 160+r*128+128]
var concat [][]byte = nil
concat = append(concat, fixedCommon...)
concat = append(concat, fixedUnique...)
fixedLabels = concat
clientNonFixed := allClientLabels[0:c.ClientNonFixedInputSize]
start := c.ClientNonFixedInputSize + r*c.ClientFixedInputSize
clientFixed := allClientLabels[start : start+c.ClientFixedInputSize]
var concat2 [][]byte = nil
concat2 = append(concat2, clientNonFixed...)
concat2 = append(concat2, clientFixed...)
clientLabels = concat2
}
// put all labels into garbling assignment
ga := make([][]byte, c.Circuit.WireCount)
offset := 0
copy(ga[offset:], nonFixedLabels)
offset += len(nonFixedLabels)
copy(ga[offset:], fixedLabels)
offset += len(fixedLabels)
copy(ga[offset:], clientLabels)
offset += len(clientLabels)
//tt := e.ttBlobs[cNo][r*ttLen : (r+1)*ttLen]
tt := ttBlob[r*ttLen : (r+1)*ttLen]
batch[r] = batchType{&ga, &tt}
}
batchOutputLabels := make([][][]byte, repeatCount)
for r := 0; r < repeatCount; r++ {
evaluate(c.Circuit, batch[r].ga, batch[r].tt)
outputLabels := (*batch[r].ga)[len((*batch[r].ga))-c.Circuit.OutputSize:]
batchOutputLabels[r] = outputLabels
}
var output []byte
for r := 0; r < repeatCount; r++ {
outputLabels := batchOutputLabels[r]
outBits := make([]int, c.Circuit.OutputSize)
outputSizeBytes := c.Circuit.OutputSize * 32
allOutputLabelsBlob := olBlob[r*outputSizeBytes : (r+1)*outputSizeBytes]
//allOutputLabelsBlob := e.olBlobs[cNo][r*outputSizeBytes : (r+1)*outputSizeBytes]
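// each output wire has a 32-byte entry in the output labels blob: the 0-label followed by the 1-label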
for i := 0; i < len(outBits); i++ {
out := outputLabels[i]
if bytes.Equal(out, allOutputLabelsBlob[i*32:i*32+16]) {
outBits[i] = 0
} else if bytes.Equal(out, allOutputLabelsBlob[i*32+16:i*32+32]) {
outBits[i] = 1
} else {
log.Println("incorrect output label")
}
}
outBytes := u.BitsToBytes(outBits)
output = append(output, outBytes...)
}
c.Output = output
resHash := sha256.Sum256(c.Output)
e.CommitHash[cNo] = resHash[:]
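// note: the salt below comes from math/rand; a cryptographically secure source
// such as u.GetRandom would be preferable for a commitment salt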
salt := make([]byte, 32)
rand.Read(salt)
e.Salt[cNo] = salt
saltedHash := sha256.Sum256(u.Concat(e.CommitHash[cNo], salt))
return saltedHash[:]
}
func evaluate(c *garbler.Circuit, garbledAssignment *[][]byte, tt *[]byte) {
andGateIdx := 0
// gate type XOR==0 AND==1 INV==2
for i := 0; i < len(c.Gates); i++ {
g := c.Gates[i]
if g.Operation == 1 {
evaluateAnd(g, garbledAssignment, tt, andGateIdx)
andGateIdx += 1
} else if g.Operation == 0 {
evaluateXor(g, garbledAssignment)
} else if g.Operation == 2 {
evaluateInv(g, garbledAssignment)
} else {
panic("Unknown gate")
}
}
}
func evaluateAnd(g garbler.Gate, ga *[][]byte, tt *[]byte, andGateIdx int) {
in1 := g.InputWires[0]
in2 := g.InputWires[1]
out := g.OutputWire
label1 := (*ga)[in1]
label2 := (*ga)[in2]
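// point-and-permute: the low bit of byte 15 of each label is its color bit;
// 2*color(label1)+color(label2) selects one of the four 16-byte ciphertexts in this gate's 64-byte table row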
point := 2*getPoint(label1) + getPoint(label2)
offset := andGateIdx*64 + 16*point
cipher := (*tt)[offset : offset+16]
(*ga)[out] = u.Decrypt(label1, label2, g.Id, cipher)
}
func evaluateXor(g garbler.Gate, ga *[][]byte) {
in1 := g.InputWires[0]
in2 := g.InputWires[1]
out := g.OutputWire
(*ga)[out] = xorBytes((*ga)[in1], (*ga)[in2])
}
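// evaluateInv passes the input label through unchanged; the garbler swapped the 0/1 labels
// for the output wire (see garbleInv), so the same value now encodes the inverted bit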
func evaluateInv(g garbler.Gate, ga *[][]byte) {
in1 := g.InputWires[0]
out := g.OutputWire
(*ga)[out] = (*ga)[in1]
}
func getPoint(arr []byte) int {
return int(arr[15]) & 0x01
}
func xorBytes(a, b []byte) []byte {
if len(a) != len(b) {
panic("len(a) != len(b)")
}
c := make([]byte, len(a))
for i := 0; i < len(a); i++ {
c[i] = a[i] ^ b[i]
}
return c
}
func isIntInArray(a int, arr []int) bool {
for _, b := range arr {
if b == a {
return true
}
}
return false
}


@@ -0,0 +1,399 @@
package garbled_pool
import (
"encoding/binary"
"io/ioutil"
"log"
"notary/garbler"
u "notary/utils"
"os"
"path/filepath"
"strconv"
"sync"
"time"
)
// gc describes a garbled circuit file
// id is the name of the file (for c5 this is the name of the dir)
// keyIdx is the index of the key in g.keys used to encrypt the gc
type gc struct {
id string
keyIdx int
}
type GarbledPool struct {
// gPDirPath is full path to the garbled pool dir
gPDirPath string
// AES-GCM keys to encrypt/authenticate garbled circuits
// we need to encrypt them in case we want to store them outside the enclave
// when the encryption key changes, older keys are kept because we still
// have gc on disk encrypted with old keys
// the monitor goroutine sets old keys which are no longer in use to nil, thus releasing
// the memory
keys [][]byte
// key is the current key in use. It is always keys[len(keys)-1]
key []byte
// encryptedSoFar shows how many bytes were encrypted using key
// NIST recommends re-keying after 64GB
encryptedSoFar int
// we change key after rekeyAfter bytes were encrypted
rekeyAfter int
// c5 subdirs' names are "50, 100, 150 ..." indicating how many garblings of
// a circuit there are in the dir
c5subdirs []string
// pool contains all non-c5 circuits
pool map[string][]gc
// poolc5 is like pool except that the map's key is one of g.c5subdirs and gc.id
// is a dir containing that many garblings
poolc5 map[string][]gc
// poolSize is how many pre-garblings of each circuit we want to have
poolSize int
Circuits []*garbler.Circuit
grb garbler.Garbler
// all circuits, count starts with 1 to avoid confusion
Cs []garbler.CData
// noSandbox is set to true when not running in a sandboxed environment
noSandbox bool
sync.Mutex
}
func (g *GarbledPool) Init(noSandbox bool) {
g.noSandbox = noSandbox
g.encryptedSoFar = 0
g.rekeyAfter = 1024 * 1024 * 1024 * 64 // 64GB
g.poolSize = 5
g.c5subdirs = []string{"50", "100", "150", "200", "300"}
g.pool = make(map[string][]gc, 5)
for _, v := range []string{"1", "2", "3", "4", "6"} {
g.pool[v] = []gc{}
}
g.poolc5 = make(map[string][]gc, len(g.c5subdirs))
for _, v := range g.c5subdirs {
g.poolc5[v] = []gc{}
}
g.Circuits = make([]*garbler.Circuit, 7)
for _, idx := range []int{1, 2, 3, 4, 5, 6} {
g.Circuits[idx] = g.grb.ParseCircuit(idx)
}
g.Cs = make([]garbler.CData, 7)
g.Cs[1].Init(512, 256, 512, 256, 512)
g.Cs[2].Init(512, 256, 640, 384, 512)
g.Cs[3].Init(832, 256, 1568, 768, 800)
g.Cs[4].Init(672, 416, 960, 480, 480)
g.Cs[5].Init(160, 0, 308, 160, 128)
g.Cs[6].Init(288, 0, 304, 160, 128)
curDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
panic(err)
}
g.gPDirPath = filepath.Join(filepath.Dir(curDir), "garbledPool")
if g.noSandbox {
g.key = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6}
} else {
g.key = u.GetRandom(16)
}
g.keys = append(g.keys, g.key)
if _, err = os.Stat(g.gPDirPath); os.IsNotExist(err) {
// the dir does not exist, create
err = os.Mkdir(g.gPDirPath, 0755)
if err != nil {
panic(err)
}
for _, idx := range []string{"1", "2", "3", "4", "5", "6"} {
err = os.Mkdir(filepath.Join(g.gPDirPath, "c"+idx), 0755)
if err != nil {
panic(err)
}
}
// for c5 we need different sizes
for _, idx := range g.c5subdirs {
err = os.Mkdir(filepath.Join(g.gPDirPath, "c5", idx), 0755)
if err != nil {
panic(err)
}
}
} else {
// the dir already exists
if !g.noSandbox {
panic("Error. Garbled pool must not exist.")
} else {
g.loadPoolFromDisk()
}
}
go g.monitor()
}
// returns Blobs struct for each circuit
func (g *GarbledPool) GetBlobs(c5Count int) []garbler.Blobs {
if c5Count > 1024 {
panic("c5Count > 1024")
}
allBlobs := make([]garbler.Blobs, len(g.Cs))
// fetch non-c5 blobs
for i := 1; i < len(allBlobs); i++ {
iStr := strconv.Itoa(i)
if i == 5 {
continue // we will deal with c5 below
}
if len(g.pool[iStr]) == 0 {
// give monitorPool some time to fill up the pool, then repeat
log.Println("pool is not ready, sleeping", iStr)
time.Sleep(time.Second)
i = i - 1
continue
} else {
g.Lock()
gc := g.pool[iStr][0]
g.pool[iStr] = g.pool[iStr][1:]
g.Unlock()
blob := g.fetchBlob(iStr, gc)
il, tt, ol := g.deBlob(blob)
allBlobs[i].Il = g.grb.SeparateLabels(il, g.Cs[i])
allBlobs[i].Tt = tt
allBlobs[i].Ol = ol
}
}
// fetch c5 blobs. Find out from which subdir to fetch
var dirToFetch string
for _, dirToFetch = range g.c5subdirs {
dirInt, _ := strconv.Atoi(dirToFetch)
if c5Count <= dirInt {
break
}
}
// loop until there is something to fetch
for {
if len(g.poolc5[dirToFetch]) == 0 {
// give monitorPool some time to fill up the pool, then repeat
log.Println("pool is not ready, sleeping", dirToFetch)
time.Sleep(time.Second)
continue
} else {
break
}
}
g.Lock()
gc := g.poolc5[dirToFetch][0]
g.poolc5[dirToFetch] = g.poolc5[dirToFetch][1:]
g.Unlock()
blobs := g.fetchC5Blobs(dirToFetch, gc, c5Count)
il, tt, ol := g.deBlob(blobs[0])
allBlobs[5].Il = g.grb.SeparateLabels(il, g.Cs[5])
allBlobs[5].Tt = tt
allBlobs[5].Ol = ol
// all circuits after 1st have only ClientFixed input labels
// because all other labels from 1st are reused
for i := 1; i < len(blobs); i++ {
il, tt, ol := g.deBlob(blobs[i])
allBlobs[5].Tt = append(allBlobs[5].Tt, tt...)
allBlobs[5].Ol = append(allBlobs[5].Ol, ol...)
allBlobs[5].Il.ClientFixed = append(allBlobs[5].Il.ClientFixed, il...)
}
return allBlobs
}
func (g *GarbledPool) loadPoolFromDisk() {
for _, idx := range []string{"1", "2", "3", "4", "6"} {
files, err := ioutil.ReadDir(filepath.Join(g.gPDirPath, "c"+idx))
if err != nil {
panic(err)
}
var gcs []gc
for _, file := range files {
gcs = append(gcs, gc{id: file.Name(), keyIdx: 0})
}
g.pool[idx] = gcs
}
for _, idx := range g.c5subdirs {
files, err := ioutil.ReadDir(filepath.Join(g.gPDirPath, "c5", idx))
if err != nil {
panic(err)
}
var gcs []gc
for _, file := range files {
gcs = append(gcs, gc{id: file.Name(), keyIdx: 0})
}
g.poolc5[idx] = gcs
}
log.Println(g.pool)
log.Println(g.poolc5)
}
// garbles a circuit and returns a blob
func (g *GarbledPool) garbleCircuit(cNo int) []byte {
tt, il, ol, _ := g.grb.OfflinePhase(g.grb.ParseCircuit(cNo), nil, nil, nil)
return g.makeBlob(il, tt, ol)
}
// garbles a batch of count c5 circuits and returns the garbled blobs
func (g *GarbledPool) garbleC5Circuits(count int) [][]byte {
var blobs [][]byte
tt, il, ol, R := g.grb.OfflinePhase(g.Circuits[5], nil, nil, nil)
labels := g.grb.SeparateLabels(il, g.Cs[5])
blobs = append(blobs, g.makeBlob(il, tt, ol))
// for all other circuits we only need ClientFixed input labels
ilReused := u.Concat(labels.NotaryFixed, labels.ClientNonFixed)
reuseIndexes := u.ExpandRange(0, 320)
for i := 2; i <= count; i++ {
tt, il, ol, _ := g.grb.OfflinePhase(g.Circuits[5], R, ilReused, reuseIndexes)
labels := g.grb.SeparateLabels(il, g.Cs[5])
blobs = append(blobs, g.makeBlob(labels.ClientFixed, tt, ol))
}
return blobs
}
// monitor replenishes the garbled pool when needed
// and re-keys the encryption key
func (g *GarbledPool) monitor() {
loopCount := 0
for {
loopCount += 1
// every 60 iterations of this loop, check whether stale keys are present and free the memory
if loopCount%60 == 0 {
g.Lock()
for i := 0; i < len(g.keys); i++ {
if g.keys[i] == nil || i == len(g.keys)-1 {
// key already released, or still the active key
continue
}
found := false
// check if index i is still in use by any gc in the pools
for _, gcs := range g.pool {
for _, v := range gcs {
if v.keyIdx == i {
found = true
}
}
}
for _, gcs := range g.poolc5 {
for _, v := range gcs {
if v.keyIdx == i {
found = true
}
}
}
if !found {
g.keys[i] = nil
}
}
g.Unlock()
}
// check if encryption key needs to be renewed
if g.encryptedSoFar > g.rekeyAfter {
g.key = u.GetRandom(16)
g.keys = append(g.keys, g.key)
g.encryptedSoFar = 0
}
// check if gc pool needs to be replenished
for k, v := range g.pool {
if len(v) < g.poolSize {
diff := g.poolSize - len(v)
for i := 0; i < diff; i++ {
//log.Println("in monitorPool adding c", k)
kInt, _ := strconv.Atoi(k)
blob := g.garbleCircuit(kInt)
randName := u.RandString()
g.saveBlob(filepath.Join(g.gPDirPath, "c"+k, randName), blob)
g.Lock()
g.pool[k] = append(g.pool[k], gc{id: randName, keyIdx: len(g.keys) - 1})
g.Unlock()
}
}
}
for k, v := range g.poolc5 {
if len(v) < g.poolSize {
diff := g.poolSize - len(v)
for i := 0; i < diff; i++ {
//log.Println("in monitorPool adding c5", k)
kInt, _ := strconv.Atoi(k)
blobs := g.garbleC5Circuits(kInt)
randName := u.RandString()
g.saveC5Blobs(filepath.Join(g.gPDirPath, "c5", k, randName), blobs)
g.Lock()
g.poolc5[k] = append(g.poolc5[k], gc{id: randName, keyIdx: len(g.keys) - 1})
g.Unlock()
}
}
}
time.Sleep(120 * time.Second)
}
}
// packs data into a blob with length prefixes
func (g *GarbledPool) makeBlob(il []byte, tt *[]byte, ol []byte) []byte {
ilSize := make([]byte, 4)
binary.BigEndian.PutUint32(ilSize, uint32(len(il)))
ttSize := make([]byte, 4)
binary.BigEndian.PutUint32(ttSize, uint32(len(*tt)))
olSize := make([]byte, 4)
binary.BigEndian.PutUint32(olSize, uint32(len(ol)))
return u.Concat(ilSize, il, ttSize, *tt, olSize, ol)
}
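// deBlob splits a blob produced by makeBlob back into input labels, truth table and output labels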
func (g *GarbledPool) deBlob(blob []byte) ([]byte, []byte, []byte) {
offset := 0
ilSize := int(binary.BigEndian.Uint32(blob[offset : offset+4]))
offset += 4
il := blob[offset : offset+ilSize]
offset += ilSize
ttSize := int(binary.BigEndian.Uint32(blob[offset : offset+4]))
offset += 4
tt := blob[offset : offset+ttSize]
offset += ttSize
olSize := int(binary.BigEndian.Uint32(blob[offset : offset+4]))
offset += 4
ol := blob[offset : offset+olSize]
return il, tt, ol
}
func (g *GarbledPool) saveBlob(path string, blob []byte) {
enc := u.AESGCMencrypt(g.key, blob)
g.encryptedSoFar += len(blob)
err := os.WriteFile(path, enc, 0644)
if err != nil {
panic(err)
}
}
// fetches the blob from disk and deletes it
func (g *GarbledPool) fetchBlob(circuitNo string, c gc) []byte {
fullPath := filepath.Join(g.gPDirPath, "c"+circuitNo, c.id)
data, err := os.ReadFile(fullPath)
if err != nil {
panic(err)
}
err = os.Remove(fullPath)
if err != nil {
panic(err)
}
return u.AESGCMdecrypt(g.keys[c.keyIdx], data)
}
// fetches count blobs from the gc's folder and then removes the folder
func (g *GarbledPool) fetchC5Blobs(subdir string, c gc, count int) [][]byte {
var rawBlobs [][]byte
dirPath := filepath.Join(g.gPDirPath, "c5", subdir, c.id)
for i := 0; i < count; i++ {
iStr := strconv.Itoa(i + 1)
data, err := os.ReadFile(filepath.Join(dirPath, iStr))
if err != nil {
panic(err)
}
rawBlobs = append(rawBlobs, u.AESGCMdecrypt(g.keys[c.keyIdx], data))
}
err := os.RemoveAll(dirPath)
if err != nil {
panic(err)
}
return rawBlobs
}
func (g *GarbledPool) saveC5Blobs(path string, blobs [][]byte) {
err := os.Mkdir(path, 0755)
if err != nil {
panic(err)
}
for i := 0; i < len(blobs); i++ {
fileName := strconv.Itoa(i + 1)
enc := u.AESGCMencrypt(g.key, blobs[i])
g.encryptedSoFar += len(blobs[i])
err := os.WriteFile(filepath.Join(path, fileName), enc, 0644)
if err != nil {
panic(err)
}
}
}

441
src/garbler/garbler.go Normal file

@@ -0,0 +1,441 @@
package garbler
import (
"crypto/rand"
"encoding/binary"
"io/ioutil"
"log"
"math/big"
u "notary/utils"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/bwesterb/go-ristretto"
)
type Garbler struct {
P1_vd []byte // client verify data
Server_verify_data []byte // server verify data
Server_iv, Client_iv []byte
R, One, Zero *big.Int // will be set in Preprocess, used by ghash
// the total amount of c5 circuits for this session
C5Count int
// the total amount of c6 circuits for this session
C6Count int
SwkMaskedByClient []byte
Ot_a *ristretto.Scalar
A *ristretto.Point
AllNonFixedOT [][][]byte
// this is the mask that we apply before sending cwk masked twice to the client
// this is done so that the client could change the mask
cwkSecondMask []byte
CwkMaskedByClient []byte //this is notary's input to c5
// all circuits, count starts with 1 to avoid confusion
Cs []CData
}
type CData struct {
OT []OTstruct // parsed OT
Ol []byte // output labels
Il Labels // input labels
Tt []byte // truth table
NotaryInputSize int // in bits
NotaryNonFixedInputSize int
NotaryFixedInputSize int
ClientInputSize int // in bits
ClientNonFixedInputSize int
ClientFixedInputSize int
OutputSize int // in bits
Output []byte // garbler+evaluator output of circuit
Input []byte // garbler's input for this circuit
PmsOuterHash []byte // only for c1
MsOuterHash []byte // output from c2
Masks [][]byte
Circuit *Circuit
TagSharesBlob []byte
FixedInputs []int // array of 0 and 1 for evaluator's fixed inputs
}
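// Init sets the circuit's sizes in bits: notary input, notary non-fixed input, client input,
// client non-fixed input and output; the fixed input sizes are derived as total minus non-fixed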
func (p *CData) Init(nis, nnfis, cis, cnfis, os int) {
p.NotaryInputSize = nis
p.NotaryNonFixedInputSize = nnfis
p.NotaryFixedInputSize = p.NotaryInputSize - p.NotaryNonFixedInputSize
p.ClientInputSize = cis
p.ClientNonFixedInputSize = cnfis
p.ClientFixedInputSize = p.ClientInputSize - p.ClientNonFixedInputSize
p.OutputSize = os
}
type OTstruct struct {
Ot_a *ristretto.Scalar
A *ristretto.Point
Ot_b *ristretto.Scalar
B *ristretto.Point
AplusB *ristretto.Point
K *ristretto.Point
M0 []byte
M1 []byte
C int
}
type Gate struct {
Id uint32
Operation uint8
InputWires []uint32
OutputWire uint32
}
type Circuit struct {
WireCount int
GarblerInputSize int
EvaluatorInputSize int
OutputSize int
AndGateCount int
Gates []Gate
}
type Labels struct {
NotaryNonFixed []byte
NotaryFixed []byte
ClientNonFixed []byte
ClientFixed []byte
}
type Blobs struct {
Il Labels // input labels
Tt []byte // truth table
Ol []byte // output labels
R []byte
}
func (g *Garbler) Init(ilBlobs []Labels, circuits []*Circuit) {
g.Cs = make([]CData, 7)
g.Cs[1].Init(512, 256, 512, 256, 512)
g.Cs[2].Init(512, 256, 640, 384, 512)
g.Cs[3].Init(832, 256, 1568, 768, 800)
g.Cs[4].Init(672, 416, 960, 480, 480)
g.Cs[5].Init(160, 0, 308, 160, 128)
g.Cs[6].Init(288, 0, 304, 160, 128)
for i := 1; i < len(g.Cs); i++ {
c := &g.Cs[i]
c.Il = ilBlobs[i]
c.Circuit = circuits[i]
if i == 1 {
c.Masks = make([][]byte, 2)
c.Masks[1] = u.GetRandom(32)
}
if i == 2 {
c.Masks = make([][]byte, 2)
c.Masks[1] = u.GetRandom(32)
}
if i == 3 {
c.Masks = make([][]byte, 7)
c.Masks[1] = u.GetRandom(16)
c.Masks[2] = u.GetRandom(16)
c.Masks[3] = u.GetRandom(4)
c.Masks[4] = u.GetRandom(4)
c.Masks[5] = u.GetRandom(16)
c.Masks[6] = u.GetRandom(16)
}
if i == 4 {
c.Masks = make([][]byte, 3)
c.Masks[1] = u.GetRandom(16)
c.Masks[2] = u.GetRandom(16)
}
if i == 6 {
c.Masks = make([][]byte, g.C6Count+1)
for j := 1; j < g.C6Count+1; j++ {
c.Masks[j] = u.GetRandom(16)
}
}
}
}
// PrepareA is done before Init so that we could send A to the client as soon as possible
func (g *Garbler) PrepareA() {
g.Ot_a = new(ristretto.Scalar).Rand()
g.A = new(ristretto.Point).ScalarMultBase(g.Ot_a)
}
func (g *Garbler) Ot_GetA() []byte {
return g.A.Bytes()
}
// internal method
func (g *Garbler) separateLabels(blob []byte, cNo int) Labels {
c := g.Cs[cNo]
return g.SeparateLabels(blob, c)
}
// separate one continuous blob of input labels into 4 blobs as in Labels struct
func (g *Garbler) SeparateLabels(blob []byte, c CData) Labels {
if len(blob) != (c.NotaryInputSize+c.ClientInputSize)*32 {
panic("in separateLabels")
}
var labels Labels
offset := 0
labels.NotaryNonFixed = make([]byte, c.NotaryNonFixedInputSize*32)
copy(labels.NotaryNonFixed, blob[offset:offset+c.NotaryNonFixedInputSize*32])
offset += c.NotaryNonFixedInputSize * 32
labels.NotaryFixed = make([]byte, c.NotaryFixedInputSize*32)
copy(labels.NotaryFixed, blob[offset:offset+c.NotaryFixedInputSize*32])
offset += c.NotaryFixedInputSize * 32
labels.ClientNonFixed = make([]byte, c.ClientNonFixedInputSize*32)
copy(labels.ClientNonFixed, blob[offset:offset+c.ClientNonFixedInputSize*32])
offset += c.ClientNonFixedInputSize * 32
labels.ClientFixed = make([]byte, c.ClientFixedInputSize*32)
copy(labels.ClientFixed, blob[offset:offset+c.ClientFixedInputSize*32])
offset += c.ClientFixedInputSize * 32
return labels
}
func (g *Garbler) C_getEncNonFixedLabels(cNo int, idxBlob []byte) []byte {
c := &g.Cs[cNo]
if len(idxBlob) != 2*c.ClientNonFixedInputSize {
log.Println(cNo)
panic("len(idxArr)!= 2*256")
}
var encLabels []byte
for i := 0; i < c.ClientNonFixedInputSize; i++ {
idx := int(binary.BigEndian.Uint16(idxBlob[i*2 : i*2+2]))
k0 := g.AllNonFixedOT[idx][0]
k1 := g.AllNonFixedOT[idx][1]
m0 := c.Il.ClientNonFixed[i*32 : i*32+16]
m1 := c.Il.ClientNonFixed[i*32+16 : i*32+32]
e0 := u.Encrypt_generic(m0, k0, 0)
e1 := u.Encrypt_generic(m1, k1, 0)
encLabels = append(encLabels, e0...)
encLabels = append(encLabels, e1...)
}
return encLabels
}
// C_getInputLabels returns notary's input labels for the circuit
func (g *Garbler) C_getInputLabels(cNo int) []byte {
c := &g.Cs[cNo]
inputBytes := c.Input
if (cNo != 6 && len(inputBytes)*8 != c.NotaryInputSize) ||
(cNo == 6 && len(inputBytes)*8 != 160+128*g.C6Count) {
log.Println("inputBytes", inputBytes)
log.Println("len(inputBytes)", len(inputBytes))
panic("len(inputBytes)*8 != c.NotaryInputSiz")
}
input := new(big.Int).SetBytes(inputBytes)
inputLabelBlob := u.Concat(c.Il.NotaryNonFixed, c.Il.NotaryFixed)
var inputLabels []byte
for i := 0; i < len(inputBytes)*8; i++ {
bit := int(input.Bit(i))
label := inputLabelBlob[i*32+bit*16 : i*32+bit*16+16]
inputLabels = append(inputLabels, label...)
}
return inputLabels
}
func (g *Garbler) ParseCircuit(cNo_ int) *Circuit {
cNo := strconv.Itoa(cNo_)
curDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
panic(err)
}
baseDir := filepath.Dir(curDir)
jiggDir := filepath.Join(baseDir, "circuits")
cBytes, err := ioutil.ReadFile(filepath.Join(jiggDir, "c"+cNo+".out"))
if err != nil {
panic(err)
}
text := string(cBytes)
lines := strings.Split(text, "\n")
c := Circuit{}
wireCount, _ := strconv.ParseInt(strings.Split(lines[0], " ")[1], 10, 32)
gi, _ := strconv.ParseInt(strings.Split(lines[1], " ")[1], 10, 32)
ei, _ := strconv.ParseInt(strings.Split(lines[1], " ")[2], 10, 32)
out, _ := strconv.ParseInt(strings.Split(lines[2], " ")[1], 10, 32)
c.WireCount = int(wireCount)
c.GarblerInputSize = int(gi)
c.EvaluatorInputSize = int(ei)
c.OutputSize = int(out)
gates := make([]Gate, len(lines)-3)
andGateCount := 0
opBytes := map[string]byte{"XOR": 0, "AND": 1, "INV": 2}
for i, line := range lines[3:] {
items := strings.Split(line, " ")
var g Gate
g.Operation = opBytes[items[len(items)-1]]
g.Id = uint32(i)
if g.Operation == 0 || g.Operation == 1 {
inp1, _ := strconv.ParseInt(items[2], 10, 32)
inp2, _ := strconv.ParseInt(items[3], 10, 32)
out, _ := strconv.ParseInt(items[4], 10, 32)
g.InputWires = []uint32{uint32(inp1), uint32(inp2)}
g.OutputWire = uint32(out)
if g.Operation == 1 {
andGateCount += 1
}
} else { // INV gate
inp1, _ := strconv.ParseInt(items[2], 10, 32)
out, _ := strconv.ParseInt(items[3], 10, 32)
g.InputWires = []uint32{uint32(inp1)}
g.OutputWire = uint32(out)
}
gates[i] = g
}
c.Gates = gates
c.AndGateCount = int(andGateCount)
return &c
}
// garble a circuit, optionally reusing 1) the R value and 2) input labels at the given indexes
func (g *Garbler) OfflinePhase(c *Circuit, rReused []byte, inputsReused []byte, reuseIndexes []int) (*[]byte, []byte, []byte, []byte) {
var R []byte
if rReused != nil {
R = rReused
} else {
R = make([]byte, 16)
rand.Read(R)
//R = u.GetRandom(16)
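// force the color bit of R to 1 so that the two labels of every wire get opposite color bits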
R[15] = R[15] | 0x01
}
if len(reuseIndexes) != len(inputsReused)/32 {
panic("len(reuseIndexes) != len(ilReused)/32")
}
inputCount := c.EvaluatorInputSize + c.GarblerInputSize
//garbled assignment
ga := make([][][]byte, c.WireCount)
newInputs := generateInputLabels(inputCount-len(reuseIndexes), R)
// set both new and reused labels into ga
reusedCount := 0 //how many reused inputs were already put into ga
newInputsCount := 0 //how many new inputs were already put into ga
for i := 0; i < inputCount; i++ {
if u.Contains(i, reuseIndexes) {
ga[i] = [][]byte{
inputsReused[reusedCount*32 : reusedCount*32+16],
inputsReused[reusedCount*32+16 : reusedCount*32+32]}
reusedCount += 1
} else {
ga[i] = (*newInputs)[newInputsCount]
newInputsCount += 1
}
}
andGateCount := c.AndGateCount
//log.Println("andGateCount is", andGateCount)
truthTable := make([]byte, andGateCount*64)
garble(c, &ga, R, &truthTable)
if len(ga) != c.WireCount {
panic("len(*ga) != c.wireCount")
}
var inputLabels []byte
for i := 0; i < inputCount; i++ {
inputLabels = append(inputLabels, ga[i][0]...)
inputLabels = append(inputLabels, ga[i][1]...)
}
var outputLabels []byte
for i := 0; i < c.OutputSize; i++ {
outputLabels = append(outputLabels, ga[c.WireCount-c.OutputSize+i][0]...)
outputLabels = append(outputLabels, ga[c.WireCount-c.OutputSize+i][1]...)
}
return &truthTable, inputLabels, outputLabels, R
}
func generateInputLabels(count int, R []byte) *[][][]byte {
newLabels := make([][][]byte, count)
for i := 0; i < count; i++ {
label1 := make([]byte, 16)
rand.Read(label1)
label2 := u.XorBytes(label1, R)
newLabels[i] = [][]byte{label1, label2}
}
return &newLabels
}
func garble(c *Circuit, garbledAssignment *[][][]byte, R []byte, truthTable *[]byte) {
var andGateIdx int = 0
// gate type XOR==0 AND==1 INV==2
for i := 0; i < len(c.Gates); i++ {
gate := c.Gates[i]
if gate.Operation == 1 {
tt := garbleAnd(gate, R, garbledAssignment)
copy((*truthTable)[andGateIdx*64:andGateIdx*64+64], tt[0:64])
andGateIdx += 1
} else if gate.Operation == 0 {
garbleXor(gate, R, garbledAssignment)
} else if gate.Operation == 2 {
garbleInv(gate, garbledAssignment)
}
}
}
func getPoint(arr []byte) int {
return int(arr[15]) & 0x01
}
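// garbleAnd builds the classical 4-row garbled table for an AND gate, ordering the rows
// by the color bits of the input labels (point-and-permute); no row reduction is applied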
func garbleAnd(g Gate, R []byte, ga *[][][]byte) []byte {
in1 := g.InputWires[0]
in2 := g.InputWires[1]
out := g.OutputWire
randomLabel := make([]byte, 16)
rand.Read(randomLabel)
(*ga)[out] = [][]byte{randomLabel, u.XorBytes(randomLabel, R)}
v0 := u.Encrypt((*ga)[in1][0], (*ga)[in2][0], g.Id, (*ga)[out][0])
v1 := u.Encrypt((*ga)[in1][0], (*ga)[in2][1], g.Id, (*ga)[out][0])
v2 := u.Encrypt((*ga)[in1][1], (*ga)[in2][0], g.Id, (*ga)[out][0])
v3 := u.Encrypt((*ga)[in1][1], (*ga)[in2][1], g.Id, (*ga)[out][1])
p0 := 2*getPoint((*ga)[in1][0]) + getPoint((*ga)[in2][0])
p1 := 2*getPoint((*ga)[in1][0]) + getPoint((*ga)[in2][1])
p2 := 2*getPoint((*ga)[in1][1]) + getPoint((*ga)[in2][0])
p3 := 2*getPoint((*ga)[in1][1]) + getPoint((*ga)[in2][1])
truthTable := make([][]byte, 4)
truthTable[p0] = v0
truthTable[p1] = v1
truthTable[p2] = v2
truthTable[p3] = v3
var flatTable []byte
for i := 0; i < 4; i++ {
flatTable = append(flatTable, truthTable[i]...)
}
return flatTable
}
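// garbleXor implements free-XOR: each output label is the XOR of the corresponding
// input labels, with the 1-label offset from the 0-label by the global R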
func garbleXor(g Gate, R []byte, ga *[][][]byte) {
in1 := g.InputWires[0]
in2 := g.InputWires[1]
out := g.OutputWire
out1 := u.XorBytes((*ga)[in1][0], (*ga)[in2][0])
out2 := u.XorBytes(u.XorBytes((*ga)[in1][1], (*ga)[in2][1]), R)
(*ga)[out] = [][]byte{out1, out2}
}
func garbleInv(g Gate, ga *[][][]byte) {
in1 := g.InputWires[0]
out := g.OutputWire
(*ga)[out] = [][]byte{(*ga)[in1][1], (*ga)[in1][0]}
}


@@ -0,0 +1,90 @@
package key_manager
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/binary"
"log"
u "notary/utils"
"os"
"path/filepath"
"sync"
"time"
)
type KeyManager struct {
sync.Mutex
// Blob contains validFrom|validUntil|pubkey|signature
// the client will verify the signature (made with the masterKey)
Blob []byte
// PrivKey is the ephemeral key used to sign a session. Also used
// in ECDH with the client to derive symmetric keys to encrypt the communication
PrivKey *ecdsa.PrivateKey
// masterKey is used to sign ephemeral keys
masterKey *ecdsa.PrivateKey
// MasterPubKeyPEM is masterKey public key in PEM format
MasterPubKeyPEM []byte
// validMins is how many minutes an ephemeral key is valid for signing
validMins int
}
func (k *KeyManager) Init() {
k.generateMasterKey()
go k.rotateEphemeralKeys()
}
func (k *KeyManager) generateMasterKey() {
// masterKey is only used to sign ephemeral keys
var err error
k.masterKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
log.Fatalln("Could not create keys:", err)
}
k.MasterPubKeyPEM = u.ECDSAPubkeyToPEM(&k.masterKey.PublicKey)
curDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
panic(err)
}
err = os.WriteFile(filepath.Join(curDir, "public.key"), k.MasterPubKeyPEM, 0644)
if err != nil {
panic(err)
}
}
// generate a new ephemeral key after a certain interval
// sign it with the master key
func (k *KeyManager) rotateEphemeralKeys() {
k.validMins = 20
nextKeyRotationTime := time.Unix(0, 0)
for {
time.Sleep(time.Second * 1)
now := time.Now()
if nextKeyRotationTime.Sub(now) > time.Minute*2 {
continue
}
// to protect against side-channel attacks, we don't want the attacker to know
// exactly when the next key change happens, so we pick a random interval
randInt := u.RandInt(k.validMins/2*60, k.validMins*60)
nextKeyRotationTime = now.Add(time.Second * time.Duration(randInt))
// change the ephemeral key
log.Println("changing ephemeral key")
validFrom := make([]byte, 4)
binary.BigEndian.PutUint32(validFrom, uint32(now.Unix()))
validUntil := make([]byte, 4)
untilTime := now.Add(time.Second * time.Duration(k.validMins*60))
binary.BigEndian.PutUint32(validUntil, uint32(untilTime.Unix()))
newKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
log.Fatalln("Could not create keys:", err)
}
pubkey := u.Concat([]byte{0x04}, u.To32Bytes(newKey.PublicKey.X), u.To32Bytes(newKey.PublicKey.Y))
signature := u.ECDSASign(k.masterKey, validFrom, validUntil, pubkey)
blob := u.Concat(validFrom, validUntil, pubkey, signature)
k.Lock()
k.Blob = blob
k.PrivKey = newKey
k.Unlock()
}
}

518
src/notary.go Normal file

@@ -0,0 +1,518 @@
// ./notary & sleep 5 && curl --data-binary '@URLFetcherDoc' 127.0.0.1:10012/setURLFetcherDoc && fg
package main
import (
"context"
"flag"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"sync"
"net/http"
_ "net/http/pprof"
"notary/garbled_pool"
"notary/key_manager"
"notary/session"
"time"
)
var sm *SessionManager
var gp *garbled_pool.GarbledPool
var km *key_manager.KeyManager
// URLFetcherDoc is the document returned by the deterministic URLFetcher enclave
// https://github.com/tlsnotary/URLFetcher
// It contains AWS HTTP API requests with Amazon's attestation
var URLFetcherDoc []byte
type smItem struct {
session *session.Session
lastSeen int64 // timestamp of last activity
creationTime int64 // timestamp
}
type SessionManager struct {
// string looks like 123.123.44.44:23409
sessions map[string]*smItem
sync.Mutex
}
func (sm *SessionManager) Init() {
sm.sessions = make(map[string]*smItem)
go sm.monitorSessions()
}
func (sm *SessionManager) addSession(key string) *session.Session {
if _, ok := sm.sessions[key]; ok {
log.Println(key)
panic("session already exists")
}
s := new(session.Session)
now := int64(time.Now().UnixNano() / 1e9)
sm.Lock()
defer sm.Unlock()
sm.sessions[key] = &smItem{s, now, now}
return s
}
// get an already-existing session associated with the key
// and update the last-seen time
func (sm *SessionManager) getSession(key string) *session.Session {
val, ok := sm.sessions[key]
if !ok {
log.Println(key)
panic("session does not exist")
}
val.lastSeen = int64(time.Now().UnixNano() / 1e9)
return val.session
}
func (sm *SessionManager) removeSession(key string) {
s, ok := sm.sessions[key]
if !ok {
log.Println(key)
panic("cannot remove: session does not exist")
}
err := os.RemoveAll(s.session.StorageDir)
if err != nil {
panic(err)
}
sm.Lock()
defer sm.Unlock()
delete(sm.sessions, key)
}
// remove sessions which have been inactive for 120 sec or were created more than 300 sec ago
func (sm *SessionManager) monitorSessions() {
for {
time.Sleep(time.Second)
now := int64(time.Now().UnixNano() / 1e9)
for k, v := range sm.sessions {
if now-v.lastSeen > 120 || now-v.creationTime > 300 {
log.Println("deleting session from monitorSessions")
sm.removeSession(k)
}
}
}
}
// read request body
func readBody(req *http.Request) []byte {
defer req.Body.Close()
log.Println("begin ReadAll")
body, err := ioutil.ReadAll(req.Body)
log.Println("finished ReadAll ", len(body))
if err != nil {
panic("can't read request body")
}
return body
}
func writeResponse(resp []byte, w http.ResponseWriter) {
//w.Header().Set("Connection", "close")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Content-Type", "application/octet-stream")
w.Write(resp)
}
func getURLFetcherDoc(w http.ResponseWriter, req *http.Request) {
log.Println("in getURLFetcherDoc", req.RemoteAddr)
writeResponse(URLFetcherDoc, w)
}
func ot_AllB(w http.ResponseWriter, req *http.Request) {
log.Println("in ot_AllB", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).OT_AllB(body)
writeResponse(out, w)
}
func ot_encLabelsForEval(w http.ResponseWriter, req *http.Request) {
log.Println("in ot_encLabelsForEval", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).OT_encLabelsForEval(body)
writeResponse(out, w)
}
func step1(w http.ResponseWriter, req *http.Request) {
log.Println("in step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Step1(body)
writeResponse(out, w)
}
func step2(w http.ResponseWriter, req *http.Request) {
log.Println("in step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Step2(body)
writeResponse(out, w)
}
func step3(w http.ResponseWriter, req *http.Request) {
log.Println("in step3", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Step3(body)
writeResponse(out, w)
}
func step4(w http.ResponseWriter, req *http.Request) {
log.Println("in step4", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Step4(body)
writeResponse(out, w)
}
func preInit(w http.ResponseWriter, req *http.Request) {
log.Println("in preInit", req.RemoteAddr)
body := readBody(req)
s := sm.addSession(string(req.URL.RawQuery))
// copying data so that it doesn't change from under us if
// ephemeral key happens to change while this session is running
km.Lock()
blob := make([]byte, len(km.Blob))
copy(blob, km.Blob)
key := *km.PrivKey
km.Unlock()
out := s.PreInit(body, blob, key)
writeResponse(out, w)
}
func initNow(w http.ResponseWriter, req *http.Request) {
log.Println("in initNow", req.RemoteAddr)
out := sm.getSession(string(req.URL.RawQuery)).Init(gp)
writeResponse(out, w)
}
func getBlobChunk(w http.ResponseWriter, req *http.Request) {
log.Println("in getBlobChunk", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).GetBlobChunk(body)
writeResponse(out, w)
}
func setBlobChunk(w http.ResponseWriter, req *http.Request) {
log.Println("in setBlobChunk", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).SetBlobChunk(body)
writeResponse(out, w)
}
func c1_step1(w http.ResponseWriter, req *http.Request) {
log.Println("in c1_step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C1_step1(body)
writeResponse(out, w)
}
func c1_step2(w http.ResponseWriter, req *http.Request) {
log.Println("in c1_step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C1_step2(body)
writeResponse(out, w)
}
func c1_step3(w http.ResponseWriter, req *http.Request) {
log.Println("in c1_step3", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C1_step3(body)
writeResponse(out, w)
}
func c1_step4(w http.ResponseWriter, req *http.Request) {
log.Println("in c1_step4", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C1_step4(body)
writeResponse(out, w)
}
func c1_step5(w http.ResponseWriter, req *http.Request) {
log.Println("in c1_step5", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C1_step5(body)
writeResponse(out, w)
}
func c2_step1(w http.ResponseWriter, req *http.Request) {
log.Println("in c2_step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C2_step1(body)
writeResponse(out, w)
}
func c2_step2(w http.ResponseWriter, req *http.Request) {
log.Println("in c2_step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C2_step2(body)
writeResponse(out, w)
}
func c2_step3(w http.ResponseWriter, req *http.Request) {
log.Println("in c2_step3", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C2_step3(body)
writeResponse(out, w)
}
func c2_step4(w http.ResponseWriter, req *http.Request) {
log.Println("in c2_step4", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C2_step4(body)
writeResponse(out, w)
}
func c3_step1(w http.ResponseWriter, req *http.Request) {
log.Println("in c3_step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C3_step1(body)
writeResponse(out, w)
}
func c3_step2(w http.ResponseWriter, req *http.Request) {
log.Println("in c3_step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C3_step2(body)
writeResponse(out, w)
}
func c3_step3(w http.ResponseWriter, req *http.Request) {
log.Println("in c3_step3", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C3_step3(body)
writeResponse(out, w)
}
func c4_pre1(w http.ResponseWriter, req *http.Request) {
log.Println("in c4_pre1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C4_pre1(body)
writeResponse(out, w)
}
func c4_step1(w http.ResponseWriter, req *http.Request) {
log.Println("in c4_step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C4_step1(body)
writeResponse(out, w)
}
func c4_step2(w http.ResponseWriter, req *http.Request) {
log.Println("in c4_step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C4_step2(body)
writeResponse(out, w)
}
func c4_step3(w http.ResponseWriter, req *http.Request) {
log.Println("in c4_step3", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C4_step3(body)
writeResponse(out, w)
}
func c5_step1(w http.ResponseWriter, req *http.Request) {
log.Println("in c5_step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C5_step1(body)
writeResponse(out, w)
}
func c5_step2(w http.ResponseWriter, req *http.Request) {
log.Println("in c5_step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C5_step2(body)
writeResponse(out, w)
}
func c6_step1(w http.ResponseWriter, req *http.Request) {
log.Println("in c6_step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C6_step1(body)
writeResponse(out, w)
}
func c6_step2(w http.ResponseWriter, req *http.Request) {
log.Println("in c6_step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).C6_step2(body)
writeResponse(out, w)
}
func checkC6Commit(w http.ResponseWriter, req *http.Request) {
log.Println("in checkC6Commit", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).CheckC6Commit(body)
writeResponse(out, w)
}
func ghash_step1(w http.ResponseWriter, req *http.Request) {
log.Println("in ghash_step1", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Ghash_step1(body)
writeResponse(out, w)
}
func ghash_step2(w http.ResponseWriter, req *http.Request) {
log.Println("in ghash_step2", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Ghash_step2(body)
writeResponse(out, w)
}
func ghash_step3(w http.ResponseWriter, req *http.Request) {
log.Println("in ghash_step3", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Ghash_step3(body)
writeResponse(out, w)
}
func ghash_step4(w http.ResponseWriter, req *http.Request) {
log.Println("in ghash_step4", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Ghash_step4(body)
writeResponse(out, w)
}
func ghash_step5(w http.ResponseWriter, req *http.Request) {
log.Println("in ghash_step5", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Ghash_step5(body)
writeResponse(out, w)
}
func ghash_step6(w http.ResponseWriter, req *http.Request) {
log.Println("in ghash_step6", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).Ghash_step6(body)
writeResponse(out, w)
}
func commitHash(w http.ResponseWriter, req *http.Request) {
log.Println("in commitHash", req.RemoteAddr)
body := readBody(req)
out := sm.getSession(string(req.URL.RawQuery)).CommitHash(body)
writeResponse(out, w)
sm.removeSession(string(req.URL.RawQuery))
}
// when notary starts we expect the admin to upload a URLFetcher document
func awaitURLFetcherDoc() {
serverMux := http.NewServeMux()
srv := &http.Server{Addr: ":10012", Handler: serverMux}
signal := make(chan struct{})
serverMux.HandleFunc("/setURLFetcherDoc", func(w http.ResponseWriter, req *http.Request) {
URLFetcherDoc = readBody(req)
log.Println("got URLFetcher doc", string(URLFetcherDoc[:100]))
close(signal)
})
// start a server and wait for signal from HandleFunc
go func() {
srv.ListenAndServe()
}()
<-signal
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
srv.Shutdown(ctx)
}
// getPubKey sends notary's public key to the client
// only useful when running as a regular non-sandboxed server
func getPubKey(w http.ResponseWriter, req *http.Request) {
log.Println("in getPubKey", req.RemoteAddr)
writeResponse(km.MasterPubKeyPEM, w)
}
func assembleCircuits() {
curDir, _ := filepath.Abs(filepath.Dir(os.Args[0]))
baseDir := filepath.Dir(curDir)
circuitsDir := filepath.Join(baseDir, "circuits")
if _, err := os.Stat(filepath.Join(circuitsDir, "c1.out")); os.IsNotExist(err) {
cmd := exec.Command("node", "assemble.js")
cmd.Dir = circuitsDir
log.Println("Assembling circuits. This will take a few seconds...")
if err := cmd.Run(); err != nil {
log.Println("Error. Could not run: node assemble.js. Please make sure that node is installed on your system.")
os.Exit(1)
}
}
}
func main() {
// defer profile.Start(profile.MemProfile).Stop()
// go func() {
// http.ListenAndServe(":8080", nil)
// }()
noSandbox := flag.Bool("no-sandbox", false, "Must be set when not running in a sandboxed environment.")
flag.Parse()
log.Println("noSandbox", *noSandbox)
km = new(key_manager.KeyManager)
km.Init()
assembleCircuits()
sm = new(SessionManager)
sm.Init()
gp = new(garbled_pool.GarbledPool)
gp.Init(*noSandbox)
if !*noSandbox {
http.HandleFunc("/getURLFetcherDoc", getURLFetcherDoc)
go awaitURLFetcherDoc()
}
// although getPubKey is only used in noSandbox cases, it still
// can be useful when debugging sandboxed notary
http.HandleFunc("/getPubKey", getPubKey)
http.HandleFunc("/preInit", preInit)
http.HandleFunc("/init", initNow)
http.HandleFunc("/getBlobChunk", getBlobChunk)
http.HandleFunc("/setBlobChunk", setBlobChunk)
http.HandleFunc("/ot_AllB", ot_AllB)
http.HandleFunc("/ot_encLabelsForEval", ot_encLabelsForEval)
http.HandleFunc("/step1", step1)
http.HandleFunc("/step2", step2)
http.HandleFunc("/step3", step3)
http.HandleFunc("/step4", step4)
http.HandleFunc("/c1_step1", c1_step1)
http.HandleFunc("/c1_step2", c1_step2)
http.HandleFunc("/c1_step3", c1_step3)
http.HandleFunc("/c1_step4", c1_step4)
http.HandleFunc("/c1_step5", c1_step5)
http.HandleFunc("/c2_step1", c2_step1)
http.HandleFunc("/c2_step2", c2_step2)
http.HandleFunc("/c2_step3", c2_step3)
http.HandleFunc("/c2_step4", c2_step4)
http.HandleFunc("/c3_step1", c3_step1)
http.HandleFunc("/c3_step2", c3_step2)
http.HandleFunc("/c3_step3", c3_step3)
http.HandleFunc("/c4_pre1", c4_pre1)
http.HandleFunc("/c4_step1", c4_step1)
http.HandleFunc("/c4_step2", c4_step2)
http.HandleFunc("/c4_step3", c4_step3)
http.HandleFunc("/c5_step1", c5_step1)
http.HandleFunc("/c5_step2", c5_step2)
http.HandleFunc("/c6_step1", c6_step1)
http.HandleFunc("/c6_step2", c6_step2)
http.HandleFunc("/checkC6Commit", checkC6Commit)
http.HandleFunc("/ghash_step1", ghash_step1)
http.HandleFunc("/ghash_step2", ghash_step2)
http.HandleFunc("/ghash_step3", ghash_step3)
http.HandleFunc("/ghash_step4", ghash_step4)
http.HandleFunc("/ghash_step5", ghash_step5)
http.HandleFunc("/ghash_step6", ghash_step6)
http.HandleFunc("/commitHash", commitHash)
http.ListenAndServe("0.0.0.0:10011", nil)
}

1338
src/session/session.go Normal file

File diff suppressed because it is too large

617
src/utils/utils.go Normal file

@@ -0,0 +1,617 @@
package utils
import (
"crypto/aes"
"crypto/cipher"
"crypto/ecdsa"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"encoding"
"encoding/binary"
"encoding/hex"
"encoding/pem"
"fmt"
"io"
"math"
"math/big"
mathrand "math/rand"
"time"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/nacl/secretbox"
)
func Sha256(data []byte) []byte {
ret := sha256.Sum256(data)
return ret[:]
}
// port of sodium.crypto_generichash
func Generichash(length int, msg []byte) []byte {
h, err := blake2b.New(length, nil)
if err != nil {
panic("error in generichash")
}
_, err = h.Write(msg)
if err != nil {
panic("error in generichash")
}
return h.Sum(nil)
}
func Decrypt_generic(plaintext []byte, key []byte, nonce int) []byte {
return Encrypt_generic(plaintext, key, nonce)
}
func Encrypt_generic(plaintext []byte, key []byte, nonce int) []byte {
pXk := XorBytes(plaintext, key)
ro := randomOracle(key, uint32(nonce))
out := XorBytes(pXk, ro)
return out
}
func XorBytes(a, b []byte) []byte {
if len(a) != len(b) {
panic("len(a) != len(b)")
}
c := make([]byte, len(a))
for i := 0; i < len(a); i++ {
c[i] = a[i] ^ b[i]
}
return c
}
// flatten a slice of slices into a slice
func Flatten(sos [][]byte) []byte {
var res []byte
for i := 0; i < len(sos); i++ {
res = append(res, sos[i]...)
}
return res
}
func randomOracle(msg []byte, nonce_ uint32) []byte {
// sha(0)
var sha0 [32]byte
sha0_, err := hex.DecodeString("da5698be17b9b46962335799779fbeca8ce5d491c0d26243bafef9ea1837a9d8")
if err != nil {
panic(err)
}
copy(sha0[:], sha0_[:])
var nonce [24]byte
result := make([]byte, 4)
binary.BigEndian.PutUint32(result, nonce_)
// JIGG puts e.g. 277 = [0,0,1,21] in reverse order into nonce i.e [21, 1, 0,0,0...,0]
for i := 0; i < 4; i++ {
copy(nonce[i:i+1], result[3-i:4-i])
}
out := secretbox.Seal(nil, msg, &nonce, &sha0)
return out[0:16]
}
func Decrypt(a, b []byte, t uint32, m []byte) []byte {
return Encrypt(a, b, t, m)
}
func Encrypt(a, b []byte, t uint32, m []byte) []byte {
// double a
a2 := make([]byte, 16)
copy(a2[:], a[:])
leastbyte := make([]byte, 1)
copy(leastbyte, a2[0:1])
copy(a2[:], a2[1:15]) // Logical left shift by 1 byte
copy(a2[14:15], leastbyte) // Restore old least byte as new greatest (non-pointer) byte
// quadruple b
b4 := make([]byte, 16)
copy(b4[:], b[:])
leastbytes := make([]byte, 2)
copy(leastbytes, b4[0:2])
copy(b4[:], b4[2:15]) // Logical left shift by 2 bytes
copy(b4[13:15], leastbytes) // Restore old least two bytes as new greatest bytes
k := XorBytes(a2, b4)
ro := randomOracle(k, t)
mXorK := XorBytes(m, k)
return XorBytes(mXorK, ro)
}
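// Usage sketch (illustrative only): Encrypt derives a key from the two 16-byte
// labels a and b plus the tweak t and XORs the message with that key and with
// randomOracle(key, t); since everything is XOR-based, Decrypt is the same
// operation and a round trip returns the original message.
func exampleEncryptRoundTrip() {
a := GetRandom(16)
b := GetRandom(16)
m := GetRandom(16)
ct := Encrypt(a, b, 5, m)
back := Decrypt(a, b, 5, ct)
fmt.Println("Encrypt round trip ok:", hex.EncodeToString(back) == hex.EncodeToString(m))
}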
// convert bytes into a 0/1 array with the least-significant bit at index 0
func BytesToBits(b []byte) []int {
bytes := new(big.Int).SetBytes(b)
bits := make([]int, len(b)*8)
for i := 0; i < len(bits); i++ {
bits[i] = int(bytes.Bit(i))
}
return bits
}
// convert an array of 0/1 into bytes
func BitsToBytes(b []int) []byte {
bigint := new(big.Int)
for i := 0; i < len(b); i++ {
bigint.SetBit(bigint, i, uint(b[i]))
}
// we want to preserve any leading zeroes in the bytes
byteLength := int(math.Ceil(float64(len(b)) / 8))
buf := make([]byte, byteLength)
bigint.FillBytes(buf)
return buf
}
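// Usage sketch (illustrative only): BytesToBits and BitsToBytes are inverses,
// and leading zero bytes survive the round trip because BitsToBytes always
// allocates ceil(len(bits)/8) output bytes.
func exampleBitsRoundTrip() {
in := []byte{0x00, 0x00, 0xAB, 0xCD}
bits := BytesToBits(in) // 32 bits, least-significant bit of the value at index 0
out := BitsToBytes(bits)
fmt.Println("bits round trip ok:", hex.EncodeToString(out) == hex.EncodeToString(in))
}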
// reverses the order of elements in a slice of int, returning a new slice of int
func Reverse(s []int) []int {
newSlice := make([]int, len(s))
copy(newSlice, s)
for i, j := 0, len(newSlice)-1; i < j; i, j = i+1, j-1 {
newSlice[i], newSlice[j] = newSlice[j], newSlice[i]
}
return newSlice
}
// concatenate slices of bytes into a new slice with a new underlying array
func Concat(slices ...[]byte) []byte {
totalSize := 0
for _, v := range slices {
totalSize += len(v)
}
newSlice := make([]byte, totalSize)
copiedSoFar := 0
for _, v := range slices {
copy(newSlice[copiedSoFar:copiedSoFar+len(v)], v)
copiedSoFar += len(v)
}
return newSlice
}
// finishes sha256 hash from a previous mid-state
func FinishHash(outerState []byte, data []byte) []byte {
digest := sha256.New()
digestUnmarshaler, ok := digest.(encoding.BinaryUnmarshaler)
if !ok {
panic("d does not implement UnmarshalBinary")
}
// sha256.go expects the state to be formatted in a certain way
var state []byte
magic256 := "sha\x03"
state = append(state, magic256...)
state = append(state, outerState...)
// the unmarshaled state also contains a 64-byte chunk buffer; since exactly 64 bytes are marked as processed, its content is irrelevant and can be zeroes
state = append(state, make([]byte, 64)...)
var a [8]byte
binary.BigEndian.PutUint64(a[:], 64) // 64 bytes processed so far
state = append(state, a[:]...)
if err := digestUnmarshaler.UnmarshalBinary(state); err != nil {
panic("error in UnmarshalBinary")
}
digest.Write(data)
return digest.Sum(nil)
}
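// Usage sketch (illustrative only): one way to obtain a 32-byte mid-state for
// FinishHash is to hash exactly 64 bytes, marshal the digest and cut out the
// chaining value, which in Go's crypto/sha256 marshalling sits right after the
// 4-byte "sha\x03" magic. FinishHash over that mid-state and the remaining data
// should then equal a plain sha256 over the whole message.
func exampleFinishHash() {
firstChunk := GetRandom(64)
rest := GetRandom(30)
d := sha256.New()
d.Write(firstChunk)
marshaler, ok := d.(encoding.BinaryMarshaler)
if !ok {
panic("digest does not implement MarshalBinary")
}
state, err := marshaler.MarshalBinary()
if err != nil {
panic(err)
}
midState := state[4 : 4+32] // chaining value after the first 64 processed bytes
got := FinishHash(midState, rest)
want := Sha256(Concat(firstChunk, rest))
fmt.Println("FinishHash matches sha256:", hex.EncodeToString(got) == hex.EncodeToString(want))
}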
// GF block multiplication (legacy byte-table variant; note that it consumes val in place by shifting it right)
func BlockMultOld(val, encZero *big.Int) *big.Int {
res := big.NewInt(0)
_255 := big.NewInt(255)
R, ok := new(big.Int).SetString("E1000000000000000000000000000000", 16)
if !ok {
panic("SetString")
}
j := new(big.Int)
for i := 0; i < 16; i++ {
j.And(val, _255)
j.Lsh(j, uint(8*i))
res.Xor(res, gf_2_128_mul(encZero, j, R))
val.Rsh(val, 8) // val >>= 8n
}
return res
}
func BlockMult(x_, y_ *big.Int) *big.Int {
x := new(big.Int).Set(x_)
y := new(big.Int).Set(y_)
res := big.NewInt(0)
_1 := big.NewInt(1)
R, ok := new(big.Int).SetString("E1000000000000000000000000000000", 16)
if !ok {
panic("SetString")
}
for i := 127; i >= 0; i-- {
tmp1 := new(big.Int).Rsh(y, uint(i))
tmp2 := new(big.Int).And(tmp1, _1)
res.Xor(res, new(big.Int).Mul(x, tmp2))
tmp3 := new(big.Int).And(x, _1)
tmp4 := new(big.Int).Mul(tmp3, R)
tmp5 := new(big.Int).Rsh(x, 1)
x = new(big.Int).Xor(tmp5, tmp4)
}
return res
}
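// Sanity-check sketch (illustrative only): in the bit-reflected GF(2^128)
// representation used by BlockMult, the multiplicative identity is the value
// with only its top bit set (0x80 followed by fifteen zero bytes), and the
// multiplication is commutative.
func exampleBlockMult() {
one, ok := new(big.Int).SetString("80000000000000000000000000000000", 16)
if !ok {
panic("SetString")
}
x := new(big.Int).SetBytes(GetRandom(16))
y := new(big.Int).SetBytes(GetRandom(16))
fmt.Println("x * 1 == x:", BlockMult(x, one).Cmp(x) == 0)
fmt.Println("x * y == y * x:", BlockMult(x, y).Cmp(BlockMult(y, x)) == 0)
}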
// return a table where xTable[i] is the 16-byte value of x after i of the 128 doubling rounds used in BlockMult (xTable[0] is x itself)
func GetXTable(xBytes []byte) [][]byte {
x := new(big.Int).SetBytes(xBytes)
_1 := big.NewInt(1)
R, ok := new(big.Int).SetString("E1000000000000000000000000000000", 16)
if !ok {
panic("SetString")
}
xTable := make([][]byte, 128)
for i := 0; i < 128; i++ {
xTable[i] = To16Bytes(x)
tmp3 := new(big.Int).And(x, _1)
tmp4 := new(big.Int).Mul(tmp3, R)
tmp5 := new(big.Int).Rsh(x, 1)
x = new(big.Int).Xor(tmp5, tmp4)
}
return xTable
}
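// Sanity-check sketch (illustrative only): xTable[k] should be x multiplied by
// the k-th power of the polynomial X, which in this representation is encoded
// as the 128-bit value with only bit 127-k set; the first few entries can be
// cross-checked against BlockMult.
func exampleGetXTable() {
xBytes := GetRandom(16)
xTable := GetXTable(xBytes)
x := new(big.Int).SetBytes(xBytes)
for k := 0; k < 4; k++ {
ek := new(big.Int).Lsh(big.NewInt(1), uint(127-k))
expected := To16Bytes(BlockMult(x, ek))
fmt.Println("xTable entry", k, "matches:", hex.EncodeToString(xTable[k]) == hex.EncodeToString(expected))
}
}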
func FindSum(powersOfH *[][]byte, sum int) (int, int) {
for i := 0; i < len(*powersOfH); i++ {
if (*powersOfH)[i] == nil {
continue
}
for j := 0; j < len(*powersOfH); j++ {
if (*powersOfH)[j] == nil {
continue
}
if i+j == sum {
return i, j
}
}
}
// this should never happen because we only call
// FindSum() when we know that the sum can be found
panic("sum not found")
}
// FreeSquare modifies powersOfH in place, filling in missing even powers by repeatedly squaring the powers that are already present ("free squaring")
func FreeSquare(powersOfH *[][]byte, maxPowerNeeded int) {
for i := 0; i < len(*powersOfH); i++ {
if (*powersOfH)[i] == nil || i%2 == 0 {
continue
}
if i > maxPowerNeeded {
return
}
power := i
for power < maxPowerNeeded {
power = power * 2
if (*powersOfH)[power] != nil {
continue
}
prevPower := (*powersOfH)[power/2]
bigIntH := new(big.Int).SetBytes(prevPower)
(*powersOfH)[power] = To16Bytes(BlockMult(bigIntH, bigIntH))
}
}
}
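// Usage sketch (illustrative only): starting from a few odd powers of H,
// FreeSquare derives the even powers reachable by repeated squaring, and
// FindSum then locates two available exponents that add up to a needed one.
// The slice is sized to 2*maxPowerNeeded+1 here because FreeSquare may also
// write one doubling past maxPowerNeeded.
func exampleFreeSquareAndFindSum() {
maxPowerNeeded := 8
H := GetRandom(16)
h := new(big.Int).SetBytes(H)
powersOfH := make([][]byte, 2*maxPowerNeeded+1)
powersOfH[1] = H
powersOfH[3] = To16Bytes(BlockMult(BlockMult(h, h), h)) // H^3
FreeSquare(&powersOfH, maxPowerNeeded)
// powersOfH now also holds powers 2, 4, 8 (squared from H^1) and 6, 12 (from H^3)
i, j := FindSum(&powersOfH, 5)
fmt.Println("exponents", i, "and", j, "sum to 5")
}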
func GetRandom(size int) []byte {
randomBytes := make([]byte, size)
_, err := rand.Read(randomBytes)
if err != nil {
panic(err)
}
return randomBytes
}
// convert big.Int into a slice of 16 bytes
func To16Bytes(x *big.Int) []byte {
buf := make([]byte, 16)
x.FillBytes(buf)
return buf
}
// convert big.Int into a slice of 32 bytes
func To32Bytes(x *big.Int) []byte {
buf := make([]byte, 32)
x.FillBytes(buf)
return buf
}
func gf_2_128_mul(authKey, y, R *big.Int) *big.Int {
// we don't want to change authKey. making a copy of it
x := new(big.Int).Set(authKey)
one := big.NewInt(1)
res := big.NewInt(0)
tmp := big.NewInt(0)
tmp2 := big.NewInt(0)
for i := 127; i > -1; i-- {
// res ^= x * ((y >> i) & 1n)
tmp.Rsh(y, uint(i))
tmp.And(tmp, one)
tmp.Mul(x, tmp)
res.Xor(res, tmp)
// x = (x >> 1n) ^ ((x & 1n) * BigInt(0xE1000000000000000000000000000000))
tmp.And(x, one)
tmp.Mul(tmp, R) // R is the GCM reduction constant passed in as a parameter
tmp2.Rsh(x, 1)
x.Xor(tmp2, tmp)
}
return res
}
// check if int is in array
func Contains(n int, h []int) bool {
for _, v := range h {
if v == n {
return true
}
}
return false
}
/// -------------------------------RANDOM OLD STUFF
// func getTag(w http.ResponseWriter, req *http.Request) {
// fmt.Println("in getTag", req.RemoteAddr)
// defer req.Body.Close()
// body, err := ioutil.ReadAll(req.Body)
// if err != nil {
// panic("can't read request body")
// }
// if len(body) != (128 + 16) {
// panic("len(body != 128+16")
// }
// encZero := body[:16]
// //mask := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 3, 4, 3, 3, 3, 3}
// //Hmasked := xorBytes(encZero, mask)
// encReq := body[16 : 128+16]
// // type 0x16 = Handshake; TLS Version 1.2; 16 bytes of plaintext data
// lenEncReq := make([]byte, 2)
// binary.BigEndian.PutUint16(lenEncReq, uint16(len(encReq)))
// aad := []byte{0, 0, 0, 0, 0, 0, 0, 1, 23, 3, 3}
// aad = append(aad, lenEncReq...)
// //tag1 := getAuthTag(aad, encReq, mask, nil)
// //tag2 := getAuthTag(aad, encReq, Hmasked, nil)
// //tag := xorBytes(tag1, tag2)
// tag := getAuthTag(aad, encReq, encZero, nil)
// fmt.Println("sending back ghash output ", tag)
// w.Header().Set("Access-Control-Allow-Origin", "*")
// w.Header().Set("Connection", "close")
// w.Write(tag)
// }
// // compute the GHASH function to get authentication tag for AES-GCM encryption
// func gHash(inputs [][]byte, encZero []byte) []byte {
// // polynomial := 2**128+2**7+2**2+2+1
// _1 := big.NewInt(1)
// _2 := big.NewInt(2)
// _7 := big.NewInt(7)
// _128 := big.NewInt(128)
// poly := big.NewInt(0)
// poly.Add(poly, new(big.Int).Exp(_2, _128, nil))
// poly.Add(poly, new(big.Int).Exp(_2, _7, nil))
// poly.Add(poly, new(big.Int).Exp(_2, _2, nil))
// poly.Add(poly, _2)
// poly.Add(poly, _1)
// H := new(big.Int).SetBytes(encZero)
// S := big.NewInt(0)
// for i := 0; i < len(inputs); i++ {
// inp := new(big.Int).SetBytes(inputs[i])
// out := new(big.Int).Xor(S, inp)
// out.Mul(out, H)
// out.Mod(out, poly)
// S = out
// }
// return S.Bytes()
// }
// func getAuthTag(aad, ct, encZero_, gctrBlock []byte) []byte {
// // there is no need to use precompute on the notary side
// //table := preComputeTable(encZero)
// var inputs []byte
// inputs = append(inputs, aad...)
// if len(aad)%16 > 0 {
// inputs = append(inputs, make([]byte, 16-(len(aad)%16))...)
// }
// inputs = append(inputs, ct...)
// if len(ct)%16 > 0 {
// inputs = append(inputs, make([]byte, 16-(len(ct)%16))...)
// }
// lenA := make([]byte, 8)
// binary.BigEndian.PutUint64(lenA, uint64(len(aad)*8))
// inputs = append(inputs, lenA...)
// lenC := make([]byte, 8)
// binary.BigEndian.PutUint64(lenC, uint64(len(ct)*8))
// inputs = append(inputs, lenC...)
// S := big.NewInt(0)
// X := new(big.Int)
// encZero := new(big.Int).SetBytes(encZero_)
// for i := 0; i < len(inputs)/16; i++ {
// X.SetBytes(inputs[i*16 : i*16+16])
// X.Xor(X, S)
// //S = times_auth_key_old(X, table)
// S = blockMult(X, encZero)
// //fmt.Println("after round", i, "S.Bytes()", S.Bytes())
// }
// if gctrBlock != nil {
// // if gctrBlock is nil, the output omits the final xor with gctrBlock
// S = S.Xor(S, new(big.Int).SetBytes(gctrBlock))
// }
// return S.Bytes()
// }
// // ported from https://github.com/bozhu/AES-GCM-Python/blob/master/aes_gcm.py
// func gf_2_128_mul(authKey, y *big.Int) *big.Int {
// // we don't want to change authKey. making a copy of it
// x := new(big.Int).Set(authKey)
// res := big.NewInt(0)
// tmp := big.NewInt(0)
// tmp2 := big.NewInt(0)
// for i := 127; i > -1; i-- {
// // res ^= x * ((y >> i) & 1n)
// tmp.Rsh(y, uint(i))
// tmp.And(tmp, g.One)
// tmp.Mul(x, tmp)
// res.Xor(res, tmp)
// // x = (x >> 1n) ^ ((x & 1n) * BigInt(0xE1000000000000000000000000000000))
// tmp.And(x, g.One)
// tmp.Mul(tmp, g.R) //r is global
// tmp2.Rsh(x, 1)
// x.Xor(tmp2, tmp)
// }
// return res
// }
// // this is not in use but keeping it here in case we may need it in the future
// func preComputeTable(encZero []byte) [][]*big.Int {
// authKey := new(big.Int).SetBytes(encZero)
// var table [][]*big.Int
// tmp := new(big.Int)
// tmp2 := new(big.Int)
// for i := 0; i < 16; i++ {
// var row []*big.Int
// for j := 0; j < 256; j++ {
// tmp2.SetUint64(uint64(j))
// tmp.Lsh(tmp2, uint(8*i)) //j << (8n*i)
// row = append(row, gf_2_128_mul(authKey, tmp))
// }
// table = append(table, row)
// }
// return table
// }
// // this may be used in the future if we decide to use a precomputed Htable
// func times_auth_key_old(val *big.Int, table [][]*big.Int) *big.Int {
// res := big.NewInt(0)
// _255 := big.NewInt(255)
// idx := new(big.Int)
// for i := 0; i < 16; i++ {
// idx.And(val, _255)
// res.Xor(res, table[i][idx.Uint64()]) // res ^= table[i][val & BigInt(0xFF)]
// val.Rsh(val, 8) // val >>= 8n
// }
// return res
// }
// func blockMult(val, encZero *big.Int) *big.Int {
// res := big.NewInt(0)
// _255 := big.NewInt(255)
// j := new(big.Int)
// for i := 0; i < 16; i++ {
// j.And(val, _255)
// j.Lsh(j, uint(8*i))
// res.Xor(res, gf_2_128_mul(encZero, j))
// val.Rsh(val, 8) // val >>= 8n
// }
// return res
// }
// func randomOracle(msg []byte, nonce_ int) []byte {
// // sha(0)
// var sha0 [32]byte
// sha0_, err := hex.DecodeString("da5698be17b9b46962335799779fbeca8ce5d491c0d26243bafef9ea1837a9d8")
// if err != nil {
// panic(err)
// }
// copy(sha0[:], sha0_[:])
// var nonce [24]byte
// result := make([]byte, 4)
// binary.BigEndian.PutUint32(result, uint32(nonce_))
// // JIGG puts e.g. 277 = [0,0,1,21] in reverse order into nonce i.e [21, 1, 0,0,0...,0]
// for i := 0; i < 4; i++ {
// copy(nonce[i:i+1], result[3-i:4-i])
// }
// out := secretbox.Seal(nil, msg, &nonce, &sha0)
// return out[0:16]
// }
func RandString() string {
mathrand.Seed(time.Now().UnixNano())
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]rune, 10)
for i := range b {
b[i] = letterRunes[mathrand.Intn(len(letterRunes))]
}
return string(b)
}
// expand the range [min:max) into an array of ints min, min+1, min+2, ... up to but not including max
func ExpandRange(min int, max int) []int {
arr := make([]int, max-min)
for i := 0; i < len(arr); i++ {
arr[i] = min + i
}
return arr
}
func AESGCMencrypt(key []byte, plaintext []byte) []byte {
block, err := aes.NewCipher(key)
if err != nil {
panic(err.Error())
}
nonce := make([]byte, 12)
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
panic(err.Error())
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
panic(err.Error())
}
// we don't reuse plaintext slice when encrypting
ciphertext := aesgcm.Seal(nil, nonce, plaintext, nil)
return Concat(nonce, ciphertext)
}
// decrypt and reuse the ciphertext slice to put plaintext into it
func AESGCMdecrypt(key []byte, ctWithNonce []byte) []byte {
nonce := ctWithNonce[0:12]
ct := ctWithNonce[12:]
block, err := aes.NewCipher(key)
if err != nil {
panic(err.Error())
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
panic(err.Error())
}
pt, err := aesgcm.Open(ct[:0], nonce, ct, nil)
if err != nil {
panic(err.Error())
}
return pt
}
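// Usage sketch (illustrative only): AESGCMencrypt prepends the random 12-byte
// nonce to the ciphertext and AESGCMdecrypt expects exactly that layout, so a
// round trip with the same key recovers the plaintext. A 16-byte key (AES-128)
// is assumed here; 24 or 32 bytes would also work.
func exampleAESGCMRoundTrip() {
key := GetRandom(16)
plaintext := []byte("hello notary")
ct := AESGCMencrypt(key, plaintext)
pt := AESGCMdecrypt(key, ct)
fmt.Println("AES-GCM round trip ok:", string(pt) == string(plaintext))
}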
func RandInt(min, max int) int {
mathrand.Seed(int64(binary.BigEndian.Uint64(GetRandom(8))))
return mathrand.Intn(max-min) + min
}
func ECDSASign(key *ecdsa.PrivateKey, items ...[]byte) []byte {
var concatAll []byte
for _, item := range items {
concatAll = append(concatAll, item...)
}
digest_to_be_signed := Sha256(concatAll)
r, s, err := ecdsa.Sign(rand.Reader, key, digest_to_be_signed)
if err != nil {
panic("ecdsa.Sign")
}
signature := append(To32Bytes(r), To32Bytes(s)...)
return signature
}
func ECDSAPubkeyToPEM(key *ecdsa.PublicKey) []byte {
derBytes, err := x509.MarshalPKIXPublicKey(key)
if err != nil {
fmt.Println(err)
panic("x509.MarshalPKIXPublicKey")
}
block := &pem.Block{
Type: "PUBLIC KEY",
Bytes: derBytes,
}
pubKeyPEM := pem.EncodeToMemory(block)
return pubKeyPEM
}
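// Verification sketch (illustrative only): the 64-byte signature produced by
// ECDSASign is r || s, each padded to 32 bytes, so it can be checked with
// ecdsa.Verify after splitting it back into two integers. A P-256 key is
// assumed, and the example additionally needs the crypto/elliptic import,
// which this file does not otherwise use.
func exampleECDSAVerify() {
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
panic(err)
}
msg := []byte("signed payload")
sig := ECDSASign(key, msg)
r := new(big.Int).SetBytes(sig[:32])
s := new(big.Int).SetBytes(sig[32:])
fmt.Println("signature valid:", ecdsa.Verify(&key.PublicKey, Sha256(msg), r, s))
}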