Mirror of https://github.com/data61/MP-SPDZ.git, synced 2026-01-10 05:57:57 -05:00.
Commit message: Maintenance.
This commit is contained in:
114
Programs/Source/alex.mpc
Normal file
114
Programs/Source/alex.mpc
Normal file
@@ -0,0 +1,114 @@
|
||||
# AlexNet-like CNN for 32x32x3 images (CIFAR-10 scale) using MP-SPDZ's
# Keras-compatible API (Compiler.ml). Hyper-parameters and feature flags
# are taken from program.args; data is provided as secret input by party 0.
from Compiler.ml import keras
import Compiler.ml as tf


def _arg_int(index, default):
    """Return program.args[index] parsed as int, or `default` when the
    argument is missing or not numeric."""
    try:
        return int(program.args[index])
    except (ValueError, IndexError):
        return default


n_epochs = _arg_int(1, 20)
batch_size = _arg_int(2, 128)
n_threads = _arg_int(3, 36)

# Instantiation
AlexNet = []

padding = 1
batchnorm = 'batchnorm' in program.args  # batch norm after every conv block
bn1 = 'bn1' in program.args  # extra batch norm after the 4th conv layer only
bn2 = 'bn2' in program.args  # extra batch norm after the 2nd and 5th conv layers

MultiArray.disable_index_checks()

# 1st Convolutional Layer
AlexNet.append(keras.layers.Conv2D(filters=64, input_shape=(32,32,3), kernel_size=3, strides=1, padding=2))
AlexNet.append(keras.layers.Activation('relu'))
if batchnorm:
    AlexNet.append(keras.layers.BatchNormalization())
AlexNet.append(keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2), padding=0))

# 2nd Convolutional Layer
AlexNet.append(keras.layers.Conv2D(filters=96, kernel_size=3, strides=1, padding=2))
AlexNet.append(keras.layers.Activation('relu'))
if batchnorm or bn2:
    AlexNet.append(keras.layers.BatchNormalization())
AlexNet.append(keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))

# 3rd Convolutional Layer
AlexNet.append(keras.layers.Conv2D(filters=96, kernel_size=(3,3), strides=(1,1), padding=padding))
AlexNet.append(keras.layers.Activation('relu'))
if batchnorm:
    AlexNet.append(keras.layers.BatchNormalization())

# 4th Convolutional Layer
AlexNet.append(keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding=padding))
AlexNet.append(keras.layers.Activation('relu'))
if batchnorm or bn1:
    AlexNet.append(keras.layers.BatchNormalization())

# 5th Convolutional Layer
AlexNet.append(keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding=padding))
AlexNet.append(keras.layers.Activation('relu'))
if batchnorm or bn2:
    AlexNet.append(keras.layers.BatchNormalization())
AlexNet.append(keras.layers.MaxPooling2D(pool_size=(3,3), strides=(2,2), padding=0))

# Passing it to a Fully Connected layer
# 1st Fully Connected Layer
AlexNet.append(keras.layers.Dense(128))
AlexNet.append(keras.layers.Activation('relu'))

if 'dropout' in program.args:
    AlexNet.append(keras.layers.Dropout(0.5))

# 2nd Fully Connected Layer
AlexNet.append(keras.layers.Dense(256))
AlexNet.append(keras.layers.Activation('relu'))

if 'dropout' in program.args:
    AlexNet.append(keras.layers.Dropout(0.5))

# Output Layer
AlexNet.append(keras.layers.Dense(10))

tf.set_n_threads(n_threads)
program.options_from_args()
sfix.set_precision_from_args(program, adapt_ring=True)

# Secret-shared training and test data, supplied by party 0.
training_samples = MultiArray([50000, 32, 32, 3], sfix)
training_labels = MultiArray([50000, 10], sint)

test_samples = MultiArray([10000, 32, 32, 3], sfix)
test_labels = MultiArray([10000, 10], sint)

training_labels.input_from(0)
training_samples.input_from(0, binary='binary_samples' in program.args)

test_labels.input_from(0)
test_samples.input_from(0, binary='binary_samples' in program.args)

model = tf.keras.models.Sequential(AlexNet)

model.compile_by_args(program)

model.build(training_samples.sizes)
model.summary()

# Optional debugging/training options toggled via program.args.
model.opt.output_diff = 'output_diff' in program.args
model.opt.output_grad = 'output_grad' in program.args
model.opt.output_stats = 100 if 'output_stats' in program.args else 0
model.opt.shuffle = 'noshuffle' not in program.args

opt = model.fit(
    training_samples,
    training_labels,
    epochs=n_epochs,
    batch_size=batch_size,
    validation_data=(test_samples, test_labels)
)
|
||||
@@ -25,6 +25,9 @@ n_threads = 2
|
||||
if len(program.args) > 1:
|
||||
n_rounds = int(program.args[1])
|
||||
|
||||
if len(program.args) > 2:
|
||||
program.active = bool(int(program.args[2]))
|
||||
|
||||
def accept_client():
|
||||
client_socket_id = accept_client_connection(PORTNUM)
|
||||
last = regint.read_from_socket(client_socket_id)
|
||||
|
||||
@@ -4,7 +4,7 @@ import Compiler.ml as tf
|
||||
try:
|
||||
n_epochs = int(program.args[1])
|
||||
except (ValueError, IndexError):
|
||||
n_epochs = 10
|
||||
n_epochs = 20
|
||||
|
||||
try:
|
||||
batch_size = int(program.args[2])
|
||||
|
||||
72
Programs/Source/keras_mnist_lenet_avgpool.mpc
Normal file
72
Programs/Source/keras_mnist_lenet_avgpool.mpc
Normal file
@@ -0,0 +1,72 @@
|
||||
# this trains LeNet on MNIST with a dropout layer
# see https://github.com/csiro-mlai/mnist-mpc for data preparation

program.options_from_args()

if 'torch' in program.args:
    # Pull MNIST via torchvision and secret-share it as party 0's input.
    import torchvision
    data = []
    for train in True, False:
        ds = torchvision.datasets.MNIST(root='/tmp', train=train, download=True)
        # normalize to [0,1] before input
        samples = sfix.input_tensor_via(0, ds.data / 255., binary=True)
        labels = sint.input_tensor_via(0, ds.targets, binary=True, one_hot=True)
        data += [(labels, samples)]

    (training_labels, training_samples), (test_labels, test_samples) = data
else:
    # Pre-prepared data read from party 0's input stream.
    training_samples = sfix.Tensor([60000, 28, 28])
    training_labels = sint.Tensor([60000, 10])

    test_samples = sfix.Tensor([10000, 28, 28])
    test_labels = sint.Tensor([10000, 10])

    training_labels.input_from(0)
    training_samples.input_from(0)

    test_labels.input_from(0)
    test_samples.input_from(0)

from Compiler import ml
tf = ml

# LeNet with average pooling; optional batch norm after each conv layer.
use_batchnorm = 'batchnorm' in program.args

layers = [tf.keras.layers.Conv2D(20, 5, 1, 'valid', activation='relu')]
if use_batchnorm:
    layers.append(tf.keras.layers.BatchNormalization())
layers.append(tf.keras.layers.AveragePooling2D(2))
layers.append(tf.keras.layers.Conv2D(50, 5, 1, 'valid', activation='relu'))
if use_batchnorm:
    layers.append(tf.keras.layers.BatchNormalization())
layers.extend([
    tf.keras.layers.AveragePooling2D(2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(500, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),
])

model = tf.keras.models.Sequential(layers)

optim = tf.keras.optimizers.Adam(amsgrad=True)

model.compile(optimizer=optim)

opt = model.fit(
    training_samples,
    training_labels,
    epochs=10,
    batch_size=128,
    validation_data=(test_samples, test_labels)
)

# Persist the trained weights for later inference runs.
for var in model.trainable_variables:
    var.write_to_file()
|
||||
49
Programs/Source/torch_mnist_lenet_avgpool.mpc
Normal file
49
Programs/Source/torch_mnist_lenet_avgpool.mpc
Normal file
@@ -0,0 +1,49 @@
|
||||
# this trains a LeNet-style convolutional network on MNIST
# (network is defined in plaintext PyTorch, then converted to MP-SPDZ layers)

program.options_from_args()

import torchvision

# Fetch MNIST (train and test splits) and secret-share it as party 0's input.
data = []
for train in True, False:
    ds = torchvision.datasets.MNIST(root='/tmp', train=train, download=True)
    # normalize to [0,1] before input
    samples = sfix.input_tensor_via(0, ds.data / 255., binary=True)
    labels = sint.input_tensor_via(0, ds.targets, binary=True, one_hot=True)
    data += [(labels, samples)]

import torch
import torch.nn as nn

# Plaintext network definition; mirrored into MP-SPDZ below via
# ml.layers_from_torch.
net = nn.Sequential(
    nn.Conv2d(1, 20, 5),
    nn.ReLU(),
    nn.AvgPool2d(2),
    nn.Conv2d(20, 50, 5),
    nn.ReLU(),
    nn.AvgPool2d(2),
    nn.Flatten(),
    nn.ReLU(),
    nn.Linear(800, 500),
    nn.ReLU(),
    nn.Linear(500, 10)
)

# test network
ds = torchvision.datasets.MNIST(
    root='/tmp', transform=torchvision.transforms.ToTensor())
inputs = next(iter(torch.utils.data.DataLoader(ds)))[0]
print(inputs.shape)
outputs = net(inputs)

from Compiler import ml

# NOTE(review): program.args[1] (epochs) and program.args[2] (threads) are
# required positional arguments here, unlike sibling scripts that fall back
# to defaults — confirm this is intentional.
ml.set_n_threads(int(program.args[2]))

layers = ml.layers_from_torch(net, data[0][1].shape, 128)
layers[0].X = data[0][1]
layers[-1].Y = data[0][0]

optimizer = ml.SGD(layers)
optimizer.run_by_args(program, int(program.args[1]), 128,
                      data[1][1], data[1][0])
|
||||
Reference in New Issue
Block a user