{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# model architecture modified from https://keras.io/examples/vision/mnist_convnet/"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.layers import Input, Conv2D, AveragePooling2D, Flatten, Lambda, Softmax, Dense\n",
"from tensorflow.keras import Model\n",
"from tensorflow.keras.datasets import mnist\n",
"from tensorflow.keras.utils import to_categorical\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import tensorflow as tf"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"(X_train, y_train), (X_test, y_test) = mnist.load_data()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Convert y_train into one-hot format\n",
"temp = []\n",
"for i in range(len(y_train)):\n",
"    temp.append(to_categorical(y_train[i], num_classes=10))\n",
"y_train = np.array(temp)\n",
"# Convert y_test into one-hot format\n",
"temp = []\n",
"for i in range(len(y_test)):\n",
"    temp.append(to_categorical(y_test[i], num_classes=10))\n",
"y_test = np.array(temp)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# reshaping\n",
"X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\n",
"X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"inputs = Input(shape=(28,28,1))\n",
"out = Lambda(lambda x: x/1000)(inputs)\n",
"out = Conv2D(4, 3)(out)\n",
"out = Lambda(lambda x: x**2+x)(out)\n",
"out = AveragePooling2D()(out)\n",
"out = Lambda(lambda x: x*4)(out)\n",
"out = Conv2D(8, 3)(out)\n",
"out = Lambda(lambda x: x**2+x)(out)\n",
"out = AveragePooling2D()(out)\n",
"out = Lambda(lambda x: x*4)(out)\n",
"out = Flatten()(out)\n",
"out = Dense(10, activation=None)(out)\n",
"out = Softmax()(out)\n",
"model = Model(inputs, out)"
]
},
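{
"cell_type": "markdown",
"metadata": {},
"source": [
"*(Added note)* The model keeps every operation polynomial so it can later be reproduced inside a circom circuit: the first `Lambda` divides the raw 0-255 pixels by 1000 (so the integer pixel values can be fed to the circuit as already-scaled inputs), `x**2+x` stands in for a ReLU-style activation, and multiplying each `AveragePooling2D` output by 4 turns the 2x2 average into a plain window sum. The cell below is an added sanity check of that last identity, not part of the original notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sanity check: AveragePooling2D over 2x2 windows, scaled by 4, equals a 2x2 sum pool.\n",
"t = tf.random.uniform((1, 4, 4, 1))\n",
"avg_times_4 = (AveragePooling2D()(t) * 4).numpy()\n",
"# reshape the 4x4 grid into 2x2 blocks and sum within each block\n",
"window_sum = t.numpy().reshape(1, 2, 2, 2, 2, 1).sum(axis=(2, 4))\n",
"np.allclose(avg_times_4, window_sum)"
]
},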
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"model\"\n",
"_________________________________________________________________\n",
"Layer (type)                 Output Shape              Param #   \n",
"=================================================================\n",
"input_1 (InputLayer)         [(None, 28, 28, 1)]       0         \n",
"_________________________________________________________________\n",
"lambda (Lambda)              (None, 28, 28, 1)         0         \n",
"_________________________________________________________________\n",
"conv2d (Conv2D)              (None, 26, 26, 4)         40        \n",
"_________________________________________________________________\n",
"lambda_1 (Lambda)            (None, 26, 26, 4)         0         \n",
"_________________________________________________________________\n",
"average_pooling2d (AveragePo (None, 13, 13, 4)         0         \n",
"_________________________________________________________________\n",
"lambda_2 (Lambda)            (None, 13, 13, 4)         0         \n",
"_________________________________________________________________\n",
"conv2d_1 (Conv2D)            (None, 11, 11, 8)         296       \n",
"_________________________________________________________________\n",
"lambda_3 (Lambda)            (None, 11, 11, 8)         0         \n",
"_________________________________________________________________\n",
"average_pooling2d_1 (Average (None, 5, 5, 8)           0         \n",
"_________________________________________________________________\n",
"lambda_4 (Lambda)            (None, 5, 5, 8)           0         \n",
"_________________________________________________________________\n",
"flatten (Flatten)            (None, 200)               0         \n",
"_________________________________________________________________\n",
"dense (Dense)                (None, 10)                2010      \n",
"_________________________________________________________________\n",
"softmax (Softmax)            (None, 10)                0         \n",
"=================================================================\n",
"Total params: 2,346\n",
"Trainable params: 2,346\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"model.compile(\n",
"    loss='categorical_crossentropy',\n",
"    optimizer='adam',\n",
"    metrics=['acc']\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/15\n",
"469/469 [==============================] - 5s 10ms/step - loss: 0.9272 - acc: 0.7005 - val_loss: 0.2338 - val_acc: 0.9316\n",
"Epoch 2/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.2102 - acc: 0.9396 - val_loss: 0.1324 - val_acc: 0.9587\n",
"Epoch 3/15\n",
"469/469 [==============================] - 5s 10ms/step - loss: 0.1367 - acc: 0.9605 - val_loss: 0.0977 - val_acc: 0.9709\n",
"Epoch 4/15\n",
"469/469 [==============================] - 5s 10ms/step - loss: 0.1066 - acc: 0.9684 - val_loss: 0.0837 - val_acc: 0.9762\n",
"Epoch 5/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0962 - acc: 0.9727 - val_loss: 0.0762 - val_acc: 0.9776\n",
"Epoch 6/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0830 - acc: 0.9755 - val_loss: 0.0709 - val_acc: 0.9788\n",
"Epoch 7/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0766 - acc: 0.9764 - val_loss: 0.0639 - val_acc: 0.9806\n",
"Epoch 8/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0702 - acc: 0.9790 - val_loss: 0.0620 - val_acc: 0.9815\n",
"Epoch 9/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0703 - acc: 0.9782 - val_loss: 0.0573 - val_acc: 0.9837\n",
"Epoch 10/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0639 - acc: 0.9802 - val_loss: 0.0570 - val_acc: 0.9821\n",
"Epoch 11/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0627 - acc: 0.9804 - val_loss: 0.0535 - val_acc: 0.9839\n",
"Epoch 12/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0607 - acc: 0.9813 - val_loss: 0.0515 - val_acc: 0.9832\n",
"Epoch 13/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0562 - acc: 0.9829 - val_loss: 0.0498 - val_acc: 0.9852\n",
"Epoch 14/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0538 - acc: 0.9834 - val_loss: 0.0493 - val_acc: 0.9843\n",
"Epoch 15/15\n",
"469/469 [==============================] - 4s 9ms/step - loss: 0.0528 - acc: 0.9841 - val_loss: 0.0489 - val_acc: 0.9838\n"
]
},
{
"data": {
"text/plain": [
"<tensorflow.python.keras.callbacks.History at 0x12fe77370>"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.fit(X_train, y_train, epochs=15, batch_size=128, validation_data=(X_test, y_test))"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"((28, 28, 1), 0, 255)"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X = X_test[0]\n",
"X.shape, X.min(), X.max()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"# model2 exposes the pre-softmax logits (the output of the Dense layer)\n",
"model2 = Model(model.input, model.layers[-2].output)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[ -8.095224 ,  -5.8302927,  -1.2153628,   2.650765 , -19.186575 ,\n",
"         -5.7322216, -26.104668 ,  15.262588 ,  -4.949901 ,  -0.8113966]],\n",
"      dtype=float32)"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# logits with the dense bias subtracted, matching the zero dense bias used in the circuit\n",
"model2.predict(X_test[[0]]) - model.weights[5].numpy()"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[7.0393115e-11, 7.1936546e-10, 6.7945763e-08, 2.8899435e-06,\n",
"        9.6229656e-16, 7.9535123e-10, 1.0399456e-18, 9.9999690e-01,\n",
"        1.6393171e-09, 9.2139523e-08]], dtype=float32)"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.predict(X_test[[0]])"
]
},
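{
"cell_type": "markdown",
"metadata": {},
"source": [
"*(Added check)* The circuit skips both the dense bias and the softmax and only reports the class with the largest logit. The cell below is an added verification that, for this sample, the bias-free logits still select the same class as the full model (softmax is monotonic, so dropping it alone never changes the argmax)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added check: dropping the dense bias and the softmax leaves the predicted class unchanged here.\n",
"logits_no_bias = model2.predict(X_test[[0]]) - model.weights[5].numpy()\n",
"int(np.argmax(logits_no_bias)), int(np.argmax(model.predict(X_test[[0]]))), int(np.argmax(y_test[0]))"
]
},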
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<matplotlib.image.AxesImage at 0x13a72d970>"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAANh0lEQVR4nO3df6zddX3H8dfL/sJeYFKwtSuVKqKxOsHlCppuSw3DAYYUo2w0GekSZskGCSxmG2ExkmxxjIiETWdSR2clCFOBQLRzksaNkLHKhZRSKFuRdVh71wvUrUXgtqXv/XG/LJdyz+dezvd7zve07+cjuTnnfN/ne77vfHtf/X7v+XzP+TgiBODY95a2GwDQH4QdSIKwA0kQdiAJwg4kMbufG5vreXGchvq5SSCVV/QLHYhxT1WrFXbb50u6RdIsSX8XETeUnn+chnSOz62zSQAFm2NTx1rXp/G2Z0n6qqQLJC2XtNr28m5fD0Bv1fmb/WxJT0fEMxFxQNKdklY10xaAptUJ+xJJP530eFe17HVsr7U9YnvkoMZrbA5AHXXCPtWbAG+49jYi1kXEcEQMz9G8GpsDUEedsO+StHTS41Ml7a7XDoBeqRP2hyWdYftdtudKulTSfc20BaBpXQ+9RcQh21dJ+idNDL2tj4gnGusMQKNqjbNHxEZJGxvqBUAPcbkskARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IIlaUzbb3ilpv6RXJR2KiOEmmgLQvFphr3w8Ip5v4HUA9BCn8UASdcMekn5o+xHba6d6gu21tkdsjxzUeM3NAehW3dP4FRGx2/ZCSffbfioiHpj8hIhYJ2mdJJ3oBVFzewC6VOvIHhG7q9sxSfdIOruJpgA0r+uw2x6yfcJr9yV9QtK2phoD0Kw6p/GLJN1j+7XX+VZE/KCRrgA0ruuwR8Qzks5ssBcAPcTQG5AEYQeSIOxAEoQdSIKwA0k08UGYFF747Mc61t552dPFdZ8aW1SsHxifU6wvuaNcn7/rxY61w1ueLK6LPDiyA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EASjLPP0J/88bc61j499PPyyqfX3PjKcnnnoZc61m557uM1N370+vHYaR1rQzf9UnHd2Zseabqd1nFkB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkHNG/SVpO9II4x+f2bXtN+sVnzulYe/5D5f8zT9pe3sc/f7+L9bkf+p9i/cYP3t2xdt5bXy6u+/2Xji/WPzm/82fl63o5DhTrm8eHivWVxx3setvv+f4Vxfp71z7c9Wu3aXNs0r7YO+UvFEd2IAnCDiRB2IEkCDuQBGEHkiDsQBKEHUiCz7PP0NB3Nxdq9V77xHqr62/esbJj7S9WLCtv+1/K33l/48r3dNHRzMx++XCxPrR1tFg/+YG7ivVfmdv5+/bn7yx/F/+xaNoju+31tsdsb5u0bIHt+23vqG5P6m2bAOqayWn8NySdf8SyayVtiogzJG2qHgMYYNOGPSIekLT3iMWrJG2o7m+QdHGzbQFoWrdv0C2KiFFJqm4Xdnqi7bW2R2yPHNR4l5sDUFfP342PiHURMRwRw3M0r9ebA9BBt2HfY3uxJFW3Y821BKAXug37fZLWVPfXSLq3mXYA9Mq04+y279DEN5efYnuXpC9IukHSt21fLulZSZf0skmUHfrvPR1rQ3d1rknSq9O89tB3X+iio2bs+f2PFesfmFv+9f3S3vd1rC37+2eK6x4qVo9O04Y9IlZ3KB2d30IBJMXlskAShB1IgrADSRB2IAnCDiTBR1zRmtmnLS3Wv3LdV4r1OZ5VrH/nlt/sWDt59KHiuscijuxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kATj7GjNU3+0pFj/yLzyVNZPHChPR73gyZfedE/HMo7sQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AE4+zoqfFPfqRj7dHP3DzN2uUZhP7g6quL9bf+64+nef1cOLIDSRB2IAnCDiRB2IEkCDuQBGEHkiDsQBKMs6Onnr2g8/HkeJfH0Vf/53nF+vwfPFasR7Gaz7RHdtvrbY/Z3jZp2fW2f2Z7S/VzYW/bBFDXTE7jvyHp/CmW3xwRZ1U/G5ttC0DTpg17RDwgaW8fegHQQ3XeoLvK9tbqNP+kTk+yvdb2iO2RgxqvsTkAdXQb9q9JOl3SWZJGJd3U6YkRsS4ihiNieM40H2wA0DtdhT0i9kTEqxFxWNLXJZ3dbFsAmtZV2G0vnvTwU5K2dXougMEw7Ti77TskrZR0iu1dkr4gaaXtszQxlLlT0hW9axGD7C0nnFCsX/brD3as7Tv8SnHdsS++u1ifN/5wsY7XmzbsEbF6isW39qAXAD3E5bJAEoQdSIKwA0kQdiAJwg4kwUdcUcuO6z9QrH/vlL/tWFu149PFdedtZGitSRzZgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJxtlR9L+/+9Fifevv/HWx/pNDBzvWXvyrU4vrztNosY43hyM7kARhB5Ig7EAShB1IgrADSRB2IAnCDiTBOHtys5f8crF+zef/oVif5/Kv0KWPXdax9vZ/5PPq/cSRHUiCsANJEHYgCcIOJEHYgSQIO5AEYQeSYJz9GOfZ5X/iM7+3q1i/5PgXivXb9y8s1hd9vvPx5HBxTTRt2iO77aW2f2R7u+0nbF9dLV9g+37bO6rbk3rfLoBuzeQ0/pCkz0XE+yV9VNKVtpdLulbSpog4Q9Km6jGAATVt2CNiNCIere7vl7Rd0hJJqyRtqJ62QdLFPeoRQAPe1Bt0tpdJ+rCkzZIWRcSoNPEfgqQp/3izvdb2iO2Rgxqv2S6Abs047LaPl3SXpGsiYt9M14uIdRExHBHDczSvmx4BNGBGYbc9RxNBvz0i7q4W77G9uKovljTWmxYBNGHaoTfblnSrpO0R8eVJpfskrZF0Q3V7b086RD1nvq9Y/vOFt9V6+a9+8ZJi/W2PPVTr9dGcmYyzr5B0maTHbW+pll2niZB/2/blkp6VVP5XB9CqacMeEQ9Kcofyuc22A6BXuFwWSIKwA0kQdiAJwg4kQdiBJPiI6zFg1vL3dqytvbPe5Q/L119ZrC+77d9qvT76hyM7kARhB5Ig7EAShB1IgrADSRB2IAnCDiTBOPsx4Kk/7PzFvhfNn/GXCk3p1H8+UH5CRK3XR/9wZAeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJBhnPwq8ctHZxfqmi24qVOc32wyOWhzZgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiCJmczPvlTSNyW9Q9JhSesi4hbb10v6rKTnqqdeFxEbe9VoZrtXzCrW3zm7+7H02/cvLNbn7Ct/np1Psx89ZnJRzSFJn4uIR22fIOkR2/dXtZsj4ku9aw9AU2YyP/uopNHq/n7b2yUt6XVjAJr1p
v5mt71M0oclba4WXWV7q+31tqf8biTba22P2B45qPF63QLo2ozDbvt4SXdJuiYi9kn6mqTTJZ2liSP/lBdoR8S6iBiOiOE5mle/YwBdmVHYbc/RRNBvj4i7JSki9kTEqxFxWNLXJZU/rQGgVdOG3bYl3Sppe0R8edLyxZOe9ilJ25pvD0BTZvJu/ApJl0l63PaWatl1klbbPksToy87JV3Rg/5Q01++sLxYf+i3lhXrMfp4g92gTTN5N/5BSZ6ixJg6cBThCjogCcIOJEHYgSQIO5AEYQeSIOxAEo4+Trl7ohfEOT63b9sDstkcm7Qv9k41VM6RHciCsANJEHYgCcIOJEHYgSQIO5AEYQeS6Os4u+3nJP3XpEWnSHq+bw28OYPa26D2JdFbt5rs7bSIePtUhb6G/Q0bt0ciYri1BgoGtbdB7Uuit271qzdO44EkCDuQRNthX9fy9ksGtbdB7Uuit271pbdW/2YH0D9tH9kB9AlhB5JoJey2z7f977aftn1tGz10Ynun7cdtb7E90nIv622P2d42adkC2/fb3lHdTjnHXku9XW/7Z9W+22L7wpZ6W2r7R7a3237C9tXV8lb3XaGvvuy3vv/NbnuWpP+QdJ6kXZIelrQ6Ip7sayMd2N4paTgiWr8Aw/ZvSHpR0jcj4oPVshsl7Y2IG6r/KE+KiD8dkN6ul/Ri29N4V7MVLZ48zbikiyX9nlrcd4W+flt92G9tHNnPlvR0RDwTEQck3SlpVQt9DLyIeEDS3iMWr5K0obq/QRO/LH3XobeBEBGjEfFodX+/pNemGW913xX66os2wr5E0k8nPd6lwZrvPST90PYjtte23cwUFkXEqDTxyyNpYcv9HGnaabz76Yhpxgdm33Uz/XldbYR9qu/HGqTxvxUR8auSLpB0ZXW6ipmZ0TTe/TLFNOMDodvpz+tqI+y7JC2d9PhUSbtb6GNKEbG7uh2TdI8GbyrqPa/NoFvdjrXcz/8bpGm8p5pmXAOw79qc/ryNsD8s6Qzb77I9V9Klku5roY83sD1UvXEi20OSPqHBm4r6PklrqvtrJN3bYi+vMyjTeHeaZlwt77vWpz+PiL7/SLpQE+/I/0TSn7XRQ4e+3i3psernibZ7k3SHJk7rDmrijOhySSdL2iRpR3W7YIB6u03S45K2aiJYi1vq7dc08afhVklbqp8L2953hb76st+4XBZIgivogCQIO5AEYQeSIOxAEoQdSIKwA0kQdiCJ/wNGNvRI2D7VDgAAAABJRU5ErkJggg==",
"text/plain": [
"<Figure size 432x288 with 1 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"plt.imshow(X)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6\n",
"(3, 3, 1, 4)\n",
"(4,)\n",
"(3, 3, 4, 8)\n",
"(8,)\n",
"(200, 10)\n",
"(10,)\n"
]
}
],
"source": [
"print(len(model.weights))\n",
"for weights in model.weights:\n",
"    print(weights.shape)"
]
},
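{
"cell_type": "markdown",
"metadata": {},
"source": [
"*(Added note)* The export in the next cell indexes `model.weights` by position, so it helps to see which layer each tensor belongs to. The listing below is an added, purely informational step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added (informational): map each weight tensor to its layer via its variable name.\n",
"for w in model.weights:\n",
"    print(w.name, tuple(w.shape))"
]
},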
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"in_json = {\n",
"    \"in\": X.astype(int).flatten().tolist(), # X (0-255) already serves as the 10**3-scaled input: the model's first Lambda divides by 1000\n",
"    \"conv2d_1_weights\": (model.weights[0].numpy()*(10**3)).round().astype(int).flatten().tolist(),\n",
"    \"conv2d_1_bias\": (model.weights[1].numpy()*(10**3)*(10**3)).round().astype(int).flatten().tolist(),\n",
"    # the following poly (x**2+x) layer is scaled by (10**3)**2 = 10**6 as well\n",
"    \"conv2d_2_weights\": (model.weights[2].numpy()*(10**3)).round().astype(int).flatten().tolist(),\n",
"    \"conv2d_2_bias\": (model.weights[3].numpy()*((10**3)**5)).round().astype(int).flatten().tolist(),\n",
"    # the following poly layer is scaled by (10**3)**5 = 10**15 as well\n",
"    \"dense_weights\": (model.weights[4].numpy()*(10**3)).round().astype(int).flatten().tolist(),\n",
"    \"dense_bias\": np.zeros(model.weights[5].numpy().shape).tolist() # zero because we are not doing softmax in circom, just argmax\n",
"}"
]
},
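{
"cell_type": "markdown",
"metadata": {},
"source": [
"*(Added check)* The magnitudes of the quantized values bound the integers the circuit has to handle. The quick range listing below is an addition, not part of the original export."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added (informational): min/max of each quantized array in the circuit input.\n",
"for key, value in in_json.items():\n",
"    arr = np.array(value)\n",
"    print(key, arr.min(), arr.max())"
]
},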
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"import json"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"with open(\"mnist_convnet_input.json\", \"w\") as f:\n",
"    json.dump(in_json, f)"
]
},
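{
"cell_type": "markdown",
"metadata": {},
"source": [
"*(Added check)* A quick read-back of the file that was just written, to confirm the JSON round-trips; this cell is an addition."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added check: reload the exported file and confirm the expected keys and input size.\n",
"with open(\"mnist_convnet_input.json\") as f:\n",
"    loaded = json.load(f)\n",
"sorted(loaded.keys()), len(loaded[\"in\"])"
]
},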
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"interpreter": {
"hash": "11280bdb37aa6bc5d4cf1e4de756386eb1f9eecd8dcdefa77636dfac7be2370d"
},
"kernelspec": {
"display_name": "Python 3.8.6 ('tf24')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}