Global...Pooling2D layers

Cathie So
2022-12-12 02:53:50 +08:00
parent 538277887f
commit 6bb4da79cd
13 changed files with 554 additions and 0 deletions

View File

@@ -0,0 +1,24 @@
pragma circom 2.0.0;

include "./GlobalSumPooling2D.circom";

// GlobalAveragePooling2D layer; may lose precision compared to GlobalSumPooling2D
// scaledInv is required to perform fixed-point division: it is calculated as 1/(nRows*nCols) and then scaled up by a power of 10
template GlobalAveragePooling2D (nRows, nCols, nChannels, scaledInv) {
    signal input in[nRows][nCols][nChannels];
    signal output out[nChannels];

    component globalSumPooling2D = GlobalSumPooling2D (nRows, nCols, nChannels);

    for (var i=0; i<nRows; i++) {
        for (var j=0; j<nCols; j++) {
            for (var k=0; k<nChannels; k++) {
                globalSumPooling2D.in[i][j][k] <== in[i][j][k];
            }
        }
    }

    for (var k=0; k<nChannels; k++) {
        out[k] <== globalSumPooling2D.out[k]*scaledInv;
    }
}
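For concreteness, here is a minimal Python sketch (illustrative, not part of the changed files) of how scaledInv would be derived; the 10**2 scale factor is an assumption that matches the GlobalAveragePooling2D(5, 5, 3, 4) test circuit and the extra *100 output scaling in the notebook below.

# Hypothetical derivation of scaledInv, assuming a 10**2 scale factor.
nRows, nCols = 5, 5
scale = 10 ** 2
scaledInv = round(scale / (nRows * nCols))   # 100 / 25 = 4

# The circuit then outputs sum(in) * scaledInv per channel, i.e. the true
# average multiplied by `scale`; downstream layers (or the verifier) must
# account for that extra factor of 10**2.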

View File

@@ -0,0 +1,21 @@
pragma circom 2.0.0;

include "./util.circom";

// GlobalMaxPooling2D layer
template GlobalMaxPooling2D (nRows, nCols, nChannels) {
    signal input in[nRows][nCols][nChannels];
    signal output out[nChannels];

    component max[nChannels];

    for (var k=0; k<nChannels; k++) {
        max[k] = Max(nRows*nCols);
        for (var i=0; i<nRows; i++) {
            for (var j=0; j<nCols; j++) {
                max[k].in[i*nCols+j] <== in[i][j][k];
            }
        }
        out[k] <== max[k].out;
    }
}
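For reference, a plaintext NumPy sketch (illustrative, not part of the changed files) of the computation this template constrains:

import numpy as np

# Plaintext reference for GlobalMaxPooling2D: per-channel maximum over the
# two spatial dimensions, i.e. out[k] == max over (i, j) of in[i][j][k].
def global_max_pooling_2d(x: np.ndarray) -> np.ndarray:
    # x has shape (nRows, nCols, nChannels)
    return x.max(axis=(0, 1))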

View File

@@ -0,0 +1,22 @@
pragma circom 2.0.0;

include "./circomlib-matrix/matElemSum.circom";
include "./util.circom";

// GlobalSumPooling2D layer: equivalent to a GlobalAveragePooling2D layer up to a constant scaling factor, and better suited to circom because it avoids division
template GlobalSumPooling2D (nRows, nCols, nChannels) {
    signal input in[nRows][nCols][nChannels];
    signal output out[nChannels];

    component elemSum[nChannels];

    for (var k=0; k<nChannels; k++) {
        elemSum[k] = matElemSum(nRows,nCols);
        for (var i=0; i<nRows; i++) {
            for (var j=0; j<nCols; j++) {
                elemSum[k].a[i][j] <== in[i][j][k];
            }
        }
        out[k] <== elemSum[k].out;
    }
}
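A plaintext NumPy sketch (illustrative, not part of the changed files) of the same computation, and of the constant-scaling relationship to average pooling noted in the comment:

import numpy as np

# Plaintext reference for GlobalSumPooling2D: per-channel sum over the
# spatial dimensions.
def global_sum_pooling_2d(x: np.ndarray) -> np.ndarray:
    # x has shape (nRows, nCols, nChannels)
    return x.sum(axis=(0, 1))

# The relationship that lets the circuit avoid in-circuit division:
# global_sum_pooling_2d(x) == x.mean(axis=(0, 1)) * (nRows * nCols)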

View File

@@ -0,0 +1,197 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.layers import Input, GlobalAveragePooling2D\n",
"from tensorflow.keras import Model\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"inputs = Input(shape=(5,5,3))\n",
"out = GlobalAveragePooling2D()(inputs)\n",
"model = Model(inputs, out)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"model\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"input_1 (InputLayer) [(None, 5, 5, 3)] 0 \n",
"_________________________________________________________________\n",
"global_average_pooling2d (Gl (None, 3) 0 \n",
"=================================================================\n",
"Total params: 0\n",
"Trainable params: 0\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[[[0.75026257, 0.78298092, 0.96781344],\n",
" [0.32235377, 0.89150794, 0.04706537],\n",
" [0.91958291, 0.95572218, 0.01006833],\n",
" [0.60887321, 0.67452346, 0.9722854 ],\n",
" [0.47488548, 0.08846556, 0.15469522]],\n",
"\n",
" [[0.4964009 , 0.56350259, 0.86448218],\n",
" [0.12851276, 0.26571101, 0.11222685],\n",
" [0.15848715, 0.69473995, 0.45558278],\n",
" [0.36135735, 0.77453115, 0.94767797],\n",
" [0.54757355, 0.3529423 , 0.7502107 ]],\n",
"\n",
" [[0.23254084, 0.13915902, 0.62088772],\n",
" [0.91802735, 0.18125181, 0.82032438],\n",
" [0.08115132, 0.47008071, 0.11862867],\n",
" [0.93358649, 0.82824588, 0.84168659],\n",
" [0.62363021, 0.38914314, 0.13280334]],\n",
"\n",
" [[0.84657932, 0.80405209, 0.59412592],\n",
" [0.56584756, 0.10237339, 0.21217235],\n",
" [0.31526466, 0.21418521, 0.51236233],\n",
" [0.42648049, 0.01163492, 0.09296196],\n",
" [0.97516359, 0.45381077, 0.70935164]],\n",
"\n",
" [[0.68801577, 0.34860446, 0.25255818],\n",
" [0.15228667, 0.16675365, 0.25885748],\n",
" [0.46006891, 0.9028665 , 0.77014467],\n",
" [0.52331235, 0.54846645, 0.39861399],\n",
" [0.83559747, 0.48153349, 0.93005651]]]])"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X = np.random.rand(1,5,5,3)\n",
"X"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[0.53383374, 0.4834715 , 0.5019058 ]], dtype=float32)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"y = model.predict(X)\n",
"y"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"in_json = {\n",
" \"in\": (X*1000).round().astype(int).flatten().tolist()\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"out_json = {\n",
" \"out\": (y*1000*100).round().astype(int).flatten().tolist()\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"import json"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"with open(\"globalAveragePooling2D_input.json\", \"w\") as f:\n",
" json.dump(in_json, f)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"with open(\"globalAveragePooling2D_output.json\", \"w\") as f:\n",
" json.dump(out_json, f)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "tf24",
"language": "python",
"name": "tf24"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1 @@
{"in": [750, 783, 968, 322, 892, 47, 920, 956, 10, 609, 675, 972, 475, 88, 155, 496, 564, 864, 129, 266, 112, 158, 695, 456, 361, 775, 948, 548, 353, 750, 233, 139, 621, 918, 181, 820, 81, 470, 119, 934, 828, 842, 624, 389, 133, 847, 804, 594, 566, 102, 212, 315, 214, 512, 426, 12, 93, 975, 454, 709, 688, 349, 253, 152, 167, 259, 460, 903, 770, 523, 548, 399, 836, 482, 930]}

View File

@@ -0,0 +1 @@
{"out": [53383, 48347, 50191]}

View File

@@ -0,0 +1,197 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.layers import Input, GlobalMaxPooling2D\n",
"from tensorflow.keras import Model\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"inputs = Input(shape=(5,5,3))\n",
"x = GlobalMaxPooling2D()(inputs)\n",
"model = Model(inputs, x)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"model\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"input_1 (InputLayer) [(None, 5, 5, 3)] 0 \n",
"_________________________________________________________________\n",
"global_max_pooling2d (Global (None, 3) 0 \n",
"=================================================================\n",
"Total params: 0\n",
"Trainable params: 0\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
}
],
"source": [
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[[[0.32466035, 0.98252793, 0.44400143],\n",
" [0.84845853, 0.19442104, 0.59510471],\n",
" [0.44089874, 0.15830311, 0.72414619],\n",
" [0.67081125, 0.00734799, 0.67856931],\n",
" [0.55140412, 0.46201941, 0.6070781 ]],\n",
"\n",
" [[0.93564932, 0.20329226, 0.82119732],\n",
" [0.96698972, 0.93135353, 0.63006489],\n",
" [0.42358955, 0.30340362, 0.90993389],\n",
" [0.63976257, 0.1406262 , 0.22038059],\n",
" [0.94295376, 0.91868854, 0.28490443]],\n",
"\n",
" [[0.22116569, 0.23807312, 0.14799033],\n",
" [0.27190278, 0.89536995, 0.53043589],\n",
" [0.53038256, 0.69774341, 0.43229638],\n",
" [0.22129893, 0.45845914, 0.80878986],\n",
" [0.14265615, 0.21502123, 0.90049627]],\n",
"\n",
" [[0.05753169, 0.64384457, 0.21423554],\n",
" [0.21892986, 0.43545047, 0.30016867],\n",
" [0.45103494, 0.41946604, 0.15388892],\n",
" [0.83526323, 0.83552575, 0.38730236],\n",
" [0.67391823, 0.84635641, 0.41258421]],\n",
"\n",
" [[0.75021931, 0.77485261, 0.57189854],\n",
" [0.74505654, 0.9464458 , 0.73346162],\n",
" [0.41600983, 0.04725781, 0.36665437],\n",
" [0.71862184, 0.46074702, 0.12424663],\n",
" [0.61020934, 0.17278885, 0.94038123]]]])"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X = np.random.rand(1,5,5,3)\n",
"X"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[0.9669897, 0.9825279, 0.9403812]], dtype=float32)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"y = model.predict(X)\n",
"y"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"in_json = {\n",
" \"in\": (X*1000).round().astype(int).flatten().tolist()\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"out_json = {\n",
" \"out\": (y*1000).round().astype(int).flatten().tolist()\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"import json"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"with open(\"globalMaxPooling2D_input.json\", \"w\") as f:\n",
" json.dump(in_json, f)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"with open(\"globalMaxPooling2D_output.json\", \"w\") as f:\n",
" json.dump(out_json, f)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "tf24",
"language": "python",
"name": "tf24"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -0,0 +1 @@
{"in": [325, 983, 444, 848, 194, 595, 441, 158, 724, 671, 7, 679, 551, 462, 607, 936, 203, 821, 967, 931, 630, 424, 303, 910, 640, 141, 220, 943, 919, 285, 221, 238, 148, 272, 895, 530, 530, 698, 432, 221, 458, 809, 143, 215, 900, 58, 644, 214, 219, 435, 300, 451, 419, 154, 835, 836, 387, 674, 846, 413, 750, 775, 572, 745, 946, 733, 416, 47, 367, 719, 461, 124, 610, 173, 940]}

View File

@@ -0,0 +1 @@
{"out": [967, 983, 940]}

View File

@@ -0,0 +1,44 @@
const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;

const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);

const assert = chai.assert;

describe("GlobalAveragePooling2D layer test", function () {
    this.timeout(100000000);

    // GlobalAveragePooling2D over the full (5,5) spatial grid, per channel
    it("(5,5,3) -> (3,)", async () => {
        const json = require("../models/globalAveragePooling2D_input.json");
        const OUTPUT = require("../models/globalAveragePooling2D_output.json");
        const circuit = await wasm_tester(path.join(__dirname, "circuits", "GlobalAveragePooling2D_test.circom"));

        const INPUT = {
            "in": json.in
        };

        const witness = await circuit.calculateWitness(INPUT, true);

        assert(Fr.eq(Fr.e(witness[0]), Fr.e(1)));

        // fixed-point division loses precision, so compare against the expected
        // output with a mean-absolute-percentage-error tolerance instead of equality
        let ape = 0;
        for (var i = 0; i < OUTPUT.out.length; i++) {
            console.log("actual", OUTPUT.out[i], "predicted", Fr.toString(witness[i + 1]));
            ape += Math.abs((OUTPUT.out[i] - parseInt(Fr.toString(witness[i + 1]))) / OUTPUT.out[i]);
        }
        const mape = ape / OUTPUT.out.length;
        console.log("mean absolute % error", mape);
        assert(mape < 0.001);
    });
});

View File

@@ -0,0 +1,35 @@
const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;

const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);

const assert = chai.assert;

describe("GlobalMaxPooling2D layer test", function () {
    this.timeout(100000000);

    // GlobalMaxPooling2D over the full (5,5) spatial grid, per channel
    it("(5,5,3) -> (3,)", async () => {
        const json = require("../models/globalMaxPooling2D_input.json");
        const OUTPUT = require("../models/globalMaxPooling2D_output.json");
        const circuit = await wasm_tester(path.join(__dirname, "circuits", "GlobalMaxPooling2D_test.circom"));

        const INPUT = {
            "in": json.in
        };

        const witness = await circuit.calculateWitness(INPUT, true);

        assert(Fr.eq(Fr.e(witness[0]), Fr.e(1)));

        // max pooling involves no division, so the outputs must match exactly
        for (var i = 0; i < OUTPUT.out.length; i++) {
            assert(Fr.eq(Fr.e(OUTPUT.out[i]), witness[i + 1]));
        }
    });
});

View File

@@ -0,0 +1,5 @@
pragma circom 2.0.0;

include "../../circuits/GlobalAveragePooling2D.circom";

component main = GlobalAveragePooling2D(5, 5, 3, 4);

View File

@@ -0,0 +1,5 @@
pragma circom 2.0.0;

include "../../circuits/GlobalMaxPooling2D.circom";

component main = GlobalMaxPooling2D(5, 5, 3);