From b69a759618c8c758545d9ef8fc6d67634d838665 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 11 Oct 2021 15:27:19 +0200 Subject: [PATCH 1/5] [Deps] update finn-base to get faster shape inference base ops --- docker/Dockerfile.finn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index c7c418482d..802292cac3 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -86,7 +86,7 @@ RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg # git-based Python repo dependencies # these are installed in editable mode for easier co-development -ARG FINN_BASE_COMMIT="d38426634f1cfbc5432a0e52c3f65c07fad12aa4" +ARG FINN_BASE_COMMIT="9e8fc02683d272cc14309eeebcf14293dc5e4edd" ARG FINN_EXP_COMMIT="f82c0d9868bb88ea045dfadb28508d327d287221" ARG BREVITAS_COMMIT="462f86cdc60f9915baf13afd1676fb21da44c2ee" ARG PYVERILATOR_COMMIT="0c3eb9343500fc1352a02c020a736c8c2db47e8e" From 1663022fb8508cf5fe4cf5b988b1361676167033 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 11 Oct 2021 15:41:28 +0200 Subject: [PATCH 2/5] [Refactor] use RandomNormal for faster/more compact shape inference --- src/finn/custom_op/fpgadataflow/addstreams_batch.py | 13 +++---------- .../custom_op/fpgadataflow/channelwise_op_batch.py | 12 +++--------- .../fpgadataflow/convolutioninputgenerator.py | 12 +++--------- .../fpgadataflow/convolutioninputgenerator1d.py | 13 +++---------- src/finn/custom_op/fpgadataflow/downsampler.py | 13 +++---------- src/finn/custom_op/fpgadataflow/fmpadding_batch.py | 13 +++---------- .../custom_op/fpgadataflow/globalaccpool_batch.py | 13 +++---------- src/finn/custom_op/fpgadataflow/iodma.py | 13 +++---------- .../custom_op/fpgadataflow/labelselect_batch.py | 12 +++--------- src/finn/custom_op/fpgadataflow/pool_batch.py | 13 +++---------- .../streamingdatawidthconverter_batch.py | 13 +++---------- .../fpgadataflow/streamingfclayer_batch.py | 13 +++---------- src/finn/custom_op/fpgadataflow/streamingfifo.py | 13 +++---------- .../fpgadataflow/streamingmaxpool_batch.py | 13 +++---------- .../custom_op/fpgadataflow/thresholding_batch.py | 13 +++---------- src/finn/custom_op/fpgadataflow/upsampler.py | 13 +++---------- .../fpgadataflow/vector_vector_activate_batch.py | 13 +++---------- 17 files changed, 51 insertions(+), 167 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index 856f84fae0..aaf8687de1 100644 --- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -29,7 +29,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -84,18 +84,11 @@ def make_shape_compatible_op(self, model): assert ishape == exp_ishape, "Unexpected input1 shape." ishape = tuple(model.get_tensor_shape(self.onnx_node.input[1])) assert ishape == exp_ishape, "Unexpected input2 shape." 
- # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index 073d6620ac..b274bb8dc1 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -30,7 +30,7 @@ import os import warnings from math import ceil -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -125,17 +125,11 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index 9ec7bc662d..8566ee035d 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -29,7 +29,7 @@ import math import numpy as np import os -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -148,17 +148,11 @@ def make_shape_compatible_op(self, model): ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen." # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py index b428210acf..c76cb47d2f 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py @@ -29,7 +29,7 @@ import math import numpy as np import os -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -137,18 +137,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen." 
- # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index 2313ab28b4..de251a8486 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -1,7 +1,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -83,18 +83,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for DownSampler." - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index ca0b2f12ab..ec2a2b690f 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -1,7 +1,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -99,18 +99,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for SameResize." - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py index eabdcf599d..d6f860db15 100644 --- a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py @@ -29,7 +29,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -95,18 +95,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpected input shape." 
- # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten(), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py index 4fa74e35db..34842181c3 100644 --- a/src/finn/custom_op/fpgadataflow/iodma.py +++ b/src/finn/custom_op/fpgadataflow/iodma.py @@ -29,7 +29,7 @@ import math import numpy as np import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -146,18 +146,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpected input shape." - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/labelselect_batch.py b/src/finn/custom_op/fpgadataflow/labelselect_batch.py index d70d0f6a9b..4d34de8fcc 100644 --- a/src/finn/custom_op/fpgadataflow/labelselect_batch.py +++ b/src/finn/custom_op/fpgadataflow/labelselect_batch.py @@ -102,18 +102,12 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpected input shape." - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.int64) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.INT64, - dims=values.shape, - vals=values.flatten(), - ), + shape=list(oshape), + dtype=TensorProto.INT64, ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py index cef964acd5..5bcc653597 100644 --- a/src/finn/custom_op/fpgadataflow/pool_batch.py +++ b/src/finn/custom_op/fpgadataflow/pool_batch.py @@ -28,7 +28,7 @@ import numpy as np import os -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -163,18 +163,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpected input shape for Pool_Batch." 
- # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index 67e3cd3654..aab87be52f 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -30,7 +30,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -165,18 +165,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingDWC." - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py index 96594d4413..2f1097d712 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py @@ -31,7 +31,7 @@ import os import textwrap import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -151,18 +151,11 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index 9653d698f5..ce09d5565a 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -30,7 +30,7 @@ import os import subprocess import warnings -from onnx import TensorProto, helper +from onnx import helper from shutil import copy from finn.core.datatype import DataType @@ -78,18 +78,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingFIFO." 
- # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py index 19a42fe2d6..c0641ffb74 100644 --- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py @@ -29,7 +29,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -140,18 +140,11 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for StreamingMaxPool." - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 7fb7634dc2..df562f5450 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -31,7 +31,7 @@ import textwrap import warnings from math import ceil, log2 -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -112,18 +112,11 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index e8aa09b1c0..fcdf9d7d2c 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -1,7 +1,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -69,18 +69,11 @@ def make_shape_compatible_op(self, model): assert ( ishape == exp_ishape ), "Unexpect input shape for UpsampleNearestNeighbour_Batch." 
- # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py index 9fc133b9bc..d9e7e566e6 100644 --- a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py +++ b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py @@ -2,7 +2,7 @@ import numpy as np import os import warnings -from onnx import TensorProto, helper +from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -129,18 +129,11 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() - # implement tensor with correct shape - values = np.random.randn(*oshape).astype(np.float32) return helper.make_node( - "Constant", + "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - value=helper.make_tensor( - name="const_tensor", - data_type=TensorProto.FLOAT, - dims=values.shape, - vals=values.flatten().astype(float), - ), + shape=list(oshape), ) def infer_node_datatype(self, model): From f78200b9fc66a2687e4b0c75993d31a2fa78cac2 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 12 Oct 2021 22:17:47 +0200 Subject: [PATCH 3/5] [Deps] update finn-base --- docker/Dockerfile.finn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 802292cac3..10393fac4b 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -86,7 +86,7 @@ RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg # git-based Python repo dependencies # these are installed in editable mode for easier co-development -ARG FINN_BASE_COMMIT="9e8fc02683d272cc14309eeebcf14293dc5e4edd" +ARG FINN_BASE_COMMIT="db444c1936300f5594499f624c9b4ff1f287492b" ARG FINN_EXP_COMMIT="f82c0d9868bb88ea045dfadb28508d327d287221" ARG BREVITAS_COMMIT="462f86cdc60f9915baf13afd1676fb21da44c2ee" ARG PYVERILATOR_COMMIT="0c3eb9343500fc1352a02c020a736c8c2db47e8e" From 2e04199971dd13177b4019123ed69eb0aad7fdc1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 13 Oct 2021 10:35:29 +0200 Subject: [PATCH 4/5] [Deps] update finn-base --- docker/Dockerfile.finn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 10393fac4b..1572ba2872 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -86,7 +86,7 @@ RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg # git-based Python repo dependencies # these are installed in editable mode for easier co-development -ARG FINN_BASE_COMMIT="db444c1936300f5594499f624c9b4ff1f287492b" +ARG FINN_BASE_COMMIT="7c2603a95e90e4de2575020e575c24eab6a15889" ARG FINN_EXP_COMMIT="f82c0d9868bb88ea045dfadb28508d327d287221" ARG BREVITAS_COMMIT="462f86cdc60f9915baf13afd1676fb21da44c2ee" ARG PYVERILATOR_COMMIT="0c3eb9343500fc1352a02c020a736c8c2db47e8e" From fe8f8768cc9211bb0709b024ec517566b487ba45 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 13 Oct 2021 10:37:31 +0200 Subject: [PATCH 5/5] [Refactor] switch to CustomOp base-class impl 
for shape inf --- src/finn/custom_op/fpgadataflow/addstreams_batch.py | 8 +------- src/finn/custom_op/fpgadataflow/channelwise_op_batch.py | 8 +------- .../custom_op/fpgadataflow/convolutioninputgenerator.py | 8 +------- .../custom_op/fpgadataflow/convolutioninputgenerator1d.py | 8 +------- src/finn/custom_op/fpgadataflow/downsampler.py | 8 +------- src/finn/custom_op/fpgadataflow/fmpadding_batch.py | 8 +------- src/finn/custom_op/fpgadataflow/globalaccpool_batch.py | 8 +------- src/finn/custom_op/fpgadataflow/iodma.py | 8 +------- src/finn/custom_op/fpgadataflow/labelselect_batch.py | 4 +++- src/finn/custom_op/fpgadataflow/pool_batch.py | 8 +------- .../fpgadataflow/streamingdatawidthconverter_batch.py | 8 +------- src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py | 8 +------- src/finn/custom_op/fpgadataflow/streamingfifo.py | 8 +------- src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py | 8 +------- src/finn/custom_op/fpgadataflow/thresholding_batch.py | 8 +------- src/finn/custom_op/fpgadataflow/upsampler.py | 8 +------- .../fpgadataflow/vector_vector_activate_batch.py | 8 +------- 17 files changed, 19 insertions(+), 113 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index aaf8687de1..fa80e47485 100644 --- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -29,7 +29,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -84,12 +83,7 @@ def make_shape_compatible_op(self, model): assert ishape == exp_ishape, "Unexpected input1 shape." ishape = tuple(model.get_tensor_shape(self.onnx_node.input[1])) assert ishape == exp_ishape, "Unexpected input2 shape." 
- return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index 083ee894f4..4961f61482 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -30,7 +30,6 @@ import os import warnings from math import ceil -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -125,12 +124,7 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() # implement tensor with correct shape - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index def0b20b08..a401883684 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -29,7 +29,6 @@ import math import numpy as np import os -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -148,12 +147,7 @@ def make_shape_compatible_op(self, model): ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen." # implement tensor with correct shape - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py index 6a81b91770..c4cf804126 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py @@ -29,7 +29,6 @@ import math import numpy as np import os -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -137,12 +136,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen." 
- return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index 9dcbb5e144..6a0667f67d 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -1,7 +1,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -83,12 +82,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for DownSampler." - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index dd76195669..f29ea431ff 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -1,7 +1,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -99,12 +98,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for SameResize." - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py index d6f860db15..6d4a55ee5c 100644 --- a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py @@ -29,7 +29,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -95,12 +94,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpected input shape." 
- return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py index 34842181c3..802c7e7851 100644 --- a/src/finn/custom_op/fpgadataflow/iodma.py +++ b/src/finn/custom_op/fpgadataflow/iodma.py @@ -29,7 +29,6 @@ import math import numpy as np import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -146,12 +145,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpected input shape." - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/labelselect_batch.py b/src/finn/custom_op/fpgadataflow/labelselect_batch.py index 4d34de8fcc..1eb5962fdb 100644 --- a/src/finn/custom_op/fpgadataflow/labelselect_batch.py +++ b/src/finn/custom_op/fpgadataflow/labelselect_batch.py @@ -106,8 +106,10 @@ def make_shape_compatible_op(self, model): "RandomNormal", inputs=[], outputs=[self.onnx_node.output[0]], - shape=list(oshape), + mean=0.0, + scale=1.0, dtype=TensorProto.INT64, + shape=list(oshape), ) def infer_node_datatype(self, model): diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py index 332b400c0d..ba8a446f2c 100644 --- a/src/finn/custom_op/fpgadataflow/pool_batch.py +++ b/src/finn/custom_op/fpgadataflow/pool_batch.py @@ -28,7 +28,6 @@ import numpy as np import os -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -163,12 +162,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpected input shape for Pool_Batch." - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index aaf796b15b..1791706afa 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -30,7 +30,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -165,12 +164,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingDWC." 
- return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py index ccc9780ff4..90abb66e66 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py @@ -31,7 +31,6 @@ import os import textwrap import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -151,12 +150,7 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index 02d9a1915c..91f6ed5b8d 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -30,7 +30,6 @@ import os import subprocess import warnings -from onnx import helper from shutil import copy from finn.core.datatype import DataType @@ -78,12 +77,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingFIFO." - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py index 5574d67066..1e66a5c204 100644 --- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py @@ -29,7 +29,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -140,12 +139,7 @@ def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) assert ishape == exp_ishape, "Unexpect input shape for StreamingMaxPool." 
- return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index d5fa86abdd..610139f44e 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -31,7 +31,6 @@ import textwrap import warnings from math import ceil, log2 -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -112,12 +111,7 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index 60052b46a1..d5f809305b 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -1,7 +1,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -69,12 +68,7 @@ def make_shape_compatible_op(self, model): assert ( ishape == exp_ishape ), "Unexpect input shape for UpsampleNearestNeighbour_Batch." - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node diff --git a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py index 0ad03f37cd..c67eb0f21b 100644 --- a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py +++ b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py @@ -2,7 +2,6 @@ import numpy as np import os import warnings -from onnx import helper from finn.core.datatype import DataType from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp @@ -129,12 +128,7 @@ def calc_tmem(self): def make_shape_compatible_op(self, model): oshape = self.get_normal_output_shape() - return helper.make_node( - "RandomNormal", - inputs=[], - outputs=[self.onnx_node.output[0]], - shape=list(oshape), - ) + return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): node = self.onnx_node
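
The core change in PATCH 2/5 is visible in every make_shape_compatible_op hunk above: the Constant node that carried a full random tensor is replaced by a RandomNormal node that only carries a shape attribute. A minimal standalone sketch of the two variants follows (the function names are illustrative, not part of the patches):

    import numpy as np
    from onnx import TensorProto, helper

    def make_shape_node_via_constant(output_name, oshape):
        # Pre-patch pattern: embed a full random tensor in a Constant node.
        # Protobuf size and construction time grow with the number of elements.
        values = np.random.randn(*oshape).astype(np.float32)
        return helper.make_node(
            "Constant",
            inputs=[],
            outputs=[output_name],
            value=helper.make_tensor(
                name="const_tensor",
                data_type=TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        )

    def make_shape_node_via_randomnormal(output_name, oshape):
        # PATCH 2/5 pattern: the node only stores the output shape, so the
        # stand-in graph used for ONNX shape inference stays small and cheap
        # to build regardless of feature-map size.
        return helper.make_node(
            "RandomNormal",
            inputs=[],
            outputs=[output_name],
            shape=list(oshape),
        )

Because the stand-in node no longer embeds one value per output element, shape inference over models with large feature maps becomes noticeably faster, which is what the finn-base bump in PATCH 1/5 targets.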
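
PATCH 5/5 then removes the duplicated RandomNormal construction from each fpgadataflow op and delegates to make_const_shape_op on the CustomOp base class, which lives in finn-base (the commit bumped in PATCH 4/5) and is not shown in this series. The sketch below is inferred from the call sites above and from the PATCH 2/5 pattern; treat the class name, exact signature, and attribute defaults as assumptions:

    from onnx import TensorProto, helper

    class CustomOpSketch:
        """Illustrative stand-in for finn-base's CustomOp; only the helper
        used by PATCH 5/5 is sketched here."""

        def __init__(self, onnx_node):
            self.onnx_node = onnx_node

        def make_const_shape_op(self, shape):
            # Presumed behaviour: emit a shape-only RandomNormal node that
            # drives this node's first output, so downstream shape inference
            # can propagate the output shape without real data.
            return helper.make_node(
                "RandomNormal",
                inputs=[],
                outputs=[self.onnx_node.output[0]],
                mean=0.0,
                scale=1.0,
                dtype=TensorProto.FLOAT,
                shape=list(shape),
            )

LabelSelect_Batch is the one op that keeps an inline helper.make_node call in PATCH 5/5, because its output must be declared as TensorProto.INT64 rather than the float default the shared helper would produce.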