
Commit

Merge pull request #393 from Xilinx/feature/faster_shape_inf
Faster & smaller shape inference
maltanar authored Oct 13, 2021
2 parents 85a1f1c + fe8f876 commit f98cc21
Showing 18 changed files with 22 additions and 232 deletions.
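
Every diff below follows the same pattern: make_shape_compatible_op previously materialized a random numpy tensor of the full output shape and wrapped it in an ONNX Constant node, which made shape inference slow and bloated the intermediate model with output-sized payloads. The ops now delegate to make_const_shape_op(oshape) on the base class, which ships with the newer finn-base pinned in docker/Dockerfile.finn below. As orientation only, here is a minimal sketch of what such a helper presumably looks like (an assumption inferred from the LabelSelect_Batch change further down, not a copy of the finn-base implementation):

from onnx import TensorProto, helper

def make_const_shape_op(self, shape):
    # Assumed behaviour: emit a RandomNormal node whose only job is to carry
    # the output shape for ONNX shape inference, so no output-sized constant
    # tensor has to be created or serialized.
    return helper.make_node(
        "RandomNormal",
        inputs=[],
        outputs=[self.onnx_node.output[0]],
        mean=0.0,
        scale=1.0,
        dtype=TensorProto.FLOAT,
        shape=list(shape),
    )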
2 changes: 1 addition & 1 deletion docker/Dockerfile.finn
@@ -86,7 +86,7 @@ RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg

# git-based Python repo dependencies
# these are installed in editable mode for easier co-development
ARG FINN_BASE_COMMIT="1fdf06c068f77ed5a312cd3a6edad098f64b09ed"
ARG FINN_BASE_COMMIT="7c2603a95e90e4de2575020e575c24eab6a15889"
ARG FINN_EXP_COMMIT="f82c0d9868bb88ea045dfadb28508d327d287221"
ARG BREVITAS_COMMIT="462f86cdc60f9915baf13afd1676fb21da44c2ee"
ARG PYVERILATOR_COMMIT="0c3eb9343500fc1352a02c020a736c8c2db47e8e"
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/addstreams_batch.py
@@ -29,7 +29,6 @@
import numpy as np
import os
import warnings
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -84,19 +83,7 @@ def make_shape_compatible_op(self, model):
assert ishape == exp_ishape, "Unexpected input1 shape."
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[1]))
assert ishape == exp_ishape, "Unexpected input2 shape."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
14 changes: 1 addition & 13 deletions src/finn/custom_op/fpgadataflow/channelwise_op_batch.py
@@ -30,7 +30,6 @@
import os
import warnings
from math import ceil
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -125,18 +124,7 @@ def calc_tmem(self):
def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
14 changes: 1 addition & 13 deletions src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
@@ -29,7 +29,6 @@
import math
import numpy as np
import os
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -148,18 +147,7 @@ def make_shape_compatible_op(self, model):
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py
@@ -29,7 +29,6 @@
import math
import numpy as np
import os
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -137,19 +136,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/downsampler.py
@@ -1,7 +1,6 @@
import numpy as np
import os
import warnings
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -83,19 +82,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpect input shape for DownSampler."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/fmpadding_batch.py
@@ -1,7 +1,6 @@
import numpy as np
import os
import warnings
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -99,19 +98,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpect input shape for SameResize."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/globalaccpool_batch.py
@@ -29,7 +29,6 @@
import numpy as np
import os
import warnings
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -95,19 +94,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpected input shape."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten(),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/iodma.py
@@ -29,7 +29,6 @@
import math
import numpy as np
import warnings
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -146,19 +145,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpected input shape."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
14 changes: 5 additions & 9 deletions src/finn/custom_op/fpgadataflow/labelselect_batch.py
@@ -102,18 +102,14 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpected input shape."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.int64)
return helper.make_node(
"Constant",
"RandomNormal",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.INT64,
dims=values.shape,
vals=values.flatten(),
),
mean=0.0,
scale=1.0,
dtype=TensorProto.INT64,
shape=list(oshape),
)

def infer_node_datatype(self, model):
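LabelSelect_Batch is the one op in this set that does not delegate to the base-class helper: its output holds INT64 indices, so it builds the RandomNormal stand-in directly with dtype=TensorProto.INT64. A stand-in like this works because ONNX shape inference only reads the node's shape and dtype attributes. The standalone probe below, a hypothetical snippet that uses plain onnx and is not part of the FINN changes above, illustrates this (using FLOAT for simplicity):

import onnx
from onnx import TensorProto, helper

# One-node model whose output shape is known only through the RandomNormal
# node's shape attribute; no constant payload is stored anywhere.
node = helper.make_node(
    "RandomNormal",
    inputs=[],
    outputs=["out"],
    mean=0.0,
    scale=1.0,
    dtype=TensorProto.FLOAT,
    shape=[1, 10],
)
graph = helper.make_graph(
    [node],
    "shape_probe",
    inputs=[],
    outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, None)],
)
model = helper.make_model(graph)
inferred = onnx.shape_inference.infer_shapes(model)
# Expected to report dims 1 and 10, recovered purely from the shape attribute.
print(inferred.graph.output[0].type.tensor_type.shape)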
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/pool_batch.py
@@ -28,7 +28,6 @@

import numpy as np
import os
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -163,19 +162,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpected input shape for Pool_Batch."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
@@ -30,7 +30,6 @@
import numpy as np
import os
import warnings
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -165,19 +164,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingDWC."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -31,7 +31,6 @@
import os
import textwrap
import warnings
from onnx import TensorProto, helper

from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
@@ -151,19 +150,7 @@ def calc_tmem(self):

def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
15 changes: 1 addition & 14 deletions src/finn/custom_op/fpgadataflow/streamingfifo.py
@@ -30,7 +30,6 @@
import os
import subprocess
import warnings
from onnx import TensorProto, helper
from shutil import copy

from finn.core.datatype import DataType
@@ -78,19 +77,7 @@ def make_shape_compatible_op(self, model):
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingFIFO."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
return super().make_const_shape_op(oshape)

def infer_node_datatype(self, model):
node = self.onnx_node
(Diffs for the remaining changed files are not shown here.)
