How to convert the model to onnx? #24

Open

xiaocode opened this issue May 7, 2024 · 5 comments

Comments

@xiaocode commented May 7, 2024

from argparse import Namespace
from models.psp import pSp
import torch.nn as nn
import torch
import onnx

#Function to Convert to ONNX 
def Convert_ONNX(): 
    device = "cuda" if torch.cuda.is_available() else "cpu"
    ckpt_path = 'pretrained_models/styleganex_toonify_pixar.pt'
    ckpt = torch.load(ckpt_path, map_location='cpu')
    opts = ckpt['opts']
    opts['checkpoint_path'] = ckpt_path
    opts['device'] = device
    opts = Namespace(**opts)
    torch_model = pSp(opts)
    torch_model.cpu()

    output_onnx = "styleganex_toonify_pixar.onnx"

    # set the model to inference mode 
    torch_model.eval() 

    # The exported model will accept inputs of size [batch_size, 3, 224, 224], where batch_size, height and width can be variable (see dynamic_axes below).
    batch_size = 1 
    # Let's create a dummy input tensor
    channel = 3
    height = 224
    width = 224
    torch_input = torch.randn(batch_size, channel, height, width, requires_grad=True)

    dynamic_axes= {
        'input0': {0: 'batch', 2: 'height', 3: 'width'},
        'output0': {0: 'batch', 2: 'height', 3: 'width'}
    }

    # Export the model
    # """ 
    torch.onnx.export(
         torch_model,         # model being run 
         torch_input,       # model input (or a tuple for multiple inputs) 
         output_onnx,       # where to save the model  
         export_params=True,  # store the trained parameter weights inside the model file 
         opset_version=15,    # the ONNX version to export the model to 
         # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
         do_constant_folding=True,  # whether to execute constant folding for optimization
         input_names = ['input0'],   # the model's input names 
         output_names = ['output0'], # the model's output names 
         dynamic_axes = dynamic_axes)
    # """
    
    print(" ") 
    print('Model has been converted to ONNX')

    # Checks
    onnx_model = onnx.load(output_onnx)  # load onnx model
    onnx.checker.check_model(onnx_model)  # check onnx model

    print('ONNX export success, saved as %s' % output_onnx)


def main():
    Convert_ONNX()

if __name__ == "__main__":
    main()

When I run this code, it shows the error below:

torch.onnx.errors.SymbolicValueError: Unsupported: ONNX export of convolution for kernel of unknown shape. [Caused by the value '1865 defined in (%1865 : Float(*, *, *, *, strides=[401408, 784, 28, 1], requires_grad=1, device=cpu) = onnx::Reshape[allowzero=0](%1803, %1864), scope: models.psp.pSp::/models.stylegan2.model.Generator::decoder/models.stylegan2.model.StyledConv::conv1/models.stylegan2.model.ModulatedConv2d::conv # /home/yxy/github/StyleGANEX/models/stylegan2/model.py:297:0 )' (type 'Tensor') in the TorchScript graph. The containing node has kind 'onnx::Reshape'.]

I have searched some related docs, and they say that we cannot use dynamic shapes when converting to ONNX, but the PyTorch documentation doesn't mention this.
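
From what I can tell, the failure comes from StyleGAN2's ModulatedConv2d: it reshapes the per-sample modulated weight at runtime and folds the batch dimension into the convolution groups, so once height/width/batch are marked as dynamic the exporter only sees a kernel of unknown shape. A minimal workaround sketch, assuming a fixed 1x3x224x224 input is acceptable (this just drops dynamic_axes from the script above; the output file name is new, and it may still not be enough for this particular model):

# Sketch: export with fully static shapes so the traced reshape targets
# become constants. The resulting graph accepts exactly 1x3x224x224 inputs.
static_input = torch.randn(1, 3, 224, 224)
torch.onnx.export(
    torch_model,
    static_input,
    "styleganex_toonify_pixar_static.onnx",
    export_params=True,
    opset_version=15,
    do_constant_folding=True,
    input_names=['input0'],
    output_names=['output0'])   # note: no dynamic_axes argument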

@williamyang1991 (Owner)

I don't know ONNX, and I'm afraid I can't help you.

@xiaocode (Author) commented May 7, 2024

Can you take a look at the Netron result and check whether it matches your model?
[attached Netron screenshot: styleganex_toonify_pixar.onnx]
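
For reference, besides eyeballing the Netron graph, a numerical sanity check could look like the sketch below. This assumes the export actually produced a file, that onnxruntime is installed, that torch_model is the pSp model built as in the script above, and that its forward takes a single image tensor and returns a single tensor:

import numpy as np
import onnxruntime as ort
import torch

# Compare the exported graph against the original torch model on one input.
x = torch.randn(1, 3, 224, 224)
sess = ort.InferenceSession("styleganex_toonify_pixar.onnx")
onnx_out = sess.run(None, {'input0': x.numpy()})[0]
with torch.no_grad():
    torch_out = torch_model(x)   # torch_model as built in the script above
print('max abs diff:', np.abs(onnx_out - torch_out.numpy()).max())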

@Dratlan commented Jul 11, 2024

(quoting xiaocode's original post and error message above)

Hey, have you finished it yet?

@xiaocode (Author) commented Jul 11, 2024

@Dratlan Not yet... I'll try again another day.

@Dratlan commented Jul 11, 2024

we cannot use dynamic shapes when converting to ONNX

All right. But in YOLOvX (where X is the version number), dynamic shapes do work when converting to ONNX, so maybe we can learn from how it does the export.
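
For comparison, the YOLO-style exports only need symbolic batch/height/width dimensions on a plain feed-forward CNN whose weight shapes are fixed, so dynamic_axes works there. A typical pattern (the model name and shapes here are just placeholders, and the opset is arbitrary) looks roughly like this:

# Typical dynamic export for a plain CNN detector: all weights have fixed
# shapes, so only the input/output dimensions need to be symbolic.
torch.onnx.export(
    detector_model,                     # placeholder: any ordinary CNN
    torch.zeros(1, 3, 640, 640),
    "detector_dynamic.onnx",
    opset_version=12,
    input_names=['images'],
    output_names=['output'],
    dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},
                  'output': {0: 'batch'}})

The StyleGANEX decoder is different because ModulatedConv2d builds its kernel from the style vector at runtime, which is why the same trick hits the "kernel of unknown shape" error here.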
