export.h
#pragma once

#include "torch/csrc/jit/ir.h"
#include "torch/csrc/jit/script/module.h"
#include "torch/csrc/onnx/onnx.h"

#include <memory>
#include <ostream>
#include <string>
#include <tuple>
#include <unordered_map>
#include <vector>
namespace torch { namespace jit {
// This map is used to keep track of parameters that should be exported
// externally. When `defer_weight_export` is true, the returned map contains
// kv pairs that map {external reference name} -> {at::Tensor to be exported}.
// It is the responsibility of the caller to export these appropriately.
//
// For example, when exporting to a zip archive, the caller may write out files
// for each entry in the export map, with the filename being the key and the
// file contents being the raw tensor data.
using RawDataExportMap = std::unordered_map<std::string, at::Tensor>;
TORCH_API std::tuple<std::string, RawDataExportMap> ExportGraph(
    const std::shared_ptr<Graph>& graph,
    const std::vector<at::Tensor>& initializers,
    int64_t onnx_opset_version,
    bool defer_weight_export = false,
    ::torch::onnx::OperatorExportTypes operator_export_type =
        ::torch::onnx::OperatorExportTypes::ONNX);
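
// Illustrative usage sketch (not part of the original header): when
// `defer_weight_export` is true, the caller must write out each entry of the
// returned RawDataExportMap itself. The "weights/" directory, the opset
// version, and the byte-size expression below are assumptions made for
// illustration only (requires <fstream>).
//
//   std::string onnx_proto;
//   RawDataExportMap export_map;
//   std::tie(onnx_proto, export_map) = ExportGraph(
//       graph, initializers, /*onnx_opset_version=*/9,
//       /*defer_weight_export=*/true);
//   for (const auto& kv : export_map) {
//     // Write one file per tensor, named by its external reference name.
//     const at::Tensor tensor = kv.second.contiguous();
//     std::ofstream file("weights/" + kv.first, std::ios::binary);
//     file.write(static_cast<const char*>(tensor.data_ptr()),
//                tensor.numel() * tensor.type().elementSizeInBytes());
//   }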
// For testing purposes
TORCH_API std::string PrettyPrintExportedGraph(
    const std::shared_ptr<Graph>& graph,
    const std::vector<at::Tensor>& initializers,
    int64_t onnx_opset_version,
    bool defer_weight_export,
    ::torch::onnx::OperatorExportTypes operator_export_type =
        ::torch::onnx::OperatorExportTypes::ONNX,
    bool google_printer = false);
// Serializes a script::Module, either to an output stream or to a file on
// disk.
TORCH_API void ExportModule(
    const script::Module& module,
    std::ostream& out);

TORCH_API void ExportModule(
    const script::Module& module,
    const std::string& filename);
}} // namespace torch::jit