6 | 6 | #include "NvInfer.h"
7 | 7 |
8 | 8 | #include "ATen/core/function_schema.h" |
| 9 | +#include "ATen/core/jit_type.h" |
9 | 10 |
| 11 | +#include "torch/custom_class.h" |
10 | 12 | #include "torch/csrc/jit/frontend/function_schema_parser.h" |
11 | 13 | #include "torch/csrc/jit/ir/ir.h" |
12 | 14 | #include "torch/csrc/jit/passes/pass_manager.h" |
@@ -40,32 +42,70 @@ c10::FunctionSchema GenerateGraphSchema(torch::jit::script::Module mod, std::str
40 | 42 |
41 | 43 |
42 | 44 | void AddEngineToGraph(torch::jit::script::Module mod, std::shared_ptr<torch::jit::Graph>& g, std::string& serialized_engine) { |
43 | | - execution::EngineID uid = execution::RegisterEngineFromSerializedEngine(serialized_engine); |
44 | | - auto num_io = execution::GetEngineIO(uid); |
45 | | - |
46 | | - auto self = g->addInput("self.1"); |
| 45 | + auto engine = execution::TRTEngine(mod._ivalue()->name(), serialized_engine); |
| 46 | + // Pull the required metadata out of the engine
| 47 | + auto num_io = engine.num_io; |
| 48 | + auto name = engine.name; |
| 49 | + |
| 50 | + // Add the engine as an attribute of the module so that it can be serialized and deserialized
| 51 | + auto engine_ptr = c10::make_intrusive<execution::TRTEngine>(engine); |
| 52 | + mod.register_attribute( |
| 53 | + name, |
| 54 | + c10::getCustomClassType<c10::intrusive_ptr<execution::TRTEngine>>(), |
| 55 | + c10::IValue(std::move(engine_ptr)), |
| 56 | + false |
| 57 | + ); |
| 58 | + |
| 59 | + // Add the module itself as an input to the graph
| 60 | + auto self = g->addInput("self_1"); |
47 | 61 | self->setType(mod.type()); |
48 | 62 |
49 | | - auto id_val = g->insertConstant(uid); |
| 63 | + // Start by retrieving the engine from the module's attribute list
| 64 | + auto engine_node = g->createGetAttr(self, name); |
| 65 | + g->block()->appendNode(engine_node); |
50 | 66 |
| 67 | + // Add inputs to the graph corresponding to the number of input tensors expected by the engine |
| 68 | + // Also store those inputs in a vector so that they can be coalesced into a single list at runtime |
51 | 69 | std::vector<torch::jit::Value*> engine_inputs; |
52 | | - engine_inputs.push_back(id_val); |
53 | | - |
54 | 70 | for (uint64_t i = 0; i < num_io.first; i++) { |
55 | | - auto in_val = g->addInput(""); |
| 71 | + auto in_val = g->addInput(std::string("input_") + std::to_string(i)); |
56 | 72 | in_val->setType(c10::TensorType::get()); |
57 | 73 | engine_inputs.push_back(in_val); |
58 | 74 | } |
59 | 75 |
60 | | - auto engine_node = g->create(c10::Symbol::fromQualString("trt::execute_engine"), torch::jit::ArrayRef<torch::jit::Value*>(engine_inputs), num_io.second); |
61 | | - g->block()->appendNode(engine_node); |
62 | | - |
63 | | - if (engine_node->outputs().size() > 1) { |
64 | | - auto return_tuple_node = g->createTuple(engine_node->outputs()); |
| 76 | + // Create a node that will merge all of the input tensors into a single list argument to the trt::execute_engine op |
| 77 | + // Creates: prim::ListConstruct(<input tensors>) |
| 78 | + auto input_list_node = g->createList(c10::TensorType::get(), torch::jit::ArrayRef<torch::jit::Value*>(engine_inputs)); |
| 79 | + g->block()->appendNode(input_list_node); |
| 80 | + |
| 81 | + // Make a list of inputs to the actual trt::execute_engine op |
| 82 | + // Note: The input list comes before the engine because at runtime the engine, which carries all the metadata
| 83 | + // needed for execution, can be popped off the stack first
| 84 | + std::vector<torch::jit::Value*> execute_node_inputs; |
| 85 | + execute_node_inputs.push_back(input_list_node->outputs()[0]); |
| 86 | + execute_node_inputs.push_back(engine_node->outputs()[0]); |
| 87 | + |
| 88 | + // Create the actual execution node trt::execute_engine using the assembled inputs |
| 89 | + auto execute_node = g->create(c10::Symbol::fromQualString("trt::execute_engine"), torch::jit::ArrayRef<torch::jit::Value*>(execute_node_inputs), 1); |
| 90 | + g->block()->appendNode(execute_node); |
| 91 | + execute_node->outputs()[0]->setType(c10::ListType::ofTensors()); |
| 92 | + |
| 93 | + // Create a node to unpack the list into separate tensors. If there is only one tensor it is returned directly,
| 94 | + // otherwise the tensors are returned as a tuple.
| 95 | + // Creates: prim::ListUnpack(<engine output>) |
| 96 | + auto unpack_node = g->createListUnpack(execute_node->outputs()[0], num_io.second); |
| 97 | + g->block()->appendNode(unpack_node); |
| 98 | + |
| 99 | + // If there are multiple output tensors from TensorRT, wrap them in a tuple before returning
| 100 | + if (unpack_node->outputs().size() > 1) { |
| 101 | + // Creates: prim::TupleConstruct(<output tensors>) using the outputs of the unpack node
| 102 | + auto return_tuple_node = g->createTuple(unpack_node->outputs()); |
65 | 103 | g->block()->appendNode(return_tuple_node); |
| 104 | + // Set the output as the produced tuple |
66 | 105 | g->registerOutput(return_tuple_node->outputs()[0]); |
67 | 106 | } else { |
68 | | - g->registerOutput(engine_node->outputs()[0]); |
| 107 | + // Set the output as the sole output tensor |
| 108 | + g->registerOutput(unpack_node->outputs()[0]); |
69 | 109 | } |
70 | 110 |
71 | 111 | LOG_DEBUG(*g << "(AddEngineToGraph)\n"); |
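For the `register_attribute` call above to resolve, `execution::TRTEngine` has to be registered as a TorchScript custom class: `c10::getCustomClassType` only knows about classes registered through `torch::class_`, and round-tripping the attribute through module save/load needs pickle hooks. Below is a minimal sketch of what that registration might look like, assuming `TRTEngine` derives from `torch::CustomClassHolder`, exposes the two-string `(name, serialized_engine)` constructor used in `AddEngineToGraph`, and has a hypothetical `serialized_engine()` accessor; the namespace/class name pair is illustrative, and the real registration lives elsewhere in the execution module, not in this diff:

```cpp
#include <string>

#include "torch/custom_class.h"

namespace execution {

// Sketch only: register TRTEngine under torch.classes.tensorrt.Engine so
// that getCustomClassType<> can resolve it and serialization round-trips it.
static auto trt_engine_class =
    torch::class_<TRTEngine>("tensorrt", "Engine")
        // Assumed (name, serialized_engine) constructor, matching the
        // call site in AddEngineToGraph
        .def(torch::init<std::string, std::string>())
        .def_pickle(
            // __getstate__: persist the serialized engine bytes
            // (serialized_engine() is a hypothetical accessor)
            [](const c10::intrusive_ptr<TRTEngine>& self) -> std::string {
              return self->serialized_engine();
            },
            // __setstate__: rebuild the engine from those bytes; the name
            // passed here is a placeholder
            [](std::string state) -> c10::intrusive_ptr<TRTEngine> {
              return c10::make_intrusive<TRTEngine>("trt_engine", std::move(state));
            });

} // namespace execution
```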
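After `AddEngineToGraph` runs, the wrapper graph reduces to four steps: fetch the engine attribute, pack the inputs into a list, call `trt::execute_engine`, and unpack the results. For a hypothetical engine with two inputs and two outputs, the `LOG_DEBUG` dump at the end of the function would look roughly like the following (value numbering, the module type, and the attribute name will differ in practice):

```
graph(%self_1 : __torch__.SomeModule,
      %input_0 : Tensor,
      %input_1 : Tensor):
  %1 : __torch__.torch.classes.tensorrt.Engine = prim::GetAttr[name="..."](%self_1)
  %2 : Tensor[] = prim::ListConstruct(%input_0, %input_1)
  %3 : Tensor[] = trt::execute_engine(%2, %1)
  %4 : Tensor, %5 : Tensor = prim::ListUnpack(%3)
  %6 : (Tensor, Tensor) = prim::TupleConstruct(%4, %5)
  return (%6)
```

With a single output, `prim::ListUnpack` would produce one tensor, which would be registered as the graph output directly with no `prim::TupleConstruct`.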