/*
Load and run a TensorFlow SavedModel with multiple inputs.

Compile with (adjust the include and library paths to your TensorFlow installation):

g++ -Wall -fPIC -D_GLIBCXX_USE_CXX11_ABI=1 run_model.cpp -o run_model \
    -I/usr/local/tensorflow-2.11/include/ -L/usr/local/tensorflow-2.11/lib \
    -ltensorflow_cc -ltensorflow_framework

Rangsiman Ketkaew
*/

#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include <tensorflow/cc/saved_model/loader.h>
#include <tensorflow/cc/saved_model/tag_constants.h>

using namespace tensorflow;

int main()
{
    // Path to the SavedModel directory (the folder containing saved_model.pb and variables/)
    std::string export_dir = "./model/";

    // Load the SavedModel with the "serve" tag
    SavedModelBundle model_bundle;
    SessionOptions session_options = SessionOptions();
    RunOptions run_options = RunOptions();
    Status status = LoadSavedModel(session_options, run_options, export_dir, {kSavedModelTagServe}, &model_bundle);

    if (!status.ok())
    {
        std::cerr << "Failed to load model: " << status << std::endl;
        return 1;
    }

    // Inspect the signatures of the loaded model
    auto sig_map = model_bundle.GetSignatures();
    auto model_def = sig_map.at("serving_default");

    // Print all signature keys
    for (auto const &p : sig_map)
    {
        std::cout << "key: " << p.first.c_str() << std::endl;
    }
    // Print the input tensor names of the serving signature
    for (auto const &p : model_def.inputs())
    {
        std::cout << "key: " << p.first.c_str() << " " << p.second.name().c_str() << std::endl;
    }
    // Print the output tensor names of the serving signature
    for (auto const &p : model_def.outputs())
    {
        std::cout << "key: " << p.first.c_str() << " " << p.second.name().c_str() << std::endl;
    }
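
    // Note: instead of hard-coding the tensor names below, they could also be looked up
    // from the signature at runtime. The signature keys used here ("args_0", "output_1")
    // are assumptions; use the keys actually printed by the loops above. A minimal sketch:
    // auto input_tensor_1_name = model_def.inputs().at("args_0").name();
    // auto output_tensor_1_name = model_def.outputs().at("output_1").name();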

    // Tensor names taken from the output of the loops above (or from saved_model_cli)
    auto input_tensor_1_name = "serving_default_args_0:0";
    auto input_tensor_2_name = "serving_default_args_0_1:0";
    auto output_tensor_1_name = "StatefulPartitionedCall:0";
    auto output_tensor_2_name = "StatefulPartitionedCall:1";

    // Create input tensors
    // Their shapes must exactly match the input shapes of the Python model;
    // use saved_model_cli to check them
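    // For example (adjust --dir to where your model is saved):
    //   saved_model_cli show --dir ./model --tag_set serve --signature_def serving_default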
    tensorflow::Tensor input_1(tensorflow::DT_FLOAT, tensorflow::TensorShape({1, 394}));
    tensorflow::Tensor input_2(tensorflow::DT_FLOAT, tensorflow::TensorShape({1, 99}));

    // Fill the inputs with dummy values
    auto input_mat_1 = input_1.matrix<float>();
    for (unsigned i = 0; i < 394; ++i)
        input_mat_1(0, i) = 1.0;
    auto input_mat_2 = input_2.matrix<float>();
    for (unsigned i = 0; i < 99; ++i)
        input_mat_2(0, i) = 2.0;

    // Map each input tensor name to its tensor (the "feed dict")
    typedef std::vector<std::pair<std::string, tensorflow::Tensor>> tensor_dict;
    tensor_dict feed_dict = {
        {input_tensor_1_name, input_1},
        {input_tensor_2_name, input_2}};

    // Output tensors to fetch and a vector to receive the results
    std::vector<tensorflow::Tensor> outputs;
    std::vector<std::string> output_names = {output_tensor_1_name, output_tensor_2_name};

    // Run inference
    tensorflow::Status status_run = model_bundle.session->Run(feed_dict,
                                                              output_names,
                                                              {},
                                                              &outputs);
    // Check whether inference succeeded
    if (!status_run.ok())
    {
        std::cerr << "Failed: " << status_run << std::endl;
        return 1;
    }
    else
    {
        std::cout << "Passed: " << status_run << std::endl;
    }

    std::cout << "input 1         " << input_1.DebugString() << std::endl;
    std::cout << "input 2         " << input_2.DebugString() << std::endl;
    std::cout << "output          " << outputs[0].DebugString() << std::endl;
    std::cout << "dense/kernel:0  " << outputs[1].DebugString() << std::endl;
    // std::cout << "dense/bias:0    " << outputs[2].DebugString() << std::endl;
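
    // The raw values can also be read from the fetched tensors via flat<T>().
    // This sketch assumes the first output is a float tensor; adjust the dtype to your model.
    // auto out_flat = outputs[0].flat<float>();
    // std::cout << "first value of output 0: " << out_flat(0) << std::endl;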

    return 0;
}