Published: 2024-04-02 15:01
The process takes four steps: export the trained model to TorchScript in Python, point the Visual Studio project at the LibTorch headers and libraries, write the C++ code that loads and runs the model, then build and run.
Key Visual Studio project settings (a smoke test to verify them follows the list):
Include Directories / Additional Include Directories:
E:\libtorch-win-shared-with-deps-1.11.0+cpu\libtorch\include
E:\libtorch-win-shared-with-deps-1.11.0+cpu\libtorch\include\torch\csrc\api\include
Library Directories / Additional Library Directories:
E:\libtorch-win-shared-with-deps-1.11.0+cpu\libtorch\lib
Additional Dependencies:
c10.lib
fbgemm.lib
asmjit.lib
torch.lib
torch_cpu.lib
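Before writing any model code, it is worth checking that the project actually compiles and links against LibTorch. A minimal smoke test (torch/torch.h is the full C++ API header; this is just a link check, not part of the deployment itself):

#include <torch/torch.h>
#include <iostream>

int main() {
    // If the include and library settings above are correct, this builds,
    // links, and prints a 2x3 tensor of random values.
    torch::Tensor t = torch::rand({2, 3});
    std::cout << t << std::endl;
    return 0;
}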
import os
import torch

def test(weight_file):
    # Dummy input with the shape the deployed model will see at inference time.
    example = torch.rand(1, 3, 224, 224)
    model = HandRecNet()  # user-defined network; its definition is not shown here
    if os.path.exists(weight_file):
        model.load_state_dict(torch.load(weight_file, map_location='cpu'))
    model.eval()
    with torch.no_grad():
        # Run the dummy input through the model once to record (trace) it,
        # then serialize the resulting ScriptModule to disk.
        traced_script_module = torch.jit.trace(model, example)
        traced_script_module.save("traced_model.pt")
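Before switching to C++, it helps to confirm that the saved file round-trips. A minimal sketch, assuming the code above has already produced traced_model.pt:

import torch

# Load the serialized ScriptModule back and run a dummy input through it.
loaded = torch.jit.load("traced_model.pt")
loaded.eval()
with torch.no_grad():
    out = loaded(torch.rand(1, 3, 224, 224))
print(out.shape)  # should match the eager model's output shape

Note that torch.jit.trace records only the execution path taken by the example input; models with data-dependent control flow should be exported with torch.jit.script instead.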
#include <torch/script.h> // One-stop header.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
int main(int argc, const char* argv[]) {
    if (argc != 2) {
        std::cerr << "usage: example-app <path-to-exported-script-module>\n";
        return -1;
    }
    torch::jit::script::Module module;
    try {
        // Deserialize the ScriptModule from a file using torch::jit::load().
        module = torch::jit::load(argv[1]);
    }
    catch (const c10::Error& e) {
        std::cerr << "error loading the model\n";
        return -1;
    }
    std::cout << "model load ok\n";

    // Load the image with OpenCV and transform it.
    // 1. Read the image.
    cv::Mat image = cv::imread("../dog2.JPEG", cv::IMREAD_COLOR);
    if (image.empty()) {
        std::cerr << "failed to read ../dog2.JPEG\n";
        return -1;
    }
    // 2. Convert the color space; OpenCV loads images as BGR.
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
    // Convert to float and scale pixel values to [0, 1].
    cv::Mat img_float;
    image.convertTo(img_float, CV_32F, 1.0 / 255);
    // 3. Resize to the 224x224 input the model expects.
    cv::resize(img_float, img_float, cv::Size(224, 224), 0, 0, cv::INTER_AREA);
    // 4. Wrap the pixel buffer in a tensor (NHWC layout at this point).
    auto img_tensor = torch::from_blob(img_float.data, {1, 224, 224, 3}, torch::kFloat32);
    // PyTorch wants NCHW: batch first, then channels.
    img_tensor = img_tensor.permute({0, 3, 1, 2});
    // 5. Normalize each channel with the ImageNet mean and std; values from
    // https://github.com/pytorch/examples/blob/master/imagenet/main.py#L202
    img_tensor[0][0].sub_(0.485).div_(0.229);
    img_tensor[0][1].sub_(0.456).div_(0.224);
    img_tensor[0][2].sub_(0.406).div_(0.225);
    // Create input vectors; inputs1 holds an all-ones dummy for quick smoke tests.
    std::vector<torch::jit::IValue> inputs1, inputs2;
    inputs1.push_back(torch::ones({1, 3, 224, 224}));
    inputs2.push_back(img_tensor);
    // 6. Execute the model and turn its output into a tensor.
    at::Tensor output = module.forward(inputs2).toTensor();
    std::cout << output.sizes() << std::endl;
    std::cout << output.slice(/*dim=*/1, /*start=*/0, /*end=*/3) << '\n';
    // 7. Load the class labels, one per line.
    std::string label_file = "../synset_words.txt";
    std::ifstream rf(label_file.c_str());
    if (!rf) {
        std::cerr << "Unable to open labels file " << label_file << '\n';
        return -1;
    }
    std::string line;
    std::vector<std::string> labels;
    while (std::getline(rf, line)) { labels.push_back(line); }
    // 8. Print the top-3 predicted labels and their scores.
    std::tuple<torch::Tensor, torch::Tensor> result = output.sort(-1, /*descending=*/true);
    torch::Tensor top_scores = std::get<0>(result)[0];
    torch::Tensor top_idxs = std::get<1>(result)[0].to(torch::kInt32);
    auto top_scores_a = top_scores.accessor<float, 1>();
    auto top_idxs_a = top_idxs.accessor<int32_t, 1>();
    for (int i = 0; i < 3; i++) {
        int idx = top_idxs_a[i];
        std::cout << "top-" << i + 1 << " label: ";
        std::cout << labels[idx] << ", score: " << top_scores_a[i] << std::endl;
    }
    return 0;
}
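Build the project and pass the traced model as the only argument, e.g. example-app traced_model.pt; the code expects dog2.JPEG and synset_words.txt one level above the working directory. On Windows, the runtime DLLs from libtorch\lib (c10.dll, torch_cpu.dll, fbgemm.dll, asmjit.dll, and their dependencies) must be on PATH or sit next to the executable, otherwise the program fails at startup even though it linked successfully.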