
can you provide inference code of libtorch #250

Open
JJY0710 opened this issue Aug 1, 2022 · 4 comments

Comments

@JJY0710 commented Aug 1, 2022

Thank you.

@CoinCheung (Owner)

What is the necessity of this?

@XDxiaoqi

// Forward inference of the segmentation model
bool BiseSegment::model_forward(const cv::Mat& input_img, cv::Mat& res_img)
{
    if (input_img.empty()) {
        std::cout << "error, unet segnet src empty" << std::endl;
        return false;
    }

    // Resize to the network input size
    cv::Mat resize_img;
    if ((input_img.cols != img_width_) || (input_img.rows != img_hight_)) {
        std::cout << "warning segnet config w: " << img_width_ << " actw: " << input_img.cols
                  << " config h: " << img_hight_ << " acth: " << input_img.rows << std::endl;
        // Note: the interpolation flag must be the 6th argument of cv::resize
        cv::resize(input_img, resize_img, cv::Size(img_width_, img_hight_), 0, 0, cv::INTER_NEAREST);
    } else {
        resize_img = input_img;
    }

    // Convert the cv::Mat (HWC, uint8) to a Tensor
    auto imgTensor = torch::from_blob(resize_img.data, {1, img_hight_, img_width_, 3}, torch::kUInt8);
    // auto imgTensor = torch::from_blob(resize_img.data,
    //                     {1, img_hight_, img_width_, 3}, torch::kByte).to(torch::kCUDA);

    // Permute the channel order to NCHW (hard-coded)
    imgTensor = imgTensor.permute({0, 3, 1, 2});              // [batch_size, channel, height, width]
    imgTensor = imgTensor.to(torch::kFloat32);
    imgTensor = imgTensor.div(255.0);

    // Normalization: subtract the per-channel mean
    imgTensor[0][0] = imgTensor[0][0].sub_(0.5);
    imgTensor[0][1] = imgTensor[0][1].sub_(0.5);
    imgTensor[0][2] = imgTensor[0][2].sub_(0.5);
    // imgTensor[0][0] = imgTensor[0][0].sub_(0.5).div_(1.0);

    // Move to the target device (CPU here; CUDA variant commented out)
    torch::DeviceType deviceType = torch::kCPU;
    // torch::Device deviceType(torch::kCUDA);
    auto imgVar = torch::autograd::make_variable(imgTensor, false).to(deviceType);
    std::vector<torch::jit::IValue> inputs;
    // inputs.emplace_back(imgVar.to(at::kCUDA));
    inputs.emplace_back(imgVar.to(at::kCPU));

    // Forward pass of the network
    ModuleHandler* model_handle = static_cast<ModuleHandler*>(module_handle_);
    torch::Tensor pred = model_handle->module_.forward(inputs).toTensor();

    // torch::Tensor pred = output.argmax(1);
    // std::cout << "### segnet output size: " << pred.sizes() << std::endl;

    pred = pred.squeeze();
    pred = pred.to(torch::kUInt8).mul(50);  // 5 classes, scale by 50 for visualization
    pred = pred.to(torch::kCPU);

    cv::Mat net_out = cv::Mat(cv::Size(resize_img.cols, resize_img.rows), CV_8U, pred.data_ptr());

    // Keep the output the same size as the input
    if ((net_out.cols != input_img.cols) || (net_out.rows != input_img.rows)) {
        cv::resize(net_out, res_img, input_img.size(), 0, 0, cv::INTER_NEAREST);
    } else {
        res_img = net_out.clone();
    }

    return true;
}
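
For context, the snippet above assumes the TorchScript model has already been loaded into module_handle_; that loading step is not shown. A minimal sketch of how it might look with the standard torch::jit::load API (the ModuleHandler layout and the "model.pt" path here are assumptions, not part of the original code):

// Sketch: loading the TorchScript module that model_forward() later uses.
// Only torch::jit::load / Module::eval are standard libtorch API; the rest is assumed.
#include <torch/script.h>
#include <iostream>
#include <memory>
#include <string>

struct ModuleHandler {
    torch::jit::script::Module module_;   // matches model_handle->module_ above
};

bool load_model(const std::string& model_path, void*& module_handle_out) {
    try {
        auto handler = std::make_unique<ModuleHandler>();
        // Deserialize a model exported from Python with torch.jit.trace / torch.jit.script
        handler->module_ = torch::jit::load(model_path);
        handler->module_.eval();
        module_handle_out = handler.release();   // later stored as module_handle_
        return true;
    } catch (const c10::Error& e) {
        std::cerr << "failed to load " << model_path << ": " << e.what() << std::endl;
        return false;
    }
}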

@JJY0710 changed the title from "can you provide inferrence code of libtorch" to "can you provide inference code of libtorch" on Aug 17, 2022
@JJY0710 (Author) commented Aug 17, 2022

What is the necessity of this?

I want to test the inference time of libtorch.
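
For a rough timing measurement, one possible sketch around the TorchScript forward call (assuming a loaded torch::jit::script::Module and an already preprocessed input tensor, as in the code above; the warm-up and iteration counts are arbitrary):

// Sketch: average forward-pass latency of a TorchScript module.
#include <torch/script.h>
#include <chrono>
#include <iostream>
#include <vector>

double time_forward(torch::jit::script::Module& module, torch::Tensor input, int iters = 100) {
    torch::NoGradGuard no_grad;                            // inference only, no autograd bookkeeping
    std::vector<torch::jit::IValue> inputs{input};

    for (int i = 0; i < 5; ++i) module.forward(inputs);    // warm-up runs

    // If running on GPU, synchronize before/after timing (e.g. torch::cuda::synchronize())
    // so the measurement covers the actual kernel execution.
    auto t0 = std::chrono::steady_clock::now();
    for (int i = 0; i < iters; ++i) {
        auto out = module.forward(inputs).toTensor();
        (void)out;
    }
    auto t1 = std::chrono::steady_clock::now();

    double ms = std::chrono::duration<double, std::milli>(t1 - t0).count() / iters;
    std::cout << "average forward time: " << ms << " ms" << std::endl;
    return ms;
}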

@JJY0710 (Author) commented Aug 17, 2022


Thank you so much for the inference code; I'm not familiar with libtorch yet.
