Crates.io | paddle_inference |
lib.rs | paddle_inference |
version | 0.4.0 |
source | src |
created_at | 2022-05-19 01:58:23.326861 |
updated_at | 2022-05-27 02:20:06.941181 |
description | paddle_inference_c的Rust封装 |
homepage | |
repository | https://github.com/ZB94/paddle_inference |
max_upload_size | |
id | 589447 |
size | 121,640 |
本库是对百度飞桨(PaddlePaddle)推理库 C 接口的封装,详细说明请参考官方文档。
使用前请确保 paddle_inference_c 的动态库及其第三方依赖库能被正常搜索到。如:Windows 下将动态库所在目录加入环境变量 PATH 中,Linux 下加入 LD_LIBRARY_PATH 中。
中use paddle_inference::config::model::Model;
use paddle_inference::config::setting::Cpu;
use paddle_inference::Predictor;
let predictor = Predictor::builder(Model::path(
"模型文件路径",
"模型参数文件路径",
))
// 使用 CPU 识别
.cpu(Cpu {
threads: Some(std::thread::available_parallelism().unwrap().get() as i32),
mkldnn: None,
})
// 设置缓存陌路
.set_optimization_cache_dir("caches".to_string())
// 创建 Predictor
.build();
let names = predictor.input_names();
println!("输入名称列表长度: {}", names.len());
// 获取和设置输入数据
let input = predictor.input(&names.get(0).unwrap());
input.reshape(&[1, 3, 100, 100]);
input.copy_from_f32(&[0.0; 3 * 100 * 100]);
// 执行
println!("run: {}", predictor.run());
let names = predictor.output_names();
println!("output names len: {}", names.len());
let output = predictor.output(&names.get(0).unwrap());
println!("output type: {:?}", output.data_type());
println!("output shape: {:?}", output.shape());