Comments (3)
您好,我的理解上C API无非是对C++ API再做了一层封装,难度应该不大。您是否能提交一个PR来完成这个工作呢,我可以配合您来完成。(我也很想把该项目移植到C#等其它编程语言中,但是现在确实人手有限)
from paddle2onnx.
这是一套可能的设计(可能大语言模型生成):
paddle2onnx_c_api.h
:
#ifndef PADDLE2ONNX_C_API_H
#define PADDLE2ONNX_C_API_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
// 默认和C++ API一样的导出逻辑
#if defined(_WIN32)
#ifdef PADDLE2ONNX_C_LIB
#define PADDLE2ONNX_C_API __declspec(dllexport)
#else
#define PADDLE2ONNX_C_API __declspec(dllimport)
#endif
#else
#define PADDLE2ONNX_C_API __attribute__((visibility("default")))
#endif
// 数据结构定义
typedef struct {
char op_name[100];
char export_op_name[100];
} Paddle2ONNX_CustomOp;
typedef struct {
char name[100];
int64_t* shape;
int32_t rank;
int32_t dtype;
} Paddle2ONNX_ModelTensorInfo;
typedef struct {
int64_t background_label;
int64_t keep_top_k;
float nms_eta;
float nms_threshold;
float score_threshold;
int64_t nms_top_k;
bool normalized;
} Paddle2ONNX_NMSParameters;
typedef struct {
Paddle2ONNX_ModelTensorInfo inputs[100];
Paddle2ONNX_ModelTensorInfo outputs[100];
int num_inputs;
int num_outputs;
} Paddle2ONNX_TensorInfoContainer;
// API函数声明
PADDLE2ONNX_C_API int Paddle2ONNX_IsExportable(
const char* model, const char* params, int32_t opset_version,
bool auto_upgrade_opset, bool verbose, bool enable_onnx_checker,
bool enable_experimental_op, bool enable_optimize,
Paddle2ONNX_CustomOp* ops, int op_count, const char* deploy_backend);
PADDLE2ONNX_C_API int Paddle2ONNX_Export(
const char* model, const char* params, char** out, int* out_size,
int32_t opset_version, bool auto_upgrade_opset, bool verbose,
bool enable_onnx_checker, bool enable_experimental_op, bool enable_optimize,
Paddle2ONNX_CustomOp* ops, int op_count, const char* deploy_backend,
char** calibration_cache, int* calibration_size, const char* external_file,
bool* save_external, bool export_fp16_model, char** disable_fp16_op_types,
int disable_fp16_op_types_count);
PADDLE2ONNX_C_API int Paddle2ONNX_RemoveMultiClassNMS(
const char* onnx_model, int model_size, char** out_model, int* out_model_size);
PADDLE2ONNX_C_API int Paddle2ONNX_ConvertFP32ToFP16(
const char* onnx_model, int model_size, char** out_model, int* out_model_size);
#ifdef __cplusplus
}
#endif
#endif // PADDLE2ONNX_C_API_H
这是可能的paddle2onnx_c_api.cc
实现(注意:实现中使用了std::vector、try/catch等C++特性,因此该文件必须作为C++源文件编译,不能用.c扩展名):
#include "paddle2onnx_c_api.h"

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

#include "paddle2onnx/converter.h" // Assume this is the correct path to C++ header
#ifdef __cplusplus
extern "C" {
#endif
// Helper function to convert C CustomOps array to C++ vector
// Converts a C array of Paddle2ONNX_CustomOp into the C++ representation.
// BUG FIX: the original used unbounded strcpy(), which overflows the
// destination if a caller-supplied name is not NUL-terminated within the
// destination's capacity. snprintf() bounds the copy and guarantees
// NUL termination.
// NOTE(review): assumes paddle2onnx::CustomOp stores fixed-size char
// arrays like the C struct — confirm against converter.h.
static std::vector<paddle2onnx::CustomOp> ConvertCtoCppCustomOps(Paddle2ONNX_CustomOp* ops, int op_count) {
  std::vector<paddle2onnx::CustomOp> cpp_ops;
  // Tolerate "no custom ops" expressed as a null pointer or non-positive count.
  if (ops == nullptr || op_count <= 0) {
    return cpp_ops;
  }
  cpp_ops.resize(op_count);
  for (int i = 0; i < op_count; ++i) {
    std::snprintf(cpp_ops[i].op_name, sizeof(cpp_ops[i].op_name), "%s", ops[i].op_name);
    std::snprintf(cpp_ops[i].export_op_name, sizeof(cpp_ops[i].export_op_name), "%s", ops[i].export_op_name);
  }
  return cpp_ops;
}
// Thin C wrapper over paddle2onnx::IsExportable.
// Returns 0 when the model can be exported, 1 when it cannot, and -1 when
// a C++ exception escaped the underlying implementation.
PADDLE2ONNX_C_API int Paddle2ONNX_IsExportable(
    const char* model,
    const char* params,
    int32_t opset_version,
    bool auto_upgrade_opset,
    bool verbose,
    bool enable_onnx_checker,
    bool enable_experimental_op,
    bool enable_optimize,
    Paddle2ONNX_CustomOp* ops,
    int op_count,
    const char* deploy_backend) {
  try {
    std::vector<paddle2onnx::CustomOp> converted = ConvertCtoCppCustomOps(ops, op_count);
    const bool exportable = paddle2onnx::IsExportable(
        model, params, opset_version, auto_upgrade_opset, verbose,
        enable_onnx_checker, enable_experimental_op, enable_optimize,
        converted.data(), op_count, deploy_backend);
    if (exportable) {
      return 0;
    }
    return 1;
  } catch (...) {
    // Never let an exception cross the C ABI boundary.
    return -1;
  }
}
// Exports a Paddle model to ONNX, serializing the model into *out.
// Returns 0 on success (then *out / *out_size are set; *out is allocated
// with new[] and must be released by the library side), 1 on conversion
// failure, -1 when a C++ exception was caught.
PADDLE2ONNX_C_API int Paddle2ONNX_Export(
    const char* model,
    const char* params,
    char** out, int* out_size,
    int32_t opset_version,
    bool auto_upgrade_opset,
    bool verbose,
    bool enable_onnx_checker,
    bool enable_experimental_op,
    bool enable_optimize,
    Paddle2ONNX_CustomOp* ops,
    int op_count,
    const char* deploy_backend,
    char** calibration_cache,
    int* calibration_size,
    const char* external_file,
    bool* save_external,
    bool export_fp16_model,
    char** disable_fp16_op_types,
    int disable_fp16_op_types_count) {
  try {
    std::vector<paddle2onnx::CustomOp> cpp_ops = ConvertCtoCppCustomOps(ops, op_count);
    // BUG FIX: the original built this vector from (disable_fp16_op_types,
    // disable_fp16_op_types + count) unconditionally, which is UB when the
    // caller passes a null pointer.
    std::vector<std::string> disabled_ops;
    if (disable_fp16_op_types != nullptr && disable_fp16_op_types_count > 0) {
      disabled_ops.assign(disable_fp16_op_types,
                          disable_fp16_op_types + disable_fp16_op_types_count);
    }
    std::string output;
    // BUG FIX: calibration_cache / calibration_size were previously dropped
    // (nullptr, 0 was passed), so callers could never retrieve the
    // calibration cache promised by the signature — forward them through.
    // NOTE(review): confirm against converter.h whether the C++ Export takes
    // std::string* for disable_fp16_op_types or char** — adjust if needed.
    bool ok = paddle2onnx::Export(model, params, &output, opset_version,
                                  auto_upgrade_opset, verbose, enable_onnx_checker,
                                  enable_experimental_op, enable_optimize,
                                  cpp_ops.data(), op_count, deploy_backend,
                                  calibration_cache, calibration_size,
                                  external_file, save_external, export_fp16_model,
                                  disabled_ops.data(), disable_fp16_op_types_count);
    if (ok) {
      *out_size = static_cast<int>(output.size());
      *out = new char[output.size()];
      std::memcpy(*out, output.data(), output.size());
    }
    return ok ? 0 : 1;
  } catch (...) {
    // Never let an exception cross the C ABI boundary.
    return -1;
  }
}
// Removes the MultiClassNMS op from a serialized ONNX model.
// Returns 0 on success (then *out_model / *out_model_size are filled and
// *out_model is allocated with new[]), 1 on failure, -1 on exception.
PADDLE2ONNX_C_API int Paddle2ONNX_RemoveMultiClassNMS(
    const char* onnx_model,
    int model_size,
    char** out_model,
    int* out_model_size) {
  try {
    std::string stripped;
    if (!paddle2onnx::RemoveMultiClassNMS(onnx_model, model_size, &stripped)) {
      return 1;
    }
    char* buffer = new char[stripped.size()];
    std::memcpy(buffer, stripped.c_str(), stripped.size());
    *out_model = buffer;
    *out_model_size = stripped.size();
    return 0;
  } catch (...) {
    // Never let an exception cross the C ABI boundary.
    return -1;
  }
}
// Converts a serialized FP32 ONNX model to FP16.
// Returns 0 on success (then *out_model / *out_model_size are filled and
// *out_model is allocated with new[]), 1 on failure, -1 on exception.
PADDLE2ONNX_C_API int Paddle2ONNX_ConvertFP32ToFP16(
    const char* onnx_model,
    int model_size,
    char** out_model,
    int* out_model_size) {
  try {
    std::string converted;
    if (!paddle2onnx::ConvertFP32ToFP16(onnx_model, model_size, &converted)) {
      return 1;
    }
    char* buffer = new char[converted.size()];
    std::memcpy(buffer, converted.c_str(), converted.size());
    *out_model = buffer;
    *out_model_size = converted.size();
    return 0;
  } catch (...) {
    // Never let an exception cross the C ABI boundary.
    return -1;
  }
}
#ifdef __cplusplus
}
#endif
仅供参考,由于我不太懂cmake的原理,因此希望上面这些代码对您有帮助。
from paddle2onnx.
我简单翻了一下代码。
CMake 里有个编译选项: WITH_STATIC
,打开它编译后,配合 header 文件:https://github.com/PaddlePaddle/Paddle2ONNX/blob/develop/paddle2onnx/converter.h 。应该已经算是一个 C API 了。
只是好像没什么文档,我也没有实际跑过。
from paddle2onnx.
Related Issues (20)
- 【🚀需求🚀】支持飞桨 broadcast_tensors 算子转ONNX
- 【🚀需求🚀】支持飞桨 distribute_fpn_proposals 算子转ONNX
- 【🚀需求🚀】支持飞桨 generate_proposals_v2 算子转ONNX
- 【🚀需求🚀】支持飞桨 lod_array_length 算子转ONNX
- 【🚀需求🚀】支持飞桨 tensor_array_to_tensor 算子转ONNX
- 【🚀需求🚀】支持飞桨 write_to_array 算子转ONNX
- 【🚀需求🚀】支持飞桨 tril_triu 算子转ONNX HOT 1
- ppocr INFO: In PP-OCRv3, rec_image_shape parameter defaults to '3, 48, 320', if you are using recognition model with PP-OCRv2 or an older version, please set --rec_image_shape='3,32,320 HOT 2
- There are some operators not supported yet, including box_coder,density_prior_box,multiclass_nms2 HOT 2
- 想请问下有预计什么时间支持到paddle3.0吗 HOT 5
- 为什么model_zoo没有了呢??? HOT 1
- 转onnx后 lstm的seq_len 不参与计算
- 使用paddle2onnx转换mot_ppyoloe_l_36e_pipeline模型到onnx格式,使用onnxruntime加载转换后的onnx模型文件报错。
- python3.8编译paddle2onnx报错 HOT 3
- SLANetv2 转 onnx 不支持while,什么时候解决呢?或者有没有其他方式可以导出onnx呢? HOT 1
- can't export SLANet with import error HOT 1
- RTFormer 转onnx格式在paddle2.6版本下一致,paddle3.0beta版本不一致
- 请教大家一个问题,我在使用paddlespeech的kws时,想把模型转为静态图或者onnx,均出现以下报错,要怎么解决才能导出模型?
- 【开源任务】Paddle2ONNX适配Paddle IR完成常用模型转化
- ppocr4 paddle转onnx模型后在cpu上的推理速度远大于在gpu的推理速度
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from paddle2onnx.