Qualcomm NPU边缘部署:ONNX Runtime + Hexagon DSP优化指南

适用平台: Snapdragon 8 Gen 2/3, QCS8255, QCS8775
核心工具: ONNX Runtime QNN Execution Provider
目标: DMS/OMS模型在车载嵌入式平台的高效部署


Qualcomm AI生态概览

硬件架构

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
┌─────────────────────────────────────────────────────────────────┐
│ Snapdragon 芯片架构 │
├─────────────────────────────────────────────────────────────────┤
│ │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ Kryo CPU │ │
│ │ - 性能核心 (Prime) │ │
│ │ - 效率核心 │ │
│ │ - 适用: 控制逻辑、预处理 │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ Adreno GPU │ │
│ │ - 图形渲染 │ │
│ │ - 计算加速 (OpenCL/Vulkan) │ │
│ │ - 适用: 图像处理、部分推理 │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ Hexagon DSP / NPU ★ │ │
│ │ │ │
│ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │
│ │ │ Hexagon │ │ HVX │ │ HTP/AI │ │ │
│ │ │ Processor │ │ (向量扩展) │ │ Engine │ │ │
│ │ └─────────────┘ └─────────────┘ └─────────────┘ │ │
│ │ │ │
│ │ - 低功耗AI推理 │ │
│ │ - INT8量化加速 │ │
│ │ - 适用: 神经网络推理 (DMS/OMS核心) │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ Hexagon Sensor DSP │ │
│ │ - 传感器数据处理 │ │
│ │ - 低功耗音频处理 │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
└─────────────────────────────────────────────────────────────────┘

计算能力对比

平台 NPU TOPS CPU算力 GPU算力 典型应用
QCS8255 26 TOPS 100k DMIPS 3.1 TFLOPS L2/L2+ ADAS
QCS8775 30 TOPS 140k DMIPS 4.2 TFLOPS L3 ADAS
Snapdragon 8 Gen 3 45 TOPS - - 旗舰手机

ONNX Runtime QNN部署流程

完整部署流程

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
PyTorch模型 (.pt)


┌─────────────────────────────────────────┐
1. 导出ONNX │
│ torch.onnx.export() │
└─────────────────────────────────────────┘


┌─────────────────────────────────────────┐
2. 模型量化 │
│ - 动态量化 │
│ - 静态量化 (推荐) │
│ - QAT量化感知训练 │
└─────────────────────────────────────────┘


┌─────────────────────────────────────────┐
3. 转换QNN格式 │
│ qnn-onnx-converter │
└─────────────────────────────────────────┘


┌─────────────────────────────────────────┐
4. 编译为HTP二进制 │
│ qnn-htp-compiler │
└─────────────────────────────────────────┘


┌─────────────────────────────────────────┐
5. 设备部署 │
│ ONNX Runtime + QNN EP │
└─────────────────────────────────────────┘


车载嵌入式设备运行

核心代码实现

1. 模型导出与量化

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
"""
PyTorch模型导出ONNX + 量化
"""

import torch
import torch.nn as nn
import onnx
from onnxruntime.quantization import quantize_static, quantize_dynamic, QuantFormat, QuantType
from onnxruntime.quantization.shape_inference import quant_pre_process
from typing import Tuple, Optional
import numpy as np


class DMSModel(nn.Module):
    """Example DMS (driver monitoring) multi-task model.

    A shared MobileNetV3-Small backbone feeds three heads:
    face bounding-box regression, eye open/closed classification,
    and head-pose (pitch/yaw/roll) regression.
    """

    def __init__(
        self,
        backbone: str = 'mobilenetv3',
        num_classes: int = 10
    ):
        """
        Args:
            backbone: backbone identifier; only 'mobilenetv3' is supported.
            num_classes: retained for interface compatibility; the current
                heads have fixed output sizes and do not use it.

        Raises:
            ValueError: for an unsupported backbone name.
        """
        super().__init__()

        # Backbone: MobileNetV3-Small with the classifier stripped so it
        # emits the 576-d pooled feature vector.
        if backbone == 'mobilenetv3':
            from torchvision.models import (
                mobilenet_v3_small,
                MobileNet_V3_Small_Weights,
            )
            # `pretrained=True` is deprecated since torchvision 0.13;
            # IMAGENET1K_V1 is the exact weights it used to load.
            self.backbone = mobilenet_v3_small(
                weights=MobileNet_V3_Small_Weights.IMAGENET1K_V1
            )
            self.backbone.classifier = nn.Identity()
            feature_dim = 576
        else:
            raise ValueError(f"Unknown backbone: {backbone}")

        # Face bounding-box regression head
        self.face_head = nn.Sequential(
            nn.Linear(feature_dim, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 4)  # bbox
        )

        # Eye state classification head
        self.eye_head = nn.Sequential(
            nn.Linear(feature_dim, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 2)  # open/closed logits
        )

        # Head pose regression head
        self.pose_head = nn.Sequential(
            nn.Linear(feature_dim, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 3)  # pitch, yaw, roll
        )

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Args:
            x: input batch, shape (batch, 3, 224, 224)

        Returns:
            face_bbox: (batch, 4)
            eye_state: (batch, 2) logits
            head_pose: (batch, 3) pitch, yaw, roll
        """
        features = self.backbone(x)

        face_bbox = self.face_head(features)
        eye_state = self.eye_head(features)
        head_pose = self.pose_head(features)

        return face_bbox, eye_state, head_pose


def export_to_onnx(
    model: nn.Module,
    save_path: str,
    input_size: Tuple[int, int, int, int] = (1, 3, 224, 224),
    opset_version: int = 17
) -> str:
    """Export a PyTorch model to ONNX and validate the result.

    Args:
        model: PyTorch model to export.
        save_path: destination path; its extension is replaced by ".onnx".
        input_size: dummy-input shape (batch, channels, height, width).
            Note: this is a 4-tuple — the previous annotation claimed
            3 elements while the default and all callers pass 4.
        opset_version: ONNX opset version to target.

    Returns:
        Path of the written ONNX file.
    """
    import os

    model.eval()

    # Dummy input that fixes the traced graph's input shape
    dummy_input = torch.randn(*input_size)

    # Swap only the final extension. The old str.replace('.pt', '.onnx')
    # would also corrupt names such as "model.pth" -> "model.onnxh".
    root, _ = os.path.splitext(save_path)
    onnx_path = root + '.onnx'

    torch.onnx.export(
        model,
        dummy_input,
        onnx_path,
        export_params=True,
        opset_version=opset_version,
        do_constant_folding=True,
        input_names=['input'],
        output_names=['face_bbox', 'eye_state', 'head_pose'],
        # Keep the batch dimension dynamic on every tensor
        dynamic_axes={
            'input': {0: 'batch_size'},
            'face_bbox': {0: 'batch_size'},
            'eye_state': {0: 'batch_size'},
            'head_pose': {0: 'batch_size'}
        }
    )

    # Structural validation of the exported graph
    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)

    print(f"ONNX模型已导出: {onnx_path}")
    return onnx_path


def quantize_onnx_static(
    onnx_path: str,
    calibration_data: np.ndarray,
    quant_format: QuantFormat = QuantFormat.QDQ,
    activation_type: QuantType = QuantType.QUInt8,
    weight_type: QuantType = QuantType.QInt8
) -> str:
    """Statically quantize an ONNX model using a calibration set.

    Args:
        onnx_path: path to the float ONNX model.
        calibration_data: calibration inputs, one sample per entry.
        quant_format: quantization format (QDQ by default).
        activation_type: integer type used for activations.
        weight_type: integer type used for weights.

    Returns:
        Path of the quantized model file.
    """
    from onnxruntime.quantization import CalibrationDataReader

    class _Reader(CalibrationDataReader):
        """Feeds calibration samples one at a time under the 'input' key."""

        def __init__(self, samples):
            self._samples = samples
            self._cursor = 0

        def get_next(self):
            if self._cursor >= len(self._samples):
                return None
            feed = {'input': self._samples[self._cursor]}
            self._cursor += 1
            return feed

        def rewind(self):
            self._cursor = 0

    # Shape inference / cleanup pass required before static quantization
    preprocessed_path = onnx_path.replace('.onnx', '_preprocessed.onnx')
    quant_pre_process(onnx_path, preprocessed_path)

    quantized_path = onnx_path.replace('.onnx', '_quantized.onnx')

    quantize_static(
        model_input=preprocessed_path,
        model_output=quantized_path,
        calibration_data_reader=_Reader(calibration_data),
        quant_format=quant_format,
        per_channel=False,
        weight_type=weight_type,
        activation_type=activation_type
    )

    print(f"量化模型已保存: {quantized_path}")
    return quantized_path


def quantize_onnx_dynamic(
    onnx_path: str,
    weight_type: QuantType = QuantType.QInt8
) -> str:
    """Dynamically quantize an ONNX model (quick — no calibration data).

    Args:
        onnx_path: path to the float ONNX model.
        weight_type: integer type used for weights.

    Returns:
        Path of the quantized model file.
    """
    quantized_path = onnx_path.replace('.onnx', '_dynamic_quant.onnx')

    quantize_dynamic(
        model_input=onnx_path,
        model_output=quantized_path,
        weight_type=weight_type,
        optimize_model=True
    )

    print(f"动态量化模型已保存: {quantized_path}")
    return quantized_path


# Smoke test: export the DMS model, then compare file sizes before and
# after dynamic quantization.
if __name__ == "__main__":
    import os

    # Build the multi-task DMS model
    model = DMSModel(backbone='mobilenetv3')

    # Export it to ONNX
    onnx_path = export_to_onnx(
        model,
        "dms_model.onnx",
        input_size=(1, 3, 224, 224)
    )

    original_size = os.path.getsize(onnx_path) / 1024 / 1024
    print(f"原始模型大小: {original_size:.2f} MB")

    # Dynamic quantization and size comparison
    quantized_path = quantize_onnx_dynamic(onnx_path)
    quantized_size = os.path.getsize(quantized_path) / 1024 / 1024
    print(f"量化后模型大小: {quantized_size:.2f} MB")
    print(f"压缩比: {original_size / quantized_size:.2f}x")

2. QNN模型转换

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
"""
ONNX模型转换为QNN格式
用于Qualcomm NPU部署
"""

import subprocess
import os
from typing import Optional, List


class QNNConverter:
    """Converts ONNX models to QNN format for the Hexagon NPU.

    Thin wrapper around the QNN SDK command-line tools
    (qnn-onnx-converter, qnn-htp-compiler) invoked via subprocess.
    """

    def __init__(
        self,
        qnn_sdk_path: str = "/opt/qnn-sdk",
        target_arch: str = "aarch64"
    ):
        """
        Args:
            qnn_sdk_path: root directory of the installed QNN SDK.
            target_arch: target device architecture.
        """
        self.qnn_sdk_path = qnn_sdk_path
        self.target_arch = target_arch

        # The QNN tools locate their own resources via this variable.
        os.environ['QNN_SDK_ROOT'] = qnn_sdk_path

    def _tool_path(self, tool_name: str) -> str:
        """Absolute path of a host-side SDK tool (x86_64 Linux toolchain)."""
        return os.path.join(
            self.qnn_sdk_path, "bin", "x86_64-linux-clang", tool_name
        )

    def convert_onnx_to_qnn(
        self,
        onnx_path: str,
        output_path: str,
        input_shapes: Optional[dict] = None
    ) -> Optional[str]:
        """Convert an ONNX model to a QNN model.

        Args:
            onnx_path: source ONNX model path.
            output_path: destination path for the QNN model.
            input_shapes: mapping of input name -> shape string,
                e.g. {"input": "1,3,224,224"}.

        Returns:
            output_path on success, or None if the converter failed
            (stderr is printed). Annotated Optional[str] because the
            previous `-> str` annotation did not reflect the None path.
        """
        if input_shapes is None:
            input_shapes = {"input": "1,3,224,224"}

        cmd = [
            self._tool_path("qnn-onnx-converter"),
            "--input_model", onnx_path,
            "--output_path", output_path,
        ]
        for name, shape in input_shapes.items():
            cmd.extend(["--input_shape", f"{name}={shape}"])

        # Run the converter; failure is reported, not raised.
        result = subprocess.run(cmd, capture_output=True, text=True)

        if result.returncode != 0:
            print(f"转换失败: {result.stderr}")
            return None

        print(f"QNN模型已生成: {output_path}")
        return output_path

    def compile_to_htp(
        self,
        qnn_path: str,
        output_path: str,
        target_chip: str = "SM8550"  # Snapdragon 8 Gen 2
    ) -> Optional[str]:
        """Compile a QNN model into an HTP context binary.

        Args:
            qnn_path: QNN model path.
            output_path: destination path for the HTP binary.
            target_chip: target SoC identifier.

        Returns:
            output_path on success, or None if compilation failed.
        """
        cmd = [
            self._tool_path("qnn-htp-compiler"),
            "--model", qnn_path,
            "--output", output_path,
            "--target_chip", target_chip
        ]

        result = subprocess.run(cmd, capture_output=True, text=True)

        if result.returncode != 0:
            print(f"编译失败: {result.stderr}")
            return None

        print(f"HTP二进制已生成: {output_path}")
        return output_path


# QNN quantization / execution configuration.
# Encodings: activations use asymmetric int8 with an explicit scale and
# offset; weights use symmetric int8. Execution is routed to the QNN
# HTP backend with profiling disabled.
QNN_QUANT_CONFIG = {
    "activation_encodings": {
        "input": {
            "bitwidth": 8,
            "dtype": "int8",
            "is_symmetric": "False",
            "scale": 0.0235,
            "offset": -128,
        },
    },
    "param_encodings": {
        "weight": {
            "bitwidth": 8,
            "dtype": "int8",
            "is_symmetric": "True",
        },
    },
    "execution_providers": ["qnn"],
    "provider_options": {
        "qnn": {
            "backend_path": "libQnnHtp.so",
            "profiling_level": "off",
        },
    },
}

3. ONNX Runtime QNN推理

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
"""
ONNX Runtime QNN Execution Provider推理
在Qualcomm NPU上执行模型推理
"""

import numpy as np
import onnxruntime as ort
from typing import Dict, Optional, Tuple
import time


class QNNInferenceEngine:
    """ONNX Runtime inference engine backed by the QNN Execution Provider.

    Targets the Qualcomm Hexagon NPU ("htp" backend, with CPU fallback)
    or runs entirely on CPU ("cpu" backend).
    """

    def __init__(
        self,
        model_path: str,
        backend: str = "htp",  # "htp" or "cpu"
        profiling: bool = False
    ):
        """
        Args:
            model_path: path to the ONNX model.
            backend: "htp" to target the NPU, "cpu" for CPU-only.
            profiling: enable detailed QNN profiling output.
        """
        self.model_path = model_path
        self.backend = backend
        self.profiling = profiling

        self.session = self._create_session()

        # Cache input/output tensor names for later run() calls.
        self.input_names = [node.name for node in self.session.get_inputs()]
        self.output_names = [node.name for node in self.session.get_outputs()]

    def _create_session(self) -> ort.InferenceSession:
        """Build the ORT session with the provider stack for the backend."""
        if self.backend == "htp":
            qnn_options = {
                'backend_path': 'libQnnHtp.so',
                'profiling_level': 'detailed' if self.profiling else 'off',
                'htp_performance_mode': 'high_performance',
                'htp_graph_finalization_optimization_mode': '3',
            }
            # CPU EP listed last so unsupported ops fall back gracefully.
            providers = [
                ('QNNExecutionProvider', qnn_options),
                'CPUExecutionProvider',
            ]
        else:
            providers = ['CPUExecutionProvider']

        options = ort.SessionOptions()
        options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        return ort.InferenceSession(
            self.model_path,
            sess_options=options,
            providers=providers
        )

    def infer(
        self,
        inputs: Dict[str, np.ndarray]
    ) -> Dict[str, np.ndarray]:
        """Run one forward pass.

        Args:
            inputs: feed dict mapping input names to arrays.

        Returns:
            Mapping of output names to output arrays.
        """
        raw_outputs = self.session.run(self.output_names, inputs)
        return dict(zip(self.output_names, raw_outputs))

    def benchmark(
        self,
        inputs: Dict[str, np.ndarray],
        num_runs: int = 100,
        warmup: int = 10
    ) -> Dict[str, float]:
        """Measure inference latency and throughput.

        Args:
            inputs: feed dict used for every run.
            num_runs: number of timed iterations.
            warmup: number of untimed warm-up iterations.

        Returns:
            Latency statistics in milliseconds plus throughput in FPS.
        """
        # Warm-up runs (graph finalization, caches) are not timed.
        for _ in range(warmup):
            self.infer(inputs)

        samples = []
        for _ in range(num_runs):
            t0 = time.perf_counter()
            self.infer(inputs)
            samples.append((time.perf_counter() - t0) * 1000)  # ms

        return {
            'mean_latency_ms': np.mean(samples),
            'std_latency_ms': np.std(samples),
            'min_latency_ms': np.min(samples),
            'max_latency_ms': np.max(samples),
            'p95_latency_ms': np.percentile(samples, 95),
            'throughput_fps': 1000 / np.mean(samples),
        }


class DMSInferencePipeline:
    """End-to-end DMS pipeline: preprocessing + inference + postprocessing."""

    def __init__(
        self,
        model_path: str,
        backend: str = "htp"
    ):
        """
        Args:
            model_path: path to the ONNX model.
            backend: inference backend ("htp" or "cpu").
        """
        self.engine = QNNInferenceEngine(model_path, backend)

        # Preprocessing parameters (ImageNet normalization constants)
        self.input_size = (224, 224)
        self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
        self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    def preprocess(
        self,
        image: np.ndarray
    ) -> np.ndarray:
        """Convert a BGR frame into the model's input tensor.

        Args:
            image: BGR image, shape (H, W, 3).

        Returns:
            Float32 tensor of shape (1, 3, H', W').
        """
        import cv2

        # Resize to the network input resolution
        image = cv2.resize(image, self.input_size)

        # BGR -> RGB (model was trained on RGB inputs)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Scale to [0, 1] and apply ImageNet normalization
        image = image.astype(np.float32) / 255.0
        image = (image - self.mean) / self.std

        # HWC -> CHW
        image = image.transpose(2, 0, 1)

        # Add batch dimension
        tensor = np.expand_dims(image, 0)

        return tensor

    def postprocess(
        self,
        outputs: Dict[str, np.ndarray],
        original_size: Tuple[int, int]
    ) -> Dict:
        """Convert raw model outputs into a result dict.

        Args:
            outputs: model output dict (face_bbox, eye_state, head_pose).
            original_size: original image size as (height, width).

        Returns:
            Dict with the rescaled face bbox, eye-open probability, and
            head pose angles.
        """
        # Copy the bbox before scaling: the previous in-place `*=` silently
        # mutated the caller's outputs['face_bbox'] array.
        face_bbox = outputs['face_bbox'][0].astype(np.float32, copy=True)
        eye_state = outputs['eye_state'][0]
        head_pose = outputs['head_pose'][0]

        # Map bbox coordinates from network-input scale back to the
        # original image resolution.
        h, w = original_size
        face_bbox[0] *= w / self.input_size[1]
        face_bbox[1] *= h / self.input_size[0]
        face_bbox[2] *= w / self.input_size[1]
        face_bbox[3] *= h / self.input_size[0]

        # Probability that the eyes are open (softmax over the 2 logits,
        # index 1 = "open")
        eye_open_prob = self._softmax(eye_state)[1]

        # Head pose angles
        pitch, yaw, roll = head_pose

        return {
            'face_bbox': face_bbox.tolist(),
            'eye_open_probability': float(eye_open_prob),
            'head_pose': {
                'pitch': float(pitch),
                'yaw': float(yaw),
                'roll': float(roll)
            }
        }

    def _softmax(self, x: np.ndarray) -> np.ndarray:
        """Numerically stable softmax."""
        exp_x = np.exp(x - np.max(x))
        return exp_x / np.sum(exp_x)

    def run(
        self,
        image: np.ndarray
    ) -> Dict:
        """Run the full pipeline on one frame.

        Args:
            image: input BGR image.

        Returns:
            Detection result dict (see postprocess).
        """
        original_size = image.shape[:2]

        # Preprocess
        tensor = self.preprocess(image)

        # Inference
        inputs = {'input': tensor}
        outputs = self.engine.infer(inputs)

        # Postprocess
        results = self.postprocess(outputs, original_size)

        return results


# Smoke test against the CPU execution provider (no NPU required).
if __name__ == "__main__":
    import os

    model_path = "dms_model_quantized.onnx"

    # CPU-backend engine for host-side validation
    engine_cpu = QNNInferenceEngine(model_path, backend="cpu")

    # Random input matching the model's expected shape
    inputs = {'input': np.random.randn(1, 3, 224, 224).astype(np.float32)}

    # Latency / throughput benchmark
    metrics = engine_cpu.benchmark(inputs, num_runs=100)

    print("=== QNN推理基准测试 ===")
    print(f"后端: CPU")
    print(f"平均延迟: {metrics['mean_latency_ms']:.2f} ms")
    print(f"P95延迟: {metrics['p95_latency_ms']:.2f} ms")
    print(f"吞吐量: {metrics['throughput_fps']:.1f} FPS")

    # Report the on-disk model size when available
    if os.path.exists(model_path):
        size_mb = os.path.getsize(model_path) / 1024 / 1024
        print(f"模型大小: {size_mb:.2f} MB")

性能优化技巧

量化策略对比

方法 精度损失 模型压缩 推理加速 实现难度
动态量化 中等 4x 2-3x 低
静态量化 较低 4x 3-5x 中
QAT量化感知训练 最低 4x 3-5x 高
FP16 极低 2x 1.5-2x 低

推荐配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# Recommended deployment configuration for the DMS model.
# Targets QCS8255 with INT8 static quantization on the QNN HTP backend.
DMS_DEPLOYMENT_CONFIG = {
    'model': {
        'backbone': 'mobilenetv3_small',
        'quantization': 'static_int8',
        'input_size': (224, 224),
    },
    'inference': {
        'backend': 'qnn_htp',
        'precision': 'int8',
        'performance_mode': 'high_performance',
    },
    'target': {
        'platform': 'QCS8255',
        'expected_latency_ms': 15,
        'expected_fps': 60,
    },
}

# Recommended deployment configuration for the OMS model.
# Larger input and backbone than DMS, hence the relaxed latency budget.
OMS_DEPLOYMENT_CONFIG = {
    'model': {
        'backbone': 'efficientnet_b0',
        'quantization': 'static_int8',
        'input_size': (320, 320),
    },
    'inference': {
        'backend': 'qnn_htp',
        'precision': 'int8',
    },
    'target': {
        'platform': 'QCS8255',
        'expected_latency_ms': 25,
        'expected_fps': 40,
    },
}

部署清单

开发环境

1
2
3
4
5
6
7
8
9
10
# 1. Install the QNN SDK.
#    NOTE(review): the original URL had a malformed host (".qualcomm.com").
#    The SDK is distributed through the Qualcomm Package Manager / developer
#    portal — the URL below is a placeholder; confirm against your account.
wget https://qpm.qualcomm.com/qnn-sdk-linux.tar.gz
tar -xzvf qnn-sdk-linux.tar.gz
export QNN_SDK_ROOT=/opt/qnn-sdk

# 2. Install ONNX Runtime with the QNN execution provider
pip install onnxruntime-qnn

# 3. Install the quantization tooling
pip install onnxruntime

设备部署

1
2
3
4
5
6
7
8
9
10
11
# 1. Push the quantized model to the device
adb push dms_model_quantized.onnx /data/local/tmp/

# 2. Push the QNN HTP runtime library
adb push $QNN_SDK_ROOT/lib/aarch64/libQnnHtp.so /data/local/tmp/

# 3. Make the pushed libraries executable/readable
adb shell chmod 755 /data/local/tmp/*.so

# 4. Run the on-device test application
adb shell ./dms_test_app

总结

维度 内容
部署工具 ONNX Runtime + QNN EP
目标硬件 Qualcomm Hexagon NPU
量化方法 INT8静态量化
性能提升 3-5x加速,4x压缩
DMS延迟 ~15ms (QCS8255)
OMS延迟 ~25ms (QCS8255)

发布时间: 2026-04-22
标签: #Qualcomm #NPU #ONNX #边缘部署 #DMS #OMS


Qualcomm NPU边缘部署:ONNX Runtime + Hexagon DSP优化指南
https://dapalm.com/2026/04/22/2026-04-22-qualcomm-npu-onnx-deployment/
作者
Mars
发布于
2026年4月22日
许可协议