多传感器融合在DMS/OMS中的应用:雷达、摄像头与UWB协同

引言:单一传感器的局限

多传感器融合优势

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
单一传感器局限

┌─────────────────────────────────┐
│ 摄像头 │
│ ├── 优点:高分辨率、纹理丰富 │
│ └── 缺点:光线依赖、遮挡敏感 │
└─────────────────────────────────┘

┌─────────────────────────────────┐
│ 雷达 │
│ ├── 优点:全天候、穿透遮挡 │
│ └── 缺点:分辨率低、纹理缺失 │
└─────────────────────────────────┘

┌─────────────────────────────────┐
│ 多传感器融合 │
│ ├── 优点:互补性强、鲁棒性高 │
│ └── 缺点:集成复杂、计算量大 │
└─────────────────────────────────┘

一、雷达+摄像头融合

1.1 融合架构

融合层级

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
融合架构

┌─────────────────────────────────┐
│ 早期融合(Early Fusion) │
│ ├── 原始数据融合 │
│ ├── 像素级融合 │
│ └── 计算量大 │
└─────────────────────────────────┘

┌─────────────────────────────────┐
│ 中期融合(Mid Fusion) │
│ ├── 特征级融合 │
│ ├── 检测框融合 │
│ └── 计算量适中 │
└─────────────────────────────────┘

┌─────────────────────────────────┐
│ 晚期融合(Late Fusion) │
│ ├── 决策级融合 │
│ ├── 结果投票 │
│ └── 计算量小 │
└─────────────────────────────────┘

1.2 毫米波雷达

60GHz雷达特点

特性 说明
频率 60 GHz
分辨率 1° 方位角,5° 俯仰角
距离 0.1-100m
速度 ±100 km/h
穿透力 透雾、透雨、透烟

雷达信号处理

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
class MillimeterWaveRadar:
    """60 GHz millimeter-wave radar signal-processing chain.

    Pipeline: pulse compression -> CFAR detection -> multipath
    suppression -> range/velocity estimation.
    """

    def __init__(self):
        self.frequency = 60    # carrier frequency, GHz
        self.max_range = 100   # maximum detection range, m
        self.max_velocity = 100  # maximum radial speed, km/h
        # Fix: these two attributes were read by pulse_compression() and
        # target_detection() but were never initialized (AttributeError).
        self.range_resolution = 0.1  # m per range bin (matches spec table)
        # Reference pulse for the matched filter.  A unit impulse makes the
        # default compression a magnitude pass-through; replace with the
        # real transmitted chirp in production.
        self.pulse_signal = np.array([1.0])

    def detect_targets(self, raw_data):
        """Run the full detection chain on one frame of raw samples.

        Parameters
        ----------
        raw_data : 1-D array-like of raw ADC samples.

        Returns
        -------
        list of dicts, each with at least 'range' (m) and 'intensity'.
        """
        # 1. Pulse compression (matched filtering)
        compressed = self.pulse_compression(raw_data)

        # 2. CFAR target detection
        detections = self.target_detection(compressed)

        # 3. Multipath suppression
        detections = self.multipath_suppression(detections)

        # 4. Range / velocity solving
        targets = self.ranging_velocity(detections)

        return targets

    def pulse_compression(self, raw_data):
        """Compress pulses with a frequency-domain matched filter."""
        raw_data = np.asarray(raw_data, dtype=float)
        # Zero-pad the reference pulse to the data length so the two
        # spectra match in size (the original code assumed equal lengths
        # and failed otherwise).
        pulse_fft = np.fft.fft(self.pulse_signal, n=len(raw_data))
        matched = np.fft.ifft(np.fft.fft(raw_data) * np.conj(pulse_fft))
        return np.abs(matched)

    def cfar_threshold(self, data):
        """Simplified CFAR threshold: global mean + 2 sigma.

        A real implementation would use a sliding window of training
        cells; this placeholder keeps the class self-contained (the
        method was called but undefined in the original).
        """
        data = np.asarray(data, dtype=float)
        return data.mean() + 2.0 * data.std()

    def target_detection(self, compressed_data):
        """Threshold the compressed range profile into detections."""
        threshold = self.cfar_threshold(compressed_data)
        return [
            {'range': i * self.range_resolution, 'intensity': value}
            for i, value in enumerate(compressed_data)
            if value > threshold
        ]

    def multipath_suppression(self, detections):
        """Pass-through placeholder for multipath (ghost-target) rejection.

        TODO(review): implement real multipath suppression; the original
        called this method without defining it.
        """
        return detections

    def ranging_velocity(self, detections):
        """Pass-through placeholder for range/velocity solving.

        TODO(review): add Doppler processing for per-target velocity; the
        original called this method without defining it.
        """
        return detections

1.3 4D MIMO雷达

4D MIMO雷达优势

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
class MIMORadar4D:
    """4D MIMO radar: range, velocity, azimuth and elevation sensing.

    Also extracts vital signs (heart rate / respiration) from the
    micro-Doppler signature of a tracked person.
    """

    def __init__(self, sample_rate=20.0):
        """
        Parameters
        ----------
        sample_rate : float, optional
            Sampling rate of the micro-Doppler time series, in Hz.
            Needed to convert FFT bins into physical frequencies.
        """
        # Which of the four measurement dimensions are enabled.
        self.dimensions = {
            'range': True,             # range
            'velocity': True,          # radial velocity
            'angle_azimuth': True,     # azimuth angle
            'angle_elevation': True,   # elevation angle
        }

        # Sensor resolution per dimension.
        self.resolution = {
            'range': 0.1,      # m
            'velocity': 0.1,   # m/s
            'angle': 1.0,      # degrees
        }

        self.sample_rate = sample_rate

    def track_human(self, radar_data):
        """Detect, track and characterize humans in one radar frame.

        NOTE(review): detect_targets, fit_trajectories, is_human and
        extract_respiration are not defined in this class — they are
        expected to be provided elsewhere; confirm before use.
        """
        # 1. Raw target detection
        targets = self.detect_targets(radar_data)

        # 2. Trajectory fitting
        trajectories = self.fit_trajectories(targets)

        # 3. Human classification plus vital-sign extraction
        humans = []
        for traj in trajectories:
            if self.is_human(traj):
                humans.append({
                    'position': traj['position'],
                    'velocity': traj['velocity'],
                    'heart_rate': self.extract_heart_rate(traj),
                    'respiration': self.extract_respiration(traj),
                })

        return humans

    def extract_heart_rate(self, trajectory):
        """Estimate heart rate in bpm from the micro-Doppler signature.

        Fixes two defects in the original version: the FFT *bin index*
        was used directly as a frequency in Hz (ignoring the sampling
        rate), and the DC bin was included, so argmax almost always
        landed on bin 0 and returned a meaningless value.
        """
        micro_doppler = np.asarray(trajectory['micro_doppler'], dtype=float)

        # Real-input FFT power spectrum and matching frequency axis.
        spectrum = np.abs(np.fft.rfft(micro_doppler)) ** 2
        freqs = np.fft.rfftfreq(len(micro_doppler), d=1.0 / self.sample_rate)

        # Skip the DC bin, then pick the dominant frequency.
        peak = 1 + int(np.argmax(spectrum[1:]))
        heart_rate = freqs[peak] * 60.0  # Hz -> beats per minute

        return heart_rate

二、UWB定位

2.1 UWB技术

UWB特点

特性 说明
频率 3.1-10.6 GHz
带宽 >500 MHz
精度 10-30 cm
穿透力 优秀
低功耗 支持

UWB定位原理

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
class UWBPositioning:
    """Indoor UWB positioning from per-anchor range measurements."""

    # Fixed tag height assumed when reducing the solve to 2-D, m.
    TAG_HEIGHT = 1.5

    def __init__(self):
        # Anchor (base-station) positions in the cabin frame, m.
        self.base_stations = [
            {'id': 'BS1', 'position': np.array([0, 0, 1.5])},
            {'id': 'BS2', 'position': np.array([3, 0, 1.5])},
            {'id': 'BS3', 'position': np.array([1.5, 3, 1.5])},
        ]

    def trilateration(self, distances):
        """Estimate the tag position from per-anchor distances.

        Builds a TDOA (time-difference-of-arrival) hyperbola for every
        anchor pair, solves each for a representative point, then fuses
        the candidates.

        Parameters
        ----------
        distances : sequence of float
            Measured range to each anchor, in base-station order.

        Returns
        -------
        np.ndarray of shape (3,) or None when fewer than 3 candidate
        points are geometrically feasible.
        """
        candidates = []

        for i in range(len(distances)):
            for j in range(i + 1, len(distances)):
                # Range difference between the two anchors.
                tdoa = distances[i] - distances[j]

                point = self.solve_hyperbola(
                    self.base_stations[i]['position'],
                    self.base_stations[j]['position'],
                    tdoa,
                )
                if point is not None:
                    candidates.append(point)

        # Need at least three candidates for a stable fused fix.
        if len(candidates) >= 3:
            return self.compute_position(candidates)
        return None

    def solve_hyperbola(self, bs1, bs2, tdoa):
        """Return a representative point x with |x-bs1| - |x-bs2| = tdoa.

        The original code ran a Newton iteration that relied on
        self.gradient / self.hessian, neither of which existed
        (AttributeError) — and one scalar equation in three unknowns has
        no unique Newton root anyway.  Instead we return the hyperbola
        *vertex*: the unique solution lying on the anchor baseline,
        which serves as the canonical candidate for later averaging.

        Returns None when |tdoa| exceeds the baseline length (no
        geometrically feasible solution exists).
        """
        bs1 = np.asarray(bs1, dtype=float)
        bs2 = np.asarray(bs2, dtype=float)
        baseline = bs2 - bs1
        length = np.linalg.norm(baseline)

        if length == 0 or abs(tdoa) > length:
            return None

        # On the baseline, |x-bs1| - |x-bs2| grows linearly from -length
        # at bs1 to +length at bs2, so the vertex sits at signed offset
        # tdoa/2 from the midpoint.
        return (bs1 + bs2) / 2 + (tdoa / 2.0) * (baseline / length)

    def compute_position(self, positions):
        """Fuse candidate estimates into a single fix.

        The original least-squares formulation computed the point
        *equidistant* from the candidates (a circumcenter); when the
        candidates cluster around the true position — the normal case —
        that system is singular.  The centroid of the candidates is the
        meaningful estimate here.
        """
        pts = np.asarray(positions, dtype=float)
        center = pts.mean(axis=0)
        # Keep the fixed-height convention of the original code.
        return np.array([center[0], center[1], self.TAG_HEIGHT])

三、卡尔曼滤波融合

3.1 融合策略

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
class SensorFusion:
    """Weighted camera/radar/UWB fusion with per-sensor Kalman filters."""

    def __init__(self):
        # One Kalman filter per sensor stream.
        # NOTE(review): KalmanFilter is provided elsewhere in the project.
        self.kf_camera = KalmanFilter()
        self.kf_radar = KalmanFilter()
        self.kf_uwb = KalmanFilter()

        # Static base weights, scaled at runtime by per-sensor reliability.
        self.weights = {
            'camera': 0.4,
            'radar': 0.4,
            'uwb': 0.2,
        }

        # Environment state read by the reliability methods.  Fix: the
        # original never initialized these, so every reliability call
        # raised AttributeError until a caller set them by hand.
        self.light_level = 'normal'   # 'normal' | 'low' | 'dark'
        self.obstruction_level = 0.0  # fraction of camera view occluded, 0..1
        self.weather = 'clear'        # 'clear' | 'rain' | 'snow'
        self.base_stations = []       # UWB anchors currently visible

    def fuse_detections(self, camera_detections, radar_detections, uwb_detections):
        """Fuse one frame of detections from all three sensors.

        Each sensor's detections are transformed to the global frame,
        pushed through its Kalman filter, and the filtered states are
        combined with reliability-scaled weights.
        """
        # 1. Transform everything into the common (global) frame.
        #    NOTE(review): transform_to_global is defined elsewhere.
        camera_points = self.transform_to_global(camera_detections)
        radar_points = self.transform_to_global(radar_detections)
        uwb_points = self.transform_to_global(uwb_detections)

        # 2. Kalman prediction step for every stream.
        self.kf_camera.predict()
        self.kf_radar.predict()
        self.kf_uwb.predict()

        # 3. Measurement updates.
        for point in camera_points:
            self.kf_camera.update(point)
        for point in radar_points:
            self.kf_radar.update(point)
        for point in uwb_points:
            self.kf_uwb.update(point)

        # 4. Reliability-weighted fusion of the filtered states.
        fused = self.weighted_fusion(
            self.kf_camera.get_state(),
            self.kf_radar.get_state(),
            self.kf_uwb.get_state(),
        )

        return fused

    def weighted_fusion(self, camera_state, radar_state, uwb_state):
        """Combine three state estimates with dynamic reliability weights."""
        # Scale each static weight by the sensor's current reliability.
        camera_weight = self.weights['camera'] * self.get_camera_reliability()
        radar_weight = self.weights['radar'] * self.get_radar_reliability()
        uwb_weight = self.weights['uwb'] * self.get_uwb_reliability()

        total_weight = camera_weight + radar_weight + uwb_weight

        fused = (camera_state * camera_weight +
                 radar_state * radar_weight +
                 uwb_state * uwb_weight) / total_weight

        return fused

    def get_camera_reliability(self):
        """Camera reliability in [0, 1], degraded by darkness and occlusion."""
        reliability = 0.8

        if self.light_level == 'dark':
            reliability *= 0.6
        elif self.light_level == 'low':
            reliability *= 0.8

        if self.obstruction_level > 0.5:
            reliability *= 0.5

        return reliability

    def get_radar_reliability(self):
        """Radar reliability in [0, 1], degraded by precipitation."""
        reliability = 0.9

        if self.weather == 'rain':
            reliability *= 0.7
        elif self.weather == 'snow':
            reliability *= 0.6

        return reliability

    def get_uwb_reliability(self):
        """UWB reliability tiered by the number of visible anchors."""
        num_base_stations = len(self.base_stations)

        if num_base_stations < 3:
            return 0.5
        elif num_base_stations < 5:
            return 0.7
        else:
            return 0.9

四、DMS/OMS应用场景

4.1 分心检测融合

场景:检测驾驶员分心(视线离开道路)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
class DistractionDetectionFusion:
    """Fuses eye-gaze, head-pose and radar cues to flag driver distraction."""

    def __init__(self):
        # Per-modality detectors.
        # NOTE(review): EyeTracker, HeadPoseDetector and RadarDetector
        # are provided elsewhere in the project.
        self.eye_tracker = EyeTracker()
        self.head_pose = HeadPoseDetector()
        self.radar = RadarDetector()

    def detect_distraction(self, frame):
        """Classify one frame as 'distraction' or 'focused'.

        Returns a dict with 'status', 'confidence' and the contributing
        'sources'.
        """
        # Camera-based cues.
        eye_gaze = self.eye_tracker.estimate_gaze(frame)
        head_pose = self.head_pose.estimate_pose(frame)

        # Radar-based head position.
        head_position = self.radar.detect_head_position(frame)

        # Fuse all three cues into one score in [0, 1].
        distraction_score = self.fuse_gaze_and_head(
            eye_gaze, head_pose, head_position
        )

        # Decision: score above 0.8 means the driver is distracted.
        sources = ['eye_gaze', 'head_pose', 'radar']
        is_distracted = distraction_score > 0.8
        return {
            'status': 'distraction' if is_distracted else 'focused',
            'confidence': distraction_score if is_distracted else 1 - distraction_score,
            'sources': sources,
        }

    def fuse_gaze_and_head(self, eye_gaze, head_pose, head_position):
        """Weighted combination of gaze-deviation, head-yaw and radar cues.

        NOTE(review): compute_gaze_deviation, compute_head_yaw and
        compute_position_error are defined elsewhere in the project.
        """
        weighted_cues = (
            (0.4, self.compute_gaze_deviation(eye_gaze)),
            (0.3, self.compute_head_yaw(head_pose)),
            (0.3, self.compute_position_error(head_position, eye_gaze['direction'])),
        )
        return sum(weight * cue for weight, cue in weighted_cues)

4.2 儿童检测融合

场景:检测儿童遗留(CPD)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
class ChildDetectionFusion:
    """Child-presence detection (CPD) fusing camera, radar and UWB cues."""

    def __init__(self):
        # Sensor-specific detectors.
        # NOTE(review): CameraDetector, RadarDetector and UWBDetector are
        # provided elsewhere in the project.
        self.camera = CameraDetector()
        self.radar = RadarDetector()
        self.uwb = UWBDetector()

    def detect_child(self, frame):
        """Detect a child in the cabin, preferring high-confidence sensors.

        A sufficiently confident single-sensor hit short-circuits the
        fusion step; otherwise the three results are combined.
        """
        # Query every sensor up front.
        camera_result = self.camera.detect_child(frame)
        radar_result = self.radar.detect_child(frame)
        uwb_result = self.uwb.detect_child()

        # Trust a single sensor when its confidence clears its threshold.
        if camera_result['confidence'] > 0.8:
            return camera_result
        if radar_result['confidence'] > 0.9:
            return radar_result

        # Otherwise fall back to multi-sensor fusion.
        return self.fuse_results(camera_result, radar_result, uwb_result)

    def fuse_results(self, camera, radar, uwb):
        """Combine per-sensor results via a fixed weighted average.

        NOTE(review): estimate_position is defined elsewhere in the
        project; each result dict is expected to carry a 'source' key.
        """
        ordered = (camera, radar, uwb)

        # Confidence is a weighted average (camera 0.3, radar 0.5, uwb 0.2).
        score = sum(
            result['confidence'] * weight
            for result, weight in zip(ordered, (0.3, 0.5, 0.2))
        )

        # Position estimate from the combined sensors.
        position = self.estimate_position(camera, radar, uwb)

        return {
            'status': 'child_detected',
            'confidence': score,
            'position': position,
            'sources': [entry['source'] for entry in ordered],
        }

五、总结

5.1 关键要点

要点 说明
多传感器互补 摄像头+雷达+UWB互补性强
融合层级 早期→中期→晚期,根据需求选择
卡尔曼滤波 动态融合、平滑轨迹
实时性 需优化计算,满足<30ms要求

5.2 实施建议

  1. 硬件选择:根据场景选择合适传感器
  2. 融合策略:先中期融合,再决策级融合
  3. 动态权重:根据环境调整权重
  4. 实时优化:使用边缘AI芯片加速

参考文献

  1. IEEE. “Integrated Sensor Fusion Based on 4D MIMO Radar and Camera.” 2022.
  2. MDPI. “A Review of Multi-Sensor Fusion in Autonomous Driving.” 2025.

本文是多传感器融合系列文章之一,下一篇:边缘AI芯片对比


多传感器融合在DMS/OMS中的应用:雷达、摄像头与UWB协同
https://dapalm.com/2026/03/13/2026-03-13-多传感器融合在DMS-OMS中的应用-雷达-摄像头与UWB协同/
作者
Mars
发布于
2026年3月13日
许可协议