DMS/OMS合成数据生成:Anyverse vs Unity/Unreal对比与实践指南

核心问题: 车载感知AI训练数据不足、标注成本高、边缘案例难采集
解决方案: 合成数据生成(SDG)
平台对比: Anyverse(专用)vs Unity/Unreal(通用)


合成数据生成的核心价值

DMS/OMS数据痛点

痛点 真实数据 合成数据
采集成本 高(需要车队、驾驶员) 低(云端生成)
标注成本 高(人工标注) 零(自动生成GT)
边缘案例 难采集(危险、罕见场景) 可控生成
隐私合规 需授权 无隐私问题
多样性 受限于采集条件 无限变化

Euro NCAP场景覆盖需求

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
Euro NCAP DSM场景:
├── 疲劳检测 (F-01 ~ F-05)
│ ├── PERCLOS阈值检测
│ ├── 微睡眠检测
│ ├── 打哈欠检测
│ └── 眼睑下垂检测

├── 分心检测 (D-01 ~ D-08)
│ ├── 视线偏离
│ ├── 手机使用
│ ├── 饮食/吸烟
│ └── 操作中控

└── OMS场景
├── 儿童检测 (CPD)
├── 乘员姿态
└── 安全带状态

Anyverse vs Unity/Unreal 架构对比

核心差异

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
┌─────────────────────────────────────────────────────────────────┐
│ Unity / Unreal Engine │
├─────────────────────────────────────────────────────────────────┤
│ 设计目标:游戏/影视渲染 │
│ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ 游戏逻辑 │ │ 物理引擎 │ │ 渲染管线 │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ ↓ ↓ ↓ │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ 通用渲染框架 │ │
│ │ - 光照近似 │ │
│ │ - 材质简化 │ │
│ │ - 传感器模拟需额外开发 │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
│ 车载感知适配:需要大量定制开发 │
└─────────────────────────────────────────────────────────────────┘

┌─────────────────────────────────────────────────────────────────┐
│ Anyverse InCabin │
├─────────────────────────────────────────────────────────────────┤
│ 设计目标:车载感知AI训练数据 │
│ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ Euro NCAP │ │ 物理光传输 │ │ 传感器模拟 │ │
│ │ 场景库 │ │ 渲染引擎 │ │ RGB-IR/Radar│ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ ↓ ↓ ↓ │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ 车载专用数据生成 │ │
│ │ - 光谱渲染 │ │
│ │ - 传感器精确模拟 │ │
│ │ - 自动标注输出 │ │
│ └─────────────────────────────────────────────────────────┘ │
│ │
│ 开箱即用:面向DMS/OMS/CPD │
└─────────────────────────────────────────────────────────────────┘

功能对比表

功能 Anyverse Unity Perception Unreal
RGB生成 ✅ 物理精确 ✅ 近似 ✅ 高质量
红外/NIR ✅ 内置 ❌ 需开发 ❌ 需开发
Radar模拟 ✅ 内置 ❌ 无 ❌ 无
深度图 ✅ 精确 ✅ 支持 ⚠️ 需配置
语义分割 ✅ 像素级 ✅ 支持 ⚠️ 需配置
3D姿态GT ✅ 自动 ⚠️ 需配置 ⚠️ 需配置
Euro NCAP场景 ✅ 内置 ❌ 需自建 ❌ 需自建
光照变化 ✅ 物理准确 ⚠️ 近似 ⚠️ 近似
驾驶员模型 ✅ 多样化 ⚠️ 需购买 ⚠️ 需购买
车内环境 ✅ 内置 ❌ 需建模 ❌ 需建模
域迁移优化 ✅ 低gap ⚠️ 高gap ⚠️ 高gap
云端扩展 ✅ 原生 ⚠️ 需配置 ⚠️ 需配置

核心代码实现

1. Unity Perception数据生成

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
"""
Unity Perception合成数据生成配置
适用于DMS训练数据
"""

from typing import Dict, List, Optional
import json
import numpy as np


class UnityPerceptionConfig:
    """
    Builder for a Unity Perception data-generation configuration.

    Collects cameras, ground-truth labelers, randomizers and DMS
    scenarios, then serializes everything to a single JSON document
    compatible with the Unity 2021.3+ Perception Package workflow.
    """

    def __init__(self) -> None:
        self.scenarios: List[Dict] = []
        self.cameras: List[Dict] = []
        self.labelers: List[Dict] = []
        self.randomizers: List[Dict] = []

    def add_camera(
        self,
        name: str,
        position: List[float],
        rotation: List[float],
        fov: float = 60.0,
        resolution: Optional[List[int]] = None
    ) -> Dict:
        """
        Register a camera.

        Args:
            name: Camera name.
            position: Position [x, y, z].
            rotation: Rotation [x, y, z] (Euler angles, degrees).
            fov: Field of view in degrees.
            resolution: Image resolution [width, height]; defaults to
                [640, 480].

        Returns:
            The camera configuration dict that was appended.
        """
        # BUG FIX: the original signature used a mutable default argument
        # (resolution=[640, 480]). Every call shared that single list, so
        # mutating one camera's "resolution" silently leaked into every
        # camera created afterwards. The None sentinel yields a fresh
        # list per call.
        camera = {
            "name": name,
            "position": position,
            "rotation": rotation,
            "fieldOfView": fov,
            "resolution": resolution if resolution is not None else [640, 480],
            "sensorType": "RGB"
        }

        self.cameras.append(camera)
        return camera

    def add_labeler(
        self,
        labeler_type: str,
        output_format: str = "PNG"
    ) -> Dict:
        """
        Register a ground-truth labeler.

        Args:
            labeler_type: Labeler kind, one of:
                - "bounding_box": 2D bounding boxes
                - "semantic_segmentation": semantic segmentation
                - "instance_segmentation": instance segmentation
                - "depth": depth map
                - "keypoints": keypoints
            output_format: Output file format.

        Returns:
            The labeler configuration dict that was appended.
        """
        labeler = {
            "type": labeler_type,
            "outputFormat": output_format
        }

        self.labelers.append(labeler)
        return labeler

    def add_randomizer(
        self,
        randomizer_type: str,
        parameters: Dict
    ) -> Dict:
        """
        Register a domain randomizer.

        Args:
            randomizer_type: Randomizer kind, one of:
                - "lighting": lighting
                - "camera_pose": camera pose
                - "object_pose": object pose
                - "texture": texture
                - "weather": weather
            parameters: Randomizer parameters.

        Returns:
            The randomizer configuration dict that was appended.
        """
        randomizer = {
            "type": randomizer_type,
            "parameters": parameters
        }

        self.randomizers.append(randomizer)
        return randomizer

    def create_dms_scenario(
        self,
        scenario_name: str,
        driver_variations: int = 10,
        lighting_variations: int = 5
    ) -> Dict:
        """
        Assemble a complete DMS scenario description.

        Args:
            scenario_name: Scenario name.
            driver_variations: Number of driver variants per state.
                NOTE(review): only used in the "totalVariations" estimate
                here; the renderer presumably expands the actual variants
                — confirm against the consuming pipeline.
            lighting_variations: Number of lighting presets to keep.

        Returns:
            The scenario dict that was appended.
        """
        # Driver states covering Euro NCAP fatigue/distraction cases.
        driver_states = [
            {"state": "normal", "eyes_open": True, "head_forward": True},
            {"state": "drowsy", "eyes_open": 0.6, "head_forward": True},
            {"state": "yawning", "eyes_open": 0.3, "mouth_open": 0.8},
            {"state": "looking_left", "eyes_open": True, "head_yaw": -30},
            {"state": "looking_right", "eyes_open": True, "head_yaw": 30},
            {"state": "looking_down", "eyes_open": True, "head_pitch": 20},
            {"state": "phone_call", "eyes_open": True, "hand_to_ear": True},
            {"state": "microsleep", "eyes_open": 0.0, "duration": 1.5}
        ]

        # Lighting presets (intensity in arbitrary engine units).
        lighting_conditions = [
            {"time": "day", "intensity": 1000, "color": [1.0, 1.0, 1.0]},
            {"time": "sunset", "intensity": 500, "color": [1.0, 0.8, 0.6]},
            {"time": "night", "intensity": 10, "color": [0.1, 0.1, 0.2]},
            {"time": "tunnel", "intensity": 200, "color": [0.9, 0.9, 1.0]},
            {"time": "backlight", "intensity": 2000, "color": [1.0, 1.0, 0.9]}
        ]

        # Face-occlusion presets (factor = occluded fraction).
        occlusions = [
            {"type": "none", "factor": 0},
            {"type": "sunglasses", "factor": 0.3},
            {"type": "mask", "factor": 0.5},
            {"type": "hair", "factor": 0.2},
            {"type": "hand", "factor": 0.15}
        ]

        scenario = {
            "name": scenario_name,
            "driverStates": driver_states,
            "lightingConditions": lighting_conditions[:lighting_variations],
            "occlusions": occlusions,
            "totalVariations": driver_variations * lighting_variations * len(occlusions)
        }

        self.scenarios.append(scenario)
        return scenario

    def export_config(self, filepath: str) -> None:
        """Serialize the accumulated configuration to a JSON file."""
        config = {
            "version": "0.10.0",
            "scenarios": self.scenarios,
            "cameras": self.cameras,
            "labelers": self.labelers,
            "randomizers": self.randomizers
        }

        with open(filepath, 'w') as f:
            json.dump(config, f, indent=2)


# Usage example
if __name__ == "__main__":
    cfg = UnityPerceptionConfig()

    # DMS camera mounted behind the steering wheel, tilted slightly down.
    cfg.add_camera(
        name="dms_camera",
        position=[0.0, 0.3, 0.5],
        rotation=[0.0, 15.0, 0.0],
        fov=60.0,
        resolution=[640, 480],
    )

    # Ground-truth labelers.
    for labeler_kind in ("bounding_box", "semantic_segmentation", "keypoints"):
        cfg.add_labeler(labeler_kind)

    # Lighting randomization.
    cfg.add_randomizer(
        "lighting",
        {
            "intensity_range": [100, 2000],
            "color_temperature_range": [3000, 7000],
        },
    )

    # Assemble the DMS scenario and write the config out.
    cfg.create_dms_scenario("dms_training", driver_variations=10)
    cfg.export_config("unity_perception_config.json")

    print("Unity Perception配置已生成")

2. Anyverse风格的数据生成器

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
"""
车载感知合成数据生成器
模拟Anyverse核心功能
"""

import numpy as np
from typing import Dict, List, Tuple, Optional
from dataclasses import dataclass
import json


@dataclass
class DriverState:
    """Driver-state ground truth for a single synthetic sample."""
    state_name: str  # e.g. "normal", "drowsy_mild", "yawning"
    eye_openness: float  # 0 (fully closed) .. 1 (fully open)
    mouth_openness: float  # 0 .. 1
    head_pitch: float  # degrees
    head_yaw: float  # degrees
    head_roll: float  # degrees
    gaze_direction: Tuple[float, float, float]  # 3D gaze vector (not necessarily normalized)
    blink_rate: float  # blinks/min
    perclos: float  # PERCLOS percentage (0-100)


@dataclass
class LightingCondition:
    """Lighting condition for one rendered scene."""
    name: str  # preset name, e.g. "day_clear", "tunnel_entrance"
    ambient_intensity: float  # lux
    sun_intensity: float  # lux
    sun_angle: Tuple[float, float]  # (altitude, azimuth) in degrees
    color_temperature: int  # Kelvin
    ir_intensity: float  # IR illumination level for the NIR camera


@dataclass
class CameraConfig:
    """In-cabin camera configuration."""
    name: str
    sensor_type: str  # "RGB", "RGB-IR" or "NIR"
    resolution: Tuple[int, int]  # (width, height) in pixels
    fov: float  # field of view, degrees
    position: Tuple[float, float, float]  # cabin-frame position — units presumably meters, TODO confirm
    rotation: Tuple[float, float, float]  # Euler angles, degrees
    exposure_time: float  # ms
    gain: float  # ISO


class SyntheticDataGenerator:
    """
    In-cabin synthetic data generator (Anyverse-style).

    Produces driver states, lighting variations and per-sample
    annotations (Euro NCAP label, bounding boxes, keypoints). No image
    is actually rendered here: `generate_sample` emits the ground truth
    a render engine would be asked to realize.
    """

    # Euro NCAP DSM scenario catalogue: scenario id -> description,
    # trigger condition and training label.
    EURO_NCAP_SCENARIOS = {
        "F-01_PERCLOS": {
            "description": "PERCLOS阈值检测",
            "trigger_condition": "PERCLOS >= 30% over 60s",
            "label": "fatigue_level_2"
        },
        "F-02_microsleep": {
            "description": "微睡眠检测",
            "trigger_condition": "eyes_closed >= 1.5s",
            "label": "microsleep"
        },
        "F-04_eyelid_droop": {
            "description": "眼睑下垂",
            "trigger_condition": "eye_openness <= 50%",
            "label": "eyelid_droop"
        },
        "F-05_yawning": {
            "description": "打哈欠",
            "trigger_condition": "mouth_open > 80% for >= 3s",
            "label": "yawning"
        },
        "D-01_long_distraction": {
            "description": "长时间分心",
            "trigger_condition": "gaze_off_road >= 3s",
            "label": "distraction"
        },
        "D-02_phone_handheld": {
            "description": "手持电话",
            "trigger_condition": "hand_near_ear AND phone_detected",
            "label": "phone_use"
        }
    }

    def __init__(
        self,
        output_dir: str = "./synthetic_data",
        seed: Optional[int] = None
    ):
        """
        Args:
            output_dir: Output directory for generated artifacts.
            seed: Optional RNG seed for reproducible datasets.
                Backward-compatible addition; None keeps the previous
                non-deterministic behavior.
        """
        self.output_dir = output_dir
        # Filled by generate_dataset() with the last produced samples.
        self.generated_samples: List[Dict] = []
        if seed is not None:
            np.random.seed(seed)

    def generate_driver_states(
        self,
        num_samples: int = 1000,
        include_variations: bool = True
    ) -> List["DriverState"]:
        """
        Generate a diverse list of driver states.

        Args:
            num_samples: Exact number of states to return.
            include_variations: Add Gaussian noise around each base
                state so samples are not exact duplicates.

        Returns:
            Exactly ``num_samples`` DriverState instances, drawn
            round-robin from the base states.
        """
        # Base states covering normal driving, fatigue levels,
        # microsleep, yawning and common distraction poses.
        base_states = [
            # Normal driving
            DriverState(
                state_name="normal",
                eye_openness=0.85,
                mouth_openness=0.1,
                head_pitch=0.0,
                head_yaw=0.0,
                head_roll=0.0,
                gaze_direction=(0.0, 0.0, 1.0),
                blink_rate=15.0,
                perclos=5.0
            ),
            # Fatigue - mild
            DriverState(
                state_name="drowsy_mild",
                eye_openness=0.6,
                mouth_openness=0.15,
                head_pitch=5.0,
                head_yaw=0.0,
                head_roll=0.0,
                gaze_direction=(0.0, -0.1, 1.0),
                blink_rate=25.0,
                perclos=20.0
            ),
            # Fatigue - severe
            DriverState(
                state_name="drowsy_severe",
                eye_openness=0.3,
                mouth_openness=0.2,
                head_pitch=10.0,
                head_yaw=0.0,
                head_roll=2.0,
                gaze_direction=(0.0, -0.2, 0.98),
                blink_rate=35.0,
                perclos=45.0
            ),
            # Microsleep
            DriverState(
                state_name="microsleep",
                eye_openness=0.0,
                mouth_openness=0.1,
                head_pitch=15.0,
                head_yaw=0.0,
                head_roll=0.0,
                gaze_direction=(0.0, 0.0, 1.0),
                blink_rate=0.0,
                perclos=100.0
            ),
            # Yawning
            DriverState(
                state_name="yawning",
                eye_openness=0.2,
                mouth_openness=0.85,
                head_pitch=5.0,
                head_yaw=0.0,
                head_roll=0.0,
                gaze_direction=(0.0, 0.0, 1.0),
                blink_rate=20.0,
                perclos=40.0
            ),
            # Glancing left
            DriverState(
                state_name="looking_left",
                eye_openness=0.85,
                mouth_openness=0.1,
                head_pitch=0.0,
                head_yaw=-35.0,
                head_roll=0.0,
                gaze_direction=(-0.6, 0.0, 0.8),
                blink_rate=15.0,
                perclos=5.0
            ),
            # Looking down at a phone
            DriverState(
                state_name="looking_down_phone",
                eye_openness=0.85,
                mouth_openness=0.1,
                head_pitch=25.0,
                head_yaw=0.0,
                head_roll=0.0,
                gaze_direction=(0.0, -0.5, 0.87),
                blink_rate=10.0,
                perclos=5.0
            ),
            # Phone call
            DriverState(
                state_name="phone_call",
                eye_openness=0.7,
                mouth_openness=0.4,
                head_pitch=5.0,
                head_yaw=-15.0,
                head_roll=10.0,
                gaze_direction=(-0.3, 0.0, 0.95),
                blink_rate=12.0,
                perclos=10.0
            )
        ]

        # BUG FIX: the original computed num_samples // len(base_states)
        # samples per state and silently dropped the remainder (17
        # requested -> 16 returned; anything below 8 -> 0). Drawing
        # round-robin returns exactly num_samples.
        states: List["DriverState"] = []
        for i in range(num_samples):
            base = base_states[i % len(base_states)]
            if include_variations:
                # Jitter around the base state so no two samples match.
                state = DriverState(
                    state_name=base.state_name,
                    eye_openness=np.clip(base.eye_openness + np.random.normal(0, 0.05), 0, 1),
                    mouth_openness=np.clip(base.mouth_openness + np.random.normal(0, 0.05), 0, 1),
                    head_pitch=base.head_pitch + np.random.normal(0, 3),
                    head_yaw=base.head_yaw + np.random.normal(0, 5),
                    head_roll=base.head_roll + np.random.normal(0, 2),
                    gaze_direction=tuple(np.array(base.gaze_direction) + np.random.normal(0, 0.05, 3)),
                    blink_rate=max(0, base.blink_rate + np.random.normal(0, 3)),
                    perclos=np.clip(base.perclos + np.random.normal(0, 5), 0, 100)
                )
            else:
                state = base
            states.append(state)

        return states

    def generate_lighting_variations(
        self,
        num_variations: int = 100
    ) -> List["LightingCondition"]:
        """
        Generate lighting variations around a fixed set of presets.

        Args:
            num_variations: Number of conditions to produce.

        Returns:
            List of LightingCondition instances.
        """
        conditions: List["LightingCondition"] = []

        # Preset lighting scenes (name, ambient lux, sun lux,
        # (altitude, azimuth), color temperature K, IR level).
        presets = [
            LightingCondition("day_clear", 1000, 50000, (60, 180), 6500, 0),
            LightingCondition("day_overcast", 800, 20000, (45, 180), 7000, 0),
            LightingCondition("sunset", 400, 5000, (10, 270), 3500, 0),
            LightingCondition("night_urban", 50, 0, (0, 0), 3000, 200),
            LightingCondition("tunnel_entrance", 300, 1000, (45, 180), 5500, 100),
            LightingCondition("tunnel_exit", 600, 20000, (45, 180), 6500, 50),
            LightingCondition("backlight", 200, 80000, (20, 180), 5500, 0),
            LightingCondition("garage", 100, 0, (0, 0), 4000, 150)
        ]

        # Sample a preset at random and jitter it.
        for _ in range(num_variations):
            base = presets[np.random.randint(len(presets))]

            condition = LightingCondition(
                name=base.name,
                ambient_intensity=max(0, base.ambient_intensity + np.random.normal(0, 100)),
                sun_intensity=max(0, base.sun_intensity + np.random.normal(0, 5000)),
                sun_angle=(base.sun_angle[0] + np.random.normal(0, 5),
                           base.sun_angle[1] + np.random.normal(0, 20)),
                color_temperature=int(base.color_temperature + np.random.normal(0, 500)),
                ir_intensity=max(0, base.ir_intensity + np.random.normal(0, 30))
            )

            conditions.append(condition)

        return conditions

    def generate_sample(
        self,
        driver_state: "DriverState",
        lighting: "LightingCondition",
        camera: "CameraConfig"
    ) -> Dict:
        """
        Generate the annotation record for a single sample.

        Args:
            driver_state: Driver state ground truth.
            lighting: Lighting condition.
            camera: Camera configuration.

        Returns:
            Annotation dict (image id, driver state, lighting, camera,
            Euro NCAP label, bounding boxes and keypoints).
        """
        # Image generation stub — a real pipeline would invoke the
        # render engine here; we only mint an id.
        image_id = f"{driver_state.state_name}_{lighting.name}_{np.random.randint(10000)}"

        annotations = {
            "image_id": image_id,
            "driver_state": {
                "state": driver_state.state_name,
                "eye_openness": driver_state.eye_openness,
                "mouth_openness": driver_state.mouth_openness,
                "head_pose": {
                    "pitch": driver_state.head_pitch,
                    "yaw": driver_state.head_yaw,
                    "roll": driver_state.head_roll
                },
                "gaze": list(driver_state.gaze_direction),
                "perclos": driver_state.perclos
            },
            "lighting": {
                "condition": lighting.name,
                "ambient": lighting.ambient_intensity,
                "color_temp": lighting.color_temperature
            },
            "camera": {
                "type": camera.sensor_type,
                "resolution": list(camera.resolution)
            },
            "euro_ncap_label": self._get_euro_ncap_label(driver_state),
            "bounding_boxes": self._generate_bbox(driver_state, camera),
            "keypoints": self._generate_keypoints(driver_state, camera)
        }

        return annotations

    def _get_euro_ncap_label(self, state: "DriverState") -> str:
        """Map a driver state to its Euro NCAP scenario label.

        Checks are ordered by severity: fatigue (PERCLOS, microsleep,
        eyelid droop, yawning) before distraction, else "normal".
        """
        if state.perclos >= 30:
            return "F-01_PERCLOS"
        elif state.eye_openness == 0:
            return "F-02_microsleep"
        elif state.eye_openness <= 0.5:
            return "F-04_eyelid_droop"
        elif state.mouth_openness >= 0.8:
            return "F-05_yawning"
        elif abs(state.head_yaw) >= 30 or abs(state.head_pitch) >= 20:
            return "D-01_long_distraction"
        else:
            return "normal"

    def _generate_bbox(
        self,
        state: "DriverState",
        camera: "CameraConfig"
    ) -> Dict:
        """Generate bounding-box annotations ([x, y, w, h] in pixels).

        Simplification: the driver is assumed centered in the frame; a
        real pipeline would project from camera intrinsics and the
        driver's 3D pose.
        """
        base_x, base_y = camera.resolution[0] // 2, camera.resolution[1] // 2
        face_size = 200  # pixels

        # Shift boxes according to head pose (2 px per degree).
        yaw_offset = int(state.head_yaw * 2)
        pitch_offset = int(state.head_pitch * 2)

        return {
            "face": [
                base_x - face_size // 2 + yaw_offset,
                base_y - face_size // 2 + pitch_offset,
                face_size,
                face_size
            ],
            "left_eye": [
                base_x - 50 + yaw_offset,
                base_y - 30 + pitch_offset,
                40, 20
            ],
            "right_eye": [
                base_x + 10 + yaw_offset,
                base_y - 30 + pitch_offset,
                40, 20
            ],
            "mouth": [
                base_x - 30 + yaw_offset,
                base_y + 30 + pitch_offset,
                60, int(40 * state.mouth_openness)
            ]
        }

    def _generate_keypoints(
        self,
        state: "DriverState",
        camera: "CameraConfig"
    ) -> List[Dict]:
        """Generate simplified facial keypoint annotations (pixels)."""
        base_x, base_y = camera.resolution[0] // 2, camera.resolution[1] // 2

        # Same 2 px/degree head-pose shift as the bounding boxes.
        yaw_offset = int(state.head_yaw * 2)
        pitch_offset = int(state.head_pitch * 2)

        keypoints = [
            {"name": "left_eye_outer", "x": base_x - 60 + yaw_offset, "y": base_y - 35 + pitch_offset},
            {"name": "left_eye_inner", "x": base_x - 30 + yaw_offset, "y": base_y - 35 + pitch_offset},
            {"name": "right_eye_inner", "x": base_x + 10 + yaw_offset, "y": base_y - 35 + pitch_offset},
            {"name": "right_eye_outer", "x": base_x + 40 + yaw_offset, "y": base_y - 35 + pitch_offset},
            {"name": "nose_tip", "x": base_x + yaw_offset, "y": base_y + pitch_offset},
            {"name": "mouth_left", "x": base_x - 25 + yaw_offset, "y": base_y + 40 + pitch_offset},
            {"name": "mouth_right", "x": base_x + 25 + yaw_offset, "y": base_y + 40 + pitch_offset},
            {"name": "chin", "x": base_x + yaw_offset, "y": base_y + 70 + pitch_offset}
        ]

        return keypoints

    def generate_dataset(
        self,
        num_samples: int = 10000,
        output_format: str = "coco"
    ) -> Dict:
        """
        Generate a complete annotated dataset.

        Args:
            num_samples: Number of samples.
            output_format: Target format tag (coco, yolo, voc) — only
                recorded in the returned info, not yet serialized.

        Returns:
            Dataset info: total count, per-state distribution, format.
        """
        # Generate states and lighting conditions.
        driver_states = self.generate_driver_states(num_samples)
        lighting_conditions = self.generate_lighting_variations(100)

        # Fixed DMS camera configuration.
        camera = CameraConfig(
            name="dms_camera",
            sensor_type="RGB-IR",
            resolution=(640, 480),
            fov=60.0,
            position=(0.0, 0.3, 0.5),
            rotation=(0.0, 15.0, 0.0),
            exposure_time=10.0,
            gain=100.0
        )

        # Pair each state with a lighting condition (cycled) and build
        # the annotation record.
        samples = []
        for i, state in enumerate(driver_states):
            lighting = lighting_conditions[i % len(lighting_conditions)]
            samples.append(self.generate_sample(state, lighting, camera))

            if (i + 1) % 1000 == 0:
                print(f"已生成 {i + 1}/{num_samples} 样本")

        # Keep the samples accessible on the instance (the original left
        # generated_samples permanently empty).
        self.generated_samples = samples

        # Summary statistics.
        dataset_info = {
            "total_samples": len(samples),
            "states_distribution": {},
            "output_format": output_format
        }

        for sample in samples:
            state = sample["driver_state"]["state"]
            dataset_info["states_distribution"][state] = \
                dataset_info["states_distribution"].get(state, 0) + 1

        return dataset_info


# Usage example
if __name__ == "__main__":
    gen = SyntheticDataGenerator()

    # Build the full dataset and report its composition.
    info = gen.generate_dataset(num_samples=10000)

    print("=== 合成数据生成完成 ===")
    print(f"总样本数: {info['total_samples']}")
    print("状态分布:")
    for state_name, count in info['states_distribution'].items():
        print(f" {state_name}: {count}")

最佳实践建议

数据配比策略

场景 真实数据 合成数据 说明
正常驾驶 40% 10% 真实数据更可靠
疲劳场景 20% 30% 合成数据补充边缘案例
分心场景 15% 25% 合成数据增加多样性
极端条件 5% 45% 合成数据覆盖罕见场景

域迁移优化

1
2
3
4
5
6
7
# Strategies for narrowing the synthetic-to-real domain gap.
DOMAIN_ADAPTATION_STRATEGIES = {
    "style_transfer": "使用CycleGAN将合成图像转为真实风格",
    "domain_randomization": "增加合成数据多样性,覆盖真实分布",
    "mixed_training": "混合真实和合成数据训练",
    "fine_tuning": "真实数据微调预训练模型"
}

IMS 开发建议

需求 推荐方案 理由
快速原型 Unity Perception 低成本,快速验证
Euro NCAP合规 Anyverse 内置场景,官方认可
特定定制 Unity + 自建场景 完全控制
预算有限 开源工具 + 数据增强 成本最优

总结

维度 Unity/Unreal Anyverse
学习曲线 中等 低(开箱即用)
定制灵活性 高 中
Euro NCAP支持 需自建 内置
传感器模拟 需开发 原生支持
成本 中(人力成本) 高(许可费)
适用阶段 原型验证 生产部署

发布时间: 2026-04-22
标签: #合成数据 #DMS #OMS #Unity #Anyverse #Euro NCAP


DMS/OMS合成数据生成:Anyverse vs Unity/Unreal对比与实践指南
https://dapalm.com/2026/04/22/2026-04-22-synthetic-data-generation-dms-oms/
作者
Mars
发布于
2026年4月22日
许可协议