|
| 1 | +""" |
| 2 | +TouchDesigner WebSocket 接收端示例 |
| 3 | +在 TouchDesigner 中创建一个 Web Server DAT,并将此脚本设置为其回调处理脚本 |
| 4 | +
|
| 5 | +配置 Web Server DAT: |
| 6 | +- Protocol: WebSocket |
| 7 | +- Local Port: 8080 |
| 8 | +- Enable WebSocket: On |
| 9 | +- Callbacks: 设置为此脚本所在的 DAT |
| 10 | +
|
| 11 | +此脚本将接收来自 Web 端的语音对话消息和状态更新, |
| 12 | +并可以驱动相应的可视化效果。 |
| 13 | +""" |
| 14 | + |
| 15 | +import json |
| 16 | + |
| 17 | +# 全局变量来存储当前状态 |
| 18 | +current_status = 'idle' |
| 19 | +last_message = '' |
| 20 | +last_user = '' |
| 21 | +current_volume = 0 |
| 22 | +current_spectrum = [] |
| 23 | + |
| 24 | +def onWebSocketReceiveText(dat, data, peer): |
| 25 | + """ |
| 26 | + 接收 WebSocket 文本消息的回调函数 |
| 27 | + |
| 28 | + Args: |
| 29 | + dat: Web Server DAT 对象 |
| 30 | + data: 接收到的文本数据 |
| 31 | + peer: 客户端信息 |
| 32 | + """ |
| 33 | + global current_status, last_message, last_user, current_volume, current_spectrum |
| 34 | + |
| 35 | + try: |
| 36 | + # 解析 JSON 消息 |
| 37 | + message = json.loads(data) |
| 38 | + message_type = message.get('type', '') |
| 39 | + |
| 40 | + print(f"📨 收到消息类型: {message_type}") |
| 41 | + |
| 42 | + if message_type == 'user_message': |
| 43 | + # 用户消息 |
| 44 | + handle_user_message(message) |
| 45 | + |
| 46 | + elif message_type == 'ai_message': |
| 47 | + # AI 消息 |
| 48 | + handle_ai_message(message) |
| 49 | + |
| 50 | + elif message_type == 'status_update': |
| 51 | + # 状态更新 |
| 52 | + handle_status_update(message) |
| 53 | + |
| 54 | + elif message_type == 'audio_data': |
| 55 | + # 音频数据 |
| 56 | + handle_audio_data(message) |
| 57 | + |
| 58 | + except json.JSONDecodeError as e: |
| 59 | + print(f"❌ JSON 解析失败: {e}") |
| 60 | + except Exception as e: |
| 61 | + print(f"❌ 处理消息失败: {e}") |
| 62 | + |
| 63 | +def handle_user_message(message): |
| 64 | + """处理用户消息""" |
| 65 | + global last_message, last_user |
| 66 | + |
| 67 | + text = message.get('text', '') |
| 68 | + user = message.get('user', '') |
| 69 | + definite = message.get('definite', False) |
| 70 | + paragraph = message.get('paragraph', False) |
| 71 | + |
| 72 | + last_message = text |
| 73 | + last_user = user |
| 74 | + |
| 75 | + print(f"👤 用户消息: {text}") |
| 76 | + |
| 77 | + # 更新 TouchDesigner 中的文本显示 |
| 78 | + update_text_display(text, 'user') |
| 79 | + |
| 80 | + # 触发用户消息可视化效果 |
| 81 | + trigger_user_visual_effect(text, definite, paragraph) |
| 82 | + |
| 83 | +def handle_ai_message(message): |
| 84 | + """处理 AI 消息""" |
| 85 | + global last_message, last_user |
| 86 | + |
| 87 | + text = message.get('text', '') |
| 88 | + user = message.get('user', '') |
| 89 | + definite = message.get('definite', False) |
| 90 | + paragraph = message.get('paragraph', False) |
| 91 | + is_interrupted = message.get('isInterrupted', False) |
| 92 | + |
| 93 | + last_message = text |
| 94 | + last_user = user |
| 95 | + |
| 96 | + print(f"🤖 AI 消息: {text}") |
| 97 | + |
| 98 | + # 更新 TouchDesigner 中的文本显示 |
| 99 | + update_text_display(text, 'ai') |
| 100 | + |
| 101 | + # 触发 AI 消息可视化效果 |
| 102 | + trigger_ai_visual_effect(text, definite, paragraph, is_interrupted) |
| 103 | + |
| 104 | +def handle_status_update(message): |
| 105 | + """处理状态更新""" |
| 106 | + global current_status |
| 107 | + |
| 108 | + status = message.get('status', 'idle') |
| 109 | + current_status = status |
| 110 | + |
| 111 | + print(f"🔄 状态更新: {status}") |
| 112 | + |
| 113 | + # 根据状态更新可视化效果 |
| 114 | + if status == 'speaking': |
| 115 | + trigger_ai_speaking_effect() |
| 116 | + elif status == 'listening': |
| 117 | + trigger_listening_effect() |
| 118 | + elif status == 'thinking': |
| 119 | + trigger_thinking_effect() |
| 120 | + elif status == 'idle': |
| 121 | + trigger_idle_effect() |
| 122 | + |
| 123 | +def handle_audio_data(message): |
| 124 | + """处理音频数据""" |
| 125 | + global current_volume, current_spectrum |
| 126 | + |
| 127 | + volume = message.get('volume', 0) |
| 128 | + spectrum = message.get('spectrum', []) |
| 129 | + |
| 130 | + current_volume = volume |
| 131 | + current_spectrum = spectrum |
| 132 | + |
| 133 | + # 更新音频可视化 |
| 134 | + update_audio_visualization(volume, spectrum) |
| 135 | + |
| 136 | +def update_text_display(text, speaker_type): |
| 137 | + """更新文本显示""" |
| 138 | + try: |
| 139 | + # 假设有一个 Text TOP 用于显示消息 |
| 140 | + text_top = op('message_display') |
| 141 | + if text_top: |
| 142 | + # 根据说话者类型设置不同的颜色 |
| 143 | + if speaker_type == 'user': |
| 144 | + text_top.par.text = f"User: {text}" |
| 145 | + # 设置用户消息颜色(蓝色) |
| 146 | + text_top.par.fontcolorr = 0.2 |
| 147 | + text_top.par.fontcolorg = 0.6 |
| 148 | + text_top.par.fontcolorb = 1.0 |
| 149 | + else: |
| 150 | + text_top.par.text = f"AI: {text}" |
| 151 | + # 设置 AI 消息颜色(绿色) |
| 152 | + text_top.par.fontcolorr = 0.2 |
| 153 | + text_top.par.fontcolorg = 1.0 |
| 154 | + text_top.par.fontcolorb = 0.3 |
| 155 | + except: |
| 156 | + print("⚠️ 更新文本显示失败") |
| 157 | + |
| 158 | +def trigger_user_visual_effect(text, definite, paragraph): |
| 159 | + """触发用户消息的可视化效果""" |
| 160 | + try: |
| 161 | + # 示例:触发用户输入的粒子效果 |
| 162 | + particle_comp = op('user_particles') |
| 163 | + if particle_comp: |
| 164 | + # 根据消息长度调整粒子数量 |
| 165 | + particle_count = min(len(text) * 2, 1000) |
| 166 | + particle_comp.par.count = particle_count |
| 167 | + |
| 168 | + # 如果是完整句子,触发爆发效果 |
| 169 | + if definite or paragraph: |
| 170 | + particle_comp.par.birthrate = 100 |
| 171 | + # 1秒后恢复正常 |
| 172 | + run("op('user_particles').par.birthrate = 10", delayFrames=30) |
| 173 | + except: |
| 174 | + print("⚠️ 触发用户可视化效果失败") |
| 175 | + |
| 176 | +def trigger_ai_visual_effect(text, definite, paragraph, is_interrupted): |
| 177 | + """触发 AI 消息的可视化效果""" |
| 178 | + try: |
| 179 | + # 示例:AI 响应的光环效果 |
| 180 | + ai_visual = op('ai_visual') |
| 181 | + if ai_visual: |
| 182 | + # 根据消息长度调整效果强度 |
| 183 | + intensity = min(len(text) / 50.0, 1.0) |
| 184 | + ai_visual.par.intensity = intensity |
| 185 | + |
| 186 | + # 如果被打断,显示特殊效果 |
| 187 | + if is_interrupted: |
| 188 | + ai_visual.par.colorr = 1.0 # 红色表示被打断 |
| 189 | + ai_visual.par.colorg = 0.3 |
| 190 | + ai_visual.par.colorb = 0.3 |
| 191 | + else: |
| 192 | + ai_visual.par.colorr = 0.3 # 正常绿色 |
| 193 | + ai_visual.par.colorg = 1.0 |
| 194 | + ai_visual.par.colorb = 0.3 |
| 195 | + except: |
| 196 | + print("⚠️ 触发 AI 可视化效果失败") |
| 197 | + |
| 198 | +def trigger_ai_speaking_effect(): |
| 199 | + """AI 说话状态的视觉效果""" |
| 200 | + try: |
| 201 | + # 启动说话状态的动画 |
| 202 | + speaking_anim = op('speaking_animation') |
| 203 | + if speaking_anim: |
| 204 | + speaking_anim.par.play = True |
| 205 | + |
| 206 | + # 调整整体场景亮度 |
| 207 | + scene_light = op('main_light') |
| 208 | + if scene_light: |
| 209 | + scene_light.par.dimmer = 0.8 |
| 210 | + except: |
| 211 | + print("⚠️ 触发 AI 说话效果失败") |
| 212 | + |
| 213 | +def trigger_listening_effect(): |
| 214 | + """监听状态的视觉效果""" |
| 215 | + try: |
| 216 | + # 启动监听状态的脉冲效果 |
| 217 | + listening_pulse = op('listening_pulse') |
| 218 | + if listening_pulse: |
| 219 | + listening_pulse.par.amplitude = 0.5 |
| 220 | + |
| 221 | + # 调整整体场景为蓝色调 |
| 222 | + scene_light = op('main_light') |
| 223 | + if scene_light: |
| 224 | + scene_light.par.colorr = 0.3 |
| 225 | + scene_light.par.colorg = 0.6 |
| 226 | + scene_light.par.colorb = 1.0 |
| 227 | + except: |
| 228 | + print("⚠️ 触发监听效果失败") |
| 229 | + |
| 230 | +def trigger_thinking_effect(): |
| 231 | + """思考状态的视觉效果""" |
| 232 | + try: |
| 233 | + # 启动思考状态的旋转效果 |
| 234 | + thinking_rotation = op('thinking_rotation') |
| 235 | + if thinking_rotation: |
| 236 | + thinking_rotation.par.speed = 2.0 |
| 237 | + |
| 238 | + # 调整整体场景为橙色调 |
| 239 | + scene_light = op('main_light') |
| 240 | + if scene_light: |
| 241 | + scene_light.par.colorr = 1.0 |
| 242 | + scene_light.par.colorg = 0.7 |
| 243 | + scene_light.par.colorb = 0.2 |
| 244 | + except: |
| 245 | + print("⚠️ 触发思考效果失败") |
| 246 | + |
| 247 | +def trigger_idle_effect(): |
| 248 | + """空闲状态的视觉效果""" |
| 249 | + try: |
| 250 | + # 恢复默认状态 |
| 251 | + default_anim = op('default_animation') |
| 252 | + if default_anim: |
| 253 | + default_anim.par.play = True |
| 254 | + |
| 255 | + # 恢复默认光照 |
| 256 | + scene_light = op('main_light') |
| 257 | + if scene_light: |
| 258 | + scene_light.par.colorr = 1.0 |
| 259 | + scene_light.par.colorg = 1.0 |
| 260 | + scene_light.par.colorb = 1.0 |
| 261 | + scene_light.par.dimmer = 0.6 |
| 262 | + except: |
| 263 | + print("⚠️ 触发空闲效果失败") |
| 264 | + |
| 265 | +def update_audio_visualization(volume, spectrum): |
| 266 | + """更新音频可视化""" |
| 267 | + try: |
| 268 | + # 更新音量表 |
| 269 | + volume_meter = op('volume_meter') |
| 270 | + if volume_meter: |
| 271 | + volume_meter.par.value0 = volume / 255.0 # 归一化到 0-1 |
| 272 | + |
| 273 | + # 更新频谱显示 |
| 274 | + if spectrum and len(spectrum) > 0: |
| 275 | + spectrum_viz = op('spectrum_viz') |
| 276 | + if spectrum_viz: |
| 277 | + # 将频谱数据写入 CHOP 或 DAT |
| 278 | + for i, freq_value in enumerate(spectrum): |
| 279 | + if i < 32: # 限制为前32个频段 |
| 280 | + # 这里需要根据实际的 TouchDesigner 网络结构调整 |
| 281 | + channel_name = f'freq_{i:02d}' |
| 282 | + # spectrum_viz.chan(channel_name).val = freq_value / 255.0 |
| 283 | + except: |
| 284 | + print("⚠️ 更新音频可视化失败") |
| 285 | + |
| 286 | +def onWebSocketReceiveBytes(dat, data, peer): |
| 287 | + """ |
| 288 | + 接收 WebSocket 二进制消息的回调函数 |
| 289 | + (如果需要传输音频数据等二进制内容时使用) |
| 290 | + """ |
| 291 | + print(f"📦 收到二进制数据,大小: {len(data)} 字节") |
| 292 | + |
| 293 | +def onWebSocketOpen(dat, peer): |
| 294 | + """WebSocket 连接建立时的回调""" |
| 295 | + print(f"🔗 WebSocket 连接已建立: {peer}") |
| 296 | + |
| 297 | +def onWebSocketClose(dat, peer): |
| 298 | + """WebSocket 连接关闭时的回调""" |
| 299 | + print(f"❌ WebSocket 连接已关闭: {peer}") |
| 300 | + |
| 301 | +# 辅助函数:获取当前状态信息(可在其他地方调用) |
| 302 | +def get_current_status(): |
| 303 | + """获取当前状态信息""" |
| 304 | + return { |
| 305 | + 'status': current_status, |
| 306 | + 'last_message': last_message, |
| 307 | + 'last_user': last_user, |
| 308 | + 'volume': current_volume, |
| 309 | + 'spectrum_length': len(current_spectrum) |
| 310 | + } |
| 311 | + |
# Usage example:
# From elsewhere in TouchDesigner, the current state can be read with:
#   status_info = op('websocket_handler').get_current_status()
#   print(status_info)