refactor: 迁移前端到 React 19 + Zustand + Tailwind CSS v4

- 将 vanilla TS 单文件 (app.ts 395行) 拆分为 React 组件化架构
- 引入 Zustand 管理全局状态 (连接/录音/预览/历史/toast)
- 自定义 hooks 封装 WebSocket 连接和音频录制管线
- CSS 全面 Tailwind 化,style.css 从 234 行精简到 114 行 (仅保留 tokens + keyframes)
- 新增依赖: react, react-dom, zustand, @vitejs/plugin-react
- Go 后端 embed 路径 web/dist 不变,无需改动
This commit is contained in:
2026-03-02 06:36:02 +08:00
parent ea46ad71bf
commit 70344bcd98
21 changed files with 914 additions and 687 deletions

View File

@@ -0,0 +1,127 @@
import { useCallback, useRef } from "react";
import { resampleTo16kInt16 } from "../lib/resample";
import { useAppStore } from "../stores/app-store";
import audioProcessorUrl from "../workers/audio-processor.ts?worker&url";
/**
 * Transport callbacks injected by the WebSocket layer.
 * The hook never owns the connection; it only pushes data through these.
 */
interface UseRecorderOptions {
  /** Send a JSON control message (e.g. `{ type: "start" }` / `{ type: "stop" }`). */
  sendJSON: (obj: Record<string, unknown>) => void;
  /** Send one chunk of 16 kHz mono PCM samples as binary. */
  sendBinary: (data: Int16Array) => void;
}
/**
 * Microphone recording pipeline: getUserMedia → AudioWorklet → software
 * resample to 16 kHz Int16 → `sendBinary`, with `{type:"start"|"stop"}`
 * control frames via `sendJSON`.
 *
 * Returns stable `startRecording` / `stopRecording` callbacks. Calling
 * `stopRecording` while a start attempt is still awaiting the permission
 * prompt aborts that attempt instead of tearing down a live session.
 */
export function useRecorder({ sendJSON, sendBinary }: UseRecorderOptions) {
  const audioCtxRef = useRef<AudioContext | null>(null);
  const workletRef = useRef<AudioWorkletNode | null>(null);
  const sourceRef = useRef<MediaStreamAudioSourceNode | null>(null);
  const streamRef = useRef<MediaStream | null>(null);
  const abortRef = useRef<AbortController | null>(null);
  // Keep the latest callbacks in refs so the worklet message handler and the
  // memoized start/stop closures never go stale across re-renders.
  const sendJSONRef = useRef(sendJSON);
  const sendBinaryRef = useRef(sendBinary);
  sendJSONRef.current = sendJSON;
  sendBinaryRef.current = sendBinary;

  /** Lazily create the AudioContext and load the worklet module (idempotent). */
  const initAudio = useCallback(async (): Promise<AudioContext> => {
    if (audioCtxRef.current) return audioCtxRef.current;
    // Use the device's native sample rate — we resample to 16 kHz in software.
    const ctx = new AudioContext();
    // Chrome keeps a fresh context suspended until resume() after a user gesture.
    if (ctx.state === "suspended") await ctx.resume();
    await ctx.audioWorklet.addModule(audioProcessorUrl);
    audioCtxRef.current = ctx;
    return ctx;
  }, []);

  /** Stop all mic tracks and drop the stream ref (safe to call when empty). */
  const releaseStream = useCallback(() => {
    streamRef.current?.getTracks().forEach((t) => {
      t.stop();
    });
    streamRef.current = null;
  }, []);

  const startRecording = useCallback(async () => {
    const store = useAppStore.getState();
    if (store.recording || store.pendingStart) return;
    store.setPendingStart(true);
    const abort = new AbortController();
    abortRef.current = abort;
    try {
      const ctx = await initAudio();
      if (abort.signal.aborted) {
        store.setPendingStart(false);
        return;
      }
      if (ctx.state === "suspended") await ctx.resume();
      if (abort.signal.aborted) {
        store.setPendingStart(false);
        return;
      }
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: {
          echoCancellation: true,
          noiseSuppression: true,
          channelCount: 1,
        },
      });
      // Record the stream BEFORE any further work so the catch block can
      // always release it via releaseStream().
      streamRef.current = stream;
      if (abort.signal.aborted) {
        // stopRecording() was pressed while the permission prompt was open.
        releaseStream();
        store.setPendingStart(false);
        return;
      }
      const source = ctx.createMediaStreamSource(stream);
      sourceRef.current = source;
      const worklet = new AudioWorkletNode(ctx, "audio-processor");
      worklet.port.onmessage = (e: MessageEvent) => {
        if (e.data.type === "audio") {
          sendBinaryRef.current(
            resampleTo16kInt16(e.data.samples, e.data.sampleRate),
          );
        }
      };
      source.connect(worklet);
      worklet.port.postMessage({ command: "start" });
      workletRef.current = worklet;
      store.setPendingStart(false);
      abortRef.current = null;
      store.setRecording(true);
      sendJSONRef.current({ type: "start" });
      store.clearPreview();
    } catch (err) {
      // BUGFIX: if anything throws after getUserMedia succeeded (e.g. the
      // worklet node failed to construct), release the mic instead of
      // leaving the browser's capture indicator on.
      releaseStream();
      sourceRef.current?.disconnect();
      sourceRef.current = null;
      const wasAborted = abort.signal.aborted;
      abortRef.current = null;
      useAppStore.getState().setPendingStart(false);
      // BUGFIX: a rejection triggered by a user-initiated abort is expected;
      // only surface unexpected failures as a toast.
      if (!wasAborted) {
        const message = err instanceof Error ? err.message : String(err);
        useAppStore
          .getState()
          .showToast(`\u9ea6\u514b\u98ce\u9519\u8bef: ${message}`);
      }
    }
  }, [initAudio, releaseStream]);

  const stopRecording = useCallback(() => {
    const store = useAppStore.getState();
    // A start attempt is still in flight: cancel it instead of tearing down.
    if (store.pendingStart) {
      abortRef.current?.abort();
      abortRef.current = null;
      store.setPendingStart(false);
      return;
    }
    if (!store.recording) return;
    store.setRecording(false);
    if (workletRef.current) {
      workletRef.current.port.postMessage({ command: "stop" });
      workletRef.current.disconnect();
      workletRef.current = null;
    }
    // BUGFIX: disconnect the source node too, so the audio graph holds no
    // reference to the now-stopped stream.
    sourceRef.current?.disconnect();
    sourceRef.current = null;
    releaseStream();
    sendJSONRef.current({ type: "stop" });
  }, [releaseStream]);

  return { startRecording, stopRecording };
}