React.js 中的实时音频流:处理和播放实时音频缓冲区

本文分享在项目中如何使用 React.js 实现实时音频流功能,核心是处理通过 WebSocket 接收到的音频 Blob,将其存储在队列中,并无缝播放。

关键组件

  • WebSocket 连接
  • 处理媒体事件
  • 在队列中存储缓冲数据
  • 处理缓冲队列并转换为 URL 队列
  • 通过 URL 队列播放音频

WebSocket 连接

要实现实时音频流,第一步是建立 WebSocket 连接,用于接收后端发送的、携带音频缓冲区数据的媒体事件。下面是建立 WebSocket 连接的代码片段。

// page.js

const [socket, setSocket] = useState(null);

/**
 * Opens a WebSocket connection to the media server and stores it in state.
 * NOTE(review): `async` is kept for interface compatibility, but nothing is
 * awaited here — the connection completes asynchronously via `onopen`.
 */
async function handleConnectWebSocket(){
  const serverUrl = "Your socket server url starting with ws or wss";
  const newSocket = new WebSocket(serverUrl);

  newSocket.onopen = () => {
    console.log("Connected to socket");
  };
  newSocket.onclose = () => {
    console.log("Disconnected");
  };
  // Fix: surface connection failures instead of dropping them silently.
  newSocket.onerror = (err) => {
    console.error("WebSocket error:", err);
  };

  setSocket(newSocket);
}

处理媒体事件

当服务器发送音频缓冲区时,我们需要适当处理这些数据。下面的代码段演示了如何管理传入的媒体事件。

// page.js

const [buffers, setBuffers] = useState([]);

useEffect(() => {
  /**
   * Handles incoming WebSocket messages: appends the audio payload of
   * "media" events to the buffer queue. Non-media events are ignored.
   */
  function handleMessage(event){
    let message;
    try {
      // Fix: a non-JSON frame (e.g. a keep-alive ping) used to throw and
      // break the handler; ignore it instead.
      message = JSON.parse(event.data);
    } catch (err) {
      console.error("Ignoring non-JSON socket message:", err);
      return;
    }
    if (message?.event === "media") {
      // Fix: optional chaining — the original threw if `media` was absent.
      const mediaPayload = message.media?.payload;
      // Fix: don't push `undefined` into the buffer queue.
      if (mediaPayload?.data != null) {
        setBuffers((prevBuffers) => [...prevBuffers, mediaPayload.data]);
      }
    }
  }
  if (socket) {
    socket.onmessage = handleMessage;
  }

  // Detach the handler when the socket changes or the component unmounts.
  return () => {
    if (socket) {
      socket.onmessage = null;
    }
  };
}, [socket]);

处理缓冲队列并转换为 URL 队列

一旦队列中出现音频缓冲区,我们就需要对其进行处理,并将其转换为可播放的 URL。

// page.js

  useEffect(() => {
    // Nothing to do until at least one audio chunk has been queued.
    if (buffers.length === 0) {
      return;
    }
    // Flatten the queued chunks into one byte array, wrap it in an MP3 Blob,
    // and publish a playable object URL for the playback effect.
    // NOTE(review): assumes each queued item is an array of byte values —
    // confirm against the server's payload format.
    const bytes = new Uint8Array(buffers.flat());
    const audioBlob = new Blob([bytes], { type: "audio/mpeg" });
    const objectUrl = window.URL.createObjectURL(audioBlob);
    setUrlQueue((queue) => [...queue, objectUrl]);
    // Reset the chunk queue so the next batch starts fresh.
    setBuffers([]);
  }, [buffers]);

通过 URL 队列播放音频

有了 URL 队列中的 URL,我们就可以使用 JavaScript 的 Audio 对象依次播放音频,并通过 autoplay 属性让每段音频在加载后自动开始播放。

  // Fix: `urlQueue` was used throughout this snippet but never declared.
  const [urlQueue, setUrlQueue] = useState([]);
  const [isPlaying, setIsPlaying] = useState(false);
  const [audioPlaying, setAudioPlaying] = useState(null);

  useEffect(() => {
    /** Plays the URL at the head of the queue, then advances the queue. */
    const playNextAudio = () => {
      const nextUrl = urlQueue[0];
      try {
        const audio = new Audio();
        setAudioPlaying(audio);

        audio.src = nextUrl;
        audio.autoplay = true;
        audio.preload = "auto";
        setIsPlaying(true);

        const advanceQueue = () => {
          // Fix: release the Blob URL — the original leaked every chunk.
          window.URL.revokeObjectURL(nextUrl);
          setIsPlaying(false);
          setUrlQueue((prevQ) => prevQ.slice(1));
        };
        audio.onended = advanceQueue;
        // Fix: skip broken chunks instead of stalling the queue forever.
        audio.onerror = advanceQueue;
        // Fix: removed call to undefined `setAudioElem` (ReferenceError).
      } catch (error) {
        console.error("Error playing Mp3 audio:", error);
      }
    };
    if (!isPlaying && urlQueue.length > 0) {
      playNextAudio();
    }
  }, [urlQueue, isPlaying]);

完整代码

// page.js
import React, {useState, useEffect} from "react"

function StreamAudio(){
  /* States */
  const [socket, setSocket] = useState(null);
  const [buffers, setBuffers] = useState([]);
  const [isPlaying, setIsPlaying] = useState(false);
  const [audioPlaying, setAudioPlaying] = useState(null);
  
  /* Connect Web Socket Fn */
  async function handleConnectWebSocket(){
    const serverUrl = "Your socket server url starting with ws or wss";
    const newSocket = new WebSocket(serverUrl);
  
    newSocket.onopen = () => {
          console.log("Connected to socket");
        };
    newSocket.onclose = () => {
      console.log("Disconnected");
    };
  
    setSocket(newSocket);
  }

  /* Handling Web Socket Media Event */
  useEffect(() => {
    async function handleMessage(event){
      const message = JSON.parse(event.data);
      if (message?.event === "media") {
              const mediaPayload = message.media.payload;
              setBuffers((prevBuffers) => [...prevBuffers, mediaPayload?.data]);
        }
      }
      if (socket) {
        socket.onmessage = handleMessage;
      }
  
      return () => {
        if (socket) {
          socket.onmessage = null;
        }
      };
  }, [socket]);
  
  /* Process Buffer Array And Add Url To UrlQueue */
  useEffect(() => {
      if (buffers.length > 0) {
        const audioData = new Uint8Array(buffers.flat());
        const blob = new Blob([audioData], { type: "audio/mpeg" });
        const url = window.URL.createObjectURL(blob);
        setUrlQueue((prevUrlQueue) => [...prevUrlQueue, url]);
        setBuffers([]);
      }
    }, [buffers]);
  
  /* Play Audio Through UrlQueue */
  useEffect(() => {
    const playNAudio = async () => {
      const nextUrl = urlQueue[0];
      try {
        if (urlQueue.length) {
          const audio = new Audio();
          setAudioPlaying(audio);

          audio.src = nextUrl;
          audio.autoplay = true;
          audio.preload = "auto";
          setIsPlaying(true);
          audio.onended = () => {
            setIsPlaying(false);
            setUrlQueue((prevQ) => prevQ.slice(1));
          };
          setAudioElem(audio);
        }
      } catch (error) {
        console.error("Error playing Mp3 audio:", error);
      }
    };
    if (!isPlaying && urlQueue.length > 0) {
      playNAudio();
    }
  }, [urlQueue, isPlaying]);
   
  return (
  <div>
  <h1>Mastering Real-Time Audio: How to Live Stream Audio Blobs and Play Them in React.js</h1>
  <div> 
)
}

export default StreamAudio;

结论

按照以上步骤,您就可以在 React 应用程序中有效地处理音频流。该功能可增强语音对话式人工智能应用和音乐流媒体平台的用户体验,确保无缝和高质量的音频交互。

作者:Sandeep Lakhiwal

本文来自作者投稿,版权归原作者所有。如需转载,请注明出处:https://www.nxrte.com/jishu/49779.html

(0)

相关推荐

发表回复

登录后才能评论