[Raspberry Pi Zero W and ReSpeaker 2-Mic] Real-time audio waveform

Published: 2025-04-11

0. Environment

Ubuntu 22 host:        192.168.8.162
Raspberry Pi Zero W:   192.168.8.220
Both machines sit on the same LAN behind a router; a quick connectivity check is sketched just below.
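
Before starting the two scripts, it helps to confirm that the Pi can actually reach port 12345 on the PC. A minimal sketch (a hypothetical helper, check_link.py, assuming the receiver script from section 2 is already listening on the Ubuntu host):

# check_link.py - run on the Raspberry Pi
import socket

try:
    # raises OSError if 192.168.8.162:12345 cannot be reached within 3 s
    with socket.create_connection(('192.168.8.162', 12345), timeout=3):
        print("PC is reachable on port 12345")
except OSError as e:
    print("cannot reach the PC:", e)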
 

1. Raspberry Pi (sender)

# rpi - send.py
# Alternatively, from the command line:
#   arecord -D plughw:1,0 -t wav -f cd -r 16000 -c 2 | nc 192.168.8.162 12345

import socket
import pyaudio

# TCP connection to the PC that runs the receiver script
host = '192.168.8.162'
port = 12345

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))

RESPEAKER_RATE = 16000
RESPEAKER_CHANNELS = 2
RESPEAKER_WIDTH = 2      # bytes per sample (16-bit)
RESPEAKER_INDEX = 0      # input device id of the ReSpeaker 2-Mic HAT
CHUNK = 4096             # frames per buffer

p = pyaudio.PyAudio()

stream = p.open(
    rate=RESPEAKER_RATE,
    format=p.get_format_from_width(RESPEAKER_WIDTH),
    channels=RESPEAKER_CHANNELS,
    input=True,
    input_device_index=RESPEAKER_INDEX,
    frames_per_buffer=CHUNK,
)

try:
    while True:
        # read one interleaved stereo chunk and push it over the socket
        data = stream.read(CHUNK, exception_on_overflow=False)
        s.sendall(data)
        # print("data.hex()", data.hex())
        print("len(data)", len(data))
except KeyboardInterrupt:
    pass
finally:
    stream.stop_stream()
    stream.close()
    p.terminate()
    s.close()
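
The value of RESPEAKER_INDEX depends on how the system enumerates audio devices, so 0 may not be correct on every Pi. A minimal sketch (a hypothetical helper, list_devices.py) that lists all PyAudio input devices so the right index can be picked:

# list_devices.py - print every PyAudio input device and its index
import pyaudio

p = pyaudio.PyAudio()
for i in range(p.get_device_count()):
    info = p.get_device_info_by_index(i)
    if info['maxInputChannels'] > 0:
        print(i, info['name'], '-', int(info['maxInputChannels']), 'input channel(s)')
p.terminate()

On a ReSpeaker 2-Mic HAT the entry typically has "seeed" in its name; use that index for RESPEAKER_INDEX in send.py.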

2. PC (receiver)

# Ubuntu 22 host: receiver - recv2micfft.py
import threading
import time
import socket
import pyaudio
import numpy as np
import matplotlib.pyplot as plt

host = '192.168.8.162'
port = 12345

RESPEAKER_RATE = 16000
RESPEAKER_CHANNELS = 2
RESPEAKER_WIDTH = 2        # bytes per sample (16-bit)
CHUNK = 4096               # frames per block, must match the sender
RECORD_SECONDS = 2         # length of the rolling window shown in the plot

# rolling buffers for the two channels, shared between the receive and plot threads
np_ch1 = None
np_ch2 = None


def audio_stream():
    global np_ch1
    global np_ch2

    # listen for the Raspberry Pi and accept a single connection
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    print('server listening at', (host, port))
    s.listen(1)
    conn, addr = s.accept()
    print('connection from', addr)

    p = pyaudio.PyAudio()

    # playback stream on the default output device
    stream = p.open(
        rate=RESPEAKER_RATE,
        format=p.get_format_from_width(RESPEAKER_WIDTH),
        channels=RESPEAKER_CHANNELS,
        output=True,
        frames_per_buffer=CHUNK,
    )

    BYTES_PER_BLOCK = CHUNK * RESPEAKER_CHANNELS * RESPEAKER_WIDTH

    while True:
        # recv() may return less than requested, so accumulate a full block
        data = b''
        while len(data) < BYTES_PER_BLOCK:
            packet = conn.recv(BYTES_PER_BLOCK - len(data))
            if not packet:
                break
            data += packet
        if len(data) < BYTES_PER_BLOCK:
            break  # sender disconnected

        stream.write(data)

        # de-interleave the stereo samples: even indices = channel 1, odd = channel 2
        np_data1 = np.frombuffer(data, dtype=np.int16)[0::2]
        np_data2 = np.frombuffer(data, dtype=np.int16)[1::2]

        if np_ch1 is None:
            np_ch1 = np_data1
            np_ch2 = np_data2
        elif np_ch1.shape[0] / RESPEAKER_RATE < RECORD_SECONDS:
            np_ch1 = np.append(np_ch1, np_data1)
            np_ch2 = np.append(np_ch2, np_data2)
        else:
            # keep a rolling window: drop the oldest block, append the newest
            np_ch1 = np.append(np_ch1[len(np_data1):], np_data1)
            np_ch2 = np.append(np_ch2[len(np_data2):], np_data2)

    stream.stop_stream()
    stream.close()
    p.terminate()
    conn.close()
    s.close()


def fft_show():
    global np_ch1
    global np_ch2

    plt.ion()

    # two subplots, one per microphone channel
    f, (ax1, ax2) = plt.subplots(2, 1)

    try:
        while True:
            if np_ch1 is None:
                time.sleep(0.05)   # wait until the first audio block arrives
                continue

            # spectrum calculation (disabled, the plot below shows the raw waveform)
            # freq_data1 = np.abs(np.fft.fft(np_ch1))
            # freq_data2 = np.abs(np.fft.fft(np_ch2))

            # redraw both waveforms
            ax1.clear()
            ax1.plot(np_ch1)
            ax2.clear()
            ax2.plot(np_ch2)

            plt.draw()
            plt.pause(0.01)

    except KeyboardInterrupt:
        print("Real-time display stopped.")

    # close the plot window
    plt.ioff()
    plt.close()

t1 = threading.Thread(target=audio_stream, args=())   # receive + playback
t2 = threading.Thread(target=fft_show, args=())       # waveform display
t1.start()
t2.start()
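
Despite its name, fft_show() only plots the raw waveforms; the FFT lines are left commented out. If a magnitude spectrum is wanted instead, the plotting part of the loop could be replaced by something like the sketch below (same global buffers, using NumPy's real FFT; local snapshots are taken because the receive thread keeps updating the globals):

# inside the while loop of fft_show(), instead of plotting np_ch1 / np_ch2 directly
ch1, ch2 = np_ch1, np_ch2             # snapshot the shared buffers
mag1 = np.abs(np.fft.rfft(ch1))       # magnitude spectrum, channel 1
mag2 = np.abs(np.fft.rfft(ch2))       # magnitude spectrum, channel 2

ax1.clear()
ax1.plot(np.fft.rfftfreq(len(ch1), d=1.0 / RESPEAKER_RATE), mag1)
ax1.set_xlabel('Hz')
ax2.clear()
ax2.plot(np.fft.rfftfreq(len(ch2), d=1.0 / RESPEAKER_RATE), mag2)
ax2.set_xlabel('Hz')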









