Separate audio_fft.rs into two files audio.rs and fft.rs

Lucas Schumacher 2024-05-31 13:31:52 -04:00
parent 02c0f54079
commit 243fcdb31e
3 changed files with 74 additions and 34 deletions


@@ -6,8 +6,10 @@ pub mod debug_plot;
 use debug_plot::DebugPlots;
 mod waterfall;
 use waterfall::Waterfall;
-mod audio_fft;
-use audio_fft::AudioFFT;
+mod audio;
+use audio::Audio;
+mod fft;
+use fft::Fft;
 pub mod turbo_colormap;

 const FFT_SIZE: usize = 1024;
@@ -19,7 +21,8 @@ pub struct TemplateApp {
     value: f32,
     /// Behind an `Arc<Mutex<…>>` so we can pass it to [`egui::PaintCallback`] and paint later.
     waterfall: Arc<Mutex<Waterfall>>,
-    _stream: AudioFFT,
+    _stream: Audio,
+    _fft: Fft,
 }

 impl TemplateApp {
@@ -32,8 +35,12 @@ impl TemplateApp {
         // Note that you must enable the `persistence` feature for this to work.

         let plots = DebugPlots::new();
-        let (stream, rx) = AudioFFT::new(FFT_SIZE, plots.get_sender()).unwrap();
-        let wf_size = stream.output_len;
+        //let (stream, rx) = AudioFFT::new(FFT_SIZE, plots.get_sender()).unwrap();
+        let (fft, rx) = Fft::new(FFT_SIZE, plots.get_sender()).unwrap();
+        let stream = Audio::new(fft.tx.clone(), plots.get_sender()).unwrap();
+        let wf_size = fft.output_len;

         let gl = cc
             .gl
             .as_ref()
@@ -46,6 +53,7 @@ impl TemplateApp {
             value: 2.7,
             waterfall: Arc::new(Mutex::new(Waterfall::new(gl, wf_size, wf_size, rx))),
             _stream: stream,
+            _fft: fft,
         }
     }
 }

src/app/audio.rs (new file, 43 lines)

@@ -0,0 +1,43 @@
+use anyhow::{anyhow, Result};
+use cpal::{
+    self,
+    traits::{DeviceTrait, HostTrait},
+    BufferSize, StreamConfig,
+};
+use std::sync::mpsc::Sender;
+
+use super::debug_plot::PlotData;
+
+pub struct Audio {
+    pub stream: cpal::Stream,
+}
+
+impl Audio {
+    pub fn new(
+        fft_input: Sender<Vec<f32>>,
+        _plot_tx: Sender<(&'static str, PlotData)>,
+    ) -> Result<Self> {
+        // Setup audio input
+        let host = cpal::default_host();
+        let device = host
+            .default_input_device()
+            .ok_or(anyhow!("No input audio device found"))?;
+
+        // Basic config that 'should' be supported by most devices
+        let config = StreamConfig {
+            channels: 1,
+            sample_rate: cpal::SampleRate(44100),
+            buffer_size: BufferSize::Default,
+        };
+
+        let stream = device.build_input_stream(
+            &config,
+            move |data: &[f32], _: &cpal::InputCallbackInfo| {
+                fft_input.send(data.to_vec()).unwrap();
+            },
+            move |err| log::error!("Audio Thread Error: {err}"),
+            None,
+        )?;
+
+        Ok(Self { stream })
+    }
+}
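
With this split, the cpal data callback in audio.rs does nothing but forward each captured buffer over an std::sync::mpsc channel; all FFT work happens on the thread spawned in fft.rs below. A minimal, self-contained sketch of that hand-off pattern (plain std only, illustrative names, no cpal or realfft):

use std::sync::mpsc;
use std::thread;

fn main() {
    // Channel carrying captured sample blocks to the processing thread,
    // mirroring the `fft_input: Sender<Vec<f32>>` handed to Audio::new.
    let (tx, rx) = mpsc::channel::<Vec<f32>>();

    // Worker thread: keeps receiving blocks until every Sender is dropped.
    let worker = thread::spawn(move || {
        while let Ok(block) = rx.recv() {
            // Stand-in for the FFT: just report the block's peak amplitude.
            let peak = block.iter().fold(0.0f32, |m, s| m.max(s.abs()));
            println!("block of {} samples, peak {peak}", block.len());
        }
    });

    // Stand-in for the cpal data callback: forward each buffer and move on.
    for i in 0..3 {
        let buffer = vec![0.1 * i as f32; 1024];
        tx.send(buffer).unwrap();
    }

    drop(tx); // closing the channel ends the worker's recv loop
    worker.join().unwrap();
}

Since dropping every Sender ends the worker loop, TemplateApp holds the Audio stream and the Fft handle (with its tx) in the _stream and _fft fields for the lifetime of the app.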

src/app/fft.rs (renamed from src/app/audio_fft.rs)

@@ -1,20 +1,15 @@
 use anyhow::{anyhow, Result};
-use cpal::{
-    self,
-    traits::{DeviceTrait, HostTrait},
-    BufferSize, StreamConfig,
-};
 use realfft::RealFftPlanner;
-use std::sync::mpsc::{self, Sender};
+use std::sync::mpsc::{self, Receiver, Sender};

 use super::debug_plot::PlotData;

-pub struct AudioFFT {
-    pub stream: cpal::Stream,
+pub struct Fft {
+    pub tx: Sender<Vec<f32>>,
     pub output_len: usize,
 }

-impl AudioFFT {
+impl Fft {
     pub fn new(
         size: usize,
         plot_tx: Sender<(&'static str, PlotData)>,
@@ -23,29 +18,19 @@ impl AudioFFT {
         // Create mpsc queue
         let (tx, rx) = mpsc::channel();
+        let (in_tx, in_rx): (Sender<Vec<f32>>, Receiver<Vec<f32>>) = mpsc::channel();

         // Setup fft use f32 for now
         let mut fft_planner = RealFftPlanner::<f32>::new();
         let fft = fft_planner.plan_fft_forward(size);

-        // Setup audio input
-        let host = cpal::default_host();
-        let device = host
-            .default_input_device()
-            .ok_or(anyhow!("No input audio device found"))?;
-
-        // Basic config that 'should' be suppoted by most devices
-        let config = StreamConfig {
-            channels: 1,
-            sample_rate: cpal::SampleRate(44100),
-            buffer_size: BufferSize::Default,
-        };
-
         let mut fft_in: Vec<f32> = Vec::with_capacity(size);
         let mut fft_out = fft.make_output_vec();
         let mut fft_scratch = fft.make_scratch_vec();

-        let stream = device.build_input_stream(
-            &config,
-            move |mut data: &[f32], _: &cpal::InputCallbackInfo| {
+        std::thread::spawn(move || {
+            while let Ok(samples) = in_rx.recv() {
+                let mut data = samples.as_slice();
                 while data.fill_vec(&mut fft_in, size).is_ok() {
                     assert_eq!(size, fft_in.len());
                     fft.process_with_scratch(&mut fft_in, &mut fft_out, &mut fft_scratch)
@@ -67,12 +52,16 @@ impl AudioFFT {
                         .unwrap();

                     tx.send(output).unwrap();
                 }
-            },
-            move |err| log::error!("Audio Thread Error: {err}"),
-            None,
-        )?;
+            }
+        });

-        Ok((Self { stream, output_len }, rx))
+        Ok((
+            Self {
+                tx: in_tx,
+                output_len,
+            },
+            rx,
+        ))
     }
 }
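
The worker loop above is realfft's usual plan/process cycle applied to each received block. A minimal, self-contained sketch of what one pass computes (assumes the realfft crate as a dependency; the test-tone input and printed bin are illustrative, where the real code feeds captured audio and forwards the result over tx):

use realfft::RealFftPlanner;

fn main() {
    let size = 1024;

    // Plan a forward real-to-complex FFT once, then reuse it per block.
    let mut planner = RealFftPlanner::<f32>::new();
    let fft = planner.plan_fft_forward(size);

    // Input block (here a synthetic tone in bin 100), plus the output and
    // scratch buffers realfft sizes for us: output holds size / 2 + 1 bins.
    let mut fft_in: Vec<f32> = (0..size)
        .map(|n| (2.0 * std::f32::consts::PI * 100.0 * n as f32 / size as f32).sin())
        .collect();
    let mut fft_out = fft.make_output_vec();
    let mut fft_scratch = fft.make_scratch_vec();

    fft.process_with_scratch(&mut fft_in, &mut fft_out, &mut fft_scratch).unwrap();

    // Magnitude spectrum, as a waterfall row might consume it.
    let magnitudes: Vec<f32> = fft_out.iter().map(|c| c.norm()).collect();
    assert_eq!(magnitudes.len(), size / 2 + 1);
    println!("bin 100 magnitude: {}", magnitudes[100]);
}

That size / 2 + 1 output length is presumably what Fft exposes as output_len, which app.rs now reads (wf_size = fft.output_len) to size the waterfall.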