generated from lks/eframe_template_android
Separate audio_fft.rs into two files audio.rs and fft.rs
parent 02c0f54079
commit 243fcdb31e
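The refactor splits the old AudioFFT into two cooperating halves: Audio owns the cpal input stream and forwards each callback buffer over an mpsc channel, while Fft keeps the sending end of that channel (tx), spawns a worker thread that batches samples into FFT-sized frames, and sends the transformed rows on to the waterfall. The sketch below is a rough, self-contained illustration of that producer/worker shape, not the project code: fake_fft stands in for realfft and the batching is simplified.

    use std::sync::mpsc::{self, Receiver, Sender};
    use std::thread;

    const FFT_SIZE: usize = 8;

    // Stand-in for the real transform; the project uses realfft's forward FFT.
    fn fake_fft(frame: &[f32]) -> Vec<f32> {
        frame.iter().map(|s| s.abs()).collect()
    }

    struct Fft {
        // Audio pushes raw sample buffers into this sender.
        tx: Sender<Vec<f32>>,
    }

    impl Fft {
        // Returns the FFT front-end plus a receiver for finished spectra.
        fn new(size: usize) -> (Self, Receiver<Vec<f32>>) {
            let (out_tx, out_rx) = mpsc::channel();
            let (in_tx, in_rx): (Sender<Vec<f32>>, Receiver<Vec<f32>>) = mpsc::channel();

            // Worker thread: accumulate `size` samples, transform, forward the result.
            thread::spawn(move || {
                let mut fft_in: Vec<f32> = Vec::with_capacity(size);
                while let Ok(samples) = in_rx.recv() {
                    for s in samples {
                        fft_in.push(s);
                        if fft_in.len() == size {
                            if out_tx.send(fake_fft(&fft_in)).is_err() {
                                return; // receiver gone, stop the worker
                            }
                            fft_in.clear();
                        }
                    }
                }
            });

            (Self { tx: in_tx }, out_rx)
        }
    }

    fn main() {
        let (fft, rx) = Fft::new(FFT_SIZE);

        // Stand-in for the cpal input callback in Audio: push a few sample buffers.
        let tx = fft.tx.clone();
        thread::spawn(move || {
            for chunk in 0..4usize {
                let buf: Vec<f32> = (0..FFT_SIZE).map(|i| (chunk * FFT_SIZE + i) as f32).collect();
                tx.send(buf).unwrap();
            }
        });

        for spectrum in rx.iter().take(4) {
            println!("spectrum row: {spectrum:?}");
        }
    }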
src/app.rs (18 changed lines)
@@ -6,8 +6,10 @@ pub mod debug_plot;
 use debug_plot::DebugPlots;
 mod waterfall;
 use waterfall::Waterfall;
-mod audio_fft;
-use audio_fft::AudioFFT;
+mod audio;
+use audio::Audio;
+mod fft;
+use fft::Fft;
 pub mod turbo_colormap;
 
 const FFT_SIZE: usize = 1024;
@@ -19,7 +21,8 @@ pub struct TemplateApp {
     value: f32,
     /// Behind an `Arc<Mutex<…>>` so we can pass it to [`egui::PaintCallback`] and paint later.
     waterfall: Arc<Mutex<Waterfall>>,
-    _stream: AudioFFT,
+    _stream: Audio,
+    _fft: Fft,
 }
 
 impl TemplateApp {
@@ -32,8 +35,12 @@ impl TemplateApp {
         // Note that you must enable the `persistence` feature for this to work.
 
         let plots = DebugPlots::new();
-        let (stream, rx) = AudioFFT::new(FFT_SIZE, plots.get_sender()).unwrap();
-        let wf_size = stream.output_len;
+
+        //let (stream, rx) = AudioFFT::new(FFT_SIZE, plots.get_sender()).unwrap();
+        let (fft, rx) = Fft::new(FFT_SIZE, plots.get_sender()).unwrap();
+        let stream = Audio::new(fft.tx.clone(), plots.get_sender()).unwrap();
+
+        let wf_size = fft.output_len;
         let gl = cc
             .gl
             .as_ref()
@@ -46,6 +53,7 @@ impl TemplateApp {
             value: 2.7,
             waterfall: Arc::new(Mutex::new(Waterfall::new(gl, wf_size, wf_size, rx))),
             _stream: stream,
+            _fft: fft,
         }
     }
 }
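Both halves are stored in TemplateApp (as _stream and _fft) so they live as long as the app, and the waterfall is sized from fft.output_len. Assuming output_len follows realfft's usual rule of N/2 + 1 bins for an N-point real transform, the 1024-point FFT gives 513 bins, so the waterfall texture here would be 513 x 513. A trivial check of that arithmetic:

    const FFT_SIZE: usize = 1024;

    fn main() {
        // Assumption: output_len follows realfft's N/2 + 1 rule for a real-input FFT.
        let output_len = FFT_SIZE / 2 + 1;
        assert_eq!(output_len, 513);
        // The waterfall above is created as wf_size x wf_size.
        println!("expected waterfall size: {output_len} x {output_len}");
    }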
src/app/audio.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
+use anyhow::{anyhow, Result};
+use cpal::{
+    self,
+    traits::{DeviceTrait, HostTrait},
+    BufferSize, StreamConfig,
+};
+use std::sync::mpsc::Sender;
+
+use super::debug_plot::PlotData;
+
+pub struct Audio {
+    pub stream: cpal::Stream,
+}
+
+impl Audio {
+    pub fn new(
+        fft_input: Sender<Vec<f32>>,
+        _plot_tx: Sender<(&'static str, PlotData)>,
+    ) -> Result<Self> {
+        // Setup audio input
+        let host = cpal::default_host();
+        let device = host
+            .default_input_device()
+            .ok_or(anyhow!("No input audio device found"))?;
+        // Basic config that 'should' be supported by most devices
+        let config = StreamConfig {
+            channels: 1,
+            sample_rate: cpal::SampleRate(44100),
+            buffer_size: BufferSize::Default,
+        };
+
+        let stream = device.build_input_stream(
+            &config,
+            move |data: &[f32], _: &cpal::InputCallbackInfo| {
+                fft_input.send(data.to_vec()).unwrap();
+            },
+            move |err| log::error!("Audio Thread Error: {err}"),
+            None,
+        )?;
+
+        Ok(Self { stream })
+    }
+}
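The new audio.rs only captures samples and hands them off; it no longer knows anything about the FFT. A standalone sketch of the same capture pattern (assuming a cpal 0.15-style build_input_stream signature and a default input device that accepts mono 44.1 kHz; the explicit play() call is an addition for running it outside the app):

    use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
    use cpal::{BufferSize, SampleRate, StreamConfig};
    use std::sync::mpsc;
    use std::time::Duration;

    fn main() -> anyhow::Result<()> {
        let host = cpal::default_host();
        let device = host
            .default_input_device()
            .ok_or_else(|| anyhow::anyhow!("No input audio device found"))?;

        // Same mono 44.1 kHz config the diff uses.
        let config = StreamConfig {
            channels: 1,
            sample_rate: SampleRate(44100),
            buffer_size: BufferSize::Default,
        };

        let (tx, rx) = mpsc::channel::<Vec<f32>>();
        let stream = device.build_input_stream(
            &config,
            move |data: &[f32], _: &cpal::InputCallbackInfo| {
                // Same hand-off as Audio::new: copy the callback buffer onto the channel.
                let _ = tx.send(data.to_vec());
            },
            |err| eprintln!("Audio stream error: {err}"),
            None,
        )?;
        // Start capture explicitly for this standalone run.
        stream.play()?;

        match rx.recv_timeout(Duration::from_secs(1)) {
            Ok(buf) => println!("got {} samples from the input device", buf.len()),
            Err(_) => eprintln!("no audio arrived within one second"),
        }
        Ok(())
    }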
src/app/audio_fft.rs → src/app/fft.rs
@@ -1,20 +1,15 @@
 use anyhow::{anyhow, Result};
-use cpal::{
-    self,
-    traits::{DeviceTrait, HostTrait},
-    BufferSize, StreamConfig,
-};
 use realfft::RealFftPlanner;
-use std::sync::mpsc::{self, Sender};
+use std::sync::mpsc::{self, Receiver, Sender};
 
 use super::debug_plot::PlotData;
 
-pub struct AudioFFT {
-    pub stream: cpal::Stream,
+pub struct Fft {
+    pub tx: Sender<Vec<f32>>,
     pub output_len: usize,
 }
 
-impl AudioFFT {
+impl Fft {
     pub fn new(
         size: usize,
         plot_tx: Sender<(&'static str, PlotData)>,
@@ -23,29 +18,19 @@ impl AudioFFT
 
         // Create mpsc queue
         let (tx, rx) = mpsc::channel();
+        let (in_tx, in_rx): (Sender<Vec<f32>>, Receiver<Vec<f32>>) = mpsc::channel();
 
         // Setup fft use f32 for now
         let mut fft_planner = RealFftPlanner::<f32>::new();
        let fft = fft_planner.plan_fft_forward(size);
 
-        // Setup audio input
-        let host = cpal::default_host();
-        let device = host
-            .default_input_device()
-            .ok_or(anyhow!("No input audio device found"))?;
-        // Basic config that 'should' be suppoted by most devices
-        let config = StreamConfig {
-            channels: 1,
-            sample_rate: cpal::SampleRate(44100),
-            buffer_size: BufferSize::Default,
-        };
-
         let mut fft_in: Vec<f32> = Vec::with_capacity(size);
         let mut fft_out = fft.make_output_vec();
         let mut fft_scratch = fft.make_scratch_vec();
-        let stream = device.build_input_stream(
-            &config,
-            move |mut data: &[f32], _: &cpal::InputCallbackInfo| {
+
+        std::thread::spawn(move || {
+            while let Ok(samples) = in_rx.recv() {
+                let mut data = samples.as_slice();
                 while data.fill_vec(&mut fft_in, size).is_ok() {
                     assert_eq!(size, fft_in.len());
                     fft.process_with_scratch(&mut fft_in, &mut fft_out, &mut fft_scratch)
@@ -67,12 +52,16 @@ impl AudioFFT
                         .unwrap();
                     tx.send(output).unwrap();
                 }
-            },
-            move |err| log::error!("Audio Thread Error: {err}"),
-            None,
-        )?;
+            }
+        });
 
-        Ok((Self { stream, output_len }, rx))
+        Ok((
+            Self {
+                tx: in_tx,
+                output_len,
+            },
+            rx,
+        ))
     }
 }
 
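fft.rs now receives sample buffers on in_rx instead of owning the audio stream, and the former input callback becomes the body of a spawned worker thread. The sketch below approximates one pass of that worker with plain Vec batching in place of the repo's fill_vec helper (which lives elsewhere in this module and is not shown in the diff), using realfft directly:

    use realfft::RealFftPlanner;

    fn main() {
        let size = 1024;
        let mut planner = RealFftPlanner::<f32>::new();
        let fft = planner.plan_fft_forward(size);

        let mut fft_in: Vec<f32> = Vec::with_capacity(size);
        let mut fft_out = fft.make_output_vec();
        let mut fft_scratch = fft.make_scratch_vec();

        // Pretend this buffer arrived from the audio channel.
        let samples: Vec<f32> = (0..2048).map(|i| (i as f32 * 0.05).sin()).collect();

        for &s in &samples {
            fft_in.push(s);
            if fft_in.len() == size {
                fft.process_with_scratch(&mut fft_in, &mut fft_out, &mut fft_scratch)
                    .unwrap();
                // One spectrum row: N/2 + 1 magnitude bins for the waterfall.
                let row: Vec<f32> = fft_out.iter().map(|c| c.norm()).collect();
                println!("row of {} bins, dc = {}", row.len(), row[0]);
                fft_in.clear();
            }
        }
    }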